diff --git a/ql/src/test/queries/clientpositive/acid_nullscan.q b/ql/src/test/queries/clientpositive/acid_nullscan.q
index 0d8ca2a240..1447ae4483 100644
--- a/ql/src/test/queries/clientpositive/acid_nullscan.q
+++ b/ql/src/test/queries/clientpositive/acid_nullscan.q
@@ -7,13 +7,13 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;

-CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;
-insert into table acid_vectorized values (1, 'bar');
+CREATE TABLE acid_vectorized_n1(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
+insert into table acid_vectorized_n1 select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;
+insert into table acid_vectorized_n1 values (1, 'bar');

 explain extended
-select sum(a) from acid_vectorized where false;
+select sum(a) from acid_vectorized_n1 where false;

-select sum(a) from acid_vectorized where false;
+select sum(a) from acid_vectorized_n1 where false;
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_original.q b/ql/src/test/queries/clientpositive/acid_vectorization_original.q
index 0b91f6901a..5082aedf90 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_original.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_original.q
@@ -26,7 +26,7 @@ CREATE TEMPORARY FUNCTION runWorker AS 'org.apache.hadoop.hive.ql.udf.UDFRunWork
 create table mydual(a int);
 insert into mydual values(1);

-CREATE TABLE over10k(t tinyint,
+CREATE TABLE over10k_n2(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -41,7 +41,7 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;

 --oddly this has 9999 rows not > 10K
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k_n2;

 CREATE TABLE over10k_orc_bucketed(t tinyint,
            si smallint,
@@ -56,14 +56,14 @@ CREATE TABLE over10k_orc_bucketed(t tinyint,
            bin binary) CLUSTERED BY(si) INTO 4 BUCKETS STORED AS ORC;

 -- this produces about 250 distinct values across all 4 equivalence classes
-select distinct si, si%4 from over10k order by si;
+select distinct si, si%4 from over10k_n2 order by si;
--- explain insert into over10k_orc_bucketed select * from over10k;
-insert into over10k_orc_bucketed select * from over10k;
+-- explain insert into over10k_orc_bucketed select * from over10k_n2;
+insert into over10k_orc_bucketed select * from over10k_n2;

 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;

 -- create copy_N files
-insert into over10k_orc_bucketed select * from over10k;
+insert into over10k_orc_bucketed select * from over10k_n2;

 -- this output of this is masked in .out - it is visible in .orig
 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q b/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
index 00a3ab2ce5..bd4bb51c20 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
@@ -23,10 +23,10 @@ set hive.explain.user=false;
 CREATE TEMPORARY FUNCTION runWorker AS 'org.apache.hadoop.hive.ql.udf.UDFRunWorker';
-create table mydual(a int);
-insert into mydual values(1);
+create table mydual_n0(a int);
+insert into mydual_n0 values(1);

-CREATE TABLE over10k(t tinyint,
+CREATE TABLE over10k_n9(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -41,9 +41,9 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;

 --oddly this has 9999 rows not > 10K
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k_n9;

-CREATE TABLE over10k_orc_bucketed(t tinyint,
+CREATE TABLE over10k_orc_bucketed_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -56,42 +56,42 @@ CREATE TABLE over10k_orc_bucketed(t tinyint,
            bin binary) CLUSTERED BY(si) INTO 4 BUCKETS STORED AS ORC;

 -- this produces about 250 distinct values across all 4 equivalence classes
-select distinct si, si%4 from over10k order by si;
+select distinct si, si%4 from over10k_n9 order by si;
--- explain insert into over10k_orc_bucketed select * from over10k;
-insert into over10k_orc_bucketed select * from over10k;
+-- explain insert into over10k_orc_bucketed_n0 select * from over10k_n9;
+insert into over10k_orc_bucketed_n0 select * from over10k_n9;

 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;

 -- create copy_N files
-insert into over10k_orc_bucketed select * from over10k;
+insert into over10k_orc_bucketed_n0 select * from over10k_n9;

 -- this output of this is masked in .out - it is visible in .orig
 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;

 --this actually shows the data files in the .out on Tez but not LLAP
-select distinct 7 as seven, INPUT__FILE__NAME from over10k_orc_bucketed;
+select distinct 7 as seven, INPUT__FILE__NAME from over10k_orc_bucketed_n0;

 -- convert table to acid
-alter table over10k_orc_bucketed set TBLPROPERTIES ('transactional'='true');
+alter table over10k_orc_bucketed_n0 set TBLPROPERTIES ('transactional'='true');

 -- this should vectorize (and push predicate to storage: filterExpr in TableScan )
 -- Execution mode: vectorized (both Map and Reducer)
-explain select t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by t, si, i;
-select t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by t, si, i;
+explain select t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by t, si, i;
+select t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by t, si, i;

 -- this should vectorize (and push predicate to storage: filterExpr in TableScan )
 -- Execution mode: vectorized
-explain select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID;
-select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID;
+explain select ROW__ID, t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by ROW__ID;
+select ROW__ID, t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by ROW__ID;

 -- this should vectorize (and push predicate to storage: filterExpr in TableScan )
 -- same as above
-explain update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100;
-update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100;
+explain update over10k_orc_bucketed_n0 set i = 0 where b = 4294967363 and t < 100;
+update over10k_orc_bucketed_n0 set i = 0 where b = 4294967363 and t < 100;

 -- this should produce the same result (data) as previous time this exact query ran
 -- ROW__ID will be different (same bucketProperty)
-select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID;
+select ROW__ID, t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by ROW__ID;

 -- The idea below was to do check sum queries to ensure that ROW__IDs are unique
 -- to run Compaction and to check that ROW__IDs are the same before and after compaction (for rows
@@ -100,23 +100,23 @@ select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t <

 -- this doesn't vectorize -- use explain VECTORIZATION DETAIL to see
 -- notVectorizedReason: Key expression for GROUPBY operator: Vectorizing complex type STRUCT not supported
-explain select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1;
+explain select ROW__ID, count(*) from over10k_orc_bucketed_n0 group by ROW__ID having count(*) > 1;
 -- this test that there are no duplicate ROW__IDs so should produce no output
-select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1;
+select ROW__ID, count(*) from over10k_orc_bucketed_n0 group by ROW__ID having count(*) > 1;

 -- schedule compactor
-alter table over10k_orc_bucketed compact 'major' WITH OVERWRITE TBLPROPERTIES ('compactor.mapreduce.map.memory.mb'='500', 'compactor.mapreduce.reduce.memory.mb'='500','compactor.mapreduce.map.memory.mb'='500', 'compactor.hive.tez.container.size'='500');
+alter table over10k_orc_bucketed_n0 compact 'major' WITH OVERWRITE TBLPROPERTIES ('compactor.mapreduce.map.memory.mb'='500', 'compactor.mapreduce.reduce.memory.mb'='500','compactor.mapreduce.map.memory.mb'='500', 'compactor.hive.tez.container.size'='500');

 -- run compactor - this currently fails with
 -- Invalid resource request, requested memory < 0, or requested memory > max configured, requestedMemory=1536, maxMemory=512
--- select runWorker() from mydual;
+-- select runWorker() from mydual_n0;
 -- show compactions;

 -- this should produce the same (data + ROW__ID) as before compaction
-select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID;
+select ROW__ID, t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by ROW__ID;

 -- this test that there are no duplicate ROW__IDs so should produce no output
-select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1;
+select ROW__ID, count(*) from over10k_orc_bucketed_n0 group by ROW__ID having count(*) > 1;
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_project.q b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
index a726470135..526a3bc47b 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_project.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
@@ -5,9 +5,9 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.exec.dynamic.partition.mode=nonstrict;

-CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table acid_vectorized select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10;
+CREATE TABLE acid_vectorized_n2(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
+insert into table acid_vectorized_n2 select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10;
 set hive.vectorized.execution.enabled=true;
-select a,b from acid_vectorized order by a;
-select a,c from acid_vectorized order by a;
-select b,c from acid_vectorized order by b;
+select a,b from acid_vectorized_n2 order by a;
+select a,c from acid_vectorized_n2 order by a;
+select b,c from acid_vectorized_n2 order by b;
diff --git a/ql/src/test/queries/clientpositive/add_part_multiple.q b/ql/src/test/queries/clientpositive/add_part_multiple.q
index 9403368396..5059d60796 100644
--- a/ql/src/test/queries/clientpositive/add_part_multiple.q
+++ b/ql/src/test/queries/clientpositive/add_part_multiple.q
@@ -2,25 +2,25 @@ set hive.mapred.mode=nonstrict;
 -- HIVE-5122 locations for 2nd, 3rd... partition are ignored

-CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING);
+CREATE TABLE add_part_test_n1 (key STRING, value STRING) PARTITIONED BY (ds STRING);

 explain
-ALTER TABLE add_part_test ADD IF NOT EXISTS
+ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS
 PARTITION (ds='2010-01-01') location 'A'
 PARTITION (ds='2010-02-01') location 'B'
 PARTITION (ds='2010-03-01')
 PARTITION (ds='2010-04-01') location 'C';

-ALTER TABLE add_part_test ADD IF NOT EXISTS
+ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS
 PARTITION (ds='2010-01-01') location 'A'
 PARTITION (ds='2010-02-01') location 'B'
 PARTITION (ds='2010-03-01')
 PARTITION (ds='2010-04-01') location 'C';

 from src TABLESAMPLE (1 ROWS)
-insert into table add_part_test PARTITION (ds='2010-01-01') select 100,100
-insert into table add_part_test PARTITION (ds='2010-02-01') select 200,200
-insert into table add_part_test PARTITION (ds='2010-03-01') select 400,300
-insert into table add_part_test PARTITION (ds='2010-04-01') select 500,400;
+insert into table add_part_test_n1 PARTITION (ds='2010-01-01') select 100,100
+insert into table add_part_test_n1 PARTITION (ds='2010-02-01') select 200,200
+insert into table add_part_test_n1 PARTITION (ds='2010-03-01') select 400,300
+insert into table add_part_test_n1 PARTITION (ds='2010-04-01') select 500,400;

-select * from add_part_test;
+select * from add_part_test_n1;
diff --git a/ql/src/test/queries/clientpositive/allow_change_col_type_par.q b/ql/src/test/queries/clientpositive/allow_change_col_type_par.q
index dba4e70ccc..aad63705f7 100644
--- a/ql/src/test/queries/clientpositive/allow_change_col_type_par.q
+++ b/ql/src/test/queries/clientpositive/allow_change_col_type_par.q
@@ -1,5 +1,5 @@
 -- Setup
-create table t1 (c1 int);
+create table t1_n14 (c1 int);

 -- Check value of parameter
 set hive.metastore.disallow.incompatible.col.type.changes;
@@ -13,4 +13,4 @@ set hive.metastore.disallow.incompatible.col.type.changes;
 set metaconf:hive.metastore.disallow.incompatible.col.type.changes;

 -- Change int to small int now allowed.
-alter table t1 change column c1 c1 smallint;
+alter table t1_n14 change column c1 c1 smallint;
diff --git a/ql/src/test/queries/clientpositive/alter5.q b/ql/src/test/queries/clientpositive/alter5.q
index 62a73a40b1..5abd14f075 100644
--- a/ql/src/test/queries/clientpositive/alter5.q
+++ b/ql/src/test/queries/clientpositive/alter5.q
@@ -5,24 +5,24 @@ create table alter5_src ( col1 string ) stored as textfile ;
 load data local inpath '../../data/files/test.dat' overwrite into table alter5_src ;

-create table alter5 ( col1 string ) partitioned by (dt string);
+create table alter5_n1 ( col1 string ) partitioned by (dt string);

 --
 -- Here's the interesting bit for HIVE-2117 - partition subdir should be
 -- named "parta".
 --
-alter table alter5 add partition (dt='a') location 'parta';
+alter table alter5_n1 add partition (dt='a') location 'parta';

-describe extended alter5 partition (dt='a');
+describe extended alter5_n1 partition (dt='a');

-insert overwrite table alter5 partition (dt='a') select col1 from alter5_src ;
-select * from alter5 where dt='a';
+insert overwrite table alter5_n1 partition (dt='a') select col1 from alter5_src ;
+select * from alter5_n1 where dt='a';

-describe extended alter5 partition (dt='a');
+describe extended alter5_n1 partition (dt='a');

 -- Cleanup
 DROP TABLE alter5_src;
-DROP TABLE alter5;
+DROP TABLE alter5_n1;
 SHOW TABLES LIKE "alter*";

 -- With non-default Database
@@ -34,12 +34,12 @@ SHOW TABLES;
 create table alter5_src ( col1 string ) stored as textfile ;
 load data local inpath '../../data/files/test.dat' overwrite into table alter5_src ;

-create table alter5 ( col1 string ) partitioned by (dt string);
-alter table alter5 add partition (dt='a') location 'parta';
+create table alter5_n1 ( col1 string ) partitioned by (dt string);
+alter table alter5_n1 add partition (dt='a') location 'parta';

-describe extended alter5 partition (dt='a');
+describe extended alter5_n1 partition (dt='a');

-insert overwrite table alter5 partition (dt='a') select col1 from alter5_src ;
-select * from alter5 where dt='a';
+insert overwrite table alter5_n1 partition (dt='a') select col1 from alter5_src ;
+select * from alter5_n1 where dt='a';

-describe extended alter5 partition (dt='a');
+describe extended alter5_n1 partition (dt='a');
diff --git a/ql/src/test/queries/clientpositive/alterColumnStats.q b/ql/src/test/queries/clientpositive/alterColumnStats.q
index 4456088327..7319be7037 100644
--- a/ql/src/test/queries/clientpositive/alterColumnStats.q
+++ b/ql/src/test/queries/clientpositive/alterColumnStats.q
@@ -1,21 +1,21 @@
 set hive.mapred.mode=nonstrict;

-drop table p;
+drop table p_n0;

-CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint);
+CREATE TABLE p_n0(insert_num int, c1 tinyint, c2 smallint);

-desc formatted p;
+desc formatted p_n0;

-insert into p values (1,22,333);
+insert into p_n0 values (1,22,333);

-desc formatted p;
+desc formatted p_n0;

-alter table p replace columns (insert_num int, c1 STRING, c2 STRING);
+alter table p_n0 replace columns (insert_num int, c1 STRING, c2 STRING);

-desc formatted p;
+desc formatted p_n0;

-desc formatted p c1;
+desc formatted p_n0 c1;

-desc formatted p c2;
+desc formatted p_n0 c2;
diff --git a/ql/src/test/queries/clientpositive/alter_change_db_location.q b/ql/src/test/queries/clientpositive/alter_change_db_location.q
index a74ba2ae59..203334645f 100644
--- a/ql/src/test/queries/clientpositive/alter_change_db_location.q
+++ b/ql/src/test/queries/clientpositive/alter_change_db_location.q
@@ -1,5 +1,5 @@
 create database newDB location "/tmp/";
 describe database extended newDB;
 use newDB;
-create table tab (name string);
-alter table tab rename to newName;
+create table tab_n13 (name string);
+alter table tab_n13 rename to newName;
diff --git a/ql/src/test/queries/clientpositive/alter_file_format.q b/ql/src/test/queries/clientpositive/alter_file_format.q
index eb1f6bfd3f..54e94ae6d3 100644
--- a/ql/src/test/queries/clientpositive/alter_file_format.q
+++ b/ql/src/test/queries/clientpositive/alter_file_format.q
@@ -19,27 +19,27 @@ desc FORMATTED alter_file_format_test;
 ALTER TABLE alter_file_format_test SET FILEFORMAT INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat' SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
 desc FORMATTED alter_file_format_test;

-drop table alter_partition_format_test;
+drop table alter_partition_format_test_n0;

 --partitioned table
-create table alter_partition_format_test (key int, value string) partitioned by (ds string);
+create table alter_partition_format_test_n0 (key int, value string) partitioned by (ds string);

-alter table alter_partition_format_test add partition(ds='2010');
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 add partition(ds='2010');
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');

-alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat rcfile;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');

-alter table alter_partition_format_test partition(ds='2010') set fileformat textfile;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat textfile;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');

-alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat rcfile;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');

-alter table alter_partition_format_test partition(ds='2010') set fileformat sequencefile;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat sequencefile;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');

-alter table alter_partition_format_test partition(ds='2010') set fileformat parquet;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat parquet;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');

-drop table alter_partition_format_test;
\ No newline at end of file
+drop table alter_partition_format_test_n0;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/alter_merge.q b/ql/src/test/queries/clientpositive/alter_merge.q
index 1839005517..b00ce45bc5 100644
--- a/ql/src/test/queries/clientpositive/alter_merge.q
+++ b/ql/src/test/queries/clientpositive/alter_merge.q
@@ -1,44 +1,44 @@
 set hive.strict.checks.bucketing=false;
 set hive.mapred.mode=nonstrict;
-create table src_rc_merge_test(key int, value string) stored as rcfile;
+create table src_rc_merge_test_n2(key int, value string) stored as rcfile;

-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test;
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test;
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test;
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n2;
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_n2;
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_n2;

-show table extended like `src_rc_merge_test`;
+show table extended like `src_rc_merge_test_n2`;

-select count(1) from src_rc_merge_test;
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
+select count(1) from src_rc_merge_test_n2;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_n2;

-alter table src_rc_merge_test concatenate;
+alter table src_rc_merge_test_n2 concatenate;

-show table extended like `src_rc_merge_test`;
+show table extended like `src_rc_merge_test_n2`;

-select count(1) from src_rc_merge_test;
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
+select count(1) from src_rc_merge_test_n2;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_n2;

-create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile;
+create table src_rc_merge_test_part_n0(key int, value string) partitioned by (ds string) stored as rcfile;

-alter table src_rc_merge_test_part add partition (ds='2011');
+alter table src_rc_merge_test_part_n0 add partition (ds='2011');

-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011');
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011');
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_n0 partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_n0 partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_n0 partition (ds='2011');

-show table extended like `src_rc_merge_test_part` partition (ds='2011');
+show table extended like `src_rc_merge_test_part_n0` partition (ds='2011');

-select count(1) from src_rc_merge_test_part;
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part;
+select count(1) from src_rc_merge_test_part_n0;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n0;

-alter table src_rc_merge_test_part partition (ds='2011') concatenate;
+alter table src_rc_merge_test_part_n0 partition (ds='2011') concatenate;

-show table extended like `src_rc_merge_test_part` partition (ds='2011');
+show table extended like `src_rc_merge_test_part_n0` partition (ds='2011');

-select count(1) from src_rc_merge_test_part;
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part;
+select count(1) from src_rc_merge_test_part_n0;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n0;

-drop table src_rc_merge_test;
-drop table src_rc_merge_test_part;
\ No newline at end of file
+drop table src_rc_merge_test_n2;
+drop table src_rc_merge_test_part_n0;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/alter_merge_2.q b/ql/src/test/queries/clientpositive/alter_merge_2.q
index b0dd56c6d2..8d36fdfa43 100644
--- a/ql/src/test/queries/clientpositive/alter_merge_2.q
+++ b/ql/src/test/queries/clientpositive/alter_merge_2.q
@@ -1,21 +1,21 @@
 set hive.strict.checks.bucketing=false;

-create table src_rc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as rcfile;
+create table src_rc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as rcfile;

-alter table src_rc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-desc extended src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+alter table src_rc_merge_test_part_n1 add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+desc extended src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');

-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');

-select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+select count(1) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';

-alter table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate;
+alter table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate;

-select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+select count(1) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';

-drop table src_rc_merge_test_part;
+drop table src_rc_merge_test_part_n1;
diff --git a/ql/src/test/queries/clientpositive/alter_merge_orc.q b/ql/src/test/queries/clientpositive/alter_merge_orc.q
index f88b2a48ba..143d33a206 100644
--- a/ql/src/test/queries/clientpositive/alter_merge_orc.q
+++ b/ql/src/test/queries/clientpositive/alter_merge_orc.q
@@ -20,25 +20,25 @@ select count(1) from src_orc_merge_test;
 select sum(hash(key)), sum(hash(value)) from src_orc_merge_test;

-create table src_orc_merge_test_part(key int, value string) partitioned by (ds string) stored as orc;
+create table src_orc_merge_test_part_n2(key int, value string) partitioned by (ds string) stored as orc;

-alter table src_orc_merge_test_part add partition (ds='2011');
+alter table src_orc_merge_test_part_n2 add partition (ds='2011');

-insert overwrite table src_orc_merge_test_part partition (ds='2011') select * from src;
-insert into table src_orc_merge_test_part partition (ds='2011') select * from src;
-insert into table src_orc_merge_test_part partition (ds='2011') select * from src;
+insert overwrite table src_orc_merge_test_part_n2 partition (ds='2011') select * from src;
+insert into table src_orc_merge_test_part_n2 partition (ds='2011') select * from src;
+insert into table src_orc_merge_test_part_n2 partition (ds='2011') select * from src;

-show table extended like `src_orc_merge_test_part` partition (ds='2011');
+show table extended like `src_orc_merge_test_part_n2` partition (ds='2011');

-select count(1) from src_orc_merge_test_part;
-select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part;
+select count(1) from src_orc_merge_test_part_n2;
+select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n2;

-alter table src_orc_merge_test_part partition (ds='2011') concatenate;
+alter table src_orc_merge_test_part_n2 partition (ds='2011') concatenate;

-show table extended like `src_orc_merge_test_part` partition (ds='2011');
+show table extended like `src_orc_merge_test_part_n2` partition (ds='2011');

-select count(1) from src_orc_merge_test_part;
-select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part;
+select count(1) from src_orc_merge_test_part_n2;
+select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n2;

 drop table src_orc_merge_test;
-drop table src_orc_merge_test_part;
+drop table src_orc_merge_test_part_n2;
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
index 1aa64011c7..a15dcbf27d 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
@@ -2,84 +2,84 @@ -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
 -- the partition metadata is updated as well.

-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
+CREATE TABLE tst1_n0(key STRING, value STRING) PARTITIONED BY (ds STRING);

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

 -- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (key) INTO 8 BUCKETS;

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

 -- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

 -- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS;

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

 -- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS;

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

 -- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS;

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

 -- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS;

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

 -- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) INTO 4 BUCKETS;

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

 -- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED;
+ALTER TABLE tst1_n0 NOT CLUSTERED;

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
index d214715a75..163ca8f2af 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
@@ -1,60 +1,60 @@
 --! qt:dataset:src
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
+create table tst1_n1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;

-alter table tst1 clustered by (key) into 8 buckets;
+alter table tst1_n1 clustered by (key) into 8 buckets;

-describe formatted tst1;
+describe formatted tst1_n1;

-insert overwrite table tst1 partition (ds='1') select key, value from src;
+insert overwrite table tst1_n1 partition (ds='1') select key, value from src;

-describe formatted tst1 partition (ds = '1');
+describe formatted tst1_n1 partition (ds = '1');

 -- Test changing bucket number
-alter table tst1 clustered by (key) into 12 buckets;
+alter table tst1_n1 clustered by (key) into 12 buckets;

-insert overwrite table tst1 partition (ds='1') select key, value from src;
+insert overwrite table tst1_n1 partition (ds='1') select key, value from src;

-describe formatted tst1 partition (ds = '1');
+describe formatted tst1_n1 partition (ds = '1');

-describe formatted tst1;
+describe formatted tst1_n1;

 -- Test changing bucket number of (table/partition)
-alter table tst1 into 4 buckets;
+alter table tst1_n1 into 4 buckets;

-describe formatted tst1;
+describe formatted tst1_n1;

-describe formatted tst1 partition (ds = '1');
+describe formatted tst1_n1 partition (ds = '1');

-alter table tst1 partition (ds = '1') into 6 buckets;
+alter table tst1_n1 partition (ds = '1') into 6 buckets;

-describe formatted tst1;
+describe formatted tst1_n1;

-describe formatted tst1 partition (ds = '1');
+describe formatted tst1_n1 partition (ds = '1');

 -- Test adding sort order
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
+alter table tst1_n1 clustered by (key) sorted by (key asc) into 12 buckets;

-describe formatted tst1;
+describe formatted tst1_n1;

 -- Test changing sort order
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;
+alter table tst1_n1 clustered by (key) sorted by (value desc) into 12 buckets;

-describe formatted tst1;
+describe formatted tst1_n1;

 -- Test removing test order
-alter table tst1 clustered by (value) into 12 buckets;
+alter table tst1_n1 clustered by (value) into 12 buckets;

-describe formatted tst1;
+describe formatted tst1_n1;

 -- Test removing buckets
-alter table tst1 not clustered;
+alter table tst1_n1 not clustered;

-describe formatted tst1;
+describe formatted tst1_n1;
diff --git a/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q b/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q
index 354750368f..9609c6e768 100644
--- a/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q
+++ b/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q
@@ -2,9 +2,9 @@ SET hive.metastore.partition.name.whitelist.pattern=[A-Za-z]*; -- This pattern matches only letters.

-CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING);
-SHOW PARTITIONS part_whitelist_test;
+CREATE TABLE part_whitelist_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING);
+SHOW PARTITIONS part_whitelist_test_n0;

-ALTER TABLE part_whitelist_test ADD PARTITION (ds='Part');
+ALTER TABLE part_whitelist_test_n0 ADD PARTITION (ds='Part');

-ALTER TABLE part_whitelist_test PARTITION (ds='Part') rename to partition (ds='Apart');
+ALTER TABLE part_whitelist_test_n0 PARTITION (ds='Part') rename to partition (ds='Apart');
diff --git a/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q b/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q
index 5ffcaf0bf0..a00cc394ba 100644
--- a/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q
+++ b/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q
@@ -2,23 +2,23 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
 -- SORT_BEFORE_DIFF

-create table src_auth_tmp as select * from src;
+create table src_auth_tmp_n1 as select * from src;

-create table authorization_part (key int, value string) partitioned by (ds string);
-ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE");
+create table authorization_part_n1 (key int, value string) partitioned by (ds string);
+ALTER TABLE authorization_part_n1 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE");
 set hive.security.authorization.enabled=true;

-grant select on table src_auth_tmp to user hive_test_user;
+grant select on table src_auth_tmp_n1 to user hive_test_user;

 -- column grant to user
-grant Create on table authorization_part to user hive_test_user;
-grant Update on table authorization_part to user hive_test_user;
-grant Drop on table authorization_part to user hive_test_user;
+grant Create on table authorization_part_n1 to user hive_test_user;
+grant Update on table authorization_part_n1 to user hive_test_user;
+grant Drop on table authorization_part_n1 to user hive_test_user;

-show grant user hive_test_user on table authorization_part;
-grant select(key) on table authorization_part to user hive_test_user;
-insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp;
-show grant user hive_test_user on table authorization_part(key) partition (ds='2010');
-alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp');
-show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp');
+show grant user hive_test_user on table authorization_part_n1;
+grant select(key) on table authorization_part_n1 to user hive_test_user;
+insert overwrite table authorization_part_n1 partition (ds='2010') select key, value from src_auth_tmp_n1;
+show grant user hive_test_user on table authorization_part_n1(key) partition (ds='2010');
+alter table authorization_part_n1 partition (ds='2010') rename to partition (ds='2010_tmp');
+show grant user hive_test_user on table authorization_part_n1(key) partition (ds='2010_tmp');

-drop table authorization_part;
+drop table authorization_part_n1;
diff --git a/ql/src/test/queries/clientpositive/alter_table_serde.q b/ql/src/test/queries/clientpositive/alter_table_serde.q
index a80693e7c5..3523f21658 100644
--- a/ql/src/test/queries/clientpositive/alter_table_serde.q
+++ b/ql/src/test/queries/clientpositive/alter_table_serde.q
@@ -1,33 +1,33 @@
 -- test table
-create table test_table (id int, query string, name string);
-describe extended test_table;
+create table test_table_n1 (id int, query string, name string);
+describe extended test_table_n1;

-alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-describe extended test_table;
+alter table test_table_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+describe extended test_table_n1;

-alter table test_table set serdeproperties ('field.delim' = ',');
-describe extended test_table;
+alter table test_table_n1 set serdeproperties ('field.delim' = ',');
+describe extended test_table_n1;

-drop table test_table;
+drop table test_table_n1;

 --- test partitioned table
-create table test_table (id int, query string, name string) partitioned by (dt string);
+create table test_table_n1 (id int, query string, name string) partitioned by (dt string);

-alter table test_table add partition (dt = '2011');
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 add partition (dt = '2011');
+describe extended test_table_n1 partition (dt='2011');

-alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+describe extended test_table_n1 partition (dt='2011');

-alter table test_table set serdeproperties ('field.delim' = ',');
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 set serdeproperties ('field.delim' = ',');
+describe extended test_table_n1 partition (dt='2011');

 -- test partitions
-alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+describe extended test_table_n1 partition (dt='2011');

-alter table test_table partition(dt='2011') set serdeproperties ('field.delim' = ',');
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 partition(dt='2011') set serdeproperties ('field.delim' = ',');
+describe extended test_table_n1 partition (dt='2011');

-drop table test_table
+drop table test_table_n1
diff --git a/ql/src/test/queries/clientpositive/alter_table_serde2.q b/ql/src/test/queries/clientpositive/alter_table_serde2.q
index b7717f9751..9d3048b3f1 100644
--- a/ql/src/test/queries/clientpositive/alter_table_serde2.q
+++ b/ql/src/test/queries/clientpositive/alter_table_serde2.q
@@ -2,20 +2,20 @@ -- Tests that when overwriting a partition in a table after altering the serde properties
 -- the partition metadata is updated as well.

-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
+CREATE TABLE tst1_n5(key STRING, value STRING) PARTITIONED BY (ds STRING);

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n5;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n5 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n5 PARTITION (ds = '1');

 -- Test altering the serde properties
-ALTER TABLE tst1 SET SERDEPROPERTIES ('field.delim' = ',');
+ALTER TABLE tst1_n5 SET SERDEPROPERTIES ('field.delim' = ',');

-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n5;

-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n5 PARTITION (ds = '1') SELECT key, value FROM src;

-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n5 PARTITION (ds = '1');
diff --git a/ql/src/test/queries/clientpositive/alter_table_stats_status.q b/ql/src/test/queries/clientpositive/alter_table_stats_status.q
index 9101f30023..6a7bb0e92b 100644
--- a/ql/src/test/queries/clientpositive/alter_table_stats_status.q
+++ b/ql/src/test/queries/clientpositive/alter_table_stats_status.q
@@ -26,25 +26,25 @@ describe formatted statstable;

 drop table statstable;

-create table srcpart like default.srcpart;
-load data local inpath '../../data/files/kv1.txt' overwrite into table srcpart partition (ds='2008-04-08', hr='11');
-load data local inpath '../../data/files/kv1.txt' overwrite into table srcpart partition (ds='2008-04-08', hr='12');
+create table srcpart_n0 like default.srcpart;
+load data local inpath '../../data/files/kv1.txt' overwrite into table srcpart_n0 partition (ds='2008-04-08', hr='11');
+load data local inpath '../../data/files/kv1.txt' overwrite into table srcpart_n0 partition (ds='2008-04-08', hr='12');

-analyze table srcpart partition (ds='2008-04-08', hr='11') compute statistics;
-describe formatted srcpart partition (ds='2008-04-08', hr='11');
+analyze table srcpart_n0 partition (ds='2008-04-08', hr='11') compute statistics;
+describe formatted srcpart_n0 partition (ds='2008-04-08', hr='11');

-alter table srcpart touch;
-alter table srcpart partition (ds='2008-04-08', hr='11') rename to partition (ds='2017-01-19', hr='11');
-alter table srcpart partition (ds='2017-01-19', hr='11') add columns (newcol string);
-alter table srcpart partition (ds='2017-01-19', hr='11') change key key string;
-alter table srcpart set tblproperties('testpartstats'='unchange');
-describe formatted srcpart partition (ds='2017-01-19', hr='11');
+alter table srcpart_n0 touch;
+alter table srcpart_n0 partition (ds='2008-04-08', hr='11') rename to partition (ds='2017-01-19', hr='11');
+alter table srcpart_n0 partition (ds='2017-01-19', hr='11') add columns (newcol string);
+alter table srcpart_n0 partition (ds='2017-01-19', hr='11') change key key string;
+alter table srcpart_n0 set tblproperties('testpartstats'='unchange');
+describe formatted srcpart_n0 partition (ds='2017-01-19', hr='11');

-alter table srcpart partition (ds='2017-01-19', hr='11') update statistics set ('numRows' = '1000');
-describe formatted srcpart partition (ds='2017-01-19', hr='11');
+alter table srcpart_n0 partition (ds='2017-01-19', hr='11') update statistics set ('numRows' = '1000');
+describe formatted srcpart_n0 partition (ds='2017-01-19', hr='11');

-analyze table srcpart partition (ds='2017-01-19', hr='11') compute statistics;
-describe formatted srcpart partition (ds='2017-01-19', hr='11');
+analyze table srcpart_n0 partition (ds='2017-01-19', hr='11') compute statistics;
+describe formatted srcpart_n0 partition (ds='2017-01-19', hr='11');

-drop table srcpart;
+drop table srcpart_n0;
diff --git a/ql/src/test/queries/clientpositive/alter_table_update_status.q b/ql/src/test/queries/clientpositive/alter_table_update_status.q
index 8882584d78..2cace7d5a0 100644
--- a/ql/src/test/queries/clientpositive/alter_table_update_status.q
+++ b/ql/src/test/queries/clientpositive/alter_table_update_status.q
@@ -1,32 +1,32 @@
 --! qt:dataset:src1
-create table src_stat as select * from src1;
+create table src_stat_n0 as select * from src1;

-create table src_stat_int (
+create table src_stat_int_n0 (
   key         double,
   value       string
 );

-LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE src_stat_int;
+LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE src_stat_int_n0;

-ANALYZE TABLE src_stat COMPUTE STATISTICS for columns key;
+ANALYZE TABLE src_stat_n0 COMPUTE STATISTICS for columns key;

-describe formatted src_stat key;
+describe formatted src_stat_n0 key;

-ALTER TABLE src_stat UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111');
+ALTER TABLE src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111');

-describe formatted src_stat key;
+describe formatted src_stat_n0 key;

-ALTER TABLE src_stat UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124');
+ALTER TABLE src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124');

-describe formatted src_stat value;
+describe formatted src_stat_n0 value;

-ANALYZE TABLE src_stat_int COMPUTE STATISTICS for columns key;
+ANALYZE TABLE src_stat_int_n0 COMPUTE STATISTICS for columns key;

-describe formatted src_stat_int key;
+describe formatted src_stat_int_n0 key;

-ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22');
+ALTER TABLE src_stat_int_n0 UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22');

-describe formatted src_stat_int key;
+describe formatted src_stat_int_n0 key;
@@ -34,19 +34,19 @@ create database if not exists dummydb;

 use dummydb;

-ALTER TABLE default.src_stat UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222');
+ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222');

-describe formatted default.src_stat key;
+describe formatted default.src_stat_n0 key;

-ALTER TABLE default.src_stat UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');
+ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');

-describe formatted default.src_stat value;
+describe formatted default.src_stat_n0 value;

 use default;

 drop database dummydb;

-create table datatype_stats(
+create table datatype_stats_n0(
         t TINYINT,
         s SMALLINT,
         i INT,
@@ -62,77 +62,77 @@ create table datatype_stats(
         bl BOOLEAN,
         bin BINARY);

-INSERT INTO datatype_stats values(2, 3, 45, 456, 45454.4, 454.6565, 2355, '2012-01-01 01:02:03', '2012-01-01', 'update_statistics', 'stats', 'hive', 'true', 'bin');
-INSERT INTO datatype_stats values(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-DESC FORMATTED datatype_stats s;
-DESC FORMATTED datatype_stats i;
-DESC FORMATTED datatype_stats b;
-DESC FORMATTED datatype_stats f;
-DESC FORMATTED datatype_stats d;
-DESC FORMATTED datatype_stats dem;
-DESC FORMATTED datatype_stats ts;
-DESC FORMATTED datatype_stats dt;
-DESC FORMATTED datatype_stats str;
-DESC FORMATTED datatype_stats v;
-DESC FORMATTED datatype_stats c;
-DESC FORMATTED datatype_stats bl;
-DESC FORMATTED datatype_stats bin;
+INSERT INTO datatype_stats_n0 values(2, 3, 45, 456, 45454.4, 454.6565, 2355, '2012-01-01 01:02:03', '2012-01-01', 'update_statistics', 'stats', 'hive', 'true', 'bin');
+INSERT INTO datatype_stats_n0 values(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+DESC FORMATTED datatype_stats_n0 s;
+DESC FORMATTED datatype_stats_n0 i;
+DESC FORMATTED datatype_stats_n0 b;
+DESC FORMATTED datatype_stats_n0 f;
+DESC FORMATTED datatype_stats_n0 d;
+DESC FORMATTED datatype_stats_n0 dem;
+DESC FORMATTED datatype_stats_n0 ts;
+DESC FORMATTED datatype_stats_n0 dt;
+DESC FORMATTED datatype_stats_n0 str;
+DESC FORMATTED datatype_stats_n0 v;
+DESC FORMATTED datatype_stats_n0 c;
+DESC FORMATTED datatype_stats_n0 bl;
+DESC FORMATTED datatype_stats_n0 bin;

 --tinyint
-DESC FORMATTED datatype_stats t;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35');
-DESC FORMATTED datatype_stats t;
+DESC FORMATTED datatype_stats_n0 t;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35');
+DESC FORMATTED datatype_stats_n0 t;

 --smallint
-DESC FORMATTED datatype_stats s;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25');
-DESC FORMATTED datatype_stats s;
+DESC FORMATTED datatype_stats_n0 s;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25');
+DESC FORMATTED datatype_stats_n0 s;

 --int
-DESC FORMATTED datatype_stats i;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5');
-DESC FORMATTED datatype_stats i;
+DESC FORMATTED datatype_stats_n0 i;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5');
+DESC FORMATTED datatype_stats_n0 i;

 --bigint
-DESC FORMATTED datatype_stats b;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8');
-DESC FORMATTED datatype_stats b;
+DESC FORMATTED datatype_stats_n0 b;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8');
+DESC FORMATTED datatype_stats_n0 b;

 --float
-DESC FORMATTED datatype_stats f;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00');
-DESC FORMATTED datatype_stats f;
+DESC FORMATTED datatype_stats_n0 f;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00');
+DESC FORMATTED datatype_stats_n0 f;

 --double
-DESC FORMATTED datatype_stats d;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455');
-DESC FORMATTED datatype_stats d;
+DESC FORMATTED datatype_stats_n0 d;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455');
+DESC FORMATTED datatype_stats_n0 d;

 --decimal
-DESC FORMATTED datatype_stats dem;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0');
-DESC FORMATTED datatype_stats dem;
+DESC FORMATTED datatype_stats_n0 dem;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0');
+DESC FORMATTED datatype_stats_n0 dem;

 --timestamp
-DESC FORMATTED datatype_stats ts;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924');
-DESC FORMATTED datatype_stats ts;
+DESC FORMATTED datatype_stats_n0 ts;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924');
+DESC FORMATTED datatype_stats_n0 ts;

 --decimal
-DESC FORMATTED datatype_stats dt;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04');
-DESC FORMATTED datatype_stats dt;
+DESC FORMATTED datatype_stats_n0 dt;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04');
+DESC FORMATTED datatype_stats_n0 dt;

 --string
-DESC FORMATTED datatype_stats str;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');
-DESC FORMATTED datatype_stats str;
+DESC FORMATTED datatype_stats_n0 str;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');
+DESC FORMATTED datatype_stats_n0 str;

 --varchar
-DESC FORMATTED datatype_stats v;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25');
-DESC FORMATTED datatype_stats v;
+DESC FORMATTED datatype_stats_n0 v;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25');
+DESC FORMATTED datatype_stats_n0 v;

 --char
-DESC FORMATTED datatype_stats c;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58');
-DESC FORMATTED datatype_stats c;
+DESC FORMATTED datatype_stats_n0 c;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58');
+DESC FORMATTED datatype_stats_n0 c;

 --boolean
-DESC FORMATTED datatype_stats bl;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8');
-DESC FORMATTED datatype_stats bl;
+DESC FORMATTED datatype_stats_n0 bl;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8');
+DESC FORMATTED datatype_stats_n0 bl;

 --binary
-DESC FORMATTED datatype_stats bin;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8');
-DESC FORMATTED datatype_stats bin;
+DESC FORMATTED datatype_stats_n0 bin;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8');
+DESC FORMATTED datatype_stats_n0 bin;
diff --git a/ql/src/test/queries/clientpositive/alter_view_rename.q b/ql/src/test/queries/clientpositive/alter_view_rename.q
index 598a9428bf..95c2912c3b 100644
--- a/ql/src/test/queries/clientpositive/alter_view_rename.q
+++ b/ql/src/test/queries/clientpositive/alter_view_rename.q
@@ -2,15 +2,15 @@ set hive.mapred.mode=nonstrict;
 CREATE DATABASE tv1;
 CREATE DATABASE tv2;

-CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING);
-CREATE VIEW tv1.view1 as SELECT * FROM invites;
+CREATE TABLE invites_n1 (foo INT, bar STRING) PARTITIONED BY (ds STRING);
+CREATE VIEW tv1.view1 as SELECT * FROM invites_n1;
 DESCRIBE EXTENDED tv1.view1;

 ALTER VIEW tv1.view1 RENAME TO tv2.view2;
 DESCRIBE EXTENDED tv2.view2;

 SELECT * FROM tv2.view2;

-DROP TABLE invites;
+DROP TABLE invites_n1;
 DROP VIEW tv2.view2;

 DROP DATABASE tv1;
diff --git a/ql/src/test/queries/clientpositive/analyze_table_null_partition.q b/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
index 2f7a893d33..4f2b9473b0 100644
--- a/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
+++ b/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
@@ -1,21 +1,21 @@
 SET hive.exec.dynamic.partition.mode=nonstrict;

-DROP TABLE IF EXISTS test1;
-DROP TABLE IF EXISTS test2;
+DROP TABLE IF EXISTS test1_n9;
+DROP TABLE IF EXISTS test2_n6;

-CREATE TABLE test1(name string, age int);
-CREATE TABLE test2(name string) PARTITIONED by (age int);
+CREATE TABLE test1_n9(name string, age int);
+CREATE TABLE test2_n6(name string) PARTITIONED by (age int);

-LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1;
-FROM test1 INSERT OVERWRITE TABLE test2 PARTITION(age) SELECT test1.name, test1.age;
+LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1_n9;
+FROM test1_n9 INSERT OVERWRITE TABLE test2_n6 PARTITION(age) SELECT test1_n9.name, test1_n9.age;

-ANALYZE TABLE test2 PARTITION(age) COMPUTE STATISTICS;
+ANALYZE TABLE test2_n6 PARTITION(age) COMPUTE STATISTICS;

 -- To show stats. It doesn't show due to a bug.
-DESC EXTENDED test2;
+DESC EXTENDED test2_n6;

 -- Another way to show stats.
-EXPLAIN EXTENDED select * from test2;
+EXPLAIN EXTENDED select * from test2_n6;

-DROP TABLE test1;
-DROP TABLE test2;
+DROP TABLE test1_n9;
+DROP TABLE test2_n6;
diff --git a/ql/src/test/queries/clientpositive/analyze_tbl_date.q b/ql/src/test/queries/clientpositive/analyze_tbl_date.q
index 6726b83854..9132ea2714 100644
--- a/ql/src/test/queries/clientpositive/analyze_tbl_date.q
+++ b/ql/src/test/queries/clientpositive/analyze_tbl_date.q
@@ -1,14 +1,14 @@
 set hive.fetch.task.conversion=none;

-create table test_table(d date);
+create table test_table_n7(d date);

-insert into test_table values(null), (null), (null);
+insert into test_table_n7 values(null), (null), (null);

-analyze table test_table compute statistics for columns;
+analyze table test_table_n7 compute statistics for columns;

-describe formatted test_table;
+describe formatted test_table_n7;

-explain select * from test_table where d is not null;
+explain select * from test_table_n7 where d is not null;

-select * from test_table where d is not null;
+select * from test_table_n7 where d is not null;
diff --git a/ql/src/test/queries/clientpositive/analyze_tbl_part.q b/ql/src/test/queries/clientpositive/analyze_tbl_part.q
index 719854714f..edb1999b6f 100644
--- a/ql/src/test/queries/clientpositive/analyze_tbl_part.q
+++ b/ql/src/test/queries/clientpositive/analyze_tbl_part.q
@@ -1,22 +1,22 @@
 --! qt:dataset:src1
 set hive.mapred.mode=nonstrict;
-create table src_stat_part(key string, value string) partitioned by (partitionId int);
+create table src_stat_part_n1(key string, value string) partitioned by (partitionId int);

-insert overwrite table src_stat_part partition (partitionId=1)
+insert overwrite table src_stat_part_n1 partition (partitionId=1)
 select * from src1;

-insert overwrite table src_stat_part partition (partitionId=2)
+insert overwrite table src_stat_part_n1 partition (partitionId=2)
 select * from src1;

-ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key;
+ANALYZE TABLE src_stat_part_n1 partition (partitionId) COMPUTE STATISTICS for columns key;

-describe formatted src_stat_part PARTITION(partitionId=1) key;
+describe formatted src_stat_part_n1 PARTITION(partitionId=1) key;

-ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value;
+ANALYZE TABLE src_stat_part_n1 partition (partitionId) COMPUTE STATISTICS for columns key, value;

-describe formatted src_stat_part PARTITION(partitionId=1) key;
+describe formatted src_stat_part_n1 PARTITION(partitionId=1) key;

-describe formatted src_stat_part PARTITION(partitionId=2) value;
+describe formatted src_stat_part_n1 PARTITION(partitionId=2) value;

 create table src_stat_string_part(key string, value string) partitioned by (partitionName string);
diff --git a/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q b/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
index dd5b9dddea..a402444fba 100644
--- a/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
+++ b/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
@@ -1,4 +1,4 @@
-create table over1k(
+create table over1k_n4(
 t tinyint,
 si smallint,
 i int,
@@ -13,15 +13,15 @@ bin binary)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;

-load data local inpath '../../data/files/over1k' overwrite into table over1k;
-load data local inpath '../../data/files/over1k' into table over1k;
+load data local inpath '../../data/files/over1k' overwrite into table over1k_n4;
+load data local inpath 
'../../data/files/over1k' into table over1k_n4; -analyze table over1k compute statistics; -analyze table over1k compute statistics for columns; +analyze table over1k_n4 compute statistics; +analyze table over1k_n4 compute statistics for columns; set hive.stats.fetch.column.stats=true; set hive.optimize.point.lookup=false; -explain select count(*) from over1k where ( +explain select count(*) from over1k_n4 where ( (t=1 and si=2) or (t=2 and si=3) or (t=3 and si=4) @@ -44,7 +44,7 @@ or (t=47 and si=48) or (t=52 and si=53)); set hive.stats.fetch.column.stats=false; -explain select count(*) from over1k where ( +explain select count(*) from over1k_n4 where ( (t=1 and si=2) or (t=2 and si=3) or (t=3 and si=4) diff --git a/ql/src/test/queries/clientpositive/annotate_stats_groupby.q b/ql/src/test/queries/clientpositive/annotate_stats_groupby.q index 77571cf44a..081f057053 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_groupby.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_groupby.q @@ -21,69 +21,69 @@ set hive.map.aggr.hash.percentmemory=0.0f; -- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet) -- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) -create table if not exists loc_staging ( +create table if not exists loc_staging_n2 ( state string, locid int, zip bigint, year int ) row format delimited fields terminated by '|' stored as textfile; -create table loc_orc like loc_staging; -alter table loc_orc set fileformat orc; +create table loc_orc_n2 like loc_staging_n2; +alter table loc_orc_n2 set fileformat orc; -load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging; +load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n2; -insert overwrite table loc_orc select * from loc_staging; +insert overwrite table loc_orc_n2 select * from loc_staging_n2; -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc; +explain select * from loc_orc_n2; -- partial column stats -analyze table loc_orc compute statistics for columns state; +analyze table loc_orc_n2 compute statistics for columns state; -- inner group by: map - numRows: 8 reduce - numRows: 4 -- outer group by: map - numRows: 4 reduce numRows: 2 explain select a, c, min(b) from ( select state as a, locid as b, count(*) as c - from loc_orc + from loc_orc_n2 group by state,locid ) sq1 group by a,c; -analyze table loc_orc compute statistics for columns state,locid,year; +analyze table loc_orc_n2 compute statistics for columns state,locid,year; -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 -- Case 9: column stats, NO grouping sets - cardinality = 2 -explain select year from loc_orc group by year; +explain select year from loc_orc_n2 group by year; -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 -- Case 9: column stats, NO grouping sets - cardinality = 8 -explain select state,locid from loc_orc group by state,locid; +explain select state,locid from loc_orc_n2 group by state,locid; -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 -- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid with cube; +explain select state,locid from loc_orc_n2 group by state,locid with cube; -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 -- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc group by 
state,locid with rollup; -explain select state,locid from loc_orc group by rollup( state,locid ); +explain select state,locid from loc_orc_n2 group by state,locid with rollup; +explain select state,locid from loc_orc_n2 group by rollup( state,locid ); -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8 -- Case 8: column stats, grouping sets - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state)); +explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state)); -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16 -- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)); +explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid)); -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 -- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()); +explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid),()); -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 -- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()); +explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state,locid),(state),(locid),()); set hive.map.aggr.hash.percentmemory=0.5f; set mapred.max.split.size=80; @@ -91,52 +91,52 @@ set mapred.max.split.size=80; -- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 -- Case 9: column stats, NO grouping sets - cardinality = 2 -explain select year from loc_orc group by year; +explain select year from loc_orc_n2 group by year; -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16 -- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube; +explain select state,locid from loc_orc_n2 group by state,locid with cube; -- ndvProduct becomes 0 as zip does not have column stats -- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 -- Case 9: column stats, NO grouping sets - cardinality = 2 -explain select state,zip from loc_orc group by state,zip; +explain select state,zip from loc_orc_n2 group by state,zip; set mapred.max.split.size=1000; set hive.stats.fetch.column.stats=false; -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 -- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube; +explain select state,locid from loc_orc_n2 group by state,locid with cube; -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 -- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid with rollup; -explain select state,locid from loc_orc group by rollup (state,locid); +explain select state,locid from loc_orc_n2 group by state,locid with rollup; +explain select state,locid from loc_orc_n2 group by rollup (state,locid); -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 -- Case 7: NO column stats - cardinality = 4 -explain select state,locid from loc_orc group by state,locid grouping sets((state)); 
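-- A worked reading of the cardinality comments in this file (an aside; the loc.txt load above
-- yields 8 rows): without hash aggregation the map-side estimate for grouping sets is
-- numRows * sizeOfGroupingSet. A cube over (state, locid) expands to 4 grouping sets, giving
-- 8 * 4 = 32; rollup has 3 sets, giving 8 * 3 = 24; grouping sets((state),(locid)) has 2 sets,
-- giving 8 * 2 = 16; grouping sets((state)) has 1 set, giving 8 * 1 = 8. With column stats
-- available, the Case 8 figures then cap this with Min(numRows, ndvProduct * sizeOfGroupingSet),
-- per the formula quoted at the top of the file.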
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state)); -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16 -- Case 7: NO column stats - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)); +explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid)); -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 -- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()); +explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid),()); -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 -- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()); +explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state,locid),(state),(locid),()); set mapred.max.split.size=80; -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 -- Case 7: NO column stats - cardinality = 4 -explain select year from loc_orc group by year; +explain select year from loc_orc_n2 group by year; -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 -- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube; +explain select state,locid from loc_orc_n2 group by state,locid with cube; diff --git a/ql/src/test/queries/clientpositive/annotate_stats_join.q b/ql/src/test/queries/clientpositive/annotate_stats_join.q index 015c647414..4d24324309 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_join.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_join.q @@ -1,13 +1,13 @@ set hive.stats.fetch.column.stats=true; set hive.stats.ndv.error=0.0; -create table if not exists emp ( +create table if not exists emp_n2 ( lastname string, deptid int, locid int ) row format delimited fields terminated by '|' stored as textfile; -create table if not exists dept ( +create table if not exists dept_n1 ( deptid int, deptname string ) row format delimited fields terminated by '|' stored as textfile; @@ -19,63 +19,63 @@ create table if not exists loc ( year int ) row format delimited fields terminated by '|' stored as textfile; -LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp; -LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept; +LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp_n2; +LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept_n1; LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc; -analyze table emp compute statistics; -analyze table dept compute statistics; +analyze table emp_n2 compute statistics; +analyze table dept_n1 compute statistics; analyze table loc compute statistics; -analyze table emp compute statistics for columns lastname,deptid,locid; -analyze table dept compute statistics for columns deptname,deptid; +analyze table emp_n2 compute statistics for columns lastname,deptid,locid; +analyze table dept_n1 compute statistics for columns deptname,deptid; analyze table loc compute statistics for columns state,locid,zip,year; -- number of rows --- emp - 48 --- dept - 6 +-- emp_n2 - 48 +-- dept_n1 - 6 -- loc - 8 -- count distincts for 
relevant columns (since count distinct values are approximate, in some cases count distinct values will be greater than number of rows) --- emp.deptid - 3 --- emp.lastname - 6 --- emp.locid - 7 --- dept.deptid - 7 --- dept.deptname - 6 +-- emp_n2.deptid - 3 +-- emp_n2.lastname - 6 +-- emp_n2.locid - 7 +-- dept_n1.deptid - 7 +-- dept_n1.deptname - 6 -- loc.locid - 7 -- loc.state - 6 -- 2 relations, 1 attribute -- Expected output rows: (48*6)/max(3,7) = 41 -explain select * from emp e join dept d on (e.deptid = d.deptid); +explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid); -- 2 relations, 2 attributes -- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname; -explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname); +explain select * from emp_n2,dept_n1 where emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname; +explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid and e.lastname = d.deptname); -- 2 relations, 3 attributes -- Expected output rows: (48*6)/(max(3,7) * max(6,6) * max(6,6)) = 1 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname; +explain select * from emp_n2,dept_n1 where emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname; -- 3 relations, 1 attribute -- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658 -explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid); +explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid) join emp_n2 e1 on (e.deptid = e1.deptid); -- Expected output rows: (48*6*8)/top2largest(3,7,7) = 47 -explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid); +explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid); -- 3 relations and 2 attributes -- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1 -explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state); +explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state); -- left outer join -explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname; +explain select * from emp_n2 left outer join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname; -- left semi join -explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname; +explain select * from emp_n2 left semi join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname; -- right outer join -explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname; +explain select * from emp_n2 right outer join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname; -- full outer join -explain select * from emp full outer join dept on emp.deptid = dept.deptid 
and emp.lastname = dept.deptname and dept.deptname = emp.lastname; +explain select * from emp_n2 full outer join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname; diff --git a/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q b/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q index f94994a395..72ebb1796c 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q @@ -1,13 +1,13 @@ set hive.stats.fetch.column.stats=true; -drop table store_sales; -drop table store; +drop table store_sales_n0; +drop table store_n0; drop table customer_address; -- s_store_sk is PK, ss_store_sk is FK -- ca_address_sk is PK, ss_addr_sk is FK -create table store_sales +create table store_sales_n0 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -35,7 +35,7 @@ create table store_sales ) row format delimited fields terminated by '|'; -create table store +create table store_n0 ( s_store_sk int, s_store_id string, @@ -121,44 +121,44 @@ create table customer_address ) row format delimited fields terminated by '|'; -load data local inpath '../../data/files/store.txt' overwrite into table store; +load data local inpath '../../data/files/store.txt' overwrite into table store_n0; load data local inpath '../../data/files/store.txt' overwrite into table store_bigint; -load data local inpath '../../data/files/store_sales.txt' overwrite into table store_sales; +load data local inpath '../../data/files/store_sales.txt' overwrite into table store_sales_n0; load data local inpath '../../data/files/customer_address.txt' overwrite into table customer_address; -analyze table store compute statistics; -analyze table store compute statistics for columns s_store_sk, s_floor_space; +analyze table store_n0 compute statistics; +analyze table store_n0 compute statistics for columns s_store_sk, s_floor_space; analyze table store_bigint compute statistics; analyze table store_bigint compute statistics for columns s_store_sk, s_floor_space; -analyze table store_sales compute statistics; -analyze table store_sales compute statistics for columns ss_store_sk, ss_addr_sk, ss_quantity; +analyze table store_sales_n0 compute statistics; +analyze table store_sales_n0 compute statistics for columns ss_store_sk, ss_addr_sk, ss_quantity; analyze table customer_address compute statistics; analyze table customer_address compute statistics for columns ca_address_sk; -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk); +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk); -- widening cast: inferred PK-FK, thus same row count as previous query -explain select s.s_store_sk from store_bigint s join store_sales ss on (s.s_store_sk = ss.ss_store_sk); +explain select s.s_store_sk from store_bigint s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk); -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 0; +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 0; -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_company_id > 0 and ss.ss_quantity > 10; +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_company_id > 0 and ss.ss_quantity > 10; 
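-- An aside connecting the two join tests (a hedged sketch of the arithmetic the comments above
-- rely on): with plain column stats the estimate is
--   |R join S| = (|R| * |S|) / (max(ndv(R.a), ndv(S.a)) * max(ndv(R.b), ndv(S.b)) * ...),
-- e.g. (48 * 6) / max(3, 7) = 41 for emp_n2 join dept_n1 on deptid. In this pkfk test the
-- optimizer instead infers a PK-FK relationship (s_store_sk is the PK, ss_store_sk the FK), so
-- the estimate roughly follows the FK-side row count scaled by the selectivity of any filter on
-- the PK side; the store_bigint variant checks that a widening int-to-bigint cast on the key
-- still allows that inference, hence the identical row count.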
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 0; +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 0; -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10; +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10; -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk); +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk); -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 1000; +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 1000; -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 1000; +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 1000; -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10; +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10; -explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join customer_address ca on (ca.ca_address_sk = ss.ss_addr_sk); +explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join customer_address ca on (ca.ca_address_sk = ss.ss_addr_sk); -drop table store_sales; -drop table store; +drop table store_sales_n0; +drop table store_n0; drop table store_bigint; drop table customer_address; diff --git a/ql/src/test/queries/clientpositive/annotate_stats_limit.q b/ql/src/test/queries/clientpositive/annotate_stats_limit.q index b82fe30737..2fe75089a7 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_limit.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_limit.q @@ -1,30 +1,30 @@ set hive.stats.fetch.column.stats=true; -create table if not exists loc_staging ( +create table if not exists loc_staging_n5 ( state string, locid int, zip bigint, year int ) row format delimited fields terminated by '|' stored as textfile; -create table loc_orc like loc_staging; -alter table loc_orc set fileformat orc; +create table loc_orc_n5 like loc_staging_n5; +alter table loc_orc_n5 set fileformat orc; -load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging; +load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n5; -insert overwrite table loc_orc select * from loc_staging; +insert overwrite table loc_orc_n5 select * from loc_staging_n5; -analyze table loc_orc compute statistics for columns state, locid, zip, year; +analyze table loc_orc_n5 compute statistics for columns state, locid, zip, year; -- 
numRows: 8 rawDataSize: 796 -explain select * from loc_orc; +explain select * from loc_orc_n5; -- numRows: 4 rawDataSize: 396 -explain select * from loc_orc limit 4; +explain select * from loc_orc_n5 limit 4; -- greater than the available number of rows -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc limit 16; +explain select * from loc_orc_n5 limit 16; -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc limit 0; +explain select * from loc_orc_n5 limit 0; diff --git a/ql/src/test/queries/clientpositive/annotate_stats_part.q b/ql/src/test/queries/clientpositive/annotate_stats_part.q index 5bdaaff754..c8e5c4942b 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_part.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_part.q @@ -5,86 +5,86 @@ set hive.exec.dynamic.partition=true; set hive.exec.dynamic.partition.mode=nonstrict; set hive.metastore.aggregate.stats.cache.enabled=false; -create table if not exists loc_staging ( +create table if not exists loc_staging_n4 ( state string, locid int, zip bigint, year string ) row format delimited fields terminated by '|' stored as textfile; -LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging; +LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging_n4; -create table if not exists loc_orc ( +create table if not exists loc_orc_n4 ( state string, locid int, zip bigint ) partitioned by(year string) stored as orc; -- basicStatState: NONE colStatState: NONE -explain select * from loc_orc; +explain select * from loc_orc_n4; -insert overwrite table loc_orc partition(year) select * from loc_staging; +insert overwrite table loc_orc_n4 partition(year) select * from loc_staging_n4; -- stats are disabled. basic stats will report the file size but not raw data size. 
so initial statistics will be PARTIAL -- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc; +explain select * from loc_orc_n4; -- partition level analyze statistics for specific partition -analyze table loc_orc partition(year='2001') compute statistics; +analyze table loc_orc_n4 partition(year='2001') compute statistics; -- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__'; +explain select * from loc_orc_n4 where year='__HIVE_DEFAULT_PARTITION__'; -- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc; +explain select * from loc_orc_n4; -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='2001'; +explain select * from loc_orc_n4 where year='2001'; -- partition level analyze statistics for all partitions -analyze table loc_orc partition(year) compute statistics; +analyze table loc_orc_n4 partition(year) compute statistics; -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__'; +explain select * from loc_orc_n4 where year='__HIVE_DEFAULT_PARTITION__'; -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc; +explain select * from loc_orc_n4; -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__'; +explain select * from loc_orc_n4 where year='2001' or year='__HIVE_DEFAULT_PARTITION__'; -- both partitions will be pruned -- basicStatState: NONE colStatState: NONE -explain select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__'; +explain select * from loc_orc_n4 where year='2001' and year='__HIVE_DEFAULT_PARTITION__'; -- partition level partial column statistics -analyze table loc_orc partition(year='2001') compute statistics for columns state,locid; +analyze table loc_orc_n4 partition(year='2001') compute statistics for columns state,locid; -- basicStatState: COMPLETE colStatState: NONE -explain select zip from loc_orc; +explain select zip from loc_orc_n4; -- basicStatState: COMPLETE colStatState: PARTIAL -explain select state from loc_orc; +explain select state from loc_orc_n4; -- basicStatState: COMPLETE colStatState: COMPLETE -explain select year from loc_orc; +explain select year from loc_orc_n4; -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. 
Hence colStatState reports PARTIAL -- basicStatState: COMPLETE colStatState: PARTIAL -explain select state,locid from loc_orc; +explain select state,locid from loc_orc_n4; -- basicStatState: COMPLETE colStatState: COMPLETE -explain select state,locid from loc_orc where year='2001'; +explain select state,locid from loc_orc_n4 where year='2001'; -- basicStatState: COMPLETE colStatState: NONE -explain select state,locid from loc_orc where year!='2001'; +explain select state,locid from loc_orc_n4 where year!='2001'; -- basicStatState: COMPLETE colStatState: PARTIAL -explain select * from loc_orc; +explain select * from loc_orc_n4; -- This is to test filter expression evaluation on partition column -- numRows: 2 dataSize: 8 basicStatState: COMPLETE colStatState: COMPLETE -explain select locid from loc_orc where locid>0 and year='2001'; -explain select locid,year from loc_orc where locid>0 and year='2001'; -explain select * from (select locid,year from loc_orc) test where locid>0 and year='2001'; +explain select locid from loc_orc_n4 where locid>0 and year='2001'; +explain select locid,year from loc_orc_n4 where locid>0 and year='2001'; +explain select * from (select locid,year from loc_orc_n4) test where locid>0 and year='2001'; diff --git a/ql/src/test/queries/clientpositive/annotate_stats_table.q b/ql/src/test/queries/clientpositive/annotate_stats_table.q index b5389ffdb4..475baedbc7 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_table.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_table.q @@ -52,5 +52,5 @@ explain select deptid from emp_orc; -- basicStatState: COMPLETE colStatState: COMPLETE explain select lastname,deptid from emp_orc; -create table tmp as select 1; -explain create table tmp as select 1; +create table tmp_n0 as select 1; +explain create table tmp_n0 as select 1; diff --git a/ql/src/test/queries/clientpositive/annotate_stats_union.q b/ql/src/test/queries/clientpositive/annotate_stats_union.q index b0017f56f4..8417661144 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_union.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_union.q @@ -1,55 +1,55 @@ set hive.stats.fetch.column.stats=true; -create table if not exists loc_staging ( +create table if not exists loc_staging_n3 ( state string, locid int, zip bigint, year int ) row format delimited fields terminated by '|' stored as textfile; -create table loc_orc like loc_staging; -alter table loc_orc set fileformat orc; +create table loc_orc_n3 like loc_staging_n3; +alter table loc_orc_n3 set fileformat orc; -load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging; +load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n3; -insert overwrite table loc_orc select * from loc_staging; +insert overwrite table loc_orc_n3 select * from loc_staging_n3; -analyze table loc_orc compute statistics for columns state,locid,zip,year; +analyze table loc_orc_n3 compute statistics for columns state,locid,zip,year; -- numRows: 8 rawDataSize: 688 -explain select state from loc_orc; +explain select state from loc_orc_n3; -- numRows: 16 rawDataSize: 1376 -explain select * from (select state from loc_orc union all select state from loc_orc) tmp; +explain select * from (select state from loc_orc_n3 union all select state from loc_orc_n3) tmp; -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc; +explain select * from loc_orc_n3; -- numRows: 16 rawDataSize: 1592 -explain select * from (select * from loc_orc union all select * from loc_orc) 
tmp; +explain select * from (select * from loc_orc_n3 union all select * from loc_orc_n3) tmp; create database test; use test; -create table if not exists loc_staging ( +create table if not exists loc_staging_n3 ( state string, locid int, zip bigint, year int ) row format delimited fields terminated by '|' stored as textfile; -create table loc_orc like loc_staging; -alter table loc_orc set fileformat orc; +create table loc_orc_n3 like loc_staging_n3; +alter table loc_orc_n3 set fileformat orc; -load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging; +load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n3; -insert overwrite table loc_orc select * from loc_staging; +insert overwrite table loc_orc_n3 select * from loc_staging_n3; -analyze table loc_staging compute statistics; -analyze table loc_staging compute statistics for columns state,locid,zip,year; -analyze table loc_orc compute statistics for columns state,locid,zip,year; +analyze table loc_staging_n3 compute statistics; +analyze table loc_staging_n3 compute statistics for columns state,locid,zip,year; +analyze table loc_orc_n3 compute statistics for columns state,locid,zip,year; -- numRows: 16 rawDataSize: 1376 -explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp; +explain select * from (select state from default.loc_orc_n3 union all select state from test.loc_orc_n3) temp; -- numRows: 16 rawDataSize: 1376 -explain select * from (select state from test.loc_staging union all select state from test.loc_orc) temp; +explain select * from (select state from test.loc_staging_n3 union all select state from test.loc_orc_n3) temp; diff --git a/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q b/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q index 6d5e2ac25a..6f77fa8a00 100644 --- a/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q +++ b/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q @@ -5,61 +5,61 @@ set hive.archive.enabled = true; set hive.exec.submitviachild=false; set hive.exec.submit.local.task.via.child=false; -drop table tstsrc; -drop table tstsrcpart; +drop table tstsrc_n2; +drop table tstsrcpart_n2; -create table tstsrc like src; -insert overwrite table tstsrc select key, value from src; +create table tstsrc_n2 like src; +insert overwrite table tstsrc_n2 select key, value from src; -create table tstsrcpart (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets; +create table tstsrcpart_n2 (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets; -insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') +insert overwrite table tstsrcpart_n2 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' and hr='11'; -insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12') +insert overwrite table tstsrcpart_n2 partition (ds='2008-04-08', hr='12') select key, value from srcpart where ds='2008-04-08' and hr='12'; -insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='11') +insert overwrite table tstsrcpart_n2 partition (ds='2008-04-09', hr='11') select key, value from srcpart where ds='2008-04-09' and hr='11'; -insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='12') +insert overwrite table tstsrcpart_n2 partition (ds='2008-04-09', hr='12') select key, value from srcpart where ds='2008-04-09' and hr='12'; SELECT 
SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2; +FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2; -ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12'); +ALTER TABLE tstsrcpart_n2 ARCHIVE PARTITION (ds='2008-04-08', hr='12'); SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2; +FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2; -SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key; +SELECT key, count(1) FROM tstsrcpart_n2 WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key; -SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key +SELECT * FROM tstsrcpart_n2 a JOIN tstsrc_n2 b ON a.key=b.key WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0'; -ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12'); +ALTER TABLE tstsrcpart_n2 UNARCHIVE PARTITION (ds='2008-04-08', hr='12'); SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2; +FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2; CREATE TABLE harbucket(key INT) PARTITIONED by (ds STRING) CLUSTERED BY (key) INTO 10 BUCKETS; -INSERT OVERWRITE TABLE harbucket PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key > 50; +INSERT OVERWRITE TABLE harbucket PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc_n2 WHERE key > 50; SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key; -ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12'); +ALTER TABLE tstsrcpart_n2 ARCHIVE PARTITION (ds='2008-04-08', hr='12'); SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key; -ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12'); +ALTER TABLE tstsrcpart_n2 UNARCHIVE PARTITION (ds='2008-04-08', hr='12'); SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key; CREATE TABLE old_name(key INT) PARTITIONED by (ds STRING); -INSERT OVERWRITE TABLE old_name PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key > 50; +INSERT OVERWRITE TABLE old_name PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc_n2 WHERE key > 50; ALTER TABLE old_name ARCHIVE PARTITION (ds='1'); SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col FROM (SELECT * FROM old_name WHERE ds='1') subq1) subq2; @@ -67,5 +67,5 @@ ALTER TABLE old_name RENAME TO new_name; SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col FROM (SELECT * FROM new_name WHERE ds='1') subq1) subq2; -drop table tstsrc; -drop table tstsrcpart; +drop table tstsrc_n2; +drop table tstsrcpart_n2; diff --git a/ql/src/test/queries/clientpositive/array_size_estimation.q b/ql/src/test/queries/clientpositive/array_size_estimation.q index 74713c4a03..8b69b63bbd 100644 --- a/ql/src/test/queries/clientpositive/array_size_estimation.q +++ b/ql/src/test/queries/clientpositive/array_size_estimation.q @@ -1,16 +1,16 @@ set hive.stats.fetch.column.stats=true; -create table t (col string); -insert into t values ('x'); +create table t_n19 (col string); +insert into t_n19 values ('x'); explain -select array("b", "d", "c", "a") FROM t; +select array("b", "d", "c", "a") FROM t_n19; explain -select array("b", "d", "c", col) FROM t; +select array("b", "d", "c", col) FROM t_n19; explain 
-select sort_array(array("b", "d", "c", "a")),array("1","2") FROM t; +select sort_array(array("b", "d", "c", "a")),array("1","2") FROM t_n19; explain -select sort_array(array("b", "d", "c", col)),array("1","2") FROM t; +select sort_array(array("b", "d", "c", col)),array("1","2") FROM t_n19; diff --git a/ql/src/test/queries/clientpositive/authorization_1.q b/ql/src/test/queries/clientpositive/authorization_1.q index 42abff0fc3..80e7a5d6a9 100644 --- a/ql/src/test/queries/clientpositive/authorization_1.q +++ b/ql/src/test/queries/clientpositive/authorization_1.q @@ -3,61 +3,61 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho -- SORT_BEFORE_DIFF -create table src_autho_test as select * from src; +create table src_autho_test_n11 as select * from src; set hive.security.authorization.enabled=true; --table grant to user -grant select on table src_autho_test to user hive_test_user; +grant select on table src_autho_test_n11 to user hive_test_user; -show grant user hive_test_user on table src_autho_test; -show grant user hive_test_user on table src_autho_test(key); +show grant user hive_test_user on table src_autho_test_n11; +show grant user hive_test_user on table src_autho_test_n11(key); -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n11 order by key limit 20; -revoke select on table src_autho_test from user hive_test_user; -show grant user hive_test_user on table src_autho_test; -show grant user hive_test_user on table src_autho_test(key); +revoke select on table src_autho_test_n11 from user hive_test_user; +show grant user hive_test_user on table src_autho_test_n11; +show grant user hive_test_user on table src_autho_test_n11(key); --column grant to user -grant select(key) on table src_autho_test to user hive_test_user; +grant select(key) on table src_autho_test_n11 to user hive_test_user; -show grant user hive_test_user on table src_autho_test; -show grant user hive_test_user on table src_autho_test(key); +show grant user hive_test_user on table src_autho_test_n11; +show grant user hive_test_user on table src_autho_test_n11(key); -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n11 order by key limit 20; -revoke select(key) on table src_autho_test from user hive_test_user; -show grant user hive_test_user on table src_autho_test; -show grant user hive_test_user on table src_autho_test(key); +revoke select(key) on table src_autho_test_n11 from user hive_test_user; +show grant user hive_test_user on table src_autho_test_n11; +show grant user hive_test_user on table src_autho_test_n11(key); --table grant to group -grant select on table src_autho_test to group hive_test_group1; +grant select on table src_autho_test_n11 to group hive_test_group1; -show grant group hive_test_group1 on table src_autho_test; -show grant group hive_test_group1 on table src_autho_test(key); +show grant group hive_test_group1 on table src_autho_test_n11; +show grant group hive_test_group1 on table src_autho_test_n11(key); -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n11 order by key limit 20; -revoke select on table src_autho_test from group hive_test_group1; -show grant group hive_test_group1 on table src_autho_test; -show grant group hive_test_group1 on table src_autho_test(key); +revoke select on table src_autho_test_n11 from group hive_test_group1; +show grant group hive_test_group1 on table src_autho_test_n11; +show grant group hive_test_group1 on table 
src_autho_test_n11(key); --column grant to group -grant select(key) on table src_autho_test to group hive_test_group1; +grant select(key) on table src_autho_test_n11 to group hive_test_group1; -show grant group hive_test_group1 on table src_autho_test; -show grant group hive_test_group1 on table src_autho_test(key); +show grant group hive_test_group1 on table src_autho_test_n11; +show grant group hive_test_group1 on table src_autho_test_n11(key); -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n11 order by key limit 20; -revoke select(key) on table src_autho_test from group hive_test_group1; -show grant group hive_test_group1 on table src_autho_test; -show grant group hive_test_group1 on table src_autho_test(key); +revoke select(key) on table src_autho_test_n11 from group hive_test_group1; +show grant group hive_test_group1 on table src_autho_test_n11; +show grant group hive_test_group1 on table src_autho_test_n11(key); --role create role sRc_roLE; @@ -66,27 +66,27 @@ show role grant user hive_test_user; --column grant to role -grant select(key) on table src_autho_test to role sRc_roLE; +grant select(key) on table src_autho_test_n11 to role sRc_roLE; -show grant role sRc_roLE on table src_autho_test; -show grant role sRc_roLE on table src_autho_test(key); +show grant role sRc_roLE on table src_autho_test_n11; +show grant role sRc_roLE on table src_autho_test_n11(key); -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n11 order by key limit 20; -revoke select(key) on table src_autho_test from role sRc_roLE; +revoke select(key) on table src_autho_test_n11 from role sRc_roLE; --table grant to role -grant select on table src_autho_test to role sRc_roLE; +grant select on table src_autho_test_n11 to role sRc_roLE; -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n11 order by key limit 20; -show grant role sRc_roLE on table src_autho_test; -show grant role sRc_roLE on table src_autho_test(key); -revoke select on table src_autho_test from role sRc_roLE; +show grant role sRc_roLE on table src_autho_test_n11; +show grant role sRc_roLE on table src_autho_test_n11(key); +revoke select on table src_autho_test_n11 from role sRc_roLE; -- drop role drop role sRc_roLE; set hive.security.authorization.enabled=false; -drop table src_autho_test; +drop table src_autho_test_n11; diff --git a/ql/src/test/queries/clientpositive/authorization_3.q b/ql/src/test/queries/clientpositive/authorization_3.q index 95c25fecc4..b3c090ef69 100644 --- a/ql/src/test/queries/clientpositive/authorization_3.q +++ b/ql/src/test/queries/clientpositive/authorization_3.q @@ -3,20 +3,20 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho -- SORT_BEFORE_DIFF -create table src_autho_test as select * from src; +create table src_autho_test_n5 as select * from src; -grant drop on table src_autho_test to user hive_test_user; -grant select on table src_autho_test to user hive_test_user; +grant drop on table src_autho_test_n5 to user hive_test_user; +grant select on table src_autho_test_n5 to user hive_test_user; -show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test_n5; -revoke select on table src_autho_test from user hive_test_user; -revoke drop on table src_autho_test from user hive_test_user; +revoke select on table src_autho_test_n5 from user hive_test_user; +revoke drop on table src_autho_test_n5 from user hive_test_user; -grant 
drop,select on table src_autho_test to user hive_test_user; -show grant user hive_test_user on table src_autho_test; -revoke drop,select on table src_autho_test from user hive_test_user; +grant drop,select on table src_autho_test_n5 to user hive_test_user; +show grant user hive_test_user on table src_autho_test_n5; +revoke drop,select on table src_autho_test_n5 from user hive_test_user; -grant drop,select(key), select(value) on table src_autho_test to user hive_test_user; -show grant user hive_test_user on table src_autho_test; -revoke drop,select(key), select(value) on table src_autho_test from user hive_test_user; +grant drop,select(key), select(value) on table src_autho_test_n5 to user hive_test_user; +show grant user hive_test_user on table src_autho_test_n5; +revoke drop,select(key), select(value) on table src_autho_test_n5 from user hive_test_user; diff --git a/ql/src/test/queries/clientpositive/authorization_4.q b/ql/src/test/queries/clientpositive/authorization_4.q index f76ba1f781..a5a65aedec 100644 --- a/ql/src/test/queries/clientpositive/authorization_4.q +++ b/ql/src/test/queries/clientpositive/authorization_4.q @@ -3,14 +3,14 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho -- SORT_BEFORE_DIFF -create table src_autho_test as select * from src; +create table src_autho_test_n2 as select * from src; -grant All on table src_autho_test to user hive_test_user; +grant All on table src_autho_test_n2 to user hive_test_user; set hive.security.authorization.enabled=true; -show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test_n2; -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n2 order by key limit 20; -drop table src_autho_test; +drop table src_autho_test_n2; diff --git a/ql/src/test/queries/clientpositive/authorization_6.q b/ql/src/test/queries/clientpositive/authorization_6.q index f105d053f9..e8b53ab83a 100644 --- a/ql/src/test/queries/clientpositive/authorization_6.q +++ b/ql/src/test/queries/clientpositive/authorization_6.q @@ -3,43 +3,43 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho -- SORT_BEFORE_DIFF -create table src_auth_tmp as select * from src; +create table src_auth_tmp_n0 as select * from src; -create table authorization_part (key int, value string) partitioned by (ds string); -ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); +create table authorization_part_n0 (key int, value string) partitioned by (ds string); +ALTER TABLE authorization_part_n0 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); set hive.security.authorization.enabled=true; -grant select on table src_auth_tmp to user hive_test_user; +grant select on table src_auth_tmp_n0 to user hive_test_user; -- column grant to user -grant Create on table authorization_part to user hive_test_user; -grant Update on table authorization_part to user hive_test_user; -grant Drop on table authorization_part to user hive_test_user; +grant Create on table authorization_part_n0 to user hive_test_user; +grant Update on table authorization_part_n0 to user hive_test_user; +grant Drop on table authorization_part_n0 to user hive_test_user; -show grant user hive_test_user on table authorization_part; -grant select(key) on table authorization_part to user hive_test_user; -insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp; -insert overwrite table authorization_part partition 
(ds='2011') select key, value from src_auth_tmp; -show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); -show grant user hive_test_user on table authorization_part(key) partition (ds='2011'); -show grant user hive_test_user on table authorization_part(key); -select key from authorization_part where ds>='2010' order by key limit 20; +show grant user hive_test_user on table authorization_part_n0; +grant select(key) on table authorization_part_n0 to user hive_test_user; +insert overwrite table authorization_part_n0 partition (ds='2010') select key, value from src_auth_tmp_n0; +insert overwrite table authorization_part_n0 partition (ds='2011') select key, value from src_auth_tmp_n0; +show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2010'); +show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2011'); +show grant user hive_test_user on table authorization_part_n0(key); +select key from authorization_part_n0 where ds>='2010' order by key limit 20; -drop table authorization_part; +drop table authorization_part_n0; set hive.security.authorization.enabled=false; -create table authorization_part (key int, value string) partitioned by (ds string); -ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="FALSE"); +create table authorization_part_n0 (key int, value string) partitioned by (ds string); +ALTER TABLE authorization_part_n0 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="FALSE"); set hive.security.authorization.enabled=true; -grant Create on table authorization_part to user hive_test_user; -grant Update on table authorization_part to user hive_test_user; - -show grant user hive_test_user on table authorization_part; - -grant select(key) on table authorization_part to user hive_test_user; -insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp; -insert overwrite table authorization_part partition (ds='2011') select key, value from src_auth_tmp; -show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); -show grant user hive_test_user on table authorization_part(key) partition (ds='2011'); -show grant user hive_test_user on table authorization_part(key); -select key from authorization_part where ds>='2010' order by key limit 20; +grant Create on table authorization_part_n0 to user hive_test_user; +grant Update on table authorization_part_n0 to user hive_test_user; + +show grant user hive_test_user on table authorization_part_n0; + +grant select(key) on table authorization_part_n0 to user hive_test_user; +insert overwrite table authorization_part_n0 partition (ds='2010') select key, value from src_auth_tmp_n0; +insert overwrite table authorization_part_n0 partition (ds='2011') select key, value from src_auth_tmp_n0; +show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2010'); +show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2011'); +show grant user hive_test_user on table authorization_part_n0(key); +select key from authorization_part_n0 where ds>='2010' order by key limit 20; diff --git a/ql/src/test/queries/clientpositive/authorization_9.q b/ql/src/test/queries/clientpositive/authorization_9.q index 19892dd331..40b5e869fb 100644 --- a/ql/src/test/queries/clientpositive/authorization_9.q +++ b/ql/src/test/queries/clientpositive/authorization_9.q @@ -4,21 +4,21 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho 
create database authorization_9; use authorization_9; -create table dummy (key string, value string); +create table dummy_n1 (key string, value string); grant select to user hive_test_user; grant select on database authorization_9 to user hive_test_user; -grant select on table dummy to user hive_test_user; -grant select (key, value) on table dummy to user hive_test_user; +grant select on table dummy_n1 to user hive_test_user; +grant select (key, value) on table dummy_n1 to user hive_test_user; show grant user hive_test_user on database authorization_9; -show grant user hive_test_user on table dummy; +show grant user hive_test_user on table dummy_n1; show grant user hive_test_user on all; grant select to user hive_test_user2; grant select on database authorization_9 to user hive_test_user2; -grant select on table dummy to user hive_test_user2; -grant select (key, value) on table dummy to user hive_test_user2; +grant select on table dummy_n1 to user hive_test_user2; +grant select (key, value) on table dummy_n1 to user hive_test_user2; show grant on all; show grant user hive_test_user on all; @@ -26,10 +26,10 @@ show grant user hive_test_user2 on all; revoke select from user hive_test_user; revoke select on database authorization_9 from user hive_test_user; -revoke select on table dummy from user hive_test_user; -revoke select (key, value) on table dummy from user hive_test_user; +revoke select on table dummy_n1 from user hive_test_user; +revoke select (key, value) on table dummy_n1 from user hive_test_user; revoke select from user hive_test_user2; revoke select on database authorization_9 from user hive_test_user2; -revoke select on table dummy from user hive_test_user2; -revoke select (key, value) on table dummy from user hive_test_user2; +revoke select on table dummy_n1 from user hive_test_user2; +revoke select (key, value) on table dummy_n1 from user hive_test_user2; diff --git a/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q b/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q index 3ae0ce118b..210f2991d3 100644 --- a/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q +++ b/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q @@ -5,13 +5,13 @@ set user.name=hive_test_user; -- actions from admin should work as if admin has all privileges -create table t1(i int); +create table t1_n76(i int); set user.name=hive_admin_user; show current roles; set role ADMIN; show current roles; -select * from t1; -grant all on table t1 to user user1; -show grant user user1 on table t1; -drop table t1; +select * from t1_n76; +grant all on table t1_n76 to user user1; +show grant user user1 on table t1_n76; +drop table t1_n76; diff --git a/ql/src/test/queries/clientpositive/authorization_cli_createtab_noauthzapi.q b/ql/src/test/queries/clientpositive/authorization_cli_createtab_noauthzapi.q index c39df65c3d..b7a9683a01 100644 --- a/ql/src/test/queries/clientpositive/authorization_cli_createtab_noauthzapi.q +++ b/ql/src/test/queries/clientpositive/authorization_cli_createtab_noauthzapi.q @@ -7,6 +7,6 @@ set user.name=hive_test_user; -- verify that sql std auth can be set as the authorizer with hive cli, while metastore authorization api calls are disabled (for cli) -create table t_cli(i int); +create table t_cli_n1(i int); -create view v_cli (i) as select i from t_cli; +create view v_cli_n0 (i) as select i from t_cli_n1; diff --git a/ql/src/test/queries/clientpositive/authorization_cli_nonsql.q 
b/ql/src/test/queries/clientpositive/authorization_cli_nonsql.q index 5bc481448b..7e0e1aeb3e 100644 --- a/ql/src/test/queries/clientpositive/authorization_cli_nonsql.q +++ b/ql/src/test/queries/clientpositive/authorization_cli_nonsql.q @@ -9,9 +9,9 @@ use default; dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_admin_almighty1; dfs -ls ${system:test.tmp.dir}/a_admin_almighty1; -create table a_table1(a int, b int); +create table a_table1_n0(a int, b int); add jar ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar; -alter table a_table1 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9'); +alter table a_table1_n0 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9'); -drop table a_table; +drop table a_table1_n0; delete jar ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar; diff --git a/ql/src/test/queries/clientpositive/authorization_cli_stdconfigauth.q b/ql/src/test/queries/clientpositive/authorization_cli_stdconfigauth.q index a3f8e64f25..4cf547aeee 100644 --- a/ql/src/test/queries/clientpositive/authorization_cli_stdconfigauth.q +++ b/ql/src/test/queries/clientpositive/authorization_cli_stdconfigauth.q @@ -6,5 +6,5 @@ set hive.security.authorization.enabled=true; -- verify that SQLStdConfOnlyAuthorizerFactory as the authorizer factory with hive cli, with hive.security.authorization.enabled=true -- authorization verification would be just no-op -create table t_cli(i int); -describe t_cli; +create table t_cli_n0(i int); +describe t_cli_n0; diff --git a/ql/src/test/queries/clientpositive/authorization_grant_option_role.q b/ql/src/test/queries/clientpositive/authorization_grant_option_role.q index ea0b51b901..3a66acf7f1 100644 --- a/ql/src/test/queries/clientpositive/authorization_grant_option_role.q +++ b/ql/src/test/queries/clientpositive/authorization_grant_option_role.q @@ -8,21 +8,21 @@ create role r1; grant role r1 to user r1user; set user.name=user1; -CREATE TABLE t1(i int); +CREATE TABLE t1_n126(i int); -- all privileges should have been set for user -GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION; +GRANT ALL ON t1_n126 TO ROLE r1 WITH GRANT OPTION; set user.name=r1user; -- check if user belong to role r1 can grant privileges to others -GRANT ALL ON t1 TO USER user3; +GRANT ALL ON t1_n126 TO USER user3; set user.name=hive_admin_user; set role admin; -- check privileges on table -show grant on table t1; +show grant on table t1_n126; -- check if drop role removes privileges for that role drop role r1; -show grant on table t1; +show grant on table t1_n126; diff --git a/ql/src/test/queries/clientpositive/authorization_non_id.q b/ql/src/test/queries/clientpositive/authorization_non_id.q index 26d4b5027b..9e2c4b0985 100644 --- a/ql/src/test/queries/clientpositive/authorization_non_id.q +++ b/ql/src/test/queries/clientpositive/authorization_non_id.q @@ -4,8 +4,8 @@ set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi set user.name=hive_admin_user; set role ADMIN; -drop table if exists src_autho_test; -create table src_autho_test (id int); +drop table if exists src_autho_test_n12; +create table src_autho_test_n12 (id int); create role src_role2; @@ -15,11 +15,11 @@ grant role src_role2 to user `foo-1`; show role grant user bar; show role grant user `foo-1`; -grant select on table src_autho_test to user bar; -grant select on table src_autho_test to user
`foo-1`; +grant select on table src_autho_test_n12 to user bar; +grant select on table src_autho_test_n12 to user `foo-1`; show grant user bar on all; show grant user `foo-1` on all; -drop table src_autho_test; +drop table src_autho_test_n12; drop role src_role2; diff --git a/ql/src/test/queries/clientpositive/authorization_owner_actions.q b/ql/src/test/queries/clientpositive/authorization_owner_actions.q index d186a4434a..7340dbc614 100644 --- a/ql/src/test/queries/clientpositive/authorization_owner_actions.q +++ b/ql/src/test/queries/clientpositive/authorization_owner_actions.q @@ -5,13 +5,13 @@ set hive.security.authorization.enabled=true; set user.name=user1; -- actions that require user to be table owner -create table t1(i int); +create table t1_n108(i int); -ALTER TABLE t1 SET SERDEPROPERTIES ('field.delim' = ','); -drop table t1; +ALTER TABLE t1_n108 SET SERDEPROPERTIES ('field.delim' = ','); +drop table t1_n108; -create table t1(i int); -create view vt1 as select * from t1; +create table t1_n108(i int); +create view vt1_n0 as select * from t1_n108; -drop view vt1; -alter table t1 rename to tnew1; +drop view vt1_n0; +alter table t1_n108 rename to tnew1; diff --git a/ql/src/test/queries/clientpositive/authorization_parts.q b/ql/src/test/queries/clientpositive/authorization_parts.q index 18582fcc83..d1a74a318c 100644 --- a/ql/src/test/queries/clientpositive/authorization_parts.q +++ b/ql/src/test/queries/clientpositive/authorization_parts.q @@ -11,11 +11,11 @@ dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_uri_add_part2; -- check add partition without insert privilege -create table tpart(i int, j int) partitioned by (k string); +create table tpart_n0(i int, j int) partitioned by (k string); -alter table tpart add partition (k = '1') location '${system:test.tmp.dir}/a_uri_add_part1/'; -alter table tpart add partition (k = '2') location '${system:test.tmp.dir}/a_uri_add_part2/'; +alter table tpart_n0 add partition (k = '1') location '${system:test.tmp.dir}/a_uri_add_part1/'; +alter table tpart_n0 add partition (k = '2') location '${system:test.tmp.dir}/a_uri_add_part2/'; -select count(*) from tpart; +select count(*) from tpart_n0; -analyze table tpart partition (k) compute statistics; +analyze table tpart_n0 partition (k) compute statistics; diff --git a/ql/src/test/queries/clientpositive/authorization_show_grant.q b/ql/src/test/queries/clientpositive/authorization_show_grant.q index 0f17439eef..3c79db99db 100644 --- a/ql/src/test/queries/clientpositive/authorization_show_grant.q +++ b/ql/src/test/queries/clientpositive/authorization_show_grant.q @@ -17,37 +17,37 @@ grant role roleB to role roleA; set user.name=user1; -- create table and grant privileges to a role -create table t1(i int, j int, k int); -create table t2(i int, j int, k int); +create table t1_n15(i int, j int, k int); +create table t2_n9(i int, j int, k int); -grant select on t1 to role roleA; -grant insert on t2 to role roleA; -grant insert on t2 to role roleB; +grant select on t1_n15 to role roleA; +grant insert on t2_n9 to role roleA; +grant insert on t2_n9 to role roleB; -grant insert,delete on t1 to user userA; -grant select,insert on t2 to user userA; +grant insert,delete on t1_n15 to user userA; +grant select,insert on t2_n9 to user userA; set user.name=hive_admin_user; set role admin; -- as user in admin role, it should be possible to see other users grant -show grant user user1 on table t1; +show grant user user1 on table t1_n15; show grant user user1; -show grant role roleA on table t1; +show grant role 
roleA on table t1_n15; show grant role roleA; show grant; set user.name=userA; -- user belonging to role should be able to see it -show grant role roleA on table t1; +show grant role roleA on table t1_n15; show grant role roleA; -show grant role roleB on table t1; +show grant role roleB on table t1_n15; show grant role roleB; -show grant user userA on table t1; +show grant user userA on table t1_n15; show grant user userA; diff --git a/ql/src/test/queries/clientpositive/authorization_update.q b/ql/src/test/queries/clientpositive/authorization_update.q index 5e57904364..c39ffdbf61 100644 --- a/ql/src/test/queries/clientpositive/authorization_update.q +++ b/ql/src/test/queries/clientpositive/authorization_update.q @@ -11,8 +11,8 @@ set user.name=user1; CREATE TABLE t_auth_up(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -CREATE TABLE t_select(i int); -GRANT ALL ON TABLE t_select TO ROLE public; +CREATE TABLE t_select_n0(i int); +GRANT ALL ON TABLE t_select_n0 TO ROLE public; -- grant update privilege to another user GRANT UPDATE ON t_auth_up TO USER userWIns; diff --git a/ql/src/test/queries/clientpositive/authorization_update_own_table.q b/ql/src/test/queries/clientpositive/authorization_update_own_table.q index e3292d2f65..9189bf5db0 100644 --- a/ql/src/test/queries/clientpositive/authorization_update_own_table.q +++ b/ql/src/test/queries/clientpositive/authorization_update_own_table.q @@ -9,8 +9,8 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set user.name=user1; -create table auth_noupd(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -update auth_noupd set j = 0 where i > 0; +create table auth_noupd_n0(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); +update auth_noupd_n0 set j = 0 where i > 0; set user.name=hive_admin_user; set role admin; diff --git a/ql/src/test/queries/clientpositive/authorization_view_1.q b/ql/src/test/queries/clientpositive/authorization_view_1.q index a355d33e20..e987f15439 100644 --- a/ql/src/test/queries/clientpositive/authorization_view_1.q +++ b/ql/src/test/queries/clientpositive/authorization_view_1.q @@ -1,60 +1,60 @@ --! 
qt:dataset:src set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table src_autho_test as select * from src; +create table src_autho_test_n8 as select * from src; -create view v as select * from src_autho_test; +create view v_n9 as select * from src_autho_test_n8; -create view v1 as select * from src_autho_test; +create view v1_n13 as select * from src_autho_test_n8; -create view v2 as select * from src_autho_test; +create view v2_n7 as select * from src_autho_test_n8; set hive.security.authorization.enabled=true; --table grant to user -grant select on table src_autho_test to user hive_test_user; +grant select on table src_autho_test_n8 to user hive_test_user; -grant select on table v to user hive_test_user; -grant select on table v1 to user hive_test_user; -grant select on table v2 to user hive_test_user; +grant select on table v_n9 to user hive_test_user; +grant select on table v1_n13 to user hive_test_user; +grant select on table v2_n7 to user hive_test_user; -show grant user hive_test_user on table v; -show grant user hive_test_user on v; -show grant user hive_test_user on v(key); +show grant user hive_test_user on table v_n9; +show grant user hive_test_user on v_n9; +show grant user hive_test_user on v_n9(key); -select * from v order by key limit 10; +select * from v_n9 order by key limit 10; -revoke select on table src_autho_test from user hive_test_user; +revoke select on table src_autho_test_n8 from user hive_test_user; -show grant user hive_test_user on table v; -show grant user hive_test_user on v; -show grant user hive_test_user on v(key); +show grant user hive_test_user on table v_n9; +show grant user hive_test_user on v_n9; +show grant user hive_test_user on v_n9(key); -revoke select on table v from user hive_test_user; +revoke select on table v_n9 from user hive_test_user; -show grant user hive_test_user on table v; -show grant user hive_test_user on v; -show grant user hive_test_user on v(key); +show grant user hive_test_user on table v_n9; +show grant user hive_test_user on v_n9; +show grant user hive_test_user on v_n9(key); --column grant to user -grant select on table src_autho_test to user hive_test_user; -grant select(key) on table v to user hive_test_user; +grant select on table src_autho_test_n8 to user hive_test_user; +grant select(key) on table v_n9 to user hive_test_user; -show grant user hive_test_user on table v; -show grant user hive_test_user on v(key); +show grant user hive_test_user on table v_n9; +show grant user hive_test_user on v_n9(key); -select key from v order by key limit 10; +select key from v_n9 order by key limit 10; select key from -(select v.key from src_autho_test join v on src_autho_test.key=v.key)subq +(select v_n9.key from src_autho_test_n8 join v_n9 on src_autho_test_n8.key=v_n9.key)subq order by key limit 10; select key from -(select key as key from src_autho_test union all select key from v)subq +(select key as key from src_autho_test_n8 union all select key from v_n9)subq limit 10; select key from -(select value as key from v2 union select value as key from v1 union all select key from v)subq +(select value as key from v2_n7 union select value as key from v1_n13 union all select key from v_n9)subq limit 10; diff --git a/ql/src/test/queries/clientpositive/authorization_view_2.q b/ql/src/test/queries/clientpositive/authorization_view_2.q index 048cf6c0fe..0cffc0c28d 100644 --- a/ql/src/test/queries/clientpositive/authorization_view_2.q +++ 
b/ql/src/test/queries/clientpositive/authorization_view_2.q @@ -1,17 +1,17 @@ --! qt:dataset:src set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table src_autho_test as select * from src; +create table src_autho_test_n13 as select * from src; -create view v1 as select * from src_autho_test; +create view v1_n19 as select * from src_autho_test_n13; -create view v2 as select * from v1; +create view v2_n12 as select * from v1_n19; set hive.security.authorization.enabled=true; --table grant to user -grant select on table v2 to user hive_test_user; +grant select on table v2_n12 to user hive_test_user; -select * from v2 order by key limit 10; +select * from v2_n12 order by key limit 10; diff --git a/ql/src/test/queries/clientpositive/authorization_view_3.q b/ql/src/test/queries/clientpositive/authorization_view_3.q index 31592b0904..e7c4f8f3f8 100644 --- a/ql/src/test/queries/clientpositive/authorization_view_3.q +++ b/ql/src/test/queries/clientpositive/authorization_view_3.q @@ -1,19 +1,19 @@ --! qt:dataset:src set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table src_autho_test as select * from src; +create table src_autho_test_n1 as select * from src; -create view v1 as select * from src_autho_test; +create view v1_n2 as select * from src_autho_test_n1; -create view v2 as select * from v1; +create view v2_n0 as select * from v1_n2; set hive.security.authorization.enabled=true; --table grant to user -grant select on table v2 to user hive_test_user; +grant select on table v2_n0 to user hive_test_user; -grant select(key) on table src_autho_test to user hive_test_user; +grant select(key) on table src_autho_test_n1 to user hive_test_user; -select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10; +select v2_n0.key from v2_n0 join (select key from src_autho_test_n1)subq on v2_n0.value=subq.key order by key limit 10; diff --git a/ql/src/test/queries/clientpositive/authorization_view_4.q b/ql/src/test/queries/clientpositive/authorization_view_4.q index d87070dd07..8256bf3c00 100644 --- a/ql/src/test/queries/clientpositive/authorization_view_4.q +++ b/ql/src/test/queries/clientpositive/authorization_view_4.q @@ -1,19 +1,19 @@ --! 
qt:dataset:src set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table src_autho_test as select * from src; +create table src_autho_test_n7 as select * from src; -create view v1 as select * from src; +create view v1_n11 as select * from src; -create view v2 as select * from v1; +create view v2_n5 as select * from v1_n11; set hive.security.authorization.enabled=true; --table grant to user -grant select on table v2 to user hive_test_user; +grant select on table v2_n5 to user hive_test_user; -grant select(key) on table src_autho_test to user hive_test_user; +grant select(key) on table src_autho_test_n7 to user hive_test_user; -select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10; +select v2_n5.key from v2_n5 join (select key from src_autho_test_n7)subq on v2_n5.value=subq.key order by key limit 10; diff --git a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_1.q b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_1.q index 07cd949706..be50b69830 100644 --- a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_1.q +++ b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_1.q @@ -2,70 +2,70 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; set hive.cbo.enable=false; -create table src_autho_test as select * from src; +create table src_autho_test_n9 as select * from src; -create view v as select * from src_autho_test; +create view v_n10 as select * from src_autho_test_n9; -create view v1 as select * from src_autho_test; +create view v1_n14 as select * from src_autho_test_n9; -create view v2 as select * from src_autho_test; +create view v2_n8 as select * from src_autho_test_n9; set hive.security.authorization.enabled=true; --table grant to user -grant select on table src_autho_test to user hive_test_user; +grant select on table src_autho_test_n9 to user hive_test_user; -grant select on table v to user hive_test_user; -grant select on table v1 to user hive_test_user; -grant select on table v2 to user hive_test_user; +grant select on table v_n10 to user hive_test_user; +grant select on table v1_n14 to user hive_test_user; +grant select on table v2_n8 to user hive_test_user; -show grant user hive_test_user on table v; -show grant user hive_test_user on v; -show grant user hive_test_user on v(key); +show grant user hive_test_user on table v_n10; +show grant user hive_test_user on v_n10; +show grant user hive_test_user on v_n10(key); -select * from v order by key limit 10; +select * from v_n10 order by key limit 10; -revoke select on table src_autho_test from user hive_test_user; +revoke select on table src_autho_test_n9 from user hive_test_user; -show grant user hive_test_user on table v; -show grant user hive_test_user on v; -show grant user hive_test_user on v(key); +show grant user hive_test_user on table v_n10; +show grant user hive_test_user on v_n10; +show grant user hive_test_user on v_n10(key); -revoke select on table v from user hive_test_user; +revoke select on table v_n10 from user hive_test_user; -show grant user hive_test_user on table v; -show grant user hive_test_user on v; -show grant user hive_test_user on v(key); +show grant user hive_test_user on table v_n10; +show grant user hive_test_user on v_n10; +show grant user hive_test_user on v_n10(key); --column grant to user -grant select on table src_autho_test to user 
hive_test_user; -grant select(key) on table v to user hive_test_user; +grant select on table src_autho_test_n9 to user hive_test_user; +grant select(key) on table v_n10 to user hive_test_user; -show grant user hive_test_user on table v; -show grant user hive_test_user on v(key); +show grant user hive_test_user on table v_n10; +show grant user hive_test_user on v_n10(key); -select key from v order by key limit 10; +select key from v_n10 order by key limit 10; select key from -(select v.key from src_autho_test join v on src_autho_test.key=v.key)subq +(select v_n10.key from src_autho_test_n9 join v_n10 on src_autho_test_n9.key=v_n10.key)subq order by key limit 10; select key from -(select key as key from src_autho_test union all select key from v)subq +(select key as key from src_autho_test_n9 union all select key from v_n10)subq limit 10; select key from -(select value as key from v2 union select value as key from v1 union all select key from v)subq +(select value as key from v2_n8 union select value as key from v1_n14 union all select key from v_n10)subq limit 10; set hive.cbo.enable=true; --although cbo is enabled, it will not succeed. -select key from v sort by key limit 10; +select key from v_n10 sort by key limit 10; select key from -(select key as key from src_autho_test union all select key from v cluster by key)subq +(select key as key from src_autho_test_n9 union all select key from v_n10 cluster by key)subq limit 10; diff --git a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_2.q b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_2.q index 0b6edf1550..52ac084ad2 100644 --- a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_2.q +++ b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_2.q @@ -2,17 +2,17 @@ set hive.cbo.enable=false; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table src_autho_test as select * from src; +create table src_autho_test_n10 as select * from src; -create view v1 as select * from src_autho_test; +create view v1_n16 as select * from src_autho_test_n10; -create view v2 as select * from v1; +create view v2_n9 as select * from v1_n16; set hive.security.authorization.enabled=true; --table grant to user -grant select on table v2 to user hive_test_user; +grant select on table v2_n9 to user hive_test_user; -select * from v2 order by key limit 10; +select * from v2_n9 order by key limit 10; diff --git a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_3.q b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_3.q index 34cd490734..8ea9e1652e 100644 --- a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_3.q +++ b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_3.q @@ -2,11 +2,11 @@ set hive.cbo.enable=false; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table src_autho_test as select * from src; +create table src_autho_test_n0 as select * from src; -create view v1 as select * from src_autho_test; +create view v1_n1 as select * from src_autho_test_n0; -create view v2 as select * from v1; +create view v2 as select * from v1_n1; set hive.security.authorization.enabled=true; @@ -14,7 +14,7 @@ set hive.security.authorization.enabled=true; grant select on table v2 to user hive_test_user; -grant select(key) on table src_autho_test to user hive_test_user; +grant select(key) on 
table src_autho_test_n0 to user hive_test_user; -select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10; +select v2.key from v2 join (select key from src_autho_test_n0)subq on v2.value=subq.key order by key limit 10; diff --git a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_4.q b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_4.q index 9953349a5f..7d186c2269 100644 --- a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_4.q +++ b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_4.q @@ -2,19 +2,19 @@ set hive.cbo.enable=false; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table src_autho_test as select * from src; +create table src_autho_test_n6 as select * from src; -create view v1 as select * from src; +create view v1_n10 as select * from src; -create view v2 as select * from v1; +create view v2_n4 as select * from v1_n10; set hive.security.authorization.enabled=true; --table grant to user -grant select on table v2 to user hive_test_user; +grant select on table v2_n4 to user hive_test_user; -grant select(key) on table src_autho_test to user hive_test_user; +grant select(key) on table src_autho_test_n6 to user hive_test_user; -select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10; +select v2_n4.key from v2_n4 join (select key from src_autho_test_n6)subq on v2_n4.value=subq.key order by key limit 10; diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_1.q b/ql/src/test/queries/clientpositive/autoColumnStats_1.q index d584e0df15..98f7f1ac77 100644 --- a/ql/src/test/queries/clientpositive/autoColumnStats_1.q +++ b/ql/src/test/queries/clientpositive/autoColumnStats_1.q @@ -14,188 +14,188 @@ set hive.auto.convert.join.noconditionaltask.size=10000; set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ; set hive.optimize.bucketingsorting=false; -drop table src_multi1; +drop table src_multi1_n1; -create table src_multi1 like src; +create table src_multi1_n1 like src; -insert overwrite table src_multi1 select * from src; +insert overwrite table src_multi1_n1 select * from src; -explain extended select * from src_multi1; +explain extended select * from src_multi1_n1; -describe formatted src_multi1; +describe formatted src_multi1_n1; -drop table a; -drop table b; -create table a like src; -create table b like src; +drop table a_n12; +drop table b_n9; +create table a_n12 like src; +create table b_n9 like src; from src -insert overwrite table a select * -insert overwrite table b select *; +insert overwrite table a_n12 select * +insert overwrite table b_n9 select *; -describe formatted a; -describe formatted b; +describe formatted a_n12; +describe formatted b_n9; -drop table a; -drop table b; -create table a like src; -create table b like src; +drop table a_n12; +drop table b_n9; +create table a_n12 like src; +create table b_n9 like src; from src -insert overwrite table a select * -insert into table b select *; +insert overwrite table a_n12 select * +insert into table b_n9 select *; -describe formatted a; -describe formatted b; +describe formatted a_n12; +describe formatted b_n9; -drop table src_multi2; +drop table src_multi2_n2; -create table src_multi2 like src; +create table src_multi2_n2 like src; -insert overwrite table src_multi2 select 
subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key; +insert overwrite table src_multi2_n2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key; -describe formatted src_multi2; +describe formatted src_multi2_n2; -drop table nzhang_part14; +drop table nzhang_part14_n1; -create table if not exists nzhang_part14 (key string) +create table if not exists nzhang_part14_n1 (key string) partitioned by (value string); -desc formatted nzhang_part14; +desc formatted nzhang_part14_n1; -insert overwrite table nzhang_part14 partition(value) +insert overwrite table nzhang_part14_n1 partition(value) select key, value from ( - select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a_n12 union all - select * from (select 'k2' as key, '' as value from src limit 2)b + select * from (select 'k2' as key, '' as value from src limit 2)b_n9 union all - select * from (select 'k3' as key, ' ' as value from src limit 2)c + select * from (select 'k3' as key, ' ' as value from src limit 2)c_n2 ) T; -desc formatted nzhang_part14 partition (value=' '); +desc formatted nzhang_part14_n1 partition (value=' '); -explain select key from nzhang_part14; +explain select key from nzhang_part14_n1; -drop table src5; +drop table src5_n0; -create table src5 as select key, value from src limit 5; +create table src5_n0 as select key, value from src limit 5; -insert overwrite table nzhang_part14 partition(value) -select key, value from src5; +insert overwrite table nzhang_part14_n1 partition(value) +select key, value from src5_n0; -explain select key from nzhang_part14; +explain select key from nzhang_part14_n1; -create table alter5 ( col1 string ) partitioned by (dt string); +create table alter5_n0 ( col1 string ) partitioned by (dt string); -alter table alter5 add partition (dt='a') location 'parta'; +alter table alter5_n0 add partition (dt='a') location 'parta'; -describe formatted alter5 partition (dt='a'); +describe formatted alter5_n0 partition (dt='a'); -insert overwrite table alter5 partition (dt='a') select key from src ; +insert overwrite table alter5_n0 partition (dt='a') select key from src ; -describe formatted alter5 partition (dt='a'); +describe formatted alter5_n0 partition (dt='a'); -explain select * from alter5 where dt='a'; +explain select * from alter5_n0 where dt='a'; -drop table src_stat_part; -create table src_stat_part(key string, value string) partitioned by (partitionId int); +drop table src_stat_part_n0; +create table src_stat_part_n0(key string, value string) partitioned by (partitionId int); -insert overwrite table src_stat_part partition (partitionId=1) +insert overwrite table src_stat_part_n0 partition (partitionId=1) select * from src1 limit 5; -describe formatted src_stat_part PARTITION(partitionId=1); +describe formatted src_stat_part_n0 PARTITION(partitionId=1); -insert overwrite table src_stat_part partition (partitionId=2) +insert overwrite table src_stat_part_n0 partition (partitionId=2) select * from src1; -describe formatted src_stat_part PARTITION(partitionId=2); +describe formatted src_stat_part_n0 PARTITION(partitionId=2); -drop table srcbucket_mapjoin; -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -drop table tab_part; -CREATE TABLE tab_part (key int, value
string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -drop table srcbucket_mapjoin_part; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +drop table srcbucket_mapjoin_n6; +CREATE TABLE srcbucket_mapjoin_n6(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +drop table tab_part_n4; +CREATE TABLE tab_part_n4 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +drop table srcbucket_mapjoin_part_n7; +CREATE TABLE srcbucket_mapjoin_part_n7 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n6 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n6 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n7 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n7 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n7 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n7 partition(ds='2008-04-08'); -insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part; +insert overwrite table tab_part_n4 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n7; -describe formatted tab_part partition (ds='2008-04-08'); +describe formatted tab_part_n4 partition (ds='2008-04-08'); -CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE tab_n3(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab_n3 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n6; -describe formatted tab partition (ds='2008-04-08'); +describe formatted tab_n3 partition (ds='2008-04-08'); -drop table nzhang_part14; +drop table nzhang_part14_n1; -create table if not exists nzhang_part14 (key string, value string) +create table if not exists nzhang_part14_n1 (key string, value string) partitioned by (ds string, hr string); -describe formatted nzhang_part14; +describe formatted nzhang_part14_n1; -insert overwrite table nzhang_part14
partition(ds, hr) +insert overwrite table nzhang_part14_n1 partition(ds, hr) select key, value, ds, hr from ( - select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a_n12 union all - select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b_n9 union all - select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c_n2 ) T; -desc formatted nzhang_part14 partition(ds='1', hr='3'); +desc formatted nzhang_part14_n1 partition(ds='1', hr='3'); -INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +INSERT OVERWRITE TABLE nzhang_part14_n1 PARTITION (ds='2010-03-03', hr) SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10; -desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12'); +desc formatted nzhang_part14_n1 PARTITION(ds='2010-03-03', hr='12'); -drop table nzhang_part14; -create table if not exists nzhang_part14 (key string, value string) +drop table nzhang_part14_n1; +create table if not exists nzhang_part14_n1 (key string, value string) partitioned by (ds string, hr string); -INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr) +INSERT OVERWRITE TABLE nzhang_part14_n1 PARTITION (ds='2010-03-03', hr) SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10; -desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12'); +desc formatted nzhang_part14_n1 PARTITION(ds='2010-03-03', hr='12'); -drop table a; -create table a (key string, value string) +drop table a_n12; +create table a_n12 (key string, value string) partitioned by (ds string, hr string); -drop table b; -create table b (key string, value string) +drop table b_n9; +create table b_n9 (key string, value string) partitioned by (ds string, hr string); -drop table c; -create table c (key string, value string) +drop table c_n2; +create table c_n2 (key string, value string) partitioned by (ds string, hr string); FROM srcpart -INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 -INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 -INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0; - -explain select key from a; -explain select value from b; -explain select key from b; -explain select value from c; -explain select key from c; +INSERT OVERWRITE TABLE a_n12 PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 +INSERT OVERWRITE TABLE b_n9 PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 +INSERT OVERWRITE TABLE c_n2 PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0; + +explain select key from a_n12; +explain select value from b_n9; +explain select key from b_n9; +explain select value from c_n2; +explain select key from c_n2; diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_10.q b/ql/src/test/queries/clientpositive/autoColumnStats_10.q index bf166d8701..6bfefc4b7f 100644 --- a/ql/src/test/queries/clientpositive/autoColumnStats_10.q +++ b/ql/src/test/queries/clientpositive/autoColumnStats_10.q @@ -1,52 +1,52 @@ set hive.mapred.mode=nonstrict; 
set hive.stats.column.autogather=true; -drop table p; +drop table p_n1; -CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint); +CREATE TABLE p_n1(insert_num int, c1 tinyint, c2 smallint); -desc formatted p; +desc formatted p_n1; -insert into p values (1,22,333); +insert into p_n1 values (1,22,333); -desc formatted p; +desc formatted p_n1; -alter table p replace columns (insert_num int, c1 STRING, c2 STRING); +alter table p_n1 replace columns (insert_num int, c1 STRING, c2 STRING); -desc formatted p; +desc formatted p_n1; -desc formatted p insert_num; -desc formatted p c1; +desc formatted p_n1 insert_num; +desc formatted p_n1 c1; -insert into p values (2,11,111); +insert into p_n1 values (2,11,111); -desc formatted p; +desc formatted p_n1; -desc formatted p insert_num; -desc formatted p c1; +desc formatted p_n1 insert_num; +desc formatted p_n1 c1; set hive.stats.column.autogather=false; -drop table p; +drop table p_n1; -CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint); +CREATE TABLE p_n1(insert_num int, c1 tinyint, c2 smallint); -desc formatted p; +desc formatted p_n1; -insert into p values (1,22,333); +insert into p_n1 values (1,22,333); -desc formatted p; +desc formatted p_n1; -alter table p replace columns (insert_num int, c1 STRING, c2 STRING); +alter table p_n1 replace columns (insert_num int, c1 STRING, c2 STRING); -desc formatted p; +desc formatted p_n1; -desc formatted p insert_num; -desc formatted p c1; +desc formatted p_n1 insert_num; +desc formatted p_n1 c1; -insert into p values (2,11,111); +insert into p_n1 values (2,11,111); -desc formatted p; +desc formatted p_n1; -desc formatted p insert_num; -desc formatted p c1; +desc formatted p_n1 insert_num; +desc formatted p_n1 c1; diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_2.q b/ql/src/test/queries/clientpositive/autoColumnStats_2.q index 57266af910..65d3ae01c7 100644 --- a/ql/src/test/queries/clientpositive/autoColumnStats_2.q +++ b/ql/src/test/queries/clientpositive/autoColumnStats_2.q @@ -24,37 +24,37 @@ explain extended select * from src_multi1; describe formatted src_multi1; -drop table a; -drop table b; -create table a like src; -create table b like src; +drop table a_n3; +drop table b_n3; +create table a_n3 like src; +create table b_n3 like src; from src -insert into table a select * -insert into table b select *; +insert into table a_n3 select * +insert into table b_n3 select *; -describe formatted a key; -describe formatted b key; +describe formatted a_n3 key; +describe formatted b_n3 key; from src -insert overwrite table a select * -insert into table b select *; +insert overwrite table a_n3 select * +insert into table b_n3 select *; -describe formatted a; -describe formatted b; +describe formatted a_n3; +describe formatted b_n3; -describe formatted b key; -describe formatted b value; +describe formatted b_n3 key; +describe formatted b_n3 value; -insert into table b select NULL, NULL from src limit 10; +insert into table b_n3 select NULL, NULL from src limit 10; -describe formatted b key; -describe formatted b value; +describe formatted b_n3 key; +describe formatted b_n3 value; -insert into table b(value) select key+100000 from src limit 10; +insert into table b_n3(value) select key+100000 from src limit 10; -describe formatted b key; -describe formatted b value; +describe formatted b_n3 key; +describe formatted b_n3 value; drop table src_multi2; @@ -72,11 +72,11 @@ create table if not exists nzhang_part14 (key string) insert into table nzhang_part14 partition(value) select key, value from ( - 
select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a + select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a_n3 union all - select * from (select 'k2' as key, '' as value from src limit 2)b + select * from (select 'k2' as key, '' as value from src limit 2)b_n3 union all - select * from (select 'k3' as key, ' ' as value from src limit 2)c + select * from (select 'k3' as key, ' ' as value from src limit 2)c_n1 ) T; explain select key from nzhang_part14; @@ -95,29 +95,29 @@ drop table alter5; create table alter5 ( col1 string ) partitioned by (dt string); alter table alter5 add partition (dt='a'); describe formatted alter5 partition (dt='a'); insert into table alter5 partition (dt='a') select key from src ; describe formatted alter5 partition (dt='a'); explain select * from alter5 where dt='a'; drop table alter5; create table alter5 ( col1 string ) partitioned by (dt string); alter table alter5 add partition (dt='a') location 'parta'; describe formatted alter5 partition (dt='a'); insert into table alter5 partition (dt='a') select key from src ; describe formatted alter5 partition (dt='a'); explain select * from alter5 where dt='a'; drop table src_stat_part; @@ -133,31 +133,31 @@ select * from src1; describe formatted src_stat_part PARTITION(partitionId=2); -drop table srcbucket_mapjoin; -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -drop table tab_part; -CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -drop table srcbucket_mapjoin_part; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +drop table srcbucket_mapjoin_n2; +CREATE TABLE srcbucket_mapjoin_n2(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +drop table tab_part_n1; +CREATE TABLE tab_part_n1 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +drop table srcbucket_mapjoin_part_n2; +CREATE TABLE srcbucket_mapjoin_part_n2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n2 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n2 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath
'../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n2 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n2 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n2 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n2 partition(ds='2008-04-08'); -insert into table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part; +insert into table tab_part_n1 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n2; -describe formatted tab_part partition (ds='2008-04-08'); +describe formatted tab_part_n1 partition (ds='2008-04-08'); -CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert into table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE tab_n0(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert into table tab_n0 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n2; -describe formatted tab partition (ds='2008-04-08'); +describe formatted tab_n0 partition (ds='2008-04-08'); drop table nzhang_part14; @@ -168,11 +168,11 @@ describe formatted nzhang_part14; insert into table nzhang_part14 partition(ds, hr) select key, value, ds, hr from ( - select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a + select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a_n3 union all - select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b + select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b_n3 union all - select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c + select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c_n1 ) T; desc formatted nzhang_part14 partition(ds='1', hr='3'); @@ -193,27 +193,27 @@ SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10; desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12'); -drop table a; -create table a (key string, value string) +drop table a_n3; +create table a_n3 (key string, value string) partitioned by (ds string, hr string); -drop table b; -create table b (key string, value string) +drop table b_n3; +create table b_n3 (key string, value string) partitioned by (ds string, hr string); -drop table c; -create table c (key string, value string) +drop table c_n1; +create table c_n1 (key string, value string) partitioned by (ds string, hr string); FROM srcpart -INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 -INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 -INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0; - -explain select key
from a; -explain select value from b; -explain select key from b; -explain select value from c; -explain select key from c; +INSERT into TABLE a_n3 PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10 +INSERT into TABLE b_n3 PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11 +INSERT into TABLE c_n1 PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0; + +explain select key from a_n3; +explain select value from b_n3; +explain select key from b_n3; +explain select value from c_n1; +explain select key from c_n1; diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_3.q b/ql/src/test/queries/clientpositive/autoColumnStats_3.q index 63f3a94a8e..7a41911890 100644 --- a/ql/src/test/queries/clientpositive/autoColumnStats_3.q +++ b/ql/src/test/queries/clientpositive/autoColumnStats_3.q @@ -10,31 +10,31 @@ set hive.auto.convert.join.noconditionaltask.size=10000; set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ; set hive.optimize.bucketingsorting=false; -drop table src_multi1; +drop table src_multi1_n6; -create table src_multi1 like src; +create table src_multi1_n6 like src; -analyze table src_multi1 compute statistics for columns key; +analyze table src_multi1_n6 compute statistics for columns key; -describe formatted src_multi1; +describe formatted src_multi1_n6; set hive.stats.column.autogather=true; -insert into table src_multi1 select * from src; +insert into table src_multi1_n6 select * from src; -describe formatted src_multi1; +describe formatted src_multi1_n6; set hive.stats.column.autogather=false; -drop table nzhang_part14; +drop table nzhang_part14_n2; -create table if not exists nzhang_part14 (key string, value string) +create table if not exists nzhang_part14_n2 (key string, value string) partitioned by (ds string, hr string); -describe formatted nzhang_part14; +describe formatted nzhang_part14_n2; -insert into table nzhang_part14 partition(ds, hr) +insert into table nzhang_part14_n2 partition(ds, hr) select key, value, ds, hr from ( select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a union all @@ -43,17 +43,17 @@ select key, value, ds, hr from ( select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c ) T; -desc formatted nzhang_part14 partition(ds='1', hr='3'); +desc formatted nzhang_part14_n2 partition(ds='1', hr='3'); -analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value; +analyze table nzhang_part14_n2 partition(ds='1', hr='3') compute statistics for columns value; -desc formatted nzhang_part14 partition(ds='1', hr='3'); +desc formatted nzhang_part14_n2 partition(ds='1', hr='3'); -desc formatted nzhang_part14 partition(ds='2', hr='1'); +desc formatted nzhang_part14_n2 partition(ds='2', hr='1'); set hive.stats.column.autogather=true; -insert into table nzhang_part14 partition(ds, hr) +insert into table nzhang_part14_n2 partition(ds, hr) select key, value, ds, hr from ( select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a union all @@ -62,7 +62,7 @@ select key, value, ds, hr from ( select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c ) T; -desc formatted nzhang_part14 partition(ds='1', hr='3'); +desc formatted nzhang_part14_n2 partition(ds='1', hr='3'); -desc formatted nzhang_part14 
partition(ds='2', hr='1'); +desc formatted nzhang_part14_n2 partition(ds='2', hr='1'); diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_5.q b/ql/src/test/queries/clientpositive/autoColumnStats_5.q index 3da7b3810c..0cbbea2cda 100644 --- a/ql/src/test/queries/clientpositive/autoColumnStats_5.q +++ b/ql/src/test/queries/clientpositive/autoColumnStats_5.q @@ -14,35 +14,35 @@ set hive.exec.dynamic.partition.mode=nonstrict; -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT --- -CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE; +CREATE TABLE partitioned1_n1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE; -explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); +explain insert into table partitioned1_n1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); -insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); +insert into table partitioned1_n1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); -desc formatted partitioned1 partition(part=1); +desc formatted partitioned1_n1 partition(part=1); -desc formatted partitioned1 partition(part=1) a; +desc formatted partitioned1_n1 partition(part=1) a; -- Table-Non-Cascade ADD COLUMNS ... -alter table partitioned1 add columns(c int, d string); +alter table partitioned1_n1 add columns(c int, d string); -desc formatted partitioned1 partition(part=1); +desc formatted partitioned1_n1 partition(part=1); -explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); +explain insert into table partitioned1_n1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); -insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); +insert into table partitioned1_n1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); -desc formatted partitioned1 partition(part=2); +desc formatted partitioned1_n1 partition(part=2); -desc formatted partitioned1 partition(part=2) c; +desc formatted partitioned1_n1 partition(part=2) c; -explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred'); +explain insert into table partitioned1_n1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred'); -insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred'); +insert into table partitioned1_n1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred'); -desc formatted partitioned1 partition(part=1); +desc formatted partitioned1_n1 partition(part=1); -desc formatted partitioned1 partition(part=1) a; +desc formatted partitioned1_n1 partition(part=1) a; -desc formatted partitioned1 partition(part=1) c; +desc formatted partitioned1_n1 partition(part=1) c; diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_7.q b/ql/src/test/queries/clientpositive/autoColumnStats_7.q index 8622b2394e..78682839e3 100644 --- a/ql/src/test/queries/clientpositive/autoColumnStats_7.q +++ 
b/ql/src/test/queries/clientpositive/autoColumnStats_7.q @@ -5,16 +5,16 @@ set hive.map.aggr=false; set hive.groupby.skewindata=true; -- Taken from groupby2.q -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_g2_n5(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src; explain FROM src_temp -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1); +INSERT OVERWRITE TABLE dest_g2_n5 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1); FROM src_temp -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1); +INSERT OVERWRITE TABLE dest_g2_n5 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1); -SELECT dest_g2.* FROM dest_g2; +SELECT dest_g2_n5.* FROM dest_g2_n5; -DROP TABLE dest_g2; +DROP TABLE dest_g2_n5; DROP TABLE src_temp; diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_9.q b/ql/src/test/queries/clientpositive/autoColumnStats_9.q index 944da2c7fb..2b9eb82660 100644 --- a/ql/src/test/queries/clientpositive/autoColumnStats_9.q +++ b/ql/src/test/queries/clientpositive/autoColumnStats_9.q @@ -8,17 +8,17 @@ set hive.skewjoin.key = 2; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n23(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n23 SELECT src1.key, src2.value; FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n23 SELECT src1.key, src2.value; -desc formatted dest_j1; +desc formatted dest_j1_n23; -desc formatted dest_j1 key; +desc formatted dest_j1_n23 key; -desc formatted dest_j1 value; +desc formatted dest_j1_n23 value; diff --git a/ql/src/test/queries/clientpositive/auto_join1.q b/ql/src/test/queries/clientpositive/auto_join1.q index 58d31fb4fb..126ac36467 100644 --- a/ql/src/test/queries/clientpositive/auto_join1.q +++ b/ql/src/test/queries/clientpositive/auto_join1.q @@ -6,13 +6,13 @@ set hive.auto.convert.join =true; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n3(key INT, value STRING) STORED AS TEXTFILE; explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n3 SELECT src1.key, src2.value; FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n3 SELECT src1.key, src2.value; -SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1; \ No newline at end of file +SELECT sum(hash(dest_j1_n3.key,dest_j1_n3.value)) FROM dest_j1_n3; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/auto_join14.q b/ql/src/test/queries/clientpositive/auto_join14.q index 2ce606a3d8..11829cc23b 100644 --- 
a/ql/src/test/queries/clientpositive/auto_join14.q +++ b/ql/src/test/queries/clientpositive/auto_join14.q @@ -6,7 +6,7 @@ set hive.auto.convert.join = true; -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n83(c1 INT, c2 STRING) STORED AS TEXTFILE; set mapreduce.framework.name=yarn; set mapreduce.jobtracker.address=localhost:58; @@ -14,9 +14,9 @@ set hive.exec.mode.local.auto=true; explain FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; +INSERT OVERWRITE TABLE dest1_n83 SELECT src.key, srcpart.value; FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; +INSERT OVERWRITE TABLE dest1_n83 SELECT src.key, srcpart.value; -SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1; +SELECT sum(hash(dest1_n83.c1,dest1_n83.c2)) FROM dest1_n83; diff --git a/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q b/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q index 6825da2b40..0c6b900dbd 100644 --- a/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q +++ b/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q @@ -5,16 +5,16 @@ set hive.auto.convert.join = true; -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n74(c1 INT, c2 STRING) STORED AS TEXTFILE; set mapred.job.tracker=localhost:58; set hive.exec.mode.local.auto=true; explain FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; +INSERT OVERWRITE TABLE dest1_n74 SELECT src.key, srcpart.value; FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; +INSERT OVERWRITE TABLE dest1_n74 SELECT src.key, srcpart.value; -SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1; +SELECT sum(hash(dest1_n74.c1,dest1_n74.c2)) FROM dest1_n74; diff --git a/ql/src/test/queries/clientpositive/auto_join17.q b/ql/src/test/queries/clientpositive/auto_join17.q index 6b63513c17..04019b9cec 100644 --- a/ql/src/test/queries/clientpositive/auto_join17.q +++ b/ql/src/test/queries/clientpositive/auto_join17.q @@ -4,14 +4,14 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n41(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE; explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*; +INSERT OVERWRITE TABLE dest1_n41 SELECT src1.*, src2.*; FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*; +INSERT OVERWRITE TABLE dest1_n41 SELECT src1.*, src2.*; -SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1; \ No newline at end of file +SELECT sum(hash(dest1_n41.key1,dest1_n41.value1,dest1_n41.key2,dest1_n41.value2)) FROM dest1_n41; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/auto_join19.q b/ql/src/test/queries/clientpositive/auto_join19.q index f231e07c79..b75d47cb2e 100644 --- a/ql/src/test/queries/clientpositive/auto_join19.q +++ b/ql/src/test/queries/clientpositive/auto_join19.q @@ -4,17 +4,17 @@ set 
hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n18(key INT, value STRING) STORED AS TEXTFILE; explain FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n18 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11'); FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n18 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11'); -SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; +SELECT sum(hash(dest1_n18.key,dest1_n18.value)) FROM dest1_n18; diff --git a/ql/src/test/queries/clientpositive/auto_join19_inclause.q b/ql/src/test/queries/clientpositive/auto_join19_inclause.q index f8d16b5677..1a538973e8 100644 --- a/ql/src/test/queries/clientpositive/auto_join19_inclause.q +++ b/ql/src/test/queries/clientpositive/auto_join19_inclause.q @@ -5,17 +5,17 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; set hive.optimize.point.lookup.min=2; -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n11(key INT, value STRING) STORED AS TEXTFILE; explain FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n11 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11'); FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n11 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11'); -SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; +SELECT sum(hash(dest1_n11.key,dest1_n11.value)) FROM dest1_n11; diff --git a/ql/src/test/queries/clientpositive/auto_join24.q b/ql/src/test/queries/clientpositive/auto_join24.q index 32d5cf4233..9e4f7bc615 100644 --- a/ql/src/test/queries/clientpositive/auto_join24.q +++ b/ql/src/test/queries/clientpositive/auto_join24.q @@ -2,14 +2,14 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -create table tst1(key STRING, cnt INT); +create table tst1_n2(key STRING, cnt INT); -INSERT OVERWRITE TABLE tst1 +INSERT OVERWRITE TABLE tst1_n2 SELECT a.key, count(1) FROM src a group by a.key; explain -SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key; +SELECT sum(a.cnt) FROM tst1_n2 a JOIN tst1_n2 b ON a.key = b.key; -SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key; +SELECT sum(a.cnt) FROM tst1_n2 a JOIN tst1_n2 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_join25.q b/ql/src/test/queries/clientpositive/auto_join25.q index 8f3009516a..28823410f0 100644 --- a/ql/src/test/queries/clientpositive/auto_join25.q +++ b/ql/src/test/queries/clientpositive/auto_join25.q @@ -11,25 +11,25 @@ set hive.mapjoin.check.memory.rows = 2; set hive.auto.convert.join.noconditionaltask = false; -- This test tests the scenario when the mapper dies. 
So, create a conditional task for the mapjoin -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n62(key INT, value STRING) STORED AS TEXTFILE; FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n62 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11'); -SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; +SELECT sum(hash(dest1_n62.key,dest1_n62.value)) FROM dest1_n62; -CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j2_n0(key INT, value STRING) STORED AS TEXTFILE; FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest_j2_n0 SELECT src1.key, src3.value; -SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2; +SELECT sum(hash(dest_j2_n0.key,dest_j2_n0.value)) FROM dest_j2_n0; -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n5(key INT, value STRING) STORED AS TEXTFILE; FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n5 SELECT src1.key, src2.value; -SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1; +SELECT sum(hash(dest_j1_n5.key,dest_j1_n5.value)) FROM dest_j1_n5; diff --git a/ql/src/test/queries/clientpositive/auto_join3.q b/ql/src/test/queries/clientpositive/auto_join3.q index bd29c9a00b..e76861adf3 100644 --- a/ql/src/test/queries/clientpositive/auto_join3.q +++ b/ql/src/test/queries/clientpositive/auto_join3.q @@ -3,13 +3,13 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n140(key INT, value STRING) STORED AS TEXTFILE; explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest1_n140 SELECT src1.key, src3.value; FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest1_n140 SELECT src1.key, src3.value; -SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; \ No newline at end of file +SELECT sum(hash(dest1_n140.key,dest1_n140.value)) FROM dest1_n140; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/auto_join4.q b/ql/src/test/queries/clientpositive/auto_join4.q index 0a8848b6b7..40a82feb5a 100644 --- a/ql/src/test/queries/clientpositive/auto_join4.q +++ b/ql/src/test/queries/clientpositive/auto_join4.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n115(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; explain FROM ( @@ -18,7 +18,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n115 SELECT c.c1, c.c2, c.c3, c.c4; FROM ( FROM @@ -32,6 +32,6 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n115 SELECT c.c1, c.c2, c.c3, 
c.c4; -SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1; +SELECT sum(hash(dest1_n115.c1,dest1_n115.c2,dest1_n115.c3,dest1_n115.c4)) FROM dest1_n115; diff --git a/ql/src/test/queries/clientpositive/auto_join5.q b/ql/src/test/queries/clientpositive/auto_join5.q index 5967319e72..68d459d053 100644 --- a/ql/src/test/queries/clientpositive/auto_join5.q +++ b/ql/src/test/queries/clientpositive/auto_join5.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n64(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; explain FROM ( @@ -18,7 +18,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n64 SELECT c.c1, c.c2, c.c3, c.c4; FROM ( FROM @@ -32,6 +32,6 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n64 SELECT c.c1, c.c2, c.c3, c.c4; -SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1; +SELECT sum(hash(dest1_n64.c1,dest1_n64.c2,dest1_n64.c3,dest1_n64.c4)) FROM dest1_n64; diff --git a/ql/src/test/queries/clientpositive/auto_join6.q b/ql/src/test/queries/clientpositive/auto_join6.q index b356f55e77..d0a7c5fba4 100644 --- a/ql/src/test/queries/clientpositive/auto_join6.q +++ b/ql/src/test/queries/clientpositive/auto_join6.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n9(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; explain FROM ( @@ -18,7 +18,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n9 SELECT c.c1, c.c2, c.c3, c.c4; FROM ( FROM @@ -32,7 +32,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n9 SELECT c.c1, c.c2, c.c3, c.c4; -SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1; +SELECT sum(hash(dest1_n9.c1,dest1_n9.c2,dest1_n9.c3,dest1_n9.c4)) FROM dest1_n9; diff --git a/ql/src/test/queries/clientpositive/auto_join7.q b/ql/src/test/queries/clientpositive/auto_join7.q index bd13519b9b..af03f372f1 100644 --- a/ql/src/test/queries/clientpositive/auto_join7.q +++ b/ql/src/test/queries/clientpositive/auto_join7.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n147(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE; explain @@ -24,7 +24,7 @@ FROM ( ON (a.c1 = c.c5) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; +INSERT OVERWRITE TABLE dest1_n147 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; FROM ( FROM @@ -43,7 +43,7 @@ FROM ( ON (a.c1 = c.c5) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; +INSERT OVERWRITE TABLE dest1_n147 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; -SELECT 
sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4,dest1.c5,dest1.c6)) FROM dest1; +SELECT sum(hash(dest1_n147.c1,dest1_n147.c2,dest1_n147.c3,dest1_n147.c4,dest1_n147.c5,dest1_n147.c6)) FROM dest1_n147; diff --git a/ql/src/test/queries/clientpositive/auto_join8.q b/ql/src/test/queries/clientpositive/auto_join8.q index d9d3f911e3..b5d9f4c14e 100644 --- a/ql/src/test/queries/clientpositive/auto_join8.q +++ b/ql/src/test/queries/clientpositive/auto_join8.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n3(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; explain FROM ( @@ -18,7 +18,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL; +INSERT OVERWRITE TABLE dest1_n3 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL; FROM ( FROM @@ -32,6 +32,6 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL; +INSERT OVERWRITE TABLE dest1_n3 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL; -SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1; +SELECT sum(hash(dest1_n3.c1,dest1_n3.c2,dest1_n3.c3,dest1_n3.c4)) FROM dest1_n3; diff --git a/ql/src/test/queries/clientpositive/auto_join9.q b/ql/src/test/queries/clientpositive/auto_join9.q index 72676d4ce4..53e9504902 100644 --- a/ql/src/test/queries/clientpositive/auto_join9.q +++ b/ql/src/test/queries/clientpositive/auto_join9.q @@ -4,15 +4,15 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n142(key INT, value STRING) STORED AS TEXTFILE; explain FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; +INSERT OVERWRITE TABLE dest1_n142 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; +INSERT OVERWRITE TABLE dest1_n142 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; -SELECT sum(hash(dest1.key,dest1.value)) FROM dest1; +SELECT sum(hash(dest1_n142.key,dest1_n142.value)) FROM dest1_n142; diff --git a/ql/src/test/queries/clientpositive/auto_join_filters.q b/ql/src/test/queries/clientpositive/auto_join_filters.q index a44ffb302a..ea028f61c2 100644 --- a/ql/src/test/queries/clientpositive/auto_join_filters.q +++ b/ql/src/test/queries/clientpositive/auto_join_filters.q @@ -1,81 +1,81 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE myinput1(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM 
myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND 
b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; - - -CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -LOAD DATA LOCAL INPATH '../../data/files/in/000000_0' into table smb_input1; -LOAD DATA LOCAL INPATH '../../data/files/in/000001_0' into table smb_input1; -LOAD DATA LOCAL INPATH '../../data/files/in/000000_0' into table smb_input2; -LOAD DATA LOCAL INPATH '../../data/files/in/000001_0' into table smb_input2; +CREATE TABLE myinput1_n5(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1_n5; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 
50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT 
sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n5 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n5 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b RIGHT OUTER JOIN myinput1_n5 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n5 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n5 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b RIGHT OUTER JOIN myinput1_n5 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; + + +CREATE TABLE smb_input1_n0(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE smb_input2_n0(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +LOAD DATA LOCAL INPATH '../../data/files/in/000000_0' into table smb_input1_n0; +LOAD DATA LOCAL INPATH '../../data/files/in/000001_0' into table smb_input1_n0; +LOAD DATA LOCAL INPATH '../../data/files/in/000000_0' into table smb_input2_n0; +LOAD DATA LOCAL INPATH '../../data/files/in/000001_0' into table smb_input2_n0; SET hive.optimize.bucketmapjoin = true; SET hive.optimize.bucketmapjoin.sortedmerge = true; SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 
b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT 
sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT 
sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a JOIN myinput1_n5 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a FULL OUTER JOIN myinput1_n5 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n5 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT 
sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n5 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b RIGHT OUTER JOIN myinput1_n5 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n5 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n5 a RIGHT OUTER JOIN myinput1_n5 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n5 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n5 a LEFT OUTER JOIN myinput1_n5 b RIGHT OUTER JOIN myinput1_n5 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; diff --git a/ql/src/test/queries/clientpositive/auto_join_nulls.q b/ql/src/test/queries/clientpositive/auto_join_nulls.q index 279fd32e02..4a2b57b657 100644 --- a/ql/src/test/queries/clientpositive/auto_join_nulls.q +++ b/ql/src/test/queries/clientpositive/auto_join_nulls.q @@ -1,30 +1,30 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE myinput1(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE myinput1; +CREATE TABLE myinput1_n2(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE myinput1_n2; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM 
myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a JOIN myinput1_n2 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a LEFT OUTER JOIN myinput1_n2 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a RIGHT OUTER JOIN myinput1_n2 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a JOIN myinput1_n2 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a JOIN myinput1_n2 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a JOIN myinput1_n2 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a JOIN myinput1_n2 b ON a.value = b.value and a.key=b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a LEFT OUTER JOIN myinput1_n2 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a LEFT OUTER JOIN myinput1_n2 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a LEFT OUTER JOIN myinput1_n2 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a LEFT OUTER JOIN myinput1_n2 b ON a.key = b.key and a.value=b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a RIGHT OUTER JOIN myinput1_n2 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a RIGHT OUTER JOIN myinput1_n2 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a RIGHT OUTER JOIN myinput1_n2 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a RIGHT OUTER JOIN myinput1_n2 b ON a.key=b.key and a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a FULL OUTER JOIN myinput1_n2 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a FULL OUTER JOIN myinput1_n2 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a FULL OUTER JOIN myinput1_n2 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a FULL OUTER JOIN myinput1_n2 b ON a.value = b.value and a.key=b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value; +SELECT 
sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n2 a LEFT OUTER JOIN myinput1_n2 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1_n2 c ON (b.value=c.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n2 a RIGHT OUTER JOIN myinput1_n2 b ON (a.value=b.value) LEFT OUTER JOIN myinput1_n2 c ON (b.value=c.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n2 a LEFT OUTER JOIN myinput1_n2 b RIGHT OUTER JOIN myinput1_n2 c ON a.value = b.value and b.value = c.value; diff --git a/ql/src/test/queries/clientpositive/auto_join_stats.q b/ql/src/test/queries/clientpositive/auto_join_stats.q index 7720fdcace..8b377bf4ca 100644 --- a/ql/src/test/queries/clientpositive/auto_join_stats.q +++ b/ql/src/test/queries/clientpositive/auto_join_stats.q @@ -3,19 +3,19 @@ set hive.auto.convert.join = true; set hive.auto.convert.join.noconditionaltask.size=2660; --- Setting HTS(src2) < threshold < HTS(src2) + HTS(smalltable). +-- Setting HTS(src2) < threshold < HTS(src2) + HTS(smalltable_n0). -- This query plan should thus not try to combine the mapjoin into a single work. -create table smalltable(key string, value string) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table smalltable; -analyze table smalltable compute statistics; +create table smalltable_n0(key string, value string) stored as textfile; +load data local inpath '../../data/files/T1.txt' into table smalltable_n0; +analyze table smalltable_n0 compute statistics; -explain select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key); -select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key); +explain select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key); +select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key); -create table smalltable2(key string, value string) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table smalltable2; -analyze table smalltable compute statistics; +create table smalltable2_n0(key string, value string) stored as textfile; +load data local inpath '../../data/files/T1.txt' into table smalltable2_n0; +analyze table smalltable2_n0 compute statistics; -explain select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) JOIN smalltable2 ON (src1.key + src2.key = smalltable2.key); -select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) JOIN smalltable2 ON (src1.key + src2.key = smalltable2.key); \ No newline at end of file +explain select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) JOIN smalltable2_n0 ON (src1.key + src2.key = smalltable2_n0.key); +select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) JOIN smalltable2_n0 ON (src1.key + src2.key = smalltable2_n0.key); \ No newline at end of file diff --git 
a/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q index f6eb5c5a81..6472a3bd9b 100644 --- a/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q +++ b/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q @@ -257,8 +257,8 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a; -CREATE TABLE dest1(key int, value string); -CREATE TABLE dest2(key int, val1 string, val2 string); +CREATE TABLE dest1_n2(key int, value string); +CREATE TABLE dest2_n0(key int, val1 string, val2 string); -- The join is followed by a multi-table insert. It should be converted to -- a sort-merge join @@ -266,20 +266,20 @@ explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 -insert overwrite table dest1 select key, val1 -insert overwrite table dest2 select key, val1, val2; +insert overwrite table dest1_n2 select key, val1 +insert overwrite table dest2_n0 select key, val1, val2; from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 -insert overwrite table dest1 select key, val1 -insert overwrite table dest2 select key, val1, val2; +insert overwrite table dest1_n2 select key, val1 +insert overwrite table dest2_n0 select key, val1, val2; -select * from dest1; -select * from dest2; +select * from dest1_n2; +select * from dest2_n0; -DROP TABLE dest2; -CREATE TABLE dest2(key int, cnt int); +DROP TABLE dest2_n0; +CREATE TABLE dest2_n0(key int, cnt int); -- The join is followed by a multi-table insert, and one of the inserts involves a reducer. -- It should be converted to a sort-merge join @@ -287,14 +287,14 @@ explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 -insert overwrite table dest1 select key, val1 -insert overwrite table dest2 select key, count(*) group by key; +insert overwrite table dest1_n2 select key, val1 +insert overwrite table dest2_n0 select key, count(*) group by key; from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 -insert overwrite table dest1 select key, val1 -insert overwrite table dest2 select key, count(*) group by key; +insert overwrite table dest1_n2 select key, val1 +insert overwrite table dest2_n0 select key, count(*) group by key; -select * from dest1; -select * from dest2; +select * from dest1_n2; +select * from dest2_n0; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q index 6949f8cece..1fbe8f79cf 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q @@ -4,21 +4,21 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -- small 1 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +CREATE TABLE bucket_small_n1 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n1 partition(ds='2008-04-08'); 
+load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n1 partition(ds='2008-04-08'); -CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08'); +CREATE TABLE bucket_big_n1 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n1 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n1 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n1 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n1 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n1 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n1 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n1 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n1 partition(ds='2008-04-09'); set hive.auto.convert.join=true; set hive.auto.convert.sortmerge.join=true; @@ -31,12 +31,12 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado set hive.auto.convert.join.noconditionaltask.size=10; -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select count(*) FROM bucket_small_n1 a JOIN bucket_big_n1 b ON a.key = b.key; +select count(*) FROM bucket_small_n1 a JOIN bucket_big_n1 b ON a.key = b.key; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n1 a JOIN bucket_small_n1 b ON a.key = b.key; +select count(*) FROM bucket_big_n1 a JOIN bucket_small_n1 b ON a.key = b.key; set hive.auto.convert.sortmerge.join.to.mapjoin=true; -explain 
extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n1 a JOIN bucket_small_n1 b ON a.key = b.key; +select count(*) FROM bucket_big_n1 a JOIN bucket_small_n1 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q index 1cbda1f41f..1b15a74012 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q @@ -5,13 +5,13 @@ set hive.explain.user=false; set hive.exec.reducers.max = 1; -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl1_n5(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl2_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -insert overwrite table tbl1 +insert overwrite table tbl1_n5 select * from src where key < 10; -insert overwrite table tbl2 +insert overwrite table tbl2_n4 select * from src where key < 10; set hive.auto.convert.join=true; @@ -29,35 +29,35 @@ explain select count(*) from ( select * from - (select a.key as key, a.value as value from tbl1 a where key < 6 + (select a.key as key, a.value as value from tbl1_n5 a where key < 6 union all - select a.key as key, a.value as value from tbl1 a where key < 6 + select a.key as key, a.value as value from tbl1_n5 a where key < 6 ) usubq1 ) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n4 a where key < 6) subq2 on subq1.key = subq2.key; select count(*) from ( select * from - (select a.key as key, a.value as value from tbl1 a where key < 6 + (select a.key as key, a.value as value from tbl1_n5 a where key < 6 union all - select a.key as key, a.value as value from tbl1 a where key < 6 + select a.key as key, a.value as value from tbl1_n5 a where key < 6 ) usubq1 ) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n4 a where key < 6) subq2 on subq1.key = subq2.key; -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. 
explain select count(*) from - (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 + (select a.key as key, count(*) as value from tbl1_n5 a where key < 6 group by a.key) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n4 a where key < 6) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 + (select a.key as key, count(*) as value from tbl1_n5 a where key < 6 group by a.key) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n4 a where key < 6) subq2 on subq1.key = subq2.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q index f95d36cda9..7416eb0ec0 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q @@ -4,26 +4,26 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -- small 1 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (KEY) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08'); +CREATE TABLE bucket_small_n11 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (KEY) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n11 partition(ds='2008-04-08'); -CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY(KEY) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08'); +CREATE TABLE bucket_big_n11 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY(KEY) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n11 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath 
'../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n11 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n11 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n11 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n11 partition(ds='2008-04-09'); set hive.auto.convert.join=true; -- disable hash joins set hive.auto.convert.join.noconditionaltask.size=10; -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key; +select count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key; set hive.auto.convert.sortmerge.join=true; set hive.optimize.bucketmapjoin=true; @@ -32,13 +32,13 @@ set hive.optimize.bucketmapjoin.sortedmerge=true; -- The tables are only bucketed and not sorted, the join should not be converted -- Currently, a join is only converted to a sort-merge join without a hint; automatic conversion to -- bucketized mapjoin is not done -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key; +select count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key; set hive.cbo.enable=false; -- The join is converted to a bucketed mapjoin with a mapjoin hint -explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; -select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key; +select /*+ mapjoin(a) */ count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key; -- HIVE-7023 -explain extended select /*+ MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key; -select /*+ MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key; +explain extended select /*+ MAPJOIN(a,b) */ count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key JOIN bucket_big_n11 c ON a.key = c.key; +select /*+ MAPJOIN(a,b) */ count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key JOIN bucket_big_n11 c ON a.key = c.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q index 86ba6cfe29..d68efea393 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q @@ -4,21 +4,21 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -- small 1 part, 2 bucket & big 2
part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +CREATE TABLE bucket_small_n15 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n15 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n15 partition(ds='2008-04-08'); -CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08'); +CREATE TABLE bucket_big_n15 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n15 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n15 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n15 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n15 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n15 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n15 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n15 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n15 partition(ds='2008-04-09'); set hive.auto.convert.join=true; set hive.auto.convert.sortmerge.join=true; @@ -33,5 +33,5 @@ load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INT load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_medium partition(ds='2008-04-08'); load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_medium 
partition(ds='2008-04-08'); -explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key; +explain extended select count(*) FROM bucket_small_n15 a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big_n15 c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key; +select count(*) FROM bucket_small_n15 a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big_n15 c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q index 44d3452d77..bbd7afab07 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q @@ -5,14 +5,14 @@ set hive.exec.reducers.max = 1; -- SORT_QUERY_RESULTS -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl1_n2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl2_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -insert overwrite table tbl1 select * from src where key < 10; -insert overwrite table tbl2 select * from src where key < 10; +insert overwrite table tbl1_n2 select * from src where key < 10; +insert overwrite table tbl2_n1 select * from src where key < 10; -CREATE TABLE dest1(k1 int, k2 int); -CREATE TABLE dest2(k1 string, k2 string); +CREATE TABLE dest1_n20(k1 int, k2 int); +CREATE TABLE dest2_n4(k1 string, k2 string); set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; @@ -27,20 +27,20 @@ set hive.auto.convert.join.noconditionaltask.size=200; explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 - FROM tbl1 a JOIN tbl2 b + FROM tbl1_n2 a JOIN tbl2_n1 b ON a.key = b.key ) subq -INSERT OVERWRITE TABLE dest1 select key1, key2 -INSERT OVERWRITE TABLE dest2 select value1, value2; +INSERT OVERWRITE TABLE dest1_n20 select key1, key2 +INSERT OVERWRITE TABLE dest2_n4 select value1, value2; from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 - FROM tbl1 a JOIN tbl2 b + FROM tbl1_n2 a JOIN tbl2_n1 b ON a.key = b.key ) subq -INSERT OVERWRITE TABLE dest1 select key1, key2 -INSERT OVERWRITE TABLE dest2 select value1, value2; +INSERT OVERWRITE TABLE dest1_n20 select key1, key2 +INSERT OVERWRITE TABLE dest2_n4 select value1, value2; -select * from dest1; -select * from dest2; +select * from dest1_n20; +select * from dest2_n4; set hive.auto.convert.join.noconditionaltask=true; set hive.mapjoin.hybridgrace.minwbsize=100; @@ -50,37 +50,37 @@ set hive.mapjoin.hybridgrace.minnumpartitions=2; explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 - FROM tbl1 a JOIN tbl2 b + FROM tbl1_n2 a JOIN tbl2_n1 b ON a.key = b.key ) subq -INSERT OVERWRITE TABLE dest1 select key1, key2 -INSERT OVERWRITE TABLE dest2 select value1, value2; +INSERT OVERWRITE TABLE dest1_n20 select key1, key2 +INSERT OVERWRITE TABLE dest2_n4 select value1, value2; from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 - FROM tbl1 a JOIN tbl2 b + FROM tbl1_n2 a JOIN tbl2_n1 b ON a.key = b.key ) subq -INSERT OVERWRITE TABLE dest1 select key1, key2 -INSERT 
OVERWRITE TABLE dest2 select value1, value2; +INSERT OVERWRITE TABLE dest1_n20 select key1, key2 +INSERT OVERWRITE TABLE dest2_n4 select value1, value2; -select * from dest1; -select * from dest2; +select * from dest1_n20; +select * from dest2_n4; set hive.auto.convert.sortmerge.join.to.mapjoin=true; -- A SMB join followed by a multi-insert explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 - FROM tbl1 a JOIN tbl2 b + FROM tbl1_n2 a JOIN tbl2_n1 b ON a.key = b.key ) subq -INSERT OVERWRITE TABLE dest1 select key1, key2 -INSERT OVERWRITE TABLE dest2 select value1, value2; +INSERT OVERWRITE TABLE dest1_n20 select key1, key2 +INSERT OVERWRITE TABLE dest2_n4 select value1, value2; from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 - FROM tbl1 a JOIN tbl2 b + FROM tbl1_n2 a JOIN tbl2_n1 b ON a.key = b.key ) subq -INSERT OVERWRITE TABLE dest1 select key1, key2 -INSERT OVERWRITE TABLE dest2 select value1, value2; +INSERT OVERWRITE TABLE dest1_n20 select key1, key2 +INSERT OVERWRITE TABLE dest2_n4 select value1, value2; -select * from dest1; -select * from dest2; +select * from dest1_n20; +select * from dest2_n4; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q index b7c361ba82..1e21c92e59 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q @@ -5,11 +5,11 @@ set hive.explain.user=false; set hive.exec.reducers.max = 1; -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl1_n7(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl2_n6(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -insert overwrite table tbl1 select * from src where key < 20; -insert overwrite table tbl2 select * from src where key < 10; +insert overwrite table tbl1_n7 select * from src where key < 20; +insert overwrite table tbl2_n6 select * from src where key < 10; set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; @@ -21,14 +21,14 @@ set hive.auto.convert.join=true; -- disable hash joins set hive.auto.convert.join.noconditionaltask.size=10; --- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed +-- Since tbl1_n7 is the bigger table, tbl1_n7 Left Outer Join tbl2_n6 can be performed explain -select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key; -select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key; +select count(*) FROM tbl1_n7 a LEFT OUTER JOIN tbl2_n6 b ON a.key = b.key; +select count(*) FROM tbl1_n7 a LEFT OUTER JOIN tbl2_n6 b ON a.key = b.key; -insert overwrite table tbl2 select * from src where key < 200; +insert overwrite table tbl2_n6 select * from src where key < 200; --- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed +-- Since tbl2_n6 is the bigger table, tbl1_n7 Right Outer Join tbl2_n6 can be performed explain -select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key; -select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key; +select count(*) FROM tbl1_n7 a RIGHT OUTER JOIN tbl2_n6 b ON a.key = b.key; +select count(*) FROM tbl1_n7 a RIGHT OUTER JOIN tbl2_n6 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q index 4a2a2eab81..b3dd8e50d7 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q @@ -5,11 +5,11 @@ set hive.explain.user=false; set hive.exec.reducers.max = 1; -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl1_n11(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl2_n10(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -insert overwrite table tbl1 select * from src where key < 20; -insert overwrite table tbl2 select * from src where key < 10; +insert overwrite table tbl1_n11 select * from src where key < 20; +insert overwrite table tbl2_n10 select * from src where key < 10; set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; @@ -22,7 +22,7 @@ set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask.size=10; explain -select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key; +select count(*) FROM tbl1_n11 a LEFT OUTER JOIN tbl2_n10 b ON a.key = b.key; explain -select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key; +select count(*) FROM tbl1_n11 a RIGHT OUTER JOIN tbl2_n10 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q index 6d177aaaf0..0a72ddfc6b 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q @@ -12,14 +12,14 @@ set hive.optimize.bucketmapjoin.sortedmerge = true; -- SORT_QUERY_RESULTS -CREATE TABLE stage_bucket_big +CREATE TABLE stage_bucket_big_n17 ( key BIGINT, value STRING ) PARTITIONED BY (file_tag STRING); -CREATE TABLE bucket_big +CREATE TABLE bucket_big_n17 ( key BIGINT, value STRING @@ -28,14 +28,14 @@ PARTITIONED BY (day STRING, pri bigint) clustered by (key) sorted by (key) into 12 buckets stored as RCFile; -CREATE TABLE stage_bucket_small +CREATE TABLE stage_bucket_small_n17 ( key BIGINT, value string ) PARTITIONED BY (file_tag STRING); -CREATE TABLE bucket_small +CREATE TABLE bucket_small_n17 ( key BIGINT, value string @@ -44,32 +44,32 @@ PARTITIONED BY (pri bigint) clustered by (key) sorted by (key) into 12 buckets stored as RCFile; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' overwrite into table stage_bucket_small partition (file_tag='1'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' overwrite into table stage_bucket_small partition (file_tag='2'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' overwrite into table stage_bucket_small_n17 partition (file_tag='1'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' overwrite into table stage_bucket_small_n17 partition (file_tag='2'); -insert overwrite table bucket_small partition(pri) +insert overwrite table bucket_small_n17 partition(pri) select key, value, file_tag as pri from -stage_bucket_small +stage_bucket_small_n17 where file_tag between 1 and 2; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' overwrite into table stage_bucket_big partition (file_tag='1'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' overwrite into table 
stage_bucket_big_n17 partition (file_tag='1'); -insert overwrite table bucket_big partition(day,pri) +insert overwrite table bucket_big_n17 partition(day,pri) select key, value, 'day1' as day, 1 as pri - from stage_bucket_big + from stage_bucket_big_n17 where file_tag='1'; explain select a.key , a.value , b.value , 'day1' as day, 1 as pri from ( select key, value - from bucket_big where day='day1' ) a + from bucket_big_n17 where day='day1' ) a left outer join ( select key, value - from bucket_small + from bucket_small_n17 where pri between 1 and 2 ) b on (a.key = b.key) @@ -78,10 +78,10 @@ explain select a.key , a.value , b.value , 'day1' as day, 1 as pri select a.key , a.value , b.value , 'day1' as day, 1 as pri from ( select key, value - from bucket_big where day='day1' ) a + from bucket_big_n17 where day='day1' ) a left outer join ( select key, value - from bucket_small + from bucket_small_n17 where pri between 1 and 2 ) b on (a.key = b.key) @@ -94,10 +94,10 @@ set hive.auto.convert.sortmerge.join=true; explain select a.key , a.value , b.value , 'day1' as day, 1 as pri from ( select key, value - from bucket_big where day='day1' ) a + from bucket_big_n17 where day='day1' ) a left outer join ( select key, value - from bucket_small + from bucket_small_n17 where pri between 1 and 2 ) b on (a.key = b.key) @@ -106,21 +106,21 @@ on select a.key , a.value , b.value , 'day1' as day, 1 as pri from ( select key, value - from bucket_big where day='day1' ) a + from bucket_big_n17 where day='day1' ) a left outer join ( select key, value - from bucket_small + from bucket_small_n17 where pri between 1 and 2 ) b on (a.key = b.key) ; -drop table bucket_big; -drop table bucket_small; +drop table bucket_big_n17; +drop table bucket_small_n17; -- Test to make sure SMB is not kicked in when small table has more buckets than big table -CREATE TABLE bucket_big +CREATE TABLE bucket_big_n17 ( key BIGINT, value STRING @@ -129,7 +129,7 @@ PARTITIONED BY (day STRING, pri bigint) clustered by (key) sorted by (key) into 12 buckets stored as RCFile; -CREATE TABLE bucket_small +CREATE TABLE bucket_small_n17 ( key BIGINT, value string @@ -138,28 +138,28 @@ PARTITIONED BY (pri bigint) clustered by (key) sorted by (key) into 24 buckets stored as RCFile; -insert overwrite table bucket_small partition(pri) +insert overwrite table bucket_small_n17 partition(pri) select key, value, file_tag as pri from -stage_bucket_small +stage_bucket_small_n17 where file_tag between 1 and 2; -insert overwrite table bucket_big partition(day,pri) +insert overwrite table bucket_big_n17 partition(day,pri) select key, value, 'day1' as day, 1 as pri -from stage_bucket_big +from stage_bucket_big_n17 where file_tag='1'; explain select a.key , a.value , b.value , 'day1' as day, 1 as pri from ( select key, value - from bucket_big where day='day1' ) a + from bucket_big_n17 where day='day1' ) a left outer join ( select key, value - from bucket_small + from bucket_small_n17 where pri between 1 and 2 ) b on (a.key = b.key) @@ -168,10 +168,10 @@ explain select a.key , a.value , b.value , 'day1' as day, 1 as pri select a.key , a.value , b.value , 'day1' as day, 1 as pri from ( select key, value - from bucket_big where day='day1' ) a + from bucket_big_n17 where day='day1' ) a left outer join ( select key, value - from bucket_small + from bucket_small_n17 where pri between 1 and 2 ) b on (a.key = b.key) diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q index 
0413b51ef4..c6d5318f43 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q @@ -3,18 +3,18 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -- small 1 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08'); +CREATE TABLE bucket_small_n3 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n3 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n3 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small_n3 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small_n3 partition(ds='2008-04-08'); -CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08'); +CREATE TABLE bucket_big_n3 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n3 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n3 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n3 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n3 partition(ds='2008-04-09'); set hive.auto.convert.join=true; set hive.auto.convert.sortmerge.join=true; @@ -26,13 +26,13 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado set hive.auto.convert.join.noconditionaltask.size=10; -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n3 a JOIN bucket_small_n3 b ON 
a.key = b.key; +select count(*) FROM bucket_big_n3 a JOIN bucket_small_n3 b ON a.key = b.key; set hive.auto.convert.sortmerge.join.to.mapjoin=true; set hive.mapjoin.localtask.max.memory.usage = 0.0001; set hive.mapjoin.check.memory.rows = 2; -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n3 a JOIN bucket_small_n3 b ON a.key = b.key; +select count(*) FROM bucket_big_n3 a JOIN bucket_small_n3 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q index 7062b82f7b..f41b0970c9 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q @@ -3,18 +3,18 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08'); +CREATE TABLE bucket_small_n9 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n9 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n9 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n9 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n9 partition(ds='2008-04-09'); -CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08'); +CREATE TABLE bucket_big_n9 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n9 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n9 partition(ds='2008-04-08'); +load data local inpath 
'../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n9 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n9 partition(ds='2008-04-08'); set hive.auto.convert.join=true; set hive.auto.convert.sortmerge.join=true; @@ -26,12 +26,12 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado set hive.auto.convert.join.noconditionaltask.size=100; -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select count(*) FROM bucket_small_n9 a JOIN bucket_big_n9 b ON a.key = b.key; +select count(*) FROM bucket_small_n9 a JOIN bucket_big_n9 b ON a.key = b.key; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n9 a JOIN bucket_small_n9 b ON a.key = b.key; +select count(*) FROM bucket_big_n9 a JOIN bucket_small_n9 b ON a.key = b.key; set hive.auto.convert.sortmerge.join.to.mapjoin=true; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n9 a JOIN bucket_small_n9 b ON a.key = b.key; +select count(*) FROM bucket_big_n9 a JOIN bucket_small_n9 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q index 1a23fca585..e28e0ab4a0 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q @@ -3,20 +3,20 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08'); +CREATE TABLE bucket_small_n12 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n12 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n12 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small_n12 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small_n12 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small 
partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n12 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n12 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small_n12 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small_n12 partition(ds='2008-04-09'); -CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08'); +CREATE TABLE bucket_big_n12 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n12 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n12 partition(ds='2008-04-08'); set hive.auto.convert.join=true; set hive.auto.convert.sortmerge.join=true; @@ -28,12 +28,12 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado set hive.auto.convert.join.noconditionaltask.size=200; -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select count(*) FROM bucket_small_n12 a JOIN bucket_big_n12 b ON a.key = b.key; +select count(*) FROM bucket_small_n12 a JOIN bucket_big_n12 b ON a.key = b.key; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n12 a JOIN bucket_small_n12 b ON a.key = b.key; +select count(*) FROM bucket_big_n12 a JOIN bucket_small_n12 b ON a.key = b.key; set hive.auto.convert.sortmerge.join.to.mapjoin=true; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n12 a JOIN bucket_small_n12 b ON a.key = b.key; +select count(*) FROM bucket_big_n12 a JOIN bucket_small_n12 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q index 50303cf2e0..9550e21382 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q @@ -6,15 +6,15 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS 
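-- (Aside, a hypothetical probe assuming the bucket_small_n0/bucket_big_n0 tables
-- created below: because the big table is chosen by total input size rather than
-- by its position in the join, both orderings should compile to the same SMB plan:
--   explain select count(*) from bucket_small_n0 a join bucket_big_n0 b on a.key = b.key;
--   explain select count(*) from bucket_big_n0 a join bucket_small_n0 b on a.key = b.key;
-- )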
-CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small; -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small; -load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small; -load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small; +CREATE TABLE bucket_small_n0 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n0; +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n0; +load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small_n0; +load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small_n0; -CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big; -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big; +CREATE TABLE bucket_big_n0 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n0; +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n0; set hive.auto.convert.sortmerge.join=true; set hive.optimize.bucketmapjoin = true; @@ -25,13 +25,13 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado set hive.auto.convert.join.noconditionaltask.size=1; -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select count(*) FROM bucket_small_n0 a JOIN bucket_big_n0 b ON a.key = b.key; +select count(*) FROM bucket_small_n0 a JOIN bucket_big_n0 b ON a.key = b.key; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n0 a JOIN bucket_small_n0 b ON a.key = b.key; +select count(*) FROM bucket_big_n0 a JOIN bucket_small_n0 b ON a.key = b.key; set hive.auto.convert.sortmerge.join.to.mapjoin=true; set hive.auto.convert.join=true; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n0 a JOIN bucket_small_n0 b ON a.key = b.key; +select count(*) FROM bucket_big_n0 a JOIN bucket_small_n0 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q index 32745c90e3..551e5f7e47 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q @@ -5,14 +5,14 @@ set hive.mapred.mode=nonstrict; set hive.exec.reducers.max = 
1; set hive.explain.user=false; -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl1_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl2_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl3_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; CREATE TABLE tbl4(key int, value string) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -insert overwrite table tbl1 select * from src; -insert overwrite table tbl2 select * from src; -insert overwrite table tbl3 select * from src; +insert overwrite table tbl1_n4 select * from src; +insert overwrite table tbl2_n3 select * from src; +insert overwrite table tbl3_n0 select * from src; insert overwrite table tbl4 select * from src; set hive.auto.convert.sortmerge.join=true; @@ -30,47 +30,47 @@ set hive.auto.convert.sortmerge.join.to.mapjoin=false; -- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] -- c = TS[1]-RS[7]-JOIN[8] -- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src c on c.value = a.value; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src c on c.value = a.value; -- d = TS[0]-RS[7]-JOIN[8]-SEL[9]-FS[10] -- b = TS[1]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8] -- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src d on d.value = a.value; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src d on d.value = a.value; -- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] -- a = TS[1]-MAPJOIN[11] -- h = TS[2]-RS[7]-JOIN[8] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src h on h.value = a.value; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src h on h.value = a.value; -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src c on c.key = a.key; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src c on c.key = a.key; -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join tbl3_n0 c on c.key = a.key; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 
b ON a.key = b.key join tbl3_n0 c on c.key = a.key; -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join tbl4 c on c.value = a.value; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join tbl4 c on c.value = a.value; set hive.auto.convert.sortmerge.join.to.mapjoin=true; -- A SMB join is being followed by a regular join on a non-bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src c on c.value = a.value; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src c on c.value = a.value; -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src c on c.key = a.key; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join src c on c.key = a.key; -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join tbl3_n0 c on c.key = a.key; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join tbl3_n0 c on c.key = a.key; -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value; -select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value; +explain select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join tbl4 c on c.value = a.value; +select count(*) FROM tbl1_n4 a JOIN tbl2_n3 b ON a.key = b.key join tbl4 c on c.value = a.value; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q index 96f425086f..8c9dbacbba 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q @@ -3,23 +3,23 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath 
'../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08'); +CREATE TABLE bucket_small_n6 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n6 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n6 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small_n6 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small_n6 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n6 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n6 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000002_0' INTO TABLE bucket_small_n6 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000003_0' INTO TABLE bucket_small_n6 partition(ds='2008-04-09'); -CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08'); +CREATE TABLE bucket_big_n6 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n6 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n6 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n6 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n6 partition(ds='2008-04-09'); set hive.auto.convert.join=true; set hive.auto.convert.sortmerge.join=true; @@ -31,12 +31,12 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado set hive.auto.convert.join.noconditionaltask.size=10; -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a 
JOIN bucket_big b ON a.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select count(*) FROM bucket_small_n6 a JOIN bucket_big_n6 b ON a.key = b.key; +select count(*) FROM bucket_small_n6 a JOIN bucket_big_n6 b ON a.key = b.key; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n6 a JOIN bucket_small_n6 b ON a.key = b.key; +select count(*) FROM bucket_big_n6 a JOIN bucket_small_n6 b ON a.key = b.key; set hive.auto.convert.sortmerge.join.to.mapjoin=true; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n6 a JOIN bucket_small_n6 b ON a.key = b.key; +select count(*) FROM bucket_big_n6 a JOIN bucket_small_n6 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q index 6783d354d9..51647e86b2 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q @@ -6,23 +6,23 @@ set hive.exec.submitviachild=false; set hive.exec.submit.local.task.via.child=false; -- small 2 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08'); +CREATE TABLE bucket_small_n5 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n5 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000000_0' INTO TABLE bucket_small_n5 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/small/000001_0' INTO TABLE bucket_small_n5 partition(ds='2008-04-09'); -CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08'); +CREATE TABLE bucket_big_n5 (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n5 partition(ds='2008-04-08'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09'); -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n5 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n5 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n5 partition(ds='2008-04-09'); +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n5 partition(ds='2008-04-09'); set hive.auto.convert.join=true; set hive.auto.convert.sortmerge.join=true; @@ -34,16 +34,16 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado set hive.auto.convert.join.noconditionaltask.size=10; -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; -select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key; +explain extended select count(*) FROM bucket_small_n5 a JOIN bucket_big_n5 b ON a.key = b.key; +select count(*) FROM bucket_small_n5 a JOIN bucket_big_n5 b ON a.key = b.key; -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n5 a JOIN bucket_small_n5 b ON a.key = b.key; +select count(*) FROM bucket_big_n5 a JOIN bucket_small_n5 b ON a.key = b.key; set hive.auto.convert.sortmerge.join.to.mapjoin=true; set hive.mapjoin.localtask.max.memory.usage = 0.0001; set hive.mapjoin.check.memory.rows = 2; -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; -select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key; +explain extended select count(*) FROM bucket_big_n5 a JOIN bucket_small_n5 b ON a.key = b.key; +select count(*) FROM bucket_big_n5 a JOIN bucket_small_n5 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q index b342dea1d5..5696682fd5 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q @@ -9,13 +9,13 @@ set hive.exec.reducers.max = 1; -- SORT_QUERY_RESULTS -CREATE TABLE 
tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl1_n10(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl2_n9(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -insert overwrite table tbl1 +insert overwrite table tbl1_n10 select * from src where key < 10; -insert overwrite table tbl2 +insert overwrite table tbl2_n9 select * from src where key < 10; set hive.auto.convert.join=true; @@ -29,24 +29,24 @@ set hive.auto.convert.join.noconditionaltask.size=1; -- The join is being performed as part of sub-query. It should be converted to a sort-merge join explain select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1; select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1; -- The join is being performed as part of sub-query. It should be converted to a sort-merge join explain select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key; select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key; @@ -56,7 +56,7 @@ select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key ) subq2; @@ -65,7 +65,7 @@ select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key ) subq2; @@ -77,14 +77,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key; @@ -93,14 +93,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as 
key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key; @@ -109,15 +109,15 @@ on src1.key = src2.key; -- be converted to a sort-merge join. explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq2 on subq1.key = subq2.key; -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should @@ -127,22 +127,22 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n9 b on subq2.key = b.key; select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n9 b on subq2.key = b.key; -- Both the tables are nested sub-queries i.e. more than 1 level of sub-query. @@ -152,7 +152,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -160,7 +160,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -170,7 +170,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -178,7 +178,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -189,75 +189,75 @@ select count(*) from -- item, but that is not part of the join key.
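A note on the rule this block of hunks exercises (only the _n10/_n9 suffixes are new in this patch): sort-merge join conversion survives any sub-query that passes the bucketed/sorted join key through unchanged; transforming non-key columns is harmless, while any expression over the key itself disqualifies the plan. A condensed sketch of the eligible and ineligible pair, reusing the renamed tables:

-- eligible: the key is untouched, only the value column is transformed
explain select count(*) from
  (select a.key as key, concat(a.value, a.value) as value from tbl1_n10 a) subq1
join
  (select a.key as key, concat(a.value, a.value) as value from tbl2_n9 a) subq2
on subq1.key = subq2.key;

-- ineligible: key + 1 destroys the bucketing/sort property, so no sort-merge join
explain select count(*) from
  (select a.key + 1 as key from tbl1_n10 a) subq1
join
  (select a.key + 1 as key from tbl2_n9 a) subq2
on subq1.key = subq2.key;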
explain select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n10 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n9 a where key < 8) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n10 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n9 a where key < 8) subq2 on subq1.key = subq2.key; -- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized mapside -- join should be performed explain select count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n10 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n9 a) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n10 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n9 a) subq2 on subq1.key = subq2.key; -- The left table is a sub-query and the right table is not. -- It should be converted to a sort-merge join. explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 + join tbl2_n9 a on subq1.key = a.key; select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 + join tbl2_n9 a on subq1.key = a.key; -- The right table is a sub-query and the left table is not. -- It should be converted to a sort-merge join. explain -select count(*) from tbl1 a +select count(*) from tbl1_n10 a join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq1 on a.key = subq1.key; -select count(*) from tbl1 a +select count(*) from tbl1_n10 a join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq1 on a.key = subq1.key; -- There are more than 2 inputs to the join, all of them being sub-queries.
-- It should be converted to a sort-merge join explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq2 on (subq1.key = subq2.key) join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq3 on (subq1.key = subq3.key); select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq2 on subq1.key = subq2.key join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq3 on (subq1.key = subq3.key); -- The join is being performed on a nested sub-query, and an aggregation is performed after that. @@ -268,11 +268,11 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n9 b on subq2.key = b.key) a; select count(*) from ( @@ -280,11 +280,11 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n9 b on subq2.key = b.key) a; set hive.auto.convert.sortmerge.join.to.mapjoin=true; @@ -292,24 +292,24 @@ set hive.auto.convert.sortmerge.join.to.mapjoin=true; -- The join is being performed as part of sub-query. It should be converted to a sort-merge join explain select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1; select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1; -- The join is being performed as part of sub-query.
It should be converted to a sort-merge join explain select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key; select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key; @@ -319,7 +319,7 @@ select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key ) subq2; @@ -328,7 +328,7 @@ select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key ) subq2; @@ -340,14 +340,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key; @@ -356,14 +356,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n10 a join tbl2_n9 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key; @@ -372,15 +372,15 @@ on src1.key = src2.key; -- be converted to a sort-merge join. explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq2 on subq1.key = subq2.key; -- The subquery itself is being joined. 
Since the sub-query only contains selects and filters, it should @@ -390,22 +390,22 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n9 b on subq2.key = b.key; select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n9 b on subq2.key = b.key; -- Both the tables are nested sub-queries i.e. more than 1 level of sub-query. @@ -415,7 +415,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -423,7 +423,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -433,7 +433,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -441,7 +441,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -452,60 +452,60 @@ select count(*) from -- item, but that is not part of the join key. explain select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n10 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n9 a where key < 8) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n10 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n9 a where key < 8) subq2 on subq1.key = subq2.key; -- The left table is a sub-query and the right table is not. -- It should be converted to a sort-merge join. explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 + join tbl2_n9 a on subq1.key = a.key; select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 + join tbl2_n9 a on subq1.key = a.key; -- The right table is a sub-query and the left table is not. -- It should be converted to a sort-merge join.
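For anyone regenerating the golden .out files for these cases: when conversion succeeds, the plan for the explain statements contains a Sorted Merge Bucket Map Join Operator in place of a common Join Operator (the exact operator label varies a little across execution engines, so treat this as indicative rather than exact). For example:

explain select count(*) from
  (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1
  join tbl2_n9 a on subq1.key = a.key;
-- expected when conversion succeeds: Sorted Merge Bucket Map Join Operator
-- when the key was modified upstream: a plain Join Operator (common join)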
explain -select count(*) from tbl1 a +select count(*) from tbl1_n10 a join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq1 on a.key = subq1.key; -select count(*) from tbl1 a +select count(*) from tbl1_n10 a join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq1 on a.key = subq1.key; -- There are more than 2 inputs to the join, all of them being sub-queries. -- It should be converted to a sort-merge join explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq2 on (subq1.key = subq2.key) join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq3 on (subq1.key = subq3.key); select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n10 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq2 on subq1.key = subq2.key join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n9 a where key < 6) subq3 on (subq1.key = subq3.key); -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
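As set a few hunks back, the remaining cases in this file run with hive.auto.convert.sortmerge.join.to.mapjoin=true, which allows an eligible sort-merge join to be upgraded to a map join when the small side fits in memory, with the sort-merge plan as the fallback; that is why every query shape in this file appears twice in the golden output. A minimal sketch of the toggle, reusing the renamed tables:

set hive.auto.convert.sortmerge.join.to.mapjoin=false;
explain select count(*) from tbl1_n10 a join tbl2_n9 b on a.key = b.key;  -- sort-merge join

set hive.auto.convert.sortmerge.join.to.mapjoin=true;
explain select count(*) from tbl1_n10 a join tbl2_n9 b on a.key = b.key;  -- may plan as a map join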
@@ -516,11 +516,11 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n9 b on subq2.key = b.key) a; select count(*) from ( @@ -528,9 +528,9 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n10 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n9 b on subq2.key = b.key) a; diff --git a/ql/src/test/queries/clientpositive/avro_add_column.q b/ql/src/test/queries/clientpositive/avro_add_column.q index 17dc2ff6b2..fde9ab8053 100644 --- a/ql/src/test/queries/clientpositive/avro_add_column.q +++ b/ql/src/test/queries/clientpositive/avro_add_column.q @@ -1,17 +1,17 @@ -- SORT_QUERY_RESULTS -- verify that we can actually read avro files -CREATE TABLE doctors ( +CREATE TABLE doctors_n0 ( number int, first_name string) STORED AS AVRO; -DESCRIBE doctors; +DESCRIBE doctors_n0; -ALTER TABLE doctors ADD COLUMNS (last_name string); +ALTER TABLE doctors_n0 ADD COLUMNS (last_name string); -DESCRIBE doctors; +DESCRIBE doctors_n0; -LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors; +LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n0; -SELECT * FROM doctors; \ No newline at end of file +SELECT * FROM doctors_n0; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/avro_add_column3.q b/ql/src/test/queries/clientpositive/avro_add_column3.q index 939cb4fe4b..692a86d26a 100644 --- a/ql/src/test/queries/clientpositive/avro_add_column3.q +++ b/ql/src/test/queries/clientpositive/avro_add_column3.q @@ -3,24 +3,24 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -- verify that we can actually read avro files -CREATE TABLE doctors ( +CREATE TABLE doctors_n3 ( number int, first_name string, last_name string) STORED AS AVRO; -LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors; +LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n3; -CREATE TABLE doctors_copy ( +CREATE TABLE doctors_copy_n0 ( number int, first_name string) PARTITIONED BY (part int) STORED AS AVRO; -INSERT INTO TABLE doctors_copy PARTITION(part=1) SELECT number, first_name FROM doctors; +INSERT INTO TABLE doctors_copy_n0 PARTITION(part=1) SELECT number, first_name FROM doctors_n3; -ALTER TABLE doctors_copy ADD COLUMNS (last_name string); +ALTER TABLE doctors_copy_n0 ADD COLUMNS (last_name string); -DESCRIBE doctors_copy; +DESCRIBE doctors_copy_n0; -SELECT * FROM doctors_copy; \ No newline at end of file +SELECT * FROM doctors_copy_n0; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q b/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q index 4b03ffff8c..279d05d2e3 100644 --- a/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q +++ b/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q @@ -1,6 +1,6 @@ -- verify schema changes introduced in avro.schema.literal/url sync with HMS if ALTER TABLE UPDATE COLUMNS is called -CREATE TABLE avro_extschema_literal +CREATE TABLE avro_extschema_literal_n1 STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", @@ -11,9 +11,9 @@ CREATE TABLE avro_extschema_literal { "name":"first_name", "type":"string" }, { "name":"last_name", "type":"string" } ] }'); 
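Context for what this test asserts: for Avro tables the authoritative schema is the avro.schema.literal (or avro.schema.url) property, so DESCRIBE reflects a property change immediately, while the column descriptors stored in the metastore only catch up when ALTER TABLE ... UPDATE COLUMNS is run. A condensed sketch of the sequence being verified, using the renamed table:

-- swap in a one-column schema; DESCRIBE now shows only newCol
ALTER TABLE avro_extschema_literal_n1 SET TBLPROPERTIES ('avro.schema.literal'='{
  "namespace": "org.apache.hive", "name": "ext_schema", "type": "record",
  "fields": [ { "name":"newCol", "type":"int" } ] }');
DESCRIBE avro_extschema_literal_n1;

-- persist the serde-derived schema into the metastore column descriptors too
ALTER TABLE avro_extschema_literal_n1 UPDATE COLUMNS CASCADE;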
-DESCRIBE avro_extschema_literal; +DESCRIBE avro_extschema_literal_n1; -ALTER TABLE avro_extschema_literal SET +ALTER TABLE avro_extschema_literal_n1 SET TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", "name": "ext_schema", @@ -21,12 +21,12 @@ ALTER TABLE avro_extschema_literal SET "fields": [ { "name":"newCol", "type":"int" } ] }'); -DESCRIBE avro_extschema_literal; +DESCRIBE avro_extschema_literal_n1; -ALTER TABLE avro_extschema_literal UNSET TBLPROPERTIES ('avro.schema.literal'); -DESCRIBE avro_extschema_literal; +ALTER TABLE avro_extschema_literal_n1 UNSET TBLPROPERTIES ('avro.schema.literal'); +DESCRIBE avro_extschema_literal_n1; -ALTER TABLE avro_extschema_literal SET +ALTER TABLE avro_extschema_literal_n1 SET TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", "name": "ext_schema", @@ -34,36 +34,36 @@ ALTER TABLE avro_extschema_literal SET "fields": [ { "name":"newCol", "type":"int" } ] }'); -ALTER TABLE avro_extschema_literal UPDATE COLUMNS CASCADE; -DESCRIBE avro_extschema_literal; +ALTER TABLE avro_extschema_literal_n1 UPDATE COLUMNS CASCADE; +DESCRIBE avro_extschema_literal_n1; -ALTER TABLE avro_extschema_literal UNSET TBLPROPERTIES ('avro.schema.literal'); -DESCRIBE avro_extschema_literal; +ALTER TABLE avro_extschema_literal_n1 UNSET TBLPROPERTIES ('avro.schema.literal'); +DESCRIBE avro_extschema_literal_n1; dfs -cp ${system:hive.root}data/files/grad.avsc ${system:test.tmp.dir}/; dfs -cp ${system:hive.root}data/files/grad2.avsc ${system:test.tmp.dir}/; -CREATE TABLE avro_extschema_url +CREATE TABLE avro_extschema_url_n1 STORED AS AVRO TBLPROPERTIES ('avro.schema.url'='${system:test.tmp.dir}/grad.avsc'); -DESCRIBE avro_extschema_url; +DESCRIBE avro_extschema_url_n1; -ALTER TABLE avro_extschema_url SET +ALTER TABLE avro_extschema_url_n1 SET TBLPROPERTIES ('avro.schema.url'='${system:test.tmp.dir}/grad2.avsc'); -DESCRIBE avro_extschema_url; +DESCRIBE avro_extschema_url_n1; -ALTER TABLE avro_extschema_url UNSET TBLPROPERTIES ('avro.schema.url'); -DESCRIBE avro_extschema_url; +ALTER TABLE avro_extschema_url_n1 UNSET TBLPROPERTIES ('avro.schema.url'); +DESCRIBE avro_extschema_url_n1; -ALTER TABLE avro_extschema_url SET +ALTER TABLE avro_extschema_url_n1 SET TBLPROPERTIES ('avro.schema.url'='${system:test.tmp.dir}/grad2.avsc'); -ALTER TABLE avro_extschema_url UPDATE COLUMNS CASCADE; -DESCRIBE avro_extschema_url; +ALTER TABLE avro_extschema_url_n1 UPDATE COLUMNS CASCADE; +DESCRIBE avro_extschema_url_n1; -ALTER TABLE avro_extschema_url UNSET TBLPROPERTIES ('avro.schema.url'); -DESCRIBE avro_extschema_url; +ALTER TABLE avro_extschema_url_n1 UNSET TBLPROPERTIES ('avro.schema.url'); +DESCRIBE avro_extschema_url_n1; diff --git a/ql/src/test/queries/clientpositive/avro_compression_enabled.q b/ql/src/test/queries/clientpositive/avro_compression_enabled.q index e0fa2b6356..cd8095a0f0 100644 --- a/ql/src/test/queries/clientpositive/avro_compression_enabled.q +++ b/ql/src/test/queries/clientpositive/avro_compression_enabled.q @@ -1,7 +1,7 @@ --! 
qt:dataset:src -- verify that new joins bring in correct schemas (including evolved schemas) -CREATE TABLE doctors4 +CREATE TABLE doctors4_n0 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -36,7 +36,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ ] }'); -LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4; +LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n0; set hive.exec.compress.output=true; diff --git a/ql/src/test/queries/clientpositive/avro_compression_enabled_native.q b/ql/src/test/queries/clientpositive/avro_compression_enabled_native.q index b168880ff1..8c9e47d12c 100644 --- a/ql/src/test/queries/clientpositive/avro_compression_enabled_native.q +++ b/ql/src/test/queries/clientpositive/avro_compression_enabled_native.q @@ -1,14 +1,14 @@ --! qt:dataset:src -- verify that new joins bring in correct schemas (including evolved schemas) -CREATE TABLE doctors4 ( +CREATE TABLE doctors4_n1 ( number int, first_name string, last_name string, extra_field string) STORED AS AVRO; -LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4; +LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n1; set hive.exec.compress.output=true; diff --git a/ql/src/test/queries/clientpositive/avro_decimal.q b/ql/src/test/queries/clientpositive/avro_decimal.q index 7dce0a6be5..3b25097c6f 100644 --- a/ql/src/test/queries/clientpositive/avro_decimal.q +++ b/ql/src/test/queries/clientpositive/avro_decimal.q @@ -1,15 +1,15 @@ -DROP TABLE IF EXISTS `dec`; +DROP TABLE IF EXISTS `dec_n0`; -CREATE TABLE `dec`(name string, value decimal(8,4)); +CREATE TABLE `dec_n0`(name string, value decimal(8,4)); -LOAD DATA LOCAL INPATH '../../data/files/dec.txt' into TABLE `dec`; +LOAD DATA LOCAL INPATH '../../data/files/dec.txt' into TABLE `dec_n0`; -ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value; -DESC FORMATTED `dec` value; +ANALYZE TABLE `dec_n0` COMPUTE STATISTICS FOR COLUMNS value; +DESC FORMATTED `dec_n0` value; -DROP TABLE IF EXISTS avro_dec; +DROP TABLE IF EXISTS avro_dec_n0; -CREATE TABLE `avro_dec`( +CREATE TABLE `avro_dec_n0`( `name` string COMMENT 'from deserializer', `value` decimal(5,2) COMMENT 'from deserializer') COMMENT 'just drop the schema right into the HQL' @@ -24,15 +24,15 @@ TBLPROPERTIES ( 'avro.schema.literal'='{\"namespace\":\"com.howdy\",\"name\":\"some_schema\",\"type\":\"record\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"value\",\"type\":{\"type\":\"bytes\",\"logicalType\":\"decimal\",\"precision\":5,\"scale\":2}}]}' ); -DESC avro_dec; +DESC avro_dec_n0; -INSERT OVERWRITE TABLE avro_dec select name, value from `dec`; +INSERT OVERWRITE TABLE avro_dec_n0 select name, value from `dec_n0`; -SELECT * FROM avro_dec; +SELECT * FROM avro_dec_n0; -DROP TABLE IF EXISTS avro_dec1; +DROP TABLE IF EXISTS avro_dec1_n0; -CREATE TABLE `avro_dec1`( +CREATE TABLE `avro_dec1_n0`( `name` string COMMENT 'from deserializer', `value` decimal(4,1) COMMENT 'from deserializer') COMMENT 'just drop the schema right into the HQL' @@ -47,12 +47,12 @@ TBLPROPERTIES ( 'avro.schema.literal'='{\"namespace\":\"com.howdy\",\"name\":\"some_schema\",\"type\":\"record\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"value\",\"type\":{\"type\":\"bytes\",\"logicalType\":\"decimal\",\"precision\":4,\"scale\":1}}]}' ); -DESC avro_dec1; +DESC avro_dec1_n0; -LOAD DATA LOCAL INPATH '../../data/files/dec.avro' into TABLE avro_dec1; +LOAD DATA LOCAL INPATH '../../data/files/dec.avro' into 
TABLE avro_dec1_n0; -select value from avro_dec1; +select value from avro_dec1_n0; -DROP TABLE `dec`; -DROP TABLE avro_dec; -DROP TABLE avro_dec1; +DROP TABLE `dec_n0`; +DROP TABLE avro_dec_n0; +DROP TABLE avro_dec1_n0; diff --git a/ql/src/test/queries/clientpositive/avro_joins.q b/ql/src/test/queries/clientpositive/avro_joins.q index 9d06881e49..8ada7e284d 100644 --- a/ql/src/test/queries/clientpositive/avro_joins.q +++ b/ql/src/test/queries/clientpositive/avro_joins.q @@ -2,7 +2,7 @@ -- verify that new joins bring in correct schemas (including evolved schemas) -CREATE TABLE doctors4 +CREATE TABLE doctors4_n2 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -37,11 +37,11 @@ TBLPROPERTIES ('avro.schema.literal'='{ ] }'); -DESCRIBE doctors4; +DESCRIBE doctors4_n2; -LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4; +LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n2; -CREATE TABLE episodes +CREATE TABLE episodes_n3 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -49,7 +49,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n3", "type": "record", "fields": [ { @@ -70,11 +70,11 @@ TBLPROPERTIES ('avro.schema.literal'='{ ] }'); -DESCRIBE episodes; +DESCRIBE episodes_n3; -LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes; +LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n3; SELECT e.title, e.air_date, d.first_name, d.last_name, d.extra_field, e.air_date -FROM doctors4 d JOIN episodes e ON (d.number=e.doctor); +FROM doctors4_n2 d JOIN episodes_n3 e ON (d.number=e.doctor); diff --git a/ql/src/test/queries/clientpositive/avro_joins_native.q b/ql/src/test/queries/clientpositive/avro_joins_native.q index ca95c1601e..4a4be0fd98 100644 --- a/ql/src/test/queries/clientpositive/avro_joins_native.q +++ b/ql/src/test/queries/clientpositive/avro_joins_native.q @@ -12,15 +12,15 @@ DESCRIBE doctors4; LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4; -CREATE TABLE episodes ( +CREATE TABLE episodes_n1 ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") STORED AS AVRO; -DESCRIBE episodes; +DESCRIBE episodes_n1; -LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes; +LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n1; SELECT e.title, e.air_date, d.first_name, d.last_name, e.air_date -FROM doctors4 d JOIN episodes e ON (d.number=e.doctor); \ No newline at end of file +FROM doctors4 d JOIN episodes_n1 e ON (d.number=e.doctor); \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/avro_native.q b/ql/src/test/queries/clientpositive/avro_native.q index 61d1bc603f..9d25ab6b6b 100644 --- a/ql/src/test/queries/clientpositive/avro_native.q +++ b/ql/src/test/queries/clientpositive/avro_native.q @@ -1,14 +1,14 @@ -- SORT_QUERY_RESULTS -- verify that we can actually read avro files -CREATE TABLE doctors ( +CREATE TABLE doctors_n4 ( number int, first_name string, last_name string) STORED AS AVRO; -DESCRIBE doctors; +DESCRIBE doctors_n4; -LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors; +LOAD DATA LOCAL INPATH 
'../../data/files/doctors.avro' INTO TABLE doctors_n4; -SELECT * FROM doctors; \ No newline at end of file +SELECT * FROM doctors_n4; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/avro_partitioned.q b/ql/src/test/queries/clientpositive/avro_partitioned.q index d475dff056..bcc19e8a01 100644 --- a/ql/src/test/queries/clientpositive/avro_partitioned.q +++ b/ql/src/test/queries/clientpositive/avro_partitioned.q @@ -1,7 +1,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -- Verify that table scans work with partitioned Avro tables -CREATE TABLE episodes +CREATE TABLE episodes_n2 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -9,7 +9,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n2", "type": "record", "fields": [ { @@ -30,9 +30,9 @@ TBLPROPERTIES ('avro.schema.literal'='{ ] }'); -LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes; +LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n2; -CREATE TABLE episodes_partitioned +CREATE TABLE episodes_partitioned_n1 PARTITIONED BY (doctor_pt INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' @@ -41,7 +41,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n2", "type": "record", "fields": [ { @@ -63,19 +63,19 @@ TBLPROPERTIES ('avro.schema.literal'='{ }'); SET hive.exec.dynamic.partition.mode=nonstrict; -INSERT OVERWRITE TABLE episodes_partitioned PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes; +INSERT OVERWRITE TABLE episodes_partitioned_n1 PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n2; -SELECT * FROM episodes_partitioned WHERE doctor_pt > 6; +SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt > 6; -- Verify that Fetch works in addition to Map -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5; +SELECT * FROM episodes_partitioned_n1 ORDER BY air_date LIMIT 5; -- Fetch w/filter to specific partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6; +SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt = 6; -- Fetch w/non-existent partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5; +SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt = 7 LIMIT 5; -- Alter table add an empty partition -ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7); -SELECT COUNT(*) FROM episodes_partitioned; +ALTER TABLE episodes_partitioned_n1 ADD PARTITION (doctor_pt=7); +SELECT COUNT(*) FROM episodes_partitioned_n1; -- Verify that reading from an Avro partition works -- even if it has an old schema relative to the current table level schema @@ -87,7 +87,7 @@ ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH SERDEPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n2", "type": "record", "fields": [ { @@ -112,14 +112,14 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'; -- Insert 
data into a partition -INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes; +INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n2; set hive.metastore.disallow.incompatible.col.type.changes=false; -- Evolve the table schema by adding new array field "cast_and_crew" ALTER TABLE episodes_partitioned_serdeproperties SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH SERDEPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n2", "type": "record", "fields": [ { diff --git a/ql/src/test/queries/clientpositive/avro_sanity_test.q b/ql/src/test/queries/clientpositive/avro_sanity_test.q index 59e914b04d..4a51e67ecc 100644 --- a/ql/src/test/queries/clientpositive/avro_sanity_test.q +++ b/ql/src/test/queries/clientpositive/avro_sanity_test.q @@ -1,7 +1,7 @@ -- SORT_QUERY_RESULTS -- verify that we can actually read avro files -CREATE TABLE doctors +CREATE TABLE doctors_n1 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -9,7 +9,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "doctors", + "name": "doctors_n1", "type": "record", "fields": [ { @@ -30,9 +30,9 @@ TBLPROPERTIES ('avro.schema.literal'='{ ] }'); -DESCRIBE doctors; +DESCRIBE doctors_n1; -LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors; +LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n1; -SELECT * FROM doctors; +SELECT * FROM doctors_n1; diff --git a/ql/src/test/queries/clientpositive/avro_schema_evolution_native.q b/ql/src/test/queries/clientpositive/avro_schema_evolution_native.q index efeb167965..7ba35b922f 100644 --- a/ql/src/test/queries/clientpositive/avro_schema_evolution_native.q +++ b/ql/src/test/queries/clientpositive/avro_schema_evolution_native.q @@ -2,15 +2,15 @@ set hive.cli.print.header=true; set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -- Verify that table scans work with partitioned Avro tables -CREATE TABLE episodes ( +CREATE TABLE episodes_n0 ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") STORED AS AVRO; -LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes; +LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n0; -CREATE TABLE episodes_partitioned ( +CREATE TABLE episodes_partitioned_n0 ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") @@ -18,16 +18,16 @@ PARTITIONED BY (doctor_pt INT) STORED AS AVRO; SET hive.exec.dynamic.partition.mode=nonstrict; -INSERT OVERWRITE TABLE episodes_partitioned PARTITION (doctor_pt) -SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes; -DESCRIBE FORMATTED episodes_partitioned; +INSERT OVERWRITE TABLE episodes_partitioned_n0 PARTITION (doctor_pt) +SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n0; +DESCRIBE FORMATTED episodes_partitioned_n0; -ALTER TABLE episodes_partitioned +ALTER TABLE episodes_partitioned_n0 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH SERDEPROPERTIES 
('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n0", "type": "record", "fields": [ { @@ -53,29 +53,29 @@ SERDEPROPERTIES ('avro.schema.literal'='{ } ] }'); -DESCRIBE FORMATTED episodes_partitioned; +DESCRIBE FORMATTED episodes_partitioned_n0; set hive.fetch.task.conversion=more; EXPLAIN -SELECT * FROM episodes_partitioned WHERE doctor_pt > 6; +SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6; -SELECT * FROM episodes_partitioned WHERE doctor_pt > 6; +SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6; -- Verify that Fetch works in addition to Map -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5; +SELECT * FROM episodes_partitioned_n0 ORDER BY air_date LIMIT 5; -- Fetch w/filter to specific partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6; +SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 6; -- Fetch w/non-existent partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5; +SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 7 LIMIT 5; set hive.fetch.task.conversion=none; EXPLAIN -SELECT * FROM episodes_partitioned WHERE doctor_pt > 6; +SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6; -SELECT * FROM episodes_partitioned WHERE doctor_pt > 6; +SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6; -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5; -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6; -SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5; \ No newline at end of file +SELECT * FROM episodes_partitioned_n0 ORDER BY air_date LIMIT 5; +SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 6; +SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 7 LIMIT 5; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/avro_tableproperty_optimize.q b/ql/src/test/queries/clientpositive/avro_tableproperty_optimize.q index e6b75c68a6..0f576075a8 100644 --- a/ql/src/test/queries/clientpositive/avro_tableproperty_optimize.q +++ b/ql/src/test/queries/clientpositive/avro_tableproperty_optimize.q @@ -3,7 +3,7 @@ SET hive.optimize.update.table.properties.from.serde=true; dfs -cp ${system:hive.root}data/files/table1.avsc ${system:test.tmp.dir}/; -CREATE TABLE avro_extschema_literal +CREATE TABLE avro_extschema_literal_n0 STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", @@ -14,18 +14,18 @@ TBLPROPERTIES ('avro.schema.literal'='{ { "name":"col2", "type":"long" }, { "name":"col3", "type":"string" } ] }'); -INSERT INTO TABLE avro_extschema_literal VALUES('s1', 1, 's2'); +INSERT INTO TABLE avro_extschema_literal_n0 VALUES('s1', 1, 's2'); -DESCRIBE EXTENDED avro_extschema_literal; -SELECT * FROM avro_extschema_literal; +DESCRIBE EXTENDED avro_extschema_literal_n0; +SELECT * FROM avro_extschema_literal_n0; -CREATE TABLE avro_extschema_url +CREATE TABLE avro_extschema_url_n0 STORED AS AVRO TBLPROPERTIES ('avro.schema.url'='${system:test.tmp.dir}/table1.avsc'); -INSERT INTO TABLE avro_extschema_url VALUES('s1', 1, 's2'); +INSERT INTO TABLE avro_extschema_url_n0 VALUES('s1', 1, 's2'); -DESCRIBE EXTENDED avro_extschema_url; -SELECT * FROM avro_extschema_url; +DESCRIBE EXTENDED avro_extschema_url_n0; +SELECT * FROM avro_extschema_url_n0; CREATE TABLE avro_extschema_literal1 ROW FORMAT SERDE diff --git a/ql/src/test/queries/clientpositive/avrocountemptytbl.q b/ql/src/test/queries/clientpositive/avrocountemptytbl.q index 98c3a456d7..163272addd 100644 --- 
a/ql/src/test/queries/clientpositive/avrocountemptytbl.q +++ b/ql/src/test/queries/clientpositive/avrocountemptytbl.q @@ -1,11 +1,11 @@ --! qt:dataset:src -- SORT_QUERY_RESULTS -drop table if exists emptyavro; -create table emptyavro (a int) stored as avro; -select count(*) from emptyavro; -insert into emptyavro select count(*) from emptyavro; -select count(*) from emptyavro; -insert into emptyavro select key from src where key = 100 limit 1; -select * from emptyavro; +drop table if exists emptyavro_n0; +create table emptyavro_n0 (a int) stored as avro; +select count(*) from emptyavro_n0; +insert into emptyavro_n0 select count(*) from emptyavro_n0; +select count(*) from emptyavro_n0; +insert into emptyavro_n0 select key from src where key = 100 limit 1; +select * from emptyavro_n0; diff --git a/ql/src/test/queries/clientpositive/avrotblsjoin.q b/ql/src/test/queries/clientpositive/avrotblsjoin.q index 8c1f08478d..dedd13799f 100644 --- a/ql/src/test/queries/clientpositive/avrotblsjoin.q +++ b/ql/src/test/queries/clientpositive/avrotblsjoin.q @@ -1,10 +1,10 @@ -drop table if exists table1; +drop table if exists table1_n1; drop table if exists table1_1; dfs -cp ${system:hive.root}data/files/table1.avsc ${system:test.tmp.dir}/; dfs -cp ${system:hive.root}data/files/table1_1.avsc ${system:test.tmp.dir}/; -create table table1 +create table table1_n1 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS INPUTFORMAT @@ -20,9 +20,9 @@ create table table1_1 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.url'='${system:test.tmp.dir}/table1_1.avsc'); -insert into table1 values ("1", "2", "3"); +insert into table1_n1 values ("1", "2", "3"); insert into table1_1 values (1, "2"); set hive.auto.convert.join=false; set hive.strict.checks.type.safety=false; set hive.mapred.mode=nonstrict; -select table1.col1, table1_1.* from table1 join table1_1 on table1.col1=table1_1.col1 where table1_1.col1="1"; +select table1_n1.col1, table1_1.* from table1_n1 join table1_1 on table1_n1.col1=table1_1.col1 where table1_1.col1="1"; diff --git a/ql/src/test/queries/clientpositive/ba_table1.q b/ql/src/test/queries/clientpositive/ba_table1.q index cce7ccb447..158d9875ec 100644 --- a/ql/src/test/queries/clientpositive/ba_table1.q +++ b/ql/src/test/queries/clientpositive/ba_table1.q @@ -1,16 +1,16 @@ --! qt:dataset:src -- SORT_QUERY_RESULTS -drop table ba_test; +drop table ba_test_n4; -- This query tests a) binary type works correctly in grammar b) string can be cast into binary c) binary can be stored in a table d) binary data can be loaded back again and queried e) order-by on a binary key -create table ba_test (ba_key binary, ba_val binary) ; +create table ba_test_n4 (ba_key binary, ba_val binary) ; -describe extended ba_test; +describe extended ba_test_n4; -from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary); +from src insert overwrite table ba_test_n4 select cast (src.key as binary), cast (src.value as binary); -select * from ba_test tablesample (10 rows); +select * from ba_test_n4 tablesample (10 rows); -drop table ba_test; +drop table ba_test_n4; diff --git a/ql/src/test/queries/clientpositive/ba_table2.q b/ql/src/test/queries/clientpositive/ba_table2.q index 71689f0a67..570f846111 100644 --- a/ql/src/test/queries/clientpositive/ba_table2.q +++ b/ql/src/test/queries/clientpositive/ba_table2.q @@ -1,19 +1,19 @@ --!
qt:dataset:src -- SORT_QUERY_RESULTS -drop table ba_test; +drop table ba_test_n3; -- All the tests in ba_table1.q + using LazyBinarySerde instead of LazySimpleSerde -create table ba_test (ba_key binary, ba_val binary) ; -alter table ba_test set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe'; +create table ba_test_n3 (ba_key binary, ba_val binary) ; +alter table ba_test_n3 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe'; -describe extended ba_test; +describe extended ba_test_n3; -from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary); +from src insert overwrite table ba_test_n3 select cast (src.key as binary), cast (src.value as binary); -select * from ba_test tablesample (10 rows); +select * from ba_test_n3 tablesample (10 rows); -drop table ba_test; +drop table ba_test_n3; diff --git a/ql/src/test/queries/clientpositive/ba_table3.q b/ql/src/test/queries/clientpositive/ba_table3.q index 0423327560..6271eb745b 100644 --- a/ql/src/test/queries/clientpositive/ba_table3.q +++ b/ql/src/test/queries/clientpositive/ba_table3.q @@ -1,14 +1,14 @@ --! qt:dataset:src -drop table ba_test; +drop table ba_test_n2; -- All the tests of ba_table1.q + test for a group-by and aggregation on a binary key. -create table ba_test (ba_key binary, ba_val binary) ; +create table ba_test_n2 (ba_key binary, ba_val binary) ; -from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary); +from src insert overwrite table ba_test_n2 select cast (src.key as binary), cast (src.value as binary); -select ba_test.ba_key, count(ba_test.ba_val) from ba_test group by ba_test.ba_key order by ba_key limit 5; +select ba_test_n2.ba_key, count(ba_test_n2.ba_val) from ba_test_n2 group by ba_test_n2.ba_key order by ba_key limit 5; -drop table ba_test; +drop table ba_test_n2; diff --git a/ql/src/test/queries/clientpositive/ba_table_udfs.q b/ql/src/test/queries/clientpositive/ba_table_udfs.q index 676e06a6ef..73c01aa08b 100644 --- a/ql/src/test/queries/clientpositive/ba_table_udfs.q +++ b/ql/src/test/queries/clientpositive/ba_table_udfs.q @@ -3,11 +3,11 @@ USE default; -CREATE TABLE dest1(bytes1 BINARY, +CREATE TABLE dest1_n146(bytes1 BINARY, bytes2 BINARY, string STRING); -FROM src INSERT OVERWRITE TABLE dest1 +FROM src INSERT OVERWRITE TABLE dest1_n146 SELECT CAST(key AS BINARY), CAST(value AS BINARY), @@ -16,7 +16,7 @@ ORDER BY value LIMIT 100; --Add in a null row for good measure -INSERT INTO TABLE dest1 SELECT NULL, NULL, NULL FROM dest1 LIMIT 1; +INSERT INTO TABLE dest1_n146 SELECT NULL, NULL, NULL FROM dest1_n146 LIMIT 1; -- this query tests all the udfs provided to work with binary types @@ -35,4 +35,4 @@ SELECT UNBASE64(BASE64(bytes1)), HEX(ENCODE(string, 'US-ASCII')), DECODE(ENCODE(string, 'US-ASCII'), 'US-ASCII') -FROM dest1; +FROM dest1_n146; diff --git a/ql/src/test/queries/clientpositive/binary_output_format.q b/ql/src/test/queries/clientpositive/binary_output_format.q index 9ead0cc82f..3c067ace60 100644 --- a/ql/src/test/queries/clientpositive/binary_output_format.q +++ b/ql/src/test/queries/clientpositive/binary_output_format.q @@ -1,6 +1,6 @@ --!
qt:dataset:src -- Create a table with binary output format -CREATE TABLE dest1(mydata STRING) +CREATE TABLE dest1_n109(mydata STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( @@ -12,7 +12,7 @@ STORED AS -- Insert into that table using transform EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n109 SELECT TRANSFORM(*) USING 'cat' AS mydata STRING @@ -24,7 +24,7 @@ SELECT TRANSFORM(*) RECORDREADER 'org.apache.hadoop.hive.ql.exec.BinaryRecordReader' FROM src; -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n109 SELECT TRANSFORM(*) USING 'cat' AS mydata STRING @@ -37,4 +37,4 @@ SELECT TRANSFORM(*) FROM src; -- Test the result -SELECT * FROM dest1; +SELECT * FROM dest1_n109; diff --git a/ql/src/test/queries/clientpositive/binary_table_bincolserde.q b/ql/src/test/queries/clientpositive/binary_table_bincolserde.q index 7b99e40b9c..f842a5d2c1 100644 --- a/ql/src/test/queries/clientpositive/binary_table_bincolserde.q +++ b/ql/src/test/queries/clientpositive/binary_table_bincolserde.q @@ -1,17 +1,17 @@ --! qt:dataset:src -drop table ba_test; +drop table ba_test_n1; -- Tests everything in binary_table_colserde.q + uses LazyBinaryColumnarSerde -create table ba_test (ba_key binary, ba_val binary) stored as rcfile; -alter table ba_test set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; +create table ba_test_n1 (ba_key binary, ba_val binary) stored as rcfile; +alter table ba_test_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; -describe extended ba_test; +describe extended ba_test_n1; -from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary); +from src insert overwrite table ba_test_n1 select cast (src.key as binary), cast (src.value as binary); -select ba_key, ba_val from ba_test order by ba_key limit 10; +select ba_key, ba_val from ba_test_n1 order by ba_key limit 10; -drop table ba_test; +drop table ba_test_n1; diff --git a/ql/src/test/queries/clientpositive/binary_table_colserde.q b/ql/src/test/queries/clientpositive/binary_table_colserde.q index 71a4b22290..ecd259aa20 100644 --- a/ql/src/test/queries/clientpositive/binary_table_colserde.q +++ b/ql/src/test/queries/clientpositive/binary_table_colserde.q @@ -1,17 +1,17 @@ --! qt:dataset:src -drop table ba_test; +drop table ba_test_n0; -- Everything in ba_table1.q + columnar serde in RCFILE. 
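A note on the pattern shared by binary_table_bincolserde.q and binary_table_colserde.q: the serde is swapped immediately after CREATE and before any rows are written, because ALTER TABLE ... SET SERDE only changes table metadata and never rewrites existing files. Condensed, with the serde class being the one variable under test (a sketch, using the renamed ba_test_n0):

create table ba_test_n0 (ba_key binary, ba_val binary) stored as rcfile;
-- swap the serde while the table is still empty
alter table ba_test_n0 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-- rows written after the swap are encoded by the new serde
from src insert overwrite table ba_test_n0
select cast(src.key as binary), cast(src.value as binary);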
-create table ba_test (ba_key binary, ba_val binary) stored as rcfile;
-alter table ba_test set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+create table ba_test_n0 (ba_key binary, ba_val binary) stored as rcfile;
+alter table ba_test_n0 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-describe extended ba_test;
+describe extended ba_test_n0;
-from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary);
+from src insert overwrite table ba_test_n0 select cast (src.key as binary), cast (src.value as binary);
-select ba_key, ba_val from ba_test order by ba_key limit 10;
+select ba_key, ba_val from ba_test_n0 order by ba_key limit 10;
-drop table ba_test;
+drop table ba_test_n0;
diff --git a/ql/src/test/queries/clientpositive/binarysortable_1.q b/ql/src/test/queries/clientpositive/binarysortable_1.q
index 39c1d25e73..63d7424127 100644
--- a/ql/src/test/queries/clientpositive/binarysortable_1.q
+++ b/ql/src/test/queries/clientpositive/binarysortable_1.q
@@ -1,21 +1,21 @@
-CREATE TABLE mytable(key STRING, value STRING)
+CREATE TABLE mytable_n0(key STRING, value STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '9'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable;
+LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable_n0;
EXPLAIN
SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value
FROM (
SELECT key, sum(value) as value
- FROM mytable
+ FROM mytable_n0
GROUP BY key
) a;
SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value
FROM (
SELECT key, sum(value) as value
- FROM mytable
+ FROM mytable_n0
GROUP BY key
) a;
diff --git a/ql/src/test/queries/clientpositive/bucket_if_with_path_filter.q b/ql/src/test/queries/clientpositive/bucket_if_with_path_filter.q
index 956a61f7bd..cc4fc47c9c 100644
--- a/ql/src/test/queries/clientpositive/bucket_if_with_path_filter.q
+++ b/ql/src/test/queries/clientpositive/bucket_if_with_path_filter.q
@@ -1,13 +1,13 @@
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/bmjpathfilter;
-create table t1 (dt string) location '${system:test.tmp.dir}/bmjpathfilter/t1';
-Create table t2 (dt string) stored as orc;
+create table t1_n99 (dt string) location '${system:test.tmp.dir}/bmjpathfilter/t1';
+Create table t2_n62 (dt string) stored as orc;
dfs -touchz ${system:test.tmp.dir}/bmjpathfilter/t1/_SUCCESS;
SET hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
SET hive.optimize.bucketmapjoin=true;
-SELECT /*+ MAPJOIN(b) */ a.dt FROM t1 a JOIN t2 b ON (a.dt = b.dt);
+SELECT /*+ MAPJOIN(b) */ a.dt FROM t1_n99 a JOIN t2_n62 b ON (a.dt = b.dt);
SET hive.optimize.bucketmapjoin=false;
set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_1.q b/ql/src/test/queries/clientpositive/bucket_map_join_1.q
index f170a71435..58aa265082 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_1.q
@@ -1,18 +1,18 @@
set hive.strict.checks.bucketing=false;
-drop table table1;
-drop table table2;
+drop table table1_n9;
+drop table table2_n5;
;
-create table table1(key string, value string) clustered by (key, value)
+create table table1_n9(key string, value string) clustered by (key, value)
sorted by (key, value) into 1 BUCKETS
stored as textfile;
-create table table2(key string, value string) clustered by (value, key)
+create table table2_n5(key string, value string) clustered by (value, key)
sorted by (value, key) into 1 BUCKETS
stored as textfile;
-load data local inpath '../../data/files/SortCol1Col2/000000_0' overwrite into table table1;
-load data local inpath '../../data/files/SortCol2Col1/000000_0' overwrite into table table2;
+load data local inpath '../../data/files/SortCol1Col2/000000_0' overwrite into table table1_n9;
+load data local inpath '../../data/files/SortCol2Col1/000000_0' overwrite into table table2_n5;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -22,7 +22,7 @@ set hive.cbo.enable=false;
-- Neither bucketed map-join, nor sort-merge join should be performed
explain extended
-select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value;
+select /*+ mapjoin(b) */ count(*) from table1_n9 a join table2_n5 b on a.key=b.key and a.value=b.value;
-select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value;
+select /*+ mapjoin(b) */ count(*) from table1_n9 a join table2_n5 b on a.key=b.key and a.value=b.value;
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_spark1.q b/ql/src/test/queries/clientpositive/bucket_map_join_spark1.q
index 16b555d649..778a46861a 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_spark1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_spark1.q
@@ -2,56 +2,56 @@ SET hive.vectorized.execution.enabled=false;
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_n19 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08');
-CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_2_n16 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08');
-create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_1_n7 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_2_n7 (key bigint , value1 bigint, value2 bigint);
set hive.auto.convert.join = true;
set hive.optimize.bucketmapjoin = true;
-create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
+create table bucketmapjoin_tmp_result_n9 (key string , value1 string, value2 string);
explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n9
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b
on a.key=b.key and b.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n9
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b
on a.key=b.key and b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n9;
+insert overwrite table bucketmapjoin_hash_result_1_n7
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n9;
set hive.optimize.bucketmapjoin = false;
explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n9
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b
on a.key=b.key and b.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n9
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b
on a.key=b.key and b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n9;
+insert overwrite table bucketmapjoin_hash_result_1_n7
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n9;
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_spark2.q b/ql/src/test/queries/clientpositive/bucket_map_join_spark2.q
index d8b65615f4..9e3222dec9 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_spark2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_spark2.q
@@ -2,54 +2,54 @@ SET hive.vectorized.execution.enabled=false;
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_n12 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08');
-CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_2_n10 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n10 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n10 partition(ds='2008-04-08');
-create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_1_n3 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_2_n3 (key bigint , value1 bigint, value2 bigint);
set hive.auto.convert.join = true;
set hive.optimize.bucketmapjoin = true;
-create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
+create table bucketmapjoin_tmp_result_n5 (key string , value1 string, value2 string);
explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n5
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b
on a.key=b.key and b.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n5
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b
on a.key=b.key and b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n5;
+insert overwrite table bucketmapjoin_hash_result_1_n3
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n5;
set hive.optimize.bucketmapjoin = true;
explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n5
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b
on a.key=b.key and b.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n5
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b
on a.key=b.key and b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n5;
+insert overwrite table bucketmapjoin_hash_result_1_n3
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n5;
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_spark3.q b/ql/src/test/queries/clientpositive/bucket_map_join_spark3.q
index 2adb777952..d68a4a8702 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_spark3.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_spark3.q
@@ -2,54 +2,54 @@ SET hive.vectorized.execution.enabled=false;
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_n4 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_n4 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_n4 partition(ds='2008-04-08');
-CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_2_n3 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08');
-create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_1_n1 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_2_n1 (key bigint , value1 bigint, value2 bigint);
set hive.auto.convert.join = true;
set hive.optimize.bucketmapjoin = true;
-create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
+create table bucketmapjoin_tmp_result_n1 (key string , value1 string, value2 string);
explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n1
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
on a.key=b.key and b.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n1
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
on a.key=b.key and b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n1;
+insert overwrite table bucketmapjoin_hash_result_1_n1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n1;
set hive.optimize.bucketmapjoin = false;
explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n1
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
on a.key=b.key and b.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n1
select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
on a.key=b.key and b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n1;
+insert overwrite table bucketmapjoin_hash_result_1_n1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n1;
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q b/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
index f3dc097304..ffc64e360b 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
@@ -7,14 +7,14 @@ set hive.exec.reducers.max = 1;
-- SORT_QUERY_RESULTS
-CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl1_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl2_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
CREATE TABLE tbl3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-insert overwrite table tbl1
+insert overwrite table tbl1_n0
select * from src where key < 10;
-insert overwrite table tbl2
+insert overwrite table tbl2_n0
select * from src where key < 10;
insert overwrite table tbl3
@@ -30,16 +30,16 @@ set hive.optimize.bucketmapjoin = true;
explain extended
select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value;
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value;
select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value;
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value;
set hive.optimize.bucketmapjoin = false;
explain extended
select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value;
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value;
select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value;
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value;
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
index 5622ce2cc6..c93a372073 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
@@ -6,46 +6,46 @@ set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=10000;
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n14(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n9 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_n15 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n14 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n15 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n15 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n15 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n15 partition(ds='2008-04-08');
set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n9 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n15;
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n8(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n8 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n14;
-analyze table srcbucket_mapjoin compute statistics for columns;
-analyze table srcbucket_mapjoin_part compute statistics for columns;
-analyze table tab compute statistics for columns;
-analyze table tab_part compute statistics for columns;
+analyze table srcbucket_mapjoin_n14 compute statistics for columns;
+analyze table srcbucket_mapjoin_part_n15 compute statistics for columns;
+analyze table tab_n8 compute statistics for columns;
+analyze table tab_part_n9 compute statistics for columns;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value;
+from tab_n8 a join tab_part_n9 b on a.key = b.key order by a.key, a.value, b.value;
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value;
+from tab_n8 a join tab_part_n9 b on a.key = b.key order by a.key, a.value, b.value;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value;
+from tab_n8 a join tab_part_n9 b on a.key = b.key order by a.key, a.value, b.value;
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key order by a.key, a.value, b.value;
+from tab_n8 a join tab_part_n9 b on a.key = b.key order by a.key, a.value, b.value;
set hive.auto.convert.join.noconditionaltask.size=900;
@@ -53,31 +53,31 @@ set hive.convert.join.bucket.mapjoin.tez = false;
explain
select count(*)
from
-(select distinct key from tab_part) a join tab b on a.key = b.key;
+(select distinct key from tab_part_n9) a join tab_n8 b on a.key = b.key;
select count(*)
from
-(select distinct key from tab_part) a join tab b on a.key = b.key;
+(select distinct key from tab_part_n9) a join tab_n8 b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select count(*)
from
-(select distinct key from tab_part) a join tab b on a.key = b.key;
+(select distinct key from tab_part_n9) a join tab_n8 b on a.key = b.key;
select count(*)
from
-(select distinct key from tab_part) a join tab b on a.key = b.key;
+(select distinct key from tab_part_n9) a join tab_n8 b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select count(*)
from
-(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c
+(select a.key as key, a.value as value from tab_n8 a join tab_part_n9 b on a.key = b.key) c
join tab_part d on c.key = d.key;
select count(*)
from
-(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c
+(select a.key as key, a.value as value from tab_n8 a join tab_part_n9 b on a.key = b.key) c
join tab_part d on c.key = d.key;
@@ -85,12 +85,12 @@ set hive.convert.join.bucket.mapjoin.tez = true;
explain
select count(*)
from
-(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c
+(select a.key as key, a.value as value from tab_n8 a join tab_part_n9 b on a.key = b.key) c
join tab_part d on c.key = d.key;
select count(*)
from
-(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c
+(select a.key as key, a.value as value from tab_n8 a join tab_part_n9 b on a.key = b.key) c
join tab_part d on c.key = d.key;
@@ -100,12 +100,12 @@ select count(*)
from
tab_part d
join
-(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key;
+(select a.key as key, a.value as value from tab_n8 a join tab_part_n9 b on a.key = b.key) c on c.key = d.key;
select count(*)
from
tab_part d
join
-(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key;
+(select a.key as key, a.value as value from tab_n8 a join tab_part_n9 b on a.key = b.key) c on c.key = d.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
@@ -113,168 +113,168 @@ select count(*)
from
tab_part d
join
-(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key;
+(select a.key as key, a.value as value from tab_n8 a join tab_part_n9 b on a.key = b.key) c on c.key = d.key;
select count(*)
from
tab_part d
join
-(select a.key as key, a.value as value from tab a join tab_part b on a.key = b.key) c on c.key = d.key;
+(select a.key as key, a.value as value from tab_n8 a join tab_part_n9 b on a.key = b.key) c on c.key = d.key;
--- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table.
+-- one side is really bucketed. srcbucket_mapjoin_n14 is not really a bucketed table.
-- In this case the sub-query is chosen as the big table.
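-- [Illustrative aside, not part of the patch] hive.convert.join.bucket.mapjoin.tez is
-- the switch every explain in this file toggles: with it off, Tez plans an ordinary
-- broadcast map join (or a shuffle join); with it on, and both sides bucketed on the
-- join key with compatible bucket counts, the small side is routed bucket-to-bucket
-- over a CUSTOM_EDGE instead of being broadcast. A minimal sketch, with hypothetical
-- table names:
--
--   create table bmj_small (key int, value string)
--     clustered by (key) into 2 buckets stored as orc;
--   create table bmj_big (key int, value string)
--     clustered by (key) into 4 buckets stored as orc;
--   set hive.auto.convert.join=true;
--   set hive.convert.join.bucket.mapjoin.tez=true;
--   explain select a.key, b.value from bmj_small a join bmj_big b on a.key = b.key;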
set hive.convert.join.bucket.mapjoin.tez = false;
set hive.auto.convert.join.noconditionaltask.size=1000;
explain
select a.k1, a.v1, b.value
-from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
-join tab b on a.k1 = b.key;
+from (select sum(substr(srcbucket_mapjoin_n14.value,5)) as v1, key as k1 from srcbucket_mapjoin_n14 GROUP BY srcbucket_mapjoin_n14.key) a
+join tab_n8 b on a.k1 = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.k1, a.v1, b.value
-from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
- join tab b on a.k1 = b.key;
+from (select sum(substr(srcbucket_mapjoin_n14.value,5)) as v1, key as k1 from srcbucket_mapjoin_n14 GROUP BY srcbucket_mapjoin_n14.key) a
+ join tab_n8 b on a.k1 = b.key;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.k1, a.v1, b.value
-from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a
-join tab b on a.k1 = b.key;
+from (select sum(substr(tab_n8.value,5)) as v1, key as k1 from tab_part_n9 join tab_n8 on tab_part_n9.key = tab_n8.key GROUP BY tab_n8.key) a
+join tab_n8 b on a.k1 = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.k1, a.v1, b.value
-from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a
- join tab b on a.k1 = b.key;
+from (select sum(substr(tab_n8.value,5)) as v1, key as k1 from tab_part_n9 join tab_n8 on tab_part_n9.key = tab_n8.key GROUP BY tab_n8.key) a
+ join tab_n8 b on a.k1 = b.key;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.k1, a.v1, b.value
-from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a
-join tab_part b on a.k1 = b.key;
+from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab_n8 x join tab_n8 y on x.key = y.key GROUP BY x.key) a
+join tab_part_n9 b on a.k1 = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.k1, a.v1, b.value
-from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a
- join tab_part b on a.k1 = b.key;
+from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab_n8 x join tab_n8 y on x.key = y.key GROUP BY x.key) a
+ join tab_part_n9 b on a.k1 = b.key;
-- multi-way join
set hive.convert.join.bucket.mapjoin.tez = false;
set hive.auto.convert.join.noconditionaltask.size=20000;
explain
select a.key, a.value, b.value
-from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key;
+from tab_part_n9 a join tab_n8 b on a.key = b.key join tab_n8 c on a.key = c.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
-from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key;
+from tab_part_n9 a join tab_n8 b on a.key = b.key join tab_n8 c on a.key = c.key;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.key, a.value, c.value
-from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key;
+from (select x.key, x.value from tab_part_n9 x join tab_n8 y on x.key = y.key) a join tab_n8 c on a.key = c.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, c.value
-from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key;
+from (select x.key, x.value from tab_part_n9 x join tab_n8 y on x.key = y.key) a join tab_n8 c on a.key = c.key;
-- in this case sub-query is the small table
set hive.convert.join.bucket.mapjoin.tez = false;
set hive.auto.convert.join.noconditionaltask.size=900;
explain
select a.key, a.value, b.value
-from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
-join tab_part b on a.key = b.key;
+from (select key, sum(substr(srcbucket_mapjoin_n14.value,5)) as value from srcbucket_mapjoin_n14 GROUP BY srcbucket_mapjoin_n14.key) a
+join tab_part_n9 b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
-from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
- join tab_part b on a.key = b.key;
+from (select key, sum(substr(srcbucket_mapjoin_n14.value,5)) as value from srcbucket_mapjoin_n14 GROUP BY srcbucket_mapjoin_n14.key) a
+ join tab_part_n9 b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = false;
set hive.map.aggr=false;
explain
select a.key, a.value, b.value
-from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
-join tab_part b on a.key = b.key;
+from (select key, sum(substr(srcbucket_mapjoin_n14.value,5)) as value from srcbucket_mapjoin_n14 GROUP BY srcbucket_mapjoin_n14.key) a
+join tab_part_n9 b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
-from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
- join tab_part b on a.key = b.key;
+from (select key, sum(substr(srcbucket_mapjoin_n14.value,5)) as value from srcbucket_mapjoin_n14 GROUP BY srcbucket_mapjoin_n14.key) a
+ join tab_part_n9 b on a.key = b.key;
-- join on non-bucketed column results in shuffle join.
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.key, a.value, b.value
-from tab a join tab_part b on a.value = b.value;
+from tab_n8 a join tab_part_n9 b on a.value = b.value;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
-from tab a join tab_part b on a.value = b.value;
+from tab_n8 a join tab_part_n9 b on a.value = b.value;
-CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab1
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab1_n4(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab1_n4
+select key,value from srcbucket_mapjoin_n14;
set hive.auto.convert.join.noconditionaltask.size=20000;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.key, a.value, b.value
-from tab1 a join tab_part b on a.key = b.key;
+from tab1_n4 a join tab_part_n9 b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
-from tab1 a join tab_part b on a.key = b.key;
+from tab1_n4 a join tab_part_n9 b on a.key = b.key;
-- No map joins should be created.
set hive.convert.join.bucket.mapjoin.tez = false;
set hive.auto.convert.join.noconditionaltask.size=15000;
-explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value;
+explain select a.key, b.key from tab_part_n9 a join tab_part_n9 c on a.key = c.key join tab_part_n9 b on a.value = b.value;
set hive.convert.join.bucket.mapjoin.tez = true;
-explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value;
+explain select a.key, b.key from tab_part_n9 a join tab_part_n9 c on a.key = c.key join tab_part_n9 b on a.value = b.value;
set hive.convert.join.bucket.mapjoin.tez = false;
-- This won't have any effect, as the column ds is a partition column which is not bucketed.
explain
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key and a.ds = b.ds;
+from tab_n8 a join tab_part_n9 b on a.key = b.key and a.ds = b.ds;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key and a.ds = b.ds;
+from tab_n8 a join tab_part_n9 b on a.key = b.key and a.ds = b.ds;
-- HIVE-17792 : Enable Bucket Map Join when there are extra keys other than bucketed columns
set hive.auto.convert.join.noconditionaltask.size=20000;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.key, a.value, b.value
- from tab a join tab_part b on a.key = b.key and a.value = b.value;
+ from tab_n8 a join tab_part_n9 b on a.key = b.key and a.value = b.value;
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key and a.value = b.value
+from tab_n8 a join tab_part_n9 b on a.key = b.key and a.value = b.value
order by a.key, a.value, b.value;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
- from tab a join tab_part b on a.key = b.key and a.value = b.value;
+ from tab_n8 a join tab_part_n9 b on a.key = b.key and a.value = b.value;
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key and a.value = b.value
+from tab_n8 a join tab_part_n9 b on a.key = b.key and a.value = b.value
order by a.key, a.value, b.value;
-- With non-bucketed small table
-CREATE TABLE tab2(key int, value string) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-insert overwrite table tab2 partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
-analyze table tab2 compute statistics for columns;
+CREATE TABLE tab2_n4(key int, value string) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+insert overwrite table tab2_n4 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n14;
+analyze table tab2_n4 compute statistics for columns;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.key, a.value, b.value
- from tab2 a join tab_part b on a.key = b.key and a.value = b.value;
+ from tab2_n4 a join tab_part_n9 b on a.key = b.key and a.value = b.value;
select a.key, a.value, b.value
-from tab2 a join tab_part b on a.key = b.key and a.value = b.value
+from tab2_n4 a join tab_part_n9 b on a.key = b.key and a.value = b.value
order by a.key, a.value, b.value;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
- from tab2 a join tab_part b on a.key = b.key and a.value = b.value;
+ from tab2_n4 a join tab_part_n9 b on a.key = b.key and a.value = b.value;
select a.key, a.value, b.value
-from tab2 a join tab_part b on a.key = b.key and a.value = b.value
+from tab2_n4 a join tab_part_n9 b on a.key = b.key and a.value = b.value
order by a.key, a.value, b.value;
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
index 0ee49fca0b..7af8854164 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
@@ -8,94 +8,94 @@ set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=10000;
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n18(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n11 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_n20 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n18 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n18 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n20 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n20 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n20 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n20 partition(ds='2008-04-08');
set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n11 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n20;
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n10(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n10 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n18;
-analyze table srcbucket_mapjoin compute statistics for columns;
-analyze table srcbucket_mapjoin_part compute statistics for columns;
-analyze table tab compute statistics for columns;
-analyze table tab_part compute statistics for columns;
+analyze table srcbucket_mapjoin_n18 compute statistics for columns;
+analyze table srcbucket_mapjoin_part_n20 compute statistics for columns;
+analyze table tab_n10 compute statistics for columns;
+analyze table tab_part_n11 compute statistics for columns;
set hive.auto.convert.join.noconditionaltask.size=1500;
set hive.convert.join.bucket.mapjoin.tez = false;
-explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value;
+explain select a.key, b.key from tab_part_n11 a join tab_part_n11 c on a.key = c.key join tab_part_n11 b on a.value = b.value;
set hive.convert.join.bucket.mapjoin.tez = true;
-explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value;
+explain select a.key, b.key from tab_part_n11 a join tab_part_n11 c on a.key = c.key join tab_part_n11 b on a.value = b.value;
-CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab1
-select key,value from srcbucket_mapjoin;
-analyze table tab1 compute statistics for columns;
+CREATE TABLE tab1_n5(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab1_n5
+select key,value from srcbucket_mapjoin_n18;
+analyze table tab1_n5 compute statistics for columns;
-- A negative test as src is not bucketed.
set hive.auto.convert.join.noconditionaltask.size=20000;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.key, a.value, b.value
-from tab1 a join src b on a.key = b.key;
+from tab1_n5 a join src b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select a.key, a.value, b.value
-from tab1 a join src b on a.key = b.key;
+from tab1_n5 a join src b on a.key = b.key;
set hive.auto.convert.join.noconditionaltask.size=500;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
-select a.key, b.key from (select key from tab_part where key > 1) a join (select key from tab_part where key > 2) b on a.key = b.key;
+select a.key, b.key from (select key from tab_part_n11 where key > 1) a join (select key from tab_part_n11 where key > 2) b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
-select a.key, b.key from (select key from tab_part where key > 1) a join (select key from tab_part where key > 2) b on a.key = b.key;
+select a.key, b.key from (select key from tab_part_n11 where key > 1) a join (select key from tab_part_n11 where key > 2) b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
-select a.key, b.key from (select key from tab_part where key > 1) a left outer join (select key from tab_part where key > 2) b on a.key = b.key;
+select a.key, b.key from (select key from tab_part_n11 where key > 1) a left outer join (select key from tab_part_n11 where key > 2) b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
-select a.key, b.key from (select key from tab_part where key > 1) a left outer join (select key from tab_part where key > 2) b on a.key = b.key;
+select a.key, b.key from (select key from tab_part_n11 where key > 1) a left outer join (select key from tab_part_n11 where key > 2) b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = false;
explain
-select a.key, b.key from (select key from tab_part where key > 1) a right outer join (select key from tab_part where key > 2) b on a.key = b.key;
+select a.key, b.key from (select key from tab_part_n11 where key > 1) a right outer join (select key from tab_part_n11 where key > 2) b on a.key = b.key;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
-select a.key, b.key from (select key from tab_part where key > 1) a right outer join (select key from tab_part where key > 2) b on a.key = b.key;
+select a.key, b.key from (select key from tab_part_n11 where key > 1) a right outer join (select key from tab_part_n11 where key > 2) b on a.key = b.key;
set hive.auto.convert.join.noconditionaltask.size=300;
set hive.convert.join.bucket.mapjoin.tez = false;
-explain select a.key, b.key from (select distinct key from tab) a join tab b on b.key = a.key;
+explain select a.key, b.key from (select distinct key from tab_n10) a join tab_n10 b on b.key = a.key;
set hive.convert.join.bucket.mapjoin.tez = true;
-explain select a.key, b.key from (select distinct key from tab) a join tab b on b.key = a.key;
+explain select a.key, b.key from (select distinct key from tab_n10) a join tab_n10 b on b.key = a.key;
set hive.convert.join.bucket.mapjoin.tez = false;
-explain select a.value, b.value from (select distinct value from tab) a join tab b on b.key = a.value;
+explain select a.value, b.value from (select distinct value from tab_n10) a join tab_n10 b on b.key = a.value;
set hive.convert.join.bucket.mapjoin.tez = true;
-explain select a.value, b.value from (select distinct value from tab) a join tab b on b.key = a.value;
+explain select a.value, b.value from (select distinct value from tab_n10) a join tab_n10 b on b.key = a.value;
--multi key
CREATE TABLE tab_part1 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key, value) INTO 4 BUCKETS STORED AS TEXTFILE;
insert overwrite table tab_part1 partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+select key,value from srcbucket_mapjoin_part_n20;
analyze table tab_part1 compute statistics for columns;
set hive.auto.convert.join.noconditionaltask.size=20000;
@@ -103,12 +103,12 @@ set hive.convert.join.bucket.mapjoin.tez = false;
explain
select count(*)
from
-(select distinct key,value from tab_part) a join tab b on a.key = b.key and a.value = b.value;
+(select distinct key,value from tab_part_n11) a join tab_n10 b on a.key = b.key and a.value = b.value;
set hive.convert.join.bucket.mapjoin.tez = true;
explain
select count(*)
from
-(select distinct key,value from tab_part) a join tab b on a.key = b.key and a.value = b.value;
+(select distinct key,value from tab_part_n11) a join tab_n10 b on a.key = b.key and a.value = b.value;
--HIVE-17939
@@ -122,10 +122,10 @@ explain select small.i, big.i from small,big where small.i=big.i;
select small.i, big.i from small,big where small.i=big.i order by small.i, big.i;
-- Bucket map join disabled for external tables
--- Create external table equivalent of tab_part
+-- Create external table equivalent of tab_part_n11
CREATE EXTERNAL TABLE tab_part_ext (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
insert overwrite table tab_part_ext partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+select key,value from srcbucket_mapjoin_part_n20;
analyze table tab_part_ext compute statistics for columns;
set hive.auto.convert.join.noconditionaltask.size=1500;
@@ -133,7 +133,7 @@ set hive.convert.join.bucket.mapjoin.tez = true;
set hive.disable.unsafe.external.table.operations=true;
set test.comment=Bucket map join should work here;
set test.comment;
-explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value;
+explain select a.key, b.key from tab_part_n11 a join tab_part_n11 c on a.key = c.key join tab_part_n11 b on a.value = b.value;
set test.comment=External tables, bucket map join should be disabled;
set test.comment;
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q
index cc43b5b912..359560aab7 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q
@@ -4,15 +4,15 @@ set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=10000;
-CREATE TABLE tab1(key1 int, value string) CLUSTERED BY (key1) INTO 10 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab2 (key1 int, value string) CLUSTERED BY (key1) INTO 10 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab1_n0(key1 int, value string) CLUSTERED BY (key1) INTO 10 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab2_n0 (key1 int, value string) CLUSTERED BY (key1) INTO 10 BUCKETS STORED AS TEXTFILE;
-- HIVE-18721 : Make sure only certain buckets have data.
-insert into tab1 VALUES (1,"abc"),(4,"def"),(8, "ghi");
-insert into tab2 VALUES (1, "abc"), (5, "aa");
+insert into tab1_n0 VALUES (1,"abc"),(4,"def"),(8, "ghi");
+insert into tab2_n0 VALUES (1, "abc"), (5, "aa");
set hive.convert.join.bucket.mapjoin.tez = true;
-explain select * from tab1, tab2 where tab1.key1 = tab2.key1;
-select * from tab1, tab2 where tab1.key1 = tab2.key1;
+explain select * from tab1_n0, tab2_n0 where tab1_n0.key1 = tab2_n0.key1;
+select * from tab1_n0, tab2_n0 where tab1_n0.key1 = tab2_n0.key1;
diff --git a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
index 40965da775..770878f0e0 100644
--- a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
+++ b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
@@ -7,11 +7,11 @@ set hive.exec.reducers.max = 2;
-- This test sets the maximum number of reduce tasks to 2 for overwriting a
-- table with 3 buckets, and uses a post-hook to confirm that 1 reducer was used
-CREATE TABLE test_table(key int, value string) CLUSTERED BY (key) INTO 3 BUCKETS;
+CREATE TABLE test_table_n4(key int, value string) CLUSTERED BY (key) INTO 3 BUCKETS;
-explain extended insert overwrite table test_table
+explain extended insert overwrite table test_table_n4
select * from src;
-insert overwrite table test_table
+insert overwrite table test_table_n4
select * from src;
-drop table test_table;
+drop table test_table_n4;
diff --git a/ql/src/test/queries/clientpositive/bucketcontext_1.q b/ql/src/test/queries/clientpositive/bucketcontext_1.q
index cf5fa6ccf5..efb368e3e6 100644
--- a/ql/src/test/queries/clientpositive/bucketcontext_1.q
+++ b/ql/src/test/queries/clientpositive/bucketcontext_1.q
@@ -3,25 +3,25 @@ set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-- small 1 part, 2 bucket & big 2 part, 4 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
+CREATE TABLE bucket_small_n14 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n14 partition(ds='2008-04-08');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n14 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09');
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/bucketcontext_2.q b/ql/src/test/queries/clientpositive/bucketcontext_2.q
index a53105c504..9287df85d8 100644
--- a/ql/src/test/queries/clientpositive/bucketcontext_2.q
+++ b/ql/src/test/queries/clientpositive/bucketcontext_2.q
@@ -3,23 +3,23 @@ set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-- small 1 part, 4 bucket & big 2 part, 2 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08');
+CREATE TABLE bucket_small_n8 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n8 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-09');
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/bucketcontext_3.q b/ql/src/test/queries/clientpositive/bucketcontext_3.q
index b77173a252..edfbd570fa 100644
--- a/ql/src/test/queries/clientpositive/bucketcontext_3.q
+++ b/ql/src/test/queries/clientpositive/bucketcontext_3.q
@@ -3,23 +3,23 @@ set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-- small 2 part, 2 bucket & big 1 part, 4 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
+CREATE TABLE bucket_small_n4 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-09');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n4 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08');
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/bucketcontext_5.q b/ql/src/test/queries/clientpositive/bucketcontext_5.q
index 332d533682..331f2c115c 100644
--- a/ql/src/test/queries/clientpositive/bucketcontext_5.q
+++ b/ql/src/test/queries/clientpositive/bucketcontext_5.q
@@ -2,20 +2,20 @@ set hive.strict.checks.bucketing=false;
-- small no part, 4 bucket & big no part, 2 bucket
-CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small;
+CREATE TABLE bucket_small_n13 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n13;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n13;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n13;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n13;
-CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big;
+CREATE TABLE bucket_big_n13 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n13;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n13;
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key;
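Each of these files then flips hive.optimize.bucketmapjoin.sortedmerge on and repeats the same explain/select pair. When both sides are SORTED BY the join key, the bucket map join can be upgraded to a sort-merge bucket join that merges the pre-sorted buckets instead of building a hash table; the second explain in each test is there to catch that upgrade. Continuing the hypothetical small_t/big_t sketch from above:

set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-- both tables are SORTED BY (key), so matching buckets can be merge-joined
explain extended select /*+ MAPJOIN(a) */ count(*) FROM small_t a JOIN big_t b ON a.key = b.key;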
diff --git a/ql/src/test/queries/clientpositive/bucketcontext_6.q b/ql/src/test/queries/clientpositive/bucketcontext_6.q
index 0b608ec2a4..6ab14c37ec 100644
--- a/ql/src/test/queries/clientpositive/bucketcontext_6.q
+++ b/ql/src/test/queries/clientpositive/bucketcontext_6.q
@@ -3,23 +3,23 @@ set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-- small no part, 4 bucket & big 2 part, 2 bucket
-CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small;
+CREATE TABLE bucket_small_n7 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n7;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n7;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n7;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n7;
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n7 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-09');
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key;
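What the golden .q.out files actually assert for these renames is the shape of the operator tree that explain extended prints. A hedged sketch of the check, with the operator names assumed from typical Hive explain output rather than confirmed by this patch:

explain extended select /*+ MAPJOIN(a) */ count(*) FROM small_t a JOIN big_t b ON a.key = b.key;
-- expected in the plan text (names assumed):
--   a "Sorted Merge Bucket Map Join Operator" once the sorted-merge upgrade applies,
--   otherwise a "Map Join Operator" annotated with "BucketMapJoin: true"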
diff --git a/ql/src/test/queries/clientpositive/bucketcontext_7.q b/ql/src/test/queries/clientpositive/bucketcontext_7.q
index 32245a5352..d8de2c57b4 100644
--- a/ql/src/test/queries/clientpositive/bucketcontext_7.q
+++ b/ql/src/test/queries/clientpositive/bucketcontext_7.q
@@ -3,28 +3,28 @@ set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-- small 2 part, 4 bucket & big 2 part, 2 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08');
+CREATE TABLE bucket_small_n2 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n2 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-09');
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/bucketcontext_8.q b/ql/src/test/queries/clientpositive/bucketcontext_8.q
index 252f74eb38..b095cfff46 100644
--- a/ql/src/test/queries/clientpositive/bucketcontext_8.q
+++ b/ql/src/test/queries/clientpositive/bucketcontext_8.q
@@ -3,28 +3,28 @@ set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-- small 2 part, 2 bucket & big 2 part, 4 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
+CREATE TABLE bucket_small_n10 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-09');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n10 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09');
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key;
+select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q b/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
index a87fa1af0f..bdc00901b8 100644
--- a/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
+++ b/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
@@ -2,37 +2,37 @@ set hive.compute.query.using.stats=false;
set hive.mapred.mode=nonstrict;
set mapred.max.split.size = 32000000;
-CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n125(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n125;
-CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
+CREATE TABLE T2_n74(name STRING) STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE T2 SELECT * FROM (
+INSERT OVERWRITE TABLE T2_n74 SELECT * FROM (
SELECT tmp1.name as name FROM (
- SELECT name, 'MMM' AS n FROM T1) tmp1
- JOIN (SELECT 'MMM' AS n FROM T1) tmp2
- JOIN (SELECT 'MMM' AS n FROM T1) tmp3
+ SELECT name, 'MMM' AS n FROM T1_n125) tmp1
+ JOIN (SELECT 'MMM' AS n FROM T1_n125) tmp2
+ JOIN (SELECT 'MMM' AS n FROM T1_n125) tmp3
ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000;
-CREATE TABLE T3(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3;
-LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T3;
+CREATE TABLE T3_n28(name STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3_n28;
+LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T3_n28;
set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.ShowMapredStatsHook;
set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-- 2 split by max.split.size
-SELECT COUNT(1) FROM T2;
+SELECT COUNT(1) FROM T2_n74;
-- 1 split for two file
-SELECT COUNT(1) FROM T3;
+SELECT COUNT(1) FROM T3_n28;
set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-- 1 split
-SELECT COUNT(1) FROM T2;
+SELECT COUNT(1) FROM T2_n74;
-- 2 split for two file
-SELECT COUNT(1) FROM T3;
+SELECT COUNT(1) FROM T3_n28;
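The COUNT(1) probes in bucketizedhiveinputformat.q are really split-count checks, surfaced through ShowMapredStatsHook: CombineHiveInputFormat packs input up to mapred.max.split.size, so one large file becomes 2 splits and two small files collapse into 1, while BucketizedHiveInputFormat neither combines nor subdivides files, so the same tables yield 1 and 2 splits respectively. A condensed sketch of the contrast, with a hypothetical table t backed by two small files:

set mapred.max.split.size = 32000000;
set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
SELECT COUNT(1) FROM t;  -- small files are combined: 1 split
set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
SELECT COUNT(1) FROM t;  -- one split per file: 2 splits, bucket boundaries preserved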
diff --git a/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q b/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q
index 5bcce90937..871f959cc7 100644
--- a/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q
+++ b/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q
@@ -1,26 +1,26 @@ set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08');
+CREATE TABLE bucket_small_n16 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n16 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n16 partition(ds='2008-04-08');
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n16 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
-select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key;
set hive.optimize.bucketmapjoin.sortedmerge = true;
-select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key;
set hive.input.format = org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin1.q b/ql/src/test/queries/clientpositive/bucketmapjoin1.q
index e04c12006a..39f0b9b9b0 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin1.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin1.q
@@ -2,110 +2,110 @@ SET hive.vectorized.execution.enabled=false;
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
-CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_n1 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_2_n1 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
set hive.optimize.bucketmapjoin = true;
-- empty partitions (HIVE-3205)
explain extended
select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n1 a join srcbucket_mapjoin_part_2_n1 b
on a.key=b.key where b.ds="2008-04-08";
select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n1 a join srcbucket_mapjoin_part_2_n1 b
on a.key=b.key where b.ds="2008-04-08";
explain extended
select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n1 a join srcbucket_mapjoin_part_2_n1 b
on a.key=b.key where b.ds="2008-04-08";
select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n1 a join srcbucket_mapjoin_part_2_n1 b
on a.key=b.key where b.ds="2008-04-08";
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n1;
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n1;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n1 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n1 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n1 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n1 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n1 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n1 partition(ds='2008-04-08');
-create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_1_n0 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_2_n0 (key bigint , value1 bigint, value2 bigint);
set hive.optimize.bucketmapjoin = true;
-create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
+create table bucketmapjoin_tmp_result_n0 (key string , value1 string, value2 string);
explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n0
select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n1 a join srcbucket_mapjoin_part_n1 b
on a.key=b.key where b.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n0
select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n1 a join srcbucket_mapjoin_part_n1 b
on a.key=b.key where b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n0;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+insert overwrite table bucketmapjoin_hash_result_1_n0
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n0;
set hive.optimize.bucketmapjoin = false;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n0
select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n1 a join srcbucket_mapjoin_part_n1 b
on a.key=b.key where b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_2
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n0;
+insert overwrite table bucketmapjoin_hash_result_2_n0
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n0;
select a.key-b.key, a.value1-b.value1, a.value2-b.value2
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+from bucketmapjoin_hash_result_1_n0 a left outer join bucketmapjoin_hash_result_2_n0 b
on a.key = b.key;
set hive.optimize.bucketmapjoin = true;
explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n0
select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n1 a join srcbucket_mapjoin_part_n1 b
on a.key=b.key where b.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n0
select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n1 a join srcbucket_mapjoin_part_n1 b
on a.key=b.key where b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n0;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+insert overwrite table bucketmapjoin_hash_result_1_n0
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n0;
set hive.optimize.bucketmapjoin = false;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n0
select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n1 a join srcbucket_mapjoin_part_n1 b
on a.key=b.key where b.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_2
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n0;
+insert overwrite table bucketmapjoin_hash_result_2_n0
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n0;
select a.key-b.key, a.value1-b.value1, a.value2-b.value2
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+from bucketmapjoin_hash_result_1_n0 a left outer join bucketmapjoin_hash_result_2_n0 b
on a.key = b.key;
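bucketmapjoin1.q is a correctness test as well as a plan test: the same hinted join is materialized with hive.optimize.bucketmapjoin on and then off, a single-row digest of each result is captured as sum(hash(...)) per column, and a final join of the two digest tables exposes any divergence as a non-zero delta. The skeleton of that self-checking pattern, with the names digest_on, digest_off and result_tbl all invented for illustration:

-- run the join twice (flag on, then off), each time followed by something like:
--   insert overwrite table digest_on   -- or digest_off on the second pass
--   select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from result_tbl;
select a.key-b.key, a.value1-b.value1, a.value2-b.value2
from digest_on a left outer join digest_off b on a.key = b.key;
-- every delta should be 0 if the optimized and unoptimized plans agree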
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin10.q b/ql/src/test/queries/clientpositive/bucketmapjoin10.q
index 25141c1701..1c1be880c2 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin10.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin10.q
@@ -4,27 +4,27 @@ set hive.strict.checks.bucketing=false;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_1_n6 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='1');
-ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 3 BUCKETS;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+ALTER TABLE srcbucket_mapjoin_part_1_n6 CLUSTERED BY (key) INTO 3 BUCKETS;
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2');
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n13 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1');
-ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
+ALTER TABLE srcbucket_mapjoin_part_2_n13 CLUSTERED BY (key) INTO 2 BUCKETS;
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='2');
-ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS;
+ALTER TABLE srcbucket_mapjoin_part_2_n13 CLUSTERED BY (key) INTO 3 BUCKETS;
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin=true;
@@ -32,9 +32,9 @@ set hive.optimize.bucketmapjoin=true;
EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n6 a JOIN srcbucket_mapjoin_part_2_n13 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n6 a JOIN srcbucket_mapjoin_part_2_n13 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin11.q b/ql/src/test/queries/clientpositive/bucketmapjoin11.q
index 361a16228c..54a65995c5 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin11.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin11.q
@@ -4,27 +4,27 @@ set hive.strict.checks.bucketing=false;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_1_n2 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='1');
-ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 4 BUCKETS;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+ALTER TABLE srcbucket_mapjoin_part_1_n2 CLUSTERED BY (key) INTO 4 BUCKETS;
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2');
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n6 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1');
-ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
+ALTER TABLE srcbucket_mapjoin_part_2_n6 CLUSTERED BY (key) INTO 2 BUCKETS;
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='2');
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin=true;
@@ -34,18 +34,18 @@ set hive.optimize.bucketmapjoin=true;
EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b
ON a.key = b.key AND a.part = b.part AND a.part IS NOT NULL AND b.part IS NOT NULL;
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b
ON a.key = b.key AND a.part = b.part AND a.part IS NOT NULL AND b.part IS NOT NULL;
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin12.q b/ql/src/test/queries/clientpositive/bucketmapjoin12.q
index 103bdc35de..538381b92c 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin12.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin12.q
@@ -9,12 +9,12 @@ CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n0 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n0 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n0 PARTITION (part='1');
-ALTER TABLE srcbucket_mapjoin_part_2 NOT CLUSTERED;
+ALTER TABLE srcbucket_mapjoin_part_2_n0 NOT CLUSTERED;
CREATE TABLE srcbucket_mapjoin_part_3 (key INT, value STRING) PARTITIONED BY (part STRING)
STORED AS TEXTFILE;
@@ -29,11 +29,11 @@ set hive.optimize.bucketmapjoin=true;
EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2_n0 b
ON a.key = b.key AND a.part = '1' and b.part = '1';
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2_n0 b
ON a.key = b.key AND a.part = '1' and b.part = '1';
-- The table bucketing metadata match but one partition is not bucketed, bucket map join should not be used
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin13.q b/ql/src/test/queries/clientpositive/bucketmapjoin13.q
index 0766b3bd9c..6b05ac3f5c 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin13.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin13.q
@@ -4,73 +4,73 @@ SET hive.vectorized.execution.enabled=false;
set hive.mapred.mode=nonstrict;
set hive.exec.reducers.max=1;
-CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_1_n8 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (value) INTO 2 BUCKETS;
--- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value'
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+-- part=1 partition for srcbucket_mapjoin_part_1_n8 is bucketed by 'value'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n8 PARTITION (part='1')
SELECT * FROM src;
-ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 2 BUCKETS;
+ALTER TABLE srcbucket_mapjoin_part_1_n8 CLUSTERED BY (key) INTO 2 BUCKETS;
--- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+-- part=2 partition for srcbucket_mapjoin_part_1_n8 is bucketed by 'key'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n8 PARTITION (part='2')
SELECT * FROM src;
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n18 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS;
--- part=1 partition for srcbucket_mapjoin_part_2 is bucketed by 'key'
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+-- part=1 partition for srcbucket_mapjoin_part_2_n18 is bucketed by 'key'
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n18 PARTITION (part='1')
SELECT * FROM src;
set hive.cbo.enable=false;
set hive.optimize.bucketmapjoin=true;
--- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value'
+-- part=1 partition for srcbucket_mapjoin_part_1_n8 is bucketed by 'value'
-- and it is also being joined. So, bucketed map-join cannot be performed
EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
ON a.key = b.key;
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
ON a.key = b.key;
--- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- part=2 partition for srcbucket_mapjoin_part_1_n8 is bucketed by 'key'
-- and it is being joined. So, bucketed map-join can be performed
EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
ON a.key = b.key and a.part = '2';
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
ON a.key = b.key and a.part = '2';
-ALTER TABLE srcbucket_mapjoin_part_1 drop partition (part = '1');
+ALTER TABLE srcbucket_mapjoin_part_1_n8 drop partition (part = '1');
--- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- part=2 partition for srcbucket_mapjoin_part_1_n8 is bucketed by 'key'
-- and it is being joined. So, bucketed map-join can be performed
EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
ON a.key = b.key;
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
ON a.key = b.key;
-ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (value) INTO 2 BUCKETS;
+ALTER TABLE srcbucket_mapjoin_part_1_n8 CLUSTERED BY (value) INTO 2 BUCKETS;
--- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key'
+-- part=2 partition for srcbucket_mapjoin_part_1_n8 is bucketed by 'key'
-- and it is being joined. So, bucketed map-join can be performed
-- The fact that the table is being bucketed by 'value' does not matter
EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
ON a.key = b.key;
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
ON a.key = b.key;
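bucketmapjoin13.q above leans on a subtle piece of metastore behaviour: bucketing is recorded per partition at write time, so an ALTER TABLE ... CLUSTERED BY affects only partitions written afterwards, and the optimizer must trust the partition-level spec rather than the table's current one. Reduced to a sketch with a hypothetical table p_t:

CREATE TABLE p_t (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (value) INTO 2 BUCKETS;
INSERT OVERWRITE TABLE p_t PARTITION (part='1') SELECT * FROM src;  -- part=1 recorded as bucketed by value
ALTER TABLE p_t CLUSTERED BY (key) INTO 2 BUCKETS;
INSERT OVERWRITE TABLE p_t PARTITION (part='2') SELECT * FROM src;  -- part=2 recorded as bucketed by key
-- a join on p_t.key can use bucket map join only for splits that come from part=2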
bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key and b.ds="2008-04-08"; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key and b.ds="2008-04-08"; -select count(1) from bucketmapjoin_tmp_result; -insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n2; +insert overwrite table bucketmapjoin_hash_result_1_n2 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n2; set hive.optimize.bucketmapjoin = false; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key and b.ds="2008-04-08"; -select count(1) from bucketmapjoin_tmp_result; -insert overwrite table bucketmapjoin_hash_result_2 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n2; +insert overwrite table bucketmapjoin_hash_result_2_n2 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n2; select a.key-b.key, a.value1-b.value1, a.value2-b.value2 -from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b +from bucketmapjoin_hash_result_1_n2 a left outer join bucketmapjoin_hash_result_2_n2 b on a.key = b.key; set hive.optimize.bucketmapjoin = true; explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(a)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key and b.ds="2008-04-08"; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(a)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key and b.ds="2008-04-08"; -select count(1) from bucketmapjoin_tmp_result; -insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n2; +insert overwrite table bucketmapjoin_hash_result_1_n2 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n2; set hive.optimize.bucketmapjoin = false; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(a)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key and b.ds="2008-04-08"; -select count(1) from bucketmapjoin_tmp_result; -insert overwrite table bucketmapjoin_hash_result_2 -select 
sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n2; +insert overwrite table bucketmapjoin_hash_result_2_n2 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n2; select a.key-b.key, a.value1-b.value1, a.value2-b.value2 -from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b +from bucketmapjoin_hash_result_1_n2 a left outer join bucketmapjoin_hash_result_2_n2 b on a.key = b.key; -- HIVE-3210 -load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09'); -load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09'); +load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n5 partition(ds='2008-04-09'); +load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n5 partition(ds='2008-04-09'); set hive.optimize.bucketmapjoin = true; explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key; -select count(1) from bucketmapjoin_tmp_result; -insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n2; +insert overwrite table bucketmapjoin_hash_result_1_n2 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n2; set hive.optimize.bucketmapjoin = false; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n2 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n6 a join srcbucket_mapjoin_part_2_n5 b on a.key=b.key; -select count(1) from bucketmapjoin_tmp_result; -insert overwrite table bucketmapjoin_hash_result_2 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n2; +insert overwrite table bucketmapjoin_hash_result_2_n2 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n2; select a.key-b.key, a.value1-b.value1, a.value2-b.value2 -from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b +from bucketmapjoin_hash_result_1_n2 a left outer join bucketmapjoin_hash_result_2_n2 b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin3.q b/ql/src/test/queries/clientpositive/bucketmapjoin3.q index 59040682ee..e798e2f8ef 100644 --- a/ql/src/test/queries/clientpositive/bucketmapjoin3.q +++ b/ql/src/test/queries/clientpositive/bucketmapjoin3.q @@ -1,81 +1,81 @@ SET hive.vectorized.execution.enabled=false; set hive.strict.checks.bucketing=false; -CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local 
inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n12(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n12; +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n12; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +CREATE TABLE srcbucket_mapjoin_part_n13 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n13 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n13 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n13 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n13 partition(ds='2008-04-08'); -CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08'); +CREATE TABLE srcbucket_mapjoin_part_2_n11 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n11 partition(ds='2008-04-08'); -create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint); -create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint); +create table bucketmapjoin_hash_result_1_n4 (key bigint , value1 bigint, value2 bigint); +create table bucketmapjoin_hash_result_2_n4 (key bigint , value1 bigint, value2 bigint); set hive.optimize.bucketmapjoin = true; -create table bucketmapjoin_tmp_result (key string , value1 string, value2 string); +create table bucketmapjoin_tmp_result_n6 (key string , value1 string, value2 string); explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n6 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_part_2_n11 a join srcbucket_mapjoin_part_n13 b on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n6 select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_part_2_n11 a join srcbucket_mapjoin_part_n13 b
 on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n6;
+insert overwrite table bucketmapjoin_hash_result_1_n4
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n6;
 set hive.optimize.bucketmapjoin = false;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n6
 select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_part_2_n11 a join srcbucket_mapjoin_part_n13 b
 on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_2
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n6;
+insert overwrite table bucketmapjoin_hash_result_2_n4
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n6;
 select a.key-b.key, a.value1-b.value1, a.value2-b.value2
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+from bucketmapjoin_hash_result_1_n4 a left outer join bucketmapjoin_hash_result_2_n4 b
 on a.key = b.key;
 set hive.optimize.bucketmapjoin = true;
 explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n6
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_part_2_n11 a join srcbucket_mapjoin_part_n13 b
 on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n6
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_part_2_n11 a join srcbucket_mapjoin_part_n13 b
 on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_2
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n6;
+insert overwrite table bucketmapjoin_hash_result_2_n4
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n6;
 set hive.optimize.bucketmapjoin = false;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n6
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_part_2_n11 a join srcbucket_mapjoin_part_n13 b
 on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_2
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n6;
+insert overwrite table bucketmapjoin_hash_result_2_n4
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n6;
 select a.key-b.key, a.value1-b.value1, a.value2-b.value2
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+from bucketmapjoin_hash_result_1_n4 a left outer join bucketmapjoin_hash_result_2_n4 b
 on a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin4.q b/ql/src/test/queries/clientpositive/bucketmapjoin4.q
index d882a384d3..2957d4a55e 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin4.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin4.q
@@ -2,82 +2,82 @@ SET hive.vectorized.execution.enabled=false;
 set hive.strict.checks.bucketing=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n17(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n17;
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n17;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_n18 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n18 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n18 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n18 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n18 partition(ds='2008-04-08');
-CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_2_n15 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n15 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n15 partition(ds='2008-04-08');
-create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_1_n6 (key bigint , value1 bigint, value2 bigint);
+create table bucketmapjoin_hash_result_2_n6 (key bigint , value1 bigint, value2 bigint);
 set hive.optimize.bucketmapjoin = true;
-create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
+create table bucketmapjoin_tmp_result_n8 (key string , value1 string, value2 string);
 explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin b
+from srcbucket_mapjoin_n17 a join srcbucket_mapjoin_n17 b
 on a.key=b.key;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin b
+from srcbucket_mapjoin_n17 a join srcbucket_mapjoin_n17 b
 on a.key=b.key;
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n8;
+insert overwrite table bucketmapjoin_hash_result_1_n6
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n8;
 set hive.optimize.bucketmapjoin = false;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin b
+from srcbucket_mapjoin_n17 a join srcbucket_mapjoin_n17 b
 on a.key=b.key;
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_2
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n8;
+insert overwrite table bucketmapjoin_hash_result_2_n6
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n8;
 select a.key-b.key, a.value1-b.value1, a.value2-b.value2
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+from bucketmapjoin_hash_result_1_n6 a left outer join bucketmapjoin_hash_result_2_n6 b
 on a.key = b.key;
 set hive.optimize.bucketmapjoin = true;
 explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin b
+from srcbucket_mapjoin_n17 a join srcbucket_mapjoin_n17 b
 on a.key=b.key;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin b
+from srcbucket_mapjoin_n17 a join srcbucket_mapjoin_n17 b
 on a.key=b.key;
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n8;
+insert overwrite table bucketmapjoin_hash_result_1_n6
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n8;
 set hive.optimize.bucketmapjoin = false;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin b
+from srcbucket_mapjoin_n17 a join srcbucket_mapjoin_n17 b
 on a.key=b.key;
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_2
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n8;
+insert overwrite table bucketmapjoin_hash_result_2_n6
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n8;
 select a.key-b.key, a.value1-b.value1, a.value2-b.value2
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+from bucketmapjoin_hash_result_1_n6 a left outer join bucketmapjoin_hash_result_2_n6 b
 on a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin5.q b/ql/src/test/queries/clientpositive/bucketmapjoin5.q
index 7b09e7acd7..c2737cc3b9 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin5.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin5.q
@@ -2,19 +2,19 @@ SET hive.vectorized.execution.enabled=false;
 set hive.strict.checks.bucketing=false;
 set hive.mapred.mode=nonstrict;
-CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin;
-
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
+CREATE TABLE srcbucket_mapjoin_n0(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n0;
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n0;
+
+CREATE TABLE srcbucket_mapjoin_part_n0 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09');
 CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
 load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
@@ -31,12 +31,12 @@ create table bucketmapjoin_tmp_result (key string , value1 string, value2 string
 explain extended
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key;
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key;
 select count(1) from bucketmapjoin_tmp_result;
@@ -46,7 +46,7 @@ select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_t
 set hive.optimize.bucketmapjoin = false;
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key;
 select count(1) from bucketmapjoin_tmp_result;
@@ -62,12 +62,12 @@ set hive.optimize.bucketmapjoin = true;
 explain extended
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key;
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key;
 select count(1) from bucketmapjoin_tmp_result;
@@ -77,7 +77,7 @@ select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_t
 set hive.optimize.bucketmapjoin = false;
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key;
 select count(1) from bucketmapjoin_tmp_result;
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin7.q b/ql/src/test/queries/clientpositive/bucketmapjoin7.q
index cbd690ff45..917a4a179c 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin7.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin7.q
@@ -4,15 +4,15 @@ set hive.cbo.enable=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING)
+CREATE TABLE srcbucket_mapjoin_part_1_n4 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (ds='2008-04-08', hr='0');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (ds='2008-04-08', hr='0');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n4 PARTITION (ds='2008-04-08', hr='0');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n4 PARTITION (ds='2008-04-08', hr='0');
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n9 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (ds='2008-04-08', hr='0');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (ds='2008-04-08', hr='0');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n9 PARTITION (ds='2008-04-08', hr='0');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n9 PARTITION (ds='2008-04-08', hr='0');
 set hive.optimize.bucketmapjoin=true;
@@ -20,11 +20,11 @@ set hive.optimize.bucketmapjoin=true;
 EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ a.key, b.value
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n4 a JOIN srcbucket_mapjoin_part_2_n9 b
 ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08'
 ORDER BY a.key, b.value LIMIT 1;
 SELECT /*+ MAPJOIN(b) */ a.key, b.value
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n4 a JOIN srcbucket_mapjoin_part_2_n9 b
 ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08'
 ORDER BY a.key, b.value LIMIT 1;
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin8.q b/ql/src/test/queries/clientpositive/bucketmapjoin8.q
index 15c52dbac9..ce05e34b5b 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin8.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin8.q
@@ -4,17 +4,17 @@ set hive.strict.checks.bucketing=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_1_n1 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n1 PARTITION (part='1');
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n4 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n4 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n4 PARTITION (part='1');
-ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS;
+ALTER TABLE srcbucket_mapjoin_part_2_n4 CLUSTERED BY (key) INTO 3 BUCKETS;
set hive.optimize.bucketmapjoin=true; set hive.cbo.enable=false; @@ -22,22 +22,22 @@ set hive.cbo.enable=false; EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b ON a.key = b.key AND a.part = '1' and b.part = '1'; SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b ON a.key = b.key AND a.part = '1' and b.part = '1'; -ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS; +ALTER TABLE srcbucket_mapjoin_part_2_n4 CLUSTERED BY (value) INTO 2 BUCKETS; -- The partition bucketing metadata match but the tables are bucketed on different columns, bucket map join should still be used EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b ON a.key = b.key AND a.part = '1' and b.part = '1'; SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b ON a.key = b.key AND a.part = '1' and b.part = '1'; diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin9.q b/ql/src/test/queries/clientpositive/bucketmapjoin9.q index d6ebd062ef..6cfebba65f 100644 --- a/ql/src/test/queries/clientpositive/bucketmapjoin9.q +++ b/ql/src/test/queries/clientpositive/bucketmapjoin9.q @@ -5,18 +5,18 @@ set hive.strict.checks.bucketing=false; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) +CREATE TABLE srcbucket_mapjoin_part_1_n5 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1'); -LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1'); +LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n5 PARTITION (part='1'); +LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n5 PARTITION (part='1'); -CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) +CREATE TABLE srcbucket_mapjoin_part_2_n12 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1'); -LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1'); -LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1'); +LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1'); +LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1'); +LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1'); -ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS; +ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (key) INTO 2 BUCKETS; set hive.optimize.bucketmapjoin=true; @@ -25,27 
+25,27 @@ set hive.cbo.enable=false; EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b ON a.key = b.key AND a.part = '1' and b.part = '1'; SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b ON a.key = b.key AND a.part = '1' and b.part = '1'; -ALTER TABLE srcbucket_mapjoin_part_2 DROP PARTITION (part='1'); -ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS; -LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1'); -LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1'); +ALTER TABLE srcbucket_mapjoin_part_2_n12 DROP PARTITION (part='1'); +ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (value) INTO 2 BUCKETS; +LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1'); +LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1'); -ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS; +ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (key) INTO 2 BUCKETS; -- The table bucketing metadata matches but the partitions are bucketed on different columns, bucket map join should not be used EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b ON a.key = b.key AND a.part = '1' AND b.part = '1'; SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b ON a.key = b.key AND a.part = '1' AND b.part = '1'; diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q b/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q index aa207a8af9..7c38238cbe 100644 --- a/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q +++ b/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q @@ -1,24 +1,24 @@ set hive.strict.checks.bucketing=false; -CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n10(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n10; +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n10; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +CREATE TABLE srcbucket_mapjoin_part_n10 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS 
STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n10 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n10 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n10 partition(ds='2008-04-08'); set hive.cbo.enable=false; set hive.optimize.bucketmapjoin = true; -create table bucketmapjoin_tmp_result (key string , value1 string, value2 string); +create table bucketmapjoin_tmp_result_n4 (key string , value1 string, value2 string); explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n4 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n10 a join srcbucket_mapjoin_part_n10 b on a.key=b.key where b.ds="2008-04-08"; diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q b/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q index c0db5df776..c8538d70fb 100644 --- a/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q +++ b/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q @@ -1,21 +1,21 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n5(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n5; +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n5; -CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09'); -load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09'); +CREATE TABLE srcbucket_mapjoin_part_2_n7 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-09'); +load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-09'); set hive.cbo.enable=false; set hive.optimize.bucketmapjoin = true; -create table bucketmapjoin_tmp_result (key string , value1 string, value2 string); +create table bucketmapjoin_tmp_result_n3 (key string , value1 string, value2 string); explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n3 select 
/*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_n5 a join srcbucket_mapjoin_part_2_n7 b on a.key=b.key; diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q b/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q index 5d319b6c9e..e301b9ae01 100644 --- a/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q +++ b/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q @@ -1,22 +1,22 @@ set hive.strict.checks.bucketing=false; -drop table test1; -drop table test2; +drop table test1_n11; +drop table test2_n7; drop table test3; drop table test4; -create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets; -create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets; +create table test1_n11 (key string, value string) clustered by (key) sorted by (key) into 3 buckets; +create table test2_n7 (key string, value string) clustered by (value) sorted by (value) into 3 buckets; create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets; create table test4 (key string, value string) clustered by (value, key) sorted by (value, key) into 3 buckets; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test1; -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test1; -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test1; +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test1_n11; +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test1_n11; +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test1_n11; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test2; -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test2; -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test2; +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test2_n7; +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test2_n7; +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test2_n7; load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test3; load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test3; @@ -28,14 +28,14 @@ load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test4; set hive.cbo.enable=false; set hive.optimize.bucketmapjoin = true; -- should be allowed -explain extended select /*+ MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value; -explain extended select /*+ MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value; +explain extended select /*+ MAPJOIN(R) */ * from test1_n11 L join test1_n11 R on L.key=R.key AND L.value=R.value; +explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test2_n7 R on L.key=R.key AND L.value=R.value; -- should not apply bucket mapjoin -explain extended select /*+ MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key; -explain extended select /*+ MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value; -explain extended select /*+ MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value; -explain extended select /*+ MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value; -explain extended select /*+ MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value; -explain extended select /*+ 
MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value; +explain extended select /*+ MAPJOIN(R) */ * from test1_n11 L join test1_n11 R on L.key+L.key=R.key; +explain extended select /*+ MAPJOIN(R) */ * from test1_n11 L join test2_n7 R on L.key=R.key AND L.value=R.value; +explain extended select /*+ MAPJOIN(R) */ * from test1_n11 L join test3 R on L.key=R.key AND L.value=R.value; +explain extended select /*+ MAPJOIN(R) */ * from test1_n11 L join test4 R on L.key=R.key AND L.value=R.value; +explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test3 R on L.key=R.key AND L.value=R.value; +explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test4 R on L.key=R.key AND L.value=R.value; explain extended select /*+ MAPJOIN(R) */ * from test3 L join test4 R on L.key=R.key AND L.value=R.value; diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q index d5791e19de..5bfdf48fad 100644 --- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q +++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q @@ -11,70 +11,70 @@ set hive.merge.mapredfiles=false; set hive.auto.convert.sortmerge.join.to.mapjoin=true; -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table1_n5 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n5 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *; +INSERT OVERWRITE TABLE test_table1_n5 PARTITION (ds = '1') SELECT *; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-only operation EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT x.key, x.value from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x; -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT x.key, x.value from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x; -select count(*) from test_table2 where ds = '1'; -select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'; -select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'; +select count(*) from test_table2_n5 where ds = '1'; +select count(*) from test_table2_n5 tablesample (bucket 1 out of 2) s where ds = '1'; +select count(*) from test_table2_n5 tablesample (bucket 2 out of 2) s where ds = '1'; EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT * from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x; -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT * from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x; -select count(*) from test_table2 where 
ds = '1'; -select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'; -select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'; +select count(*) from test_table2_n5 where ds = '1'; +select count(*) from test_table2_n5 tablesample (bucket 1 out of 2) s where ds = '1'; +select count(*) from test_table2_n5 tablesample (bucket 2 out of 2) s where ds = '1'; -- it should be a map-only job EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT x.key, concat(x.value, x.value) from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x; -- it should be a map-reduce job EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT x.key+x.key, x.value from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x; -- it should be a map-only job EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT x.k1, concat(x.v1, x.v1) from ( -SELECT a.key as k1, a.value as v1 FROM test_table1 a WHERE a.ds = '1' +SELECT a.key as k1, a.value as v1 FROM test_table1_n5 a WHERE a.ds = '1' )x; diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q index fadd178a7c..0b913caa8e 100644 --- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q +++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q @@ -17,132 +17,132 @@ set hive.auto.convert.sortmerge.join.to.mapjoin=true; set hive.auto.convert.join.noconditionaltask.size=10; -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table1_n0 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n0 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table3_n0 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10; +INSERT OVERWRITE TABLE test_table1_n0 PARTITION (ds = '1') SELECT * where key < 10; FROM src -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100; +INSERT OVERWRITE TABLE test_table2_n0 PARTITION (ds = '1') SELECT * where key < 100; FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10; +INSERT OVERWRITE TABLE test_table1_n0 PARTITION (ds = '2') SELECT * where key < 10; FROM src -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 100; +INSERT OVERWRITE TABLE test_table2_n0 PARTITION (ds = '2') SELECT * where key < 100; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-only operation EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM 
test_table1_n0 a JOIN test_table2_n0 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n0 a JOIN test_table2_n0 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'; -select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'; -select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 1 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 2 out of 2) s where ds = '1'; -- Since more than one partition of 'a' (the big table) is being selected, -- it should be a map-reduce job EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n0 a JOIN test_table2_n0 b ON a.key = b.key WHERE a.ds is not null and b.ds = '1'; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n0 a JOIN test_table2_n0 b ON a.key = b.key WHERE a.ds is not null and b.ds = '1'; -select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'; -select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 1 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 2 out of 2) s where ds = '1'; -- Since a single partition of the big table ('a') is being selected, it should be a map-only -- job even though multiple partitions of 'b' are being selected EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n0 a JOIN test_table2_n0 b ON a.key = b.key WHERE a.ds = '1' and b.ds is not null; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n0 a JOIN test_table2_n0 b ON a.key = b.key WHERE a.ds = '1' and b.ds is not null; -select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'; -select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 1 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 2 out of 2) s where ds = '1'; -- This should be a map-only job EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM -(select key, value from test_table1 where ds = '1') a +(select key, value from test_table1_n0 where ds = '1') a JOIN -(select key, value from test_table2 where ds = '1') b +(select key, value from test_table2_n0 where ds = '1') b ON a.key = b.key; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM -(select key, value from test_table1 where ds = '1') a +(select key, value from test_table1_n0 where ds = '1') a JOIN -(select key, value from test_table2 
where ds = '1') b +(select key, value from test_table2_n0 where ds = '1') b ON a.key = b.key; -select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'; -select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 1 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 2 out of 2) s where ds = '1'; -- This should be a map-only job EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.v1, b.v2) FROM -(select key, concat(value, value) as v1 from test_table1 where ds = '1') a +(select key, concat(value, value) as v1 from test_table1_n0 where ds = '1') a JOIN -(select key, concat(value, value) as v2 from test_table2 where ds = '1') b +(select key, concat(value, value) as v2 from test_table2_n0 where ds = '1') b ON a.key = b.key; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key, concat(a.v1, b.v2) FROM -(select key, concat(value, value) as v1 from test_table1 where ds = '1') a +(select key, concat(value, value) as v1 from test_table1_n0 where ds = '1') a JOIN -(select key, concat(value, value) as v2 from test_table2 where ds = '1') b +(select key, concat(value, value) as v2 from test_table2_n0 where ds = '1') b ON a.key = b.key; -select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'; -select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 1 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 2 out of 2) s where ds = '1'; -- This should be a map-reduce job EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key+a.key, concat(a.value, b.value) FROM -(select key, value from test_table1 where ds = '1') a +(select key, value from test_table1_n0 where ds = '1') a JOIN -(select key, value from test_table2 where ds = '1') b +(select key, value from test_table2_n0 where ds = '1') b ON a.key = b.key; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n0 PARTITION (ds = '1') SELECT a.key+a.key, concat(a.value, b.value) FROM -(select key, value from test_table1 where ds = '1') a +(select key, value from test_table1_n0 where ds = '1') a JOIN -(select key, value from test_table2 where ds = '1') b +(select key, value from test_table2_n0 where ds = '1') b ON a.key = b.key; -select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'; -select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 1 out of 2) s where ds = '1'; +select * from test_table3_n0 tablesample (bucket 2 out of 2) s where ds = '1'; diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q index 7c44d7004c..289d10ec1c 100644 --- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q +++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q @@ -11,44 +11,44 @@ set hive.merge.mapredfiles=false; set hive.auto.convert.sortmerge.join.to.mapjoin=true; -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table1_n19 (key INT, value STRING) PARTITIONED BY (ds 
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
index 7c44d7004c..289d10ec1c 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
@@ -11,44 +11,44 @@ set hive.merge.mapredfiles=false;
set hive.auto.convert.sortmerge.join.to.mapjoin=true;
-- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table1_n19 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table2 (value STRING, key INT) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table2_n18 (value STRING, key INT) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *;
+INSERT OVERWRITE TABLE test_table1_n19 PARTITION (ds = '1') SELECT *;
-- Insert data into the bucketed table by selecting from another bucketed table
-- The bucketing positions don't match - although the actual bucketing does.
-- This should be a map-only operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
SELECT x.value, x.key from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x;
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x;
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
SELECT x.value, x.key from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x;
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x;
-select count(*) from test_table2 where ds = '1';
-select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1';
-select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1';
+select count(*) from test_table2_n18 where ds = '1';
+select count(*) from test_table2_n18 tablesample (bucket 1 out of 2) s where ds = '1';
+select count(*) from test_table2_n18 tablesample (bucket 2 out of 2) s where ds = '1';
-CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table3_n10 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-- Insert data into the bucketed table by selecting from another bucketed table
-- The bucketing positions don't match - this should be a map-reduce operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
SELECT x.key, x.value from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x;
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x;
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
SELECT x.key, x.value from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x;
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x;
-select count(*) from test_table2 where ds = '1';
-select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1';
-select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1';
+select count(*) from test_table2_n18 where ds = '1';
+select count(*) from test_table2_n18 tablesample (bucket 1 out of 2) s where ds = '1';
+select count(*) from test_table2_n18 tablesample (bucket 2 out of 2) s where ds = '1';
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
index 4189c549b3..25bb8727c4 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
@@ -15,54 +15,54 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy=org.apache.hadoop
set hive.auto.convert.sortmerge.join.to.mapjoin=true;
-- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table1_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table2_n15 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table3_n8 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key2) SORTED BY (key2) INTO 2 BUCKETS;
FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10;
+INSERT OVERWRITE TABLE test_table1_n16 PARTITION (ds = '1') SELECT * where key < 10;
FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100;
+INSERT OVERWRITE TABLE test_table2_n15 PARTITION (ds = '1') SELECT * where key < 100;
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-only operation, since the insert is happening on the bucketing position
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
SELECT a.key, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
SELECT a.key, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n8 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n8 tablesample (bucket 2 out of 2) s where ds = '1';
-DROP TABLE test_table3;
+DROP TABLE test_table3_n8;
-CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table3_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-reduce operation, since the insert is happening on a non-bucketing position
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
SELECT a.key, a.value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
SELECT a.key, a.value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n8 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n8 tablesample (bucket 2 out of 2) s where ds = '1';
-DROP TABLE test_table3;
+DROP TABLE test_table3_n8;
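insert_3 and insert_4 probe the same rule from two angles: an insert-select between bucketed tables can stay map-only only while the target's bucketing column receives data that is already bucketed the same way; otherwise a reducer must re-hash the rows. A condensed sketch under hypothetical names (src_b, dst_k, dst_v), assuming bucketing enforcement is on as in these tests:

    CREATE TABLE src_b (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    CREATE TABLE dst_k (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    CREATE TABLE dst_v (key INT, value STRING)
    CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
    -- expected map-only: key keeps its bucketing position end to end
    EXPLAIN INSERT OVERWRITE TABLE dst_k SELECT key, value FROM src_b;
    -- expected map-reduce: dst_v buckets on value, so rows must be re-hashed
    EXPLAIN INSERT OVERWRITE TABLE dst_v SELECT key, value FROM src_b;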
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
index c9c617da81..038c03c066 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
@@ -15,52 +15,52 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy=org.apache.hadoop
set hive.auto.convert.sortmerge.join.to.mapjoin=true;
-- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table1_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table2_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table3_n5 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key desc) INTO 2 BUCKETS;
FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10;
+INSERT OVERWRITE TABLE test_table1_n8 PARTITION (ds = '1') SELECT * where key < 10;
FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100;
+INSERT OVERWRITE TABLE test_table2_n8 PARTITION (ds = '1') SELECT * where key < 100;
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-reduce operation, since the sort-order does not match
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n8 a JOIN test_table2_n8 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n8 a JOIN test_table2_n8 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n5 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n5 tablesample (bucket 2 out of 2) s where ds = '1';
-- This should be a map-reduce job since the sort order does not match
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
FROM
-(select key, value from test_table1 where ds = '1') a
+(select key, value from test_table1_n8 where ds = '1') a
JOIN
-(select key, value from test_table2 where ds = '1') b
+(select key, value from test_table2_n8 where ds = '1') b
ON a.key = b.key;
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
FROM
-(select key, value from test_table1 where ds = '1') a
+(select key, value from test_table1_n8 where ds = '1') a
JOIN
-(select key, value from test_table2 where ds = '1') b
+(select key, value from test_table2_n8 where ds = '1') b
ON a.key = b.key;
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n5 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n5 tablesample (bucket 2 out of 2) s where ds = '1';
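insert_5 isolates the sort order: matching bucket columns and bucket counts are not enough, because SORTED BY (key) feeding SORTED BY (key desc) still leaves each output bucket in the wrong order. A hypothetical miniature of that case:

    CREATE TABLE asc_t (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key ASC) INTO 2 BUCKETS;
    CREATE TABLE desc_t (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS;
    -- same bucketing, opposite sort order: a reduce phase re-sorts each bucket
    EXPLAIN INSERT OVERWRITE TABLE desc_t SELECT key, value FROM asc_t;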
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
index af2ab96f03..cd0a234097 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
@@ -15,132 +15,132 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy=org.apache.hadoop
set hive.auto.convert.sortmerge.join.to.mapjoin=true;
-- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table1_n3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS;
-CREATE TABLE test_table2 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table2_n3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS;
-CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table3_n3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS;
FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT key, key+1, value where key < 10;
+INSERT OVERWRITE TABLE test_table1_n3 PARTITION (ds = '1') SELECT key, key+1, value where key < 10;
FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT key, key+1, value where key < 100;
+INSERT OVERWRITE TABLE test_table2_n3 PARTITION (ds = '1') SELECT key, key+1, value where key < 100;
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-only operation, since the sort-order matches
set hive.auto.convert.join.noconditionaltask.size=800;
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT a.key, a.key2, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1';
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT a.key, a.key2, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1';
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n3 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n3 tablesample (bucket 2 out of 2) s where ds = '1';
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-only operation, since the sort-order matches
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT subq1.key, subq1.key2, subq1.value from
(
SELECT a.key, a.key2, concat(a.value, b.value) as value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
)subq1;
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT subq1.key, subq1.key2, subq1.value from
(
SELECT a.key, a.key2, concat(a.value, b.value) as value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
)subq1;
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n3 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n3 tablesample (bucket 2 out of 2) s where ds = '1';
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-reduce operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT a.key2, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1';
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-reduce operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT subq1.key2, subq1.key, subq1.value from
(
SELECT a.key, a.key2, concat(a.value, b.value) as value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
)subq1;
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-only operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT subq2.key, subq2.key2, subq2.value from
(
SELECT subq1.key2, subq1.key, subq1.value from
(
SELECT a.key, a.key2, concat(a.value, b.value) as value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
)subq1
)subq2;
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT subq2.key, subq2.key2, subq2.value from
(
SELECT subq1.key2, subq1.key, subq1.value from
(
SELECT a.key, a.key2, concat(a.value, b.value) as value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
)subq1
)subq2;
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n3 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n3 tablesample (bucket 2 out of 2) s where ds = '1';
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-only operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT subq2.k2, subq2.k1, subq2.value from
(
SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from
(
SELECT a.key, a.key2, concat(a.value, b.value) as value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
)subq1
)subq2;
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n3 PARTITION (ds = '1')
SELECT subq2.k2, subq2.k1, subq2.value from
(
SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from
(
SELECT a.key, a.key2, concat(a.value, b.value) as value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
)subq1
)subq2;
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n3 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n3 tablesample (bucket 2 out of 2) s where ds = '1';
CREATE TABLE test_table4 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key, key2) SORTED BY (key DESC, key2 DESC) INTO 2 BUCKETS;
@@ -154,7 +154,7 @@ SELECT subq2.k2, subq2.k1, subq2.value from
SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from
(
SELECT a.key, a.key2, concat(a.value, b.value) as value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n3 a JOIN test_table2_n3 b
ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
)subq1
)subq2;
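The insert_6 cases pin down that it is the final projected position of each bucketing column that matters: swapping (key, key2) in the select list forces a reduce, while swapping twice through nested subqueries cancels out and stays map-only. In miniature, with a hypothetical pair_t:

    CREATE TABLE pair_t (k1 INT, k2 INT, v STRING)
    CLUSTERED BY (k1, k2) SORTED BY (k1 ASC, k2 DESC) INTO 2 BUCKETS;
    -- expected map-reduce: the projection swaps the bucketing columns
    EXPLAIN INSERT OVERWRITE TABLE pair_t SELECT k2, k1, v FROM pair_t;
    -- expected map-only: the inner swap is undone by the outer one
    EXPLAIN INSERT OVERWRITE TABLE pair_t
    SELECT sq.b, sq.a, sq.v FROM (SELECT k2 AS a, k1 AS b, v FROM pair_t) sq;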
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
index 1b344da2b3..bef48b2d0a 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
@@ -17,77 +17,77 @@ set hive.auto.convert.sortmerge.join.to.mapjoin=true;
set hive.auto.convert.join.noconditionaltask.size=10;
-- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table1_n20 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table2_n19 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table3_n11 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10;
+INSERT OVERWRITE TABLE test_table1_n20 PARTITION (ds = '1') SELECT * where key < 10;
FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100;
+INSERT OVERWRITE TABLE test_table2_n19 PARTITION (ds = '1') SELECT * where key < 100;
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-only operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n11 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n20 a JOIN test_table2_n19 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' and (a.key = 0 or a.key = 5);
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n11 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n20 a JOIN test_table2_n19 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' and (a.key = 0 or a.key = 5);
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n11 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n11 tablesample (bucket 2 out of 2) s where ds = '1';
-- This should be a map-only job
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n11 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
FROM
-(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a
+(select key, value from test_table1_n20 where ds = '1' and (key = 0 or key = 5)) a
JOIN
-(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b
+(select key, value from test_table2_n19 where ds = '1' and (key = 0 or key = 5)) b
ON a.key = b.key;
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n11 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
FROM
-(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a
+(select key, value from test_table1_n20 where ds = '1' and (key = 0 or key = 5)) a
JOIN
-(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b
+(select key, value from test_table2_n19 where ds = '1' and (key = 0 or key = 5)) b
ON a.key = b.key;
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n11 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n11 tablesample (bucket 2 out of 2) s where ds = '1';
-- This should be a map-only job
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n11 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
FROM
-(select key, value from test_table1 where ds = '1' and key < 8) a
+(select key, value from test_table1_n20 where ds = '1' and key < 8) a
JOIN
-(select key, value from test_table2 where ds = '1' and key < 8) b
+(select key, value from test_table2_n19 where ds = '1' and key < 8) b
ON a.key = b.key WHERE a.key = 0 or a.key = 5;
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n11 PARTITION (ds = '1')
SELECT a.key, concat(a.value, b.value)
FROM
-(select key, value from test_table1 where ds = '1' and key < 8) a
+(select key, value from test_table1_n20 where ds = '1' and key < 8) a
JOIN
-(select key, value from test_table2 where ds = '1' and key < 8) b
+(select key, value from test_table2_n19 where ds = '1' and key < 8) b
ON a.key = b.key WHERE a.key = 0 or a.key = 5;
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n11 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n11 tablesample (bucket 2 out of 2) s where ds = '1';
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
index b9492eb662..1f66e2035f 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
@@ -15,47 +15,47 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy=org.apache.hadoop
set hive.auto.convert.sortmerge.join.to.mapjoin=true;
-- Create two bucketed and sorted tables
-CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table1_n2 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table2_n2 (key INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CREATE TABLE test_table3_n2 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10;
+INSERT OVERWRITE TABLE test_table1_n2 PARTITION (ds = '1') SELECT * where key < 10;
FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100;
+INSERT OVERWRITE TABLE test_table2_n2 PARTITION (ds = '1') SELECT * where key < 100;
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-only operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1')
SELECT a.key, b.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n2 a JOIN test_table2_n2 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1')
SELECT a.key, b.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n2 a JOIN test_table2_n2 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n2 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n2 tablesample (bucket 2 out of 2) s where ds = '1';
-- Insert data into the bucketed table by selecting from another bucketed table
-- This should be a map-only operation
EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1')
SELECT b.key, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n2 a JOIN test_table2_n2 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1')
SELECT b.key, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n2 a JOIN test_table2_n2 b
ON a.key = b.key WHERE a.ds = '1' and b.ds = '1';
-select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1';
-select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1';
+select * from test_table3_n2 tablesample (bucket 1 out of 2) s where ds = '1';
+select * from test_table3_n2 tablesample (bucket 2 out of 2) s where ds = '1';
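insert_8 adds one wrinkle to the position rule: writing b.key instead of a.key into the bucketing position is still map-only, presumably because the equi-join condition a.key = b.key makes the two columns interchangeable for bucketing purposes. Reusing the hypothetical src_b/dst_k tables from the earlier sketch:

    -- with t1.key = t2.key enforced by the join, either side's key column
    -- is expected to satisfy the target's bucketing position
    EXPLAIN INSERT OVERWRITE TABLE dst_k
    SELECT t2.key, concat(t1.value, t2.value)
    FROM src_b t1 JOIN src_b t2 ON t1.key = t2.key;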
diff --git a/ql/src/test/queries/clientpositive/case_sensitivity.q b/ql/src/test/queries/clientpositive/case_sensitivity.q
index 1dab85e39c..3f9f9c92de 100644
--- a/ql/src/test/queries/clientpositive/case_sensitivity.q
+++ b/ql/src/test/queries/clientpositive/case_sensitivity.q
@@ -1,11 +1,11 @@
--! qt:dataset:src_thrift
-CREATE TABLE DEST1(Key INT, VALUE STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n129(Key INT, VALUE STRING) STORED AS TEXTFILE;
EXPLAIN
FROM SRC_THRIFT
-INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0;
+INSERT OVERWRITE TABLE dest1_n129 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0;
FROM SRC_THRIFT
-INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0;
+INSERT OVERWRITE TABLE dest1_n129 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0;
-SELECT DEST1.* FROM Dest1;
+SELECT DEST1_n129.* FROM Dest1_n129;
diff --git a/ql/src/test/queries/clientpositive/cast1.q b/ql/src/test/queries/clientpositive/cast1.q
index 359b022206..a3fd707cbe 100644
--- a/ql/src/test/queries/clientpositive/cast1.q
+++ b/ql/src/test/queries/clientpositive/cast1.q
@@ -1,11 +1,11 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(c1 INT, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 INT, c6 STRING, c7 INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n151(c1 INT, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 INT, c6 STRING, c7 INT) STORED AS TEXTFILE;
EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86;
+FROM src INSERT OVERWRITE TABLE dest1_n151 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86;
+FROM src INSERT OVERWRITE TABLE dest1_n151 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86;
-select dest1.* FROM dest1;
+select dest1_n151.* FROM dest1_n151;
diff --git a/ql/src/test/queries/clientpositive/cast_on_constant.q b/ql/src/test/queries/clientpositive/cast_on_constant.q
index bd9d57ad19..780a6efced 100644
--- a/ql/src/test/queries/clientpositive/cast_on_constant.q
+++ b/ql/src/test/queries/clientpositive/cast_on_constant.q
@@ -1,8 +1,8 @@
-create table t1(ts_field timestamp, date_field date);
-explain select * from t1 where ts_field = "2016-01-23 00:00:00";
-explain select * from t1 where date_field = "2016-01-23";
-explain select * from t1 where ts_field = timestamp '2016-01-23 00:00:00';
-explain select * from t1 where date_field = date '2016-01-23';
-explain select * from t1 where date_field = ts_field;
+create table t1_n138(ts_field timestamp, date_field date);
+explain select * from t1_n138 where ts_field = "2016-01-23 00:00:00";
+explain select * from t1_n138 where date_field = "2016-01-23";
+explain select * from t1_n138 where ts_field = timestamp '2016-01-23 00:00:00';
+explain select * from t1_n138 where date_field = date '2016-01-23';
+explain select * from t1_n138 where date_field = ts_field;
-drop table t1;
+drop table t1_n138;
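The cast_on_constant queries above exercise where the planner places the type conversion when a string literal meets a timestamp or date column: the expectation is that the literal is folded to a typed constant once at plan time rather than the column being cast for every row. Typed literals express the same predicates without leaning on coercion rules; a sketch against the t1_n138 schema from this patch:

    -- implicit: the string literal should be folded to a timestamp constant
    EXPLAIN SELECT * FROM t1_n138 WHERE ts_field = "2016-01-23 00:00:00";
    -- explicit typed literals, no coercion needed
    EXPLAIN SELECT * FROM t1_n138 WHERE ts_field = TIMESTAMP '2016-01-23 00:00:00';
    EXPLAIN SELECT * FROM t1_n138 WHERE date_field = DATE '2016-01-23';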
diff --git a/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q b/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
index 95cc518055..eea85b91db 100644
--- a/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
+++ b/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
@@ -1,8 +1,8 @@
--! qt:dataset:src
-drop table t;
-CREATE TABLE t(c tinyint);
-insert overwrite table t select 10 from src limit 1;
+drop table t_n24;
+CREATE TABLE t_n24(c tinyint);
+insert overwrite table t_n24 select 10 from src limit 1;
-select * from t where c = 10.0;
+select * from t_n24 where c = 10.0;
-select * from t where c = -10.0;
\ No newline at end of file
+select * from t_n24 where c = -10.0;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/cbo_SortUnionTransposeRule.q b/ql/src/test/queries/clientpositive/cbo_SortUnionTransposeRule.q
index 1b2925ee45..cfdcf3d1bd 100644
--- a/ql/src/test/queries/clientpositive/cbo_SortUnionTransposeRule.q
+++ b/ql/src/test/queries/clientpositive/cbo_SortUnionTransposeRule.q
@@ -2,30 +2,30 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-create table s as select * from src limit 10;
+create table s_n3 as select * from src limit 10;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
order by key;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
limit 0;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
limit 5;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
order by key
limit 5;
@@ -46,27 +46,27 @@ limit 5;
set hive.optimize.limittranspose=true;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
order by key;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
limit 0;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
limit 5;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
order by key
limit 5;
@@ -87,16 +87,16 @@ limit 5;
set hive.optimize.limittranspose.reductionpercentage=0.1f;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
limit 5;
set hive.optimize.limittranspose.reductionpercentage=1f;
set hive.optimize.limittranspose.reductiontuples=8;
explain
-select key from s a
+select key from s_n3 a
union all
-select key from s b
+select key from s_n3 b
limit 5;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/cbo_const.q b/ql/src/test/queries/clientpositive/cbo_const.q
index de5d21991f..1cc7afabb9 100644
--- a/ql/src/test/queries/clientpositive/cbo_const.q
+++ b/ql/src/test/queries/clientpositive/cbo_const.q
@@ -14,7 +14,7 @@ from src limit 1;
select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
-drop view t1;
+drop view t1_n107;
create table t1_new (key string, value string) partitioned by (ds string);
@@ -24,7 +24,7 @@ select 'key1', 'value1' from src tablesample (1 rows);
insert overwrite table t1_new partition (ds = '2011-10-16')
select 'key2', 'value2' from src tablesample (1 rows);
-create view t1 partitioned on (ds) as
+create view t1_n107 partitioned on (ds) as
select * from
(
select key, value, ds from t1_new
@@ -32,7 +32,7 @@ union all
select key, value, ds from t1_new
)subq;
-select * from t1 where ds = '2011-10-15';
+select * from t1_n107 where ds = '2011-10-15';
explain select array(1,2,3) from src;
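The cbo_SortUnionTransposeRule file above flips hive.optimize.limittranspose, the rule that copies a LIMIT below each UNION ALL branch so every branch can stop early, while the original LIMIT stays on top for correctness (the reductionpercentage and reductiontuples settings gate how much shrinkage justifies the copy). Conceptually, the rewrite is:

    -- before the rule
    SELECT key FROM s_n3 a
    UNION ALL
    SELECT key FROM s_n3 b
    LIMIT 5;
    -- after the rule (sketch): each branch is pre-truncated
    SELECT * FROM (
      SELECT key FROM (SELECT key FROM s_n3 a LIMIT 5) t1
      UNION ALL
      SELECT key FROM (SELECT key FROM s_n3 b LIMIT 5) t2
    ) u
    LIMIT 5;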
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q b/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
index 99bd780e06..9c351eae5f 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
@@ -23,69 +23,69 @@ set hive.map.aggr.hash.percentmemory=0.0f;
-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
-create table if not exists loc_staging (
+create table if not exists loc_staging_n1 (
state string,
locid int,
zip bigint,
year int
) row format delimited fields terminated by '|' stored as textfile;
-create table loc_orc like loc_staging;
-alter table loc_orc set fileformat orc;
+create table loc_orc_n1 like loc_staging_n1;
+alter table loc_orc_n1 set fileformat orc;
-load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n1;
-insert overwrite table loc_orc select * from loc_staging;
+insert overwrite table loc_orc_n1 select * from loc_staging_n1;
-- numRows: 8 rawDataSize: 796
-explain select * from loc_orc;
+explain select * from loc_orc_n1;
-- partial column stats
-analyze table loc_orc compute statistics for columns state;
+analyze table loc_orc_n1 compute statistics for columns state;
-- inner group by: map - numRows: 8 reduce - numRows: 4
-- outer group by: map - numRows: 4 reduce numRows: 2
explain select a, c, min(b)
from (
select state as a, locid as b, count(*) as c
- from loc_orc
+ from loc_orc_n1
group by state,locid
) sq1
group by a,c;
-analyze table loc_orc compute statistics for columns state,locid,year;
+analyze table loc_orc_n1 compute statistics for columns state,locid,year;
-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
-- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n1 group by year;
-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
-- Case 9: column stats, NO grouping sets - cardinality = 8
-explain select state,locid from loc_orc group by state,locid;
+explain select state,locid from loc_orc_n1 group by state,locid;
-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
-- Case 8: column stats, grouping sets - cardinality = 32
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
-- Case 8: column stats, grouping sets - cardinality = 24
-explain select state,locid from loc_orc group by state,locid with rollup;
-explain select state,locid from loc_orc group by rollup (state,locid);
+explain select state,locid from loc_orc_n1 group by state,locid with rollup;
+explain select state,locid from loc_orc_n1 group by rollup (state,locid);
-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
-- Case 8: column stats, grouping sets - cardinality = 8
-explain select state,locid from loc_orc group by state,locid grouping sets((state));
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state));
-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
-- Case 8: column stats, grouping sets - cardinality = 16
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid));
-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
-- Case 8: column stats, grouping sets - cardinality = 24
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid),());
-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
-- Case 8: column stats, grouping sets - cardinality = 32
-explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state,locid),(state),(locid),());
set hive.map.aggr.hash.percentmemory=0.5f;
set mapred.max.split.size=80;
@@ -93,52 +93,52 @@ set mapred.max.split.size=80;
-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
-- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n1 group by year;
-- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
-- Case 8: column stats, grouping sets - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
-- ndvProduct becomes 0 as zip does not have column stats
-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
-- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select state,zip from loc_orc group by state,zip;
+explain select state,zip from loc_orc_n1 group by state,zip;
set mapred.max.split.size=1000;
set hive.stats.fetch.column.stats=false;
-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
-- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
-- Case 7: NO column stats - cardinality = 12
-explain select state,locid from loc_orc group by state,locid with rollup;
-explain select state,locid from loc_orc group by rollup (state,locid);
+explain select state,locid from loc_orc_n1 group by state,locid with rollup;
+explain select state,locid from loc_orc_n1 group by rollup (state,locid);
-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
-- Case 7: NO column stats - cardinality = 4
-explain select state,locid from loc_orc group by state,locid grouping sets((state));
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state));
-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16
-- Case 7: NO column stats - cardinality = 8
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid));
-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
-- Case 7: NO column stats - cardinality = 12
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid),());
-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
-- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state,locid),(state),(locid),());
set mapred.max.split.size=80;
-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
-- Case 7: NO column stats - cardinality = 4
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n1 group by year;
-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
-- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
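The cardinalities asserted in those Case 6/Case 8 comments follow from the hunk's own numbers: loc has numRows = 8, and grouping sets replicate each input row once per set before aggregation, so the operator sees 8 * (number of grouping sets) rows; the Min(...) caps from the formulas at the top of the file simply do not bind for this data. Spelled out:

    -- numRows = 8; one asserted estimate per grouping-set count:
    --   grouping sets((state))         : 1 set  -> 8 * 1 = 8
    --   grouping sets((state),(locid)) : 2 sets -> 8 * 2 = 16
    --   rollup (state,locid)           : 3 sets -> 8 * 3 = 24
    --   cube (state,locid)             : 4 sets -> 8 * 4 = 32
    EXPLAIN SELECT state, locid FROM loc_orc_n1 GROUP BY state, locid WITH CUBE;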
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
index 9137669281..e4163fafa4 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
@@ -10,20 +10,20 @@ set hive.exec.reducers.max = 1;
set hive.transpose.aggr.join=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl1_n13(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl2_n12(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-insert overwrite table tbl1
+insert overwrite table tbl1_n13
select * from src where key < 10;
-insert overwrite table tbl2
+insert overwrite table tbl2_n12
select * from src where key < 10;
-analyze table tbl1 compute statistics;
-analyze table tbl1 compute statistics for columns;
+analyze table tbl1_n13 compute statistics;
+analyze table tbl1_n13 compute statistics for columns;
-analyze table tbl2 compute statistics;
-analyze table tbl2 compute statistics for columns;
+analyze table tbl2_n12 compute statistics;
+analyze table tbl2_n12 compute statistics for columns;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -34,11 +34,11 @@ set hive.auto.convert.sortmerge.join=true;
-- The join is being performed as part of a sub-query. It should be converted to a sort-merge join
explain
select count(*) from (
- select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1;
select count(*) from (
- select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1;
-- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
@@ -47,7 +47,7 @@ select count(*) from
(
select key, count(*) from
(
- select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
group by key
) subq2;
@@ -56,7 +56,7 @@ select count(*) from
(
select key, count(*) from
(
- select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
group by key
) subq2;
@@ -68,14 +68,14 @@ select src1.key, src1.cnt1, src2.cnt1 from
(
select key, count(*) as cnt1 from
(
- select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1 group by key
) src1
join
(
select key, count(*) as cnt1 from
(
- select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq2 group by key
) src2
on src1.key = src2.key;
@@ -84,14 +84,14 @@ select src1.key, src1.cnt1, src2.cnt1 from
(
select key, count(*) as cnt1 from
(
- select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1 group by key
) src1
join
(
select key, count(*) as cnt1 from
(
- select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq2 group by key
) src2
on src1.key = src2.key;
@@ -100,15 +100,15 @@ on src1.key = src2.key;
-- be converted to a sort-merge join.
explain
select count(*) from
- (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
- (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key;
select count(*) from
- (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
- (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key;
-- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should
@@ -118,22 +118,22 @@ select count(*) from
(
select * from
(
- select a.key as key, a.value as value from tbl1 a where key < 8
+ select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
- join tbl2 b
+ join tbl2_n12 b
on subq2.key = b.key;
select count(*) from
(
select * from
(
- select a.key as key, a.value as value from tbl1 a where key < 8
+ select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
- join tbl2 b
+ join tbl2_n12 b
on subq2.key = b.key;
-- Both the tables are nested sub-queries, i.e. more than 1 level of sub-query.
@@ -143,7 +143,7 @@ select count(*) from
(
select * from
(
- select a.key as key, a.value as value from tbl1 a where key < 8
+ select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
@@ -151,7 +151,7 @@ select count(*) from
(
select * from
(
- select a.key as key, a.value as value from tbl1 a where key < 8
+ select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq3
where key < 6
) subq4
@@ -161,7 +161,7 @@ select count(*) from
(
select * from
(
- select a.key as key, a.value as value from tbl1 a where key < 8
+ select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
@@ -169,7 +169,7 @@ select count(*) from
(
select * from
(
- select a.key as key, a.value as value from tbl1 a where key < 8
+ select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq3
where key < 6
) subq4
@@ -180,62 +180,62 @@ select count(*) from
-- item, but that is not part of the join key.
explain
select count(*) from
- (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1
+ (select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1
join
- (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+ (select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2
on subq1.key = subq2.key;
select count(*) from
- (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1
+ (select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1
join
- (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+ (select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2
on subq1.key = subq2.key;
-- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized map-side
-- join should be performed
explain
select count(*) from
- (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1
join
- (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2
on subq1.key = subq2.key;
select count(*) from
- (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1
join
- (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2
on subq1.key = subq2.key;
-- One of the tables is a sub-query and the other is not.
-- It should be converted to a sort-merge join.
explain
select count(*) from
- (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
- join tbl2 a on subq1.key = a.key;
+ (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
+ join tbl2_n12 a on subq1.key = a.key;
select count(*) from
- (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
- join tbl2 a on subq1.key = a.key;
+ (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
+ join tbl2_n12 a on subq1.key = a.key;
-- There are more than 2 inputs to the join, all of them being sub-queries.
-- It should be converted to a sort-merge join
explain
select count(*) from
- (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
- (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on (subq1.key = subq2.key)
join
- (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+ (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3
on (subq1.key = subq3.key);
select count(*) from
- (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
- (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key
join
- (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+ (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3
on (subq1.key = subq3.key);
-- The join is being performed on a nested sub-query, and an aggregation is performed after that.
@@ -246,11 +246,11 @@ select count(*) from (
(
select * from
(
- select a.key as key, a.value as value from tbl1 a where key < 8
+ select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
-join tbl2 b
+join tbl2_n12 b
on subq2.key = b.key) a;
select count(*) from (
@@ -258,22 +258,22 @@ select count(*) from (
(
select * from
(
- select a.key as key, a.value as value from tbl1 a where key < 8
+ select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
-join tbl2 b
+join tbl2_n12 b
on subq2.key = b.key) a;
-- The join is followed by a multi-table insert. It should be converted to
-- a sort-merge join
-explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key;
+explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key;
-select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key;
+select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key;
-- The join is followed by a multi-table insert, and one of the inserts involves a reducer.
-- It should be converted to a sort-merge join
-explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key;
+explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key;
-select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key;
+select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
index ae17ec66b4..78c19368d5 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join17.q
@@ -4,14 +4,14 @@ set hive.mapred.mode=nonstrict;
set hive.cbo.returnpath.hiveop=true;
set hive.auto.convert.join = true;
-CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n112(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE;
explain
FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
+INSERT OVERWRITE TABLE dest1_n112 SELECT src1.*, src2.*;
FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
+INSERT OVERWRITE TABLE dest1_n112 SELECT src1.*, src2.*;
-SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1;
\ No newline at end of file
+SELECT sum(hash(dest1_n112.key1,dest1_n112.value1,dest1_n112.key2,dest1_n112.value2)) FROM dest1_n112;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q b/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
index 6ada9aec41..00c19c74ad 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_cross_product_check_2.q
@@ -4,10 +4,10 @@ set hive.cbo.returnpath.hiveop=true;
set hive.explain.user=false;
-- SORT_QUERY_RESULTS
-create table A as
+create table A_n18 as
select * from src;
-create table B as
+create table B_n14 as
select * from src
order by key
limit 10;
@@ -15,19 +15,19 @@ set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=10000000;
-explain select * from A join B;
+explain select * from A_n18 join B_n14;
-explain select * from B d1 join B d2 on d1.key = d2.key join A;
+explain select * from B_n14 d1 join B_n14 d2 on d1.key = d2.key join A_n18;
-explain select * from A join
+explain select * from A_n18 join
(select d1.key
- from B d1 join B d2 on d1.key = d2.key
+ from B_n14 d1 join B_n14 d2 on d1.key = d2.key
where 1 = 1 group by d1.key) od1;
-explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1;
+explain select * from A_n18 join (select d1.key from B_n14 d1 join B_n14 d2 where 1 = 1 group by d1.key) od1;
explain select * from
-(select A.key from A group by key) ss join
-(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1;
+(select A_n18.key from A_n18 group by key) ss join
+(select d1.key from B_n14 d1 join B_n14 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1;
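cbo_rp_cross_product_check_2 exists because a join with no usable equi-condition degenerates into a cross product, which Hive still plans but flags with a warning; these EXPLAINs assert that the detection survives the CBO return-path translation. The minimal trigger, using the patch's renamed tables:

    -- no join condition: every row of A_n18 pairs with every row of B_n14,
    -- so the plan should carry a cross-product warning
    EXPLAIN SELECT * FROM A_n18 JOIN B_n14;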
a/ql/src/test/queries/clientpositive/cbo_rp_gby2_map_multi_distinct.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_gby2_map_multi_distinct.q @@ -9,32 +9,32 @@ set mapred.reduce.tasks=31; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE; +CREATE TABLE dest1_n166(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n166 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1); FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n166 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1); -SELECT dest1.* FROM dest1; +SELECT dest1_n166.* FROM dest1_n166; -- HIVE-5560 when group by key is used in distinct function, invalid results are returned EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n166 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1); FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n166 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1); -SELECT dest1.* FROM dest1; +SELECT dest1_n166.* FROM dest1_n166; diff --git a/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q b/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q index ff2fbfb35b..ebd08bb37b 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q @@ -5,11 +5,11 @@ set hive.mapred.mode=nonstrict; set hive.groupby.skewindata=false; set mapred.reduce.tasks=31; -CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE; +CREATE TABLE dest1_n123(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n123 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -23,7 +23,7 @@ INSERT OVERWRITE TABLE dest1 SELECT count(DISTINCT substr(src.value, 5)); FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n123 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -36,5 +36,5 @@ INSERT OVERWRITE TABLE dest1 SELECT sum(DISTINCT substr(src.value, 5)), count(DISTINCT substr(src.value, 5)); -SELECT dest1.* FROM dest1; +SELECT dest1_n123.* FROM dest1_n123; diff --git a/ql/src/test/queries/clientpositive/cbo_rp_insert.q b/ql/src/test/queries/clientpositive/cbo_rp_insert.q index eeaeec2a68..4c493db80f 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_insert.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_insert.q @@ -4,14 +4,14 @@ set hive.cbo.returnpath.hiveop=true; drop database if exists x314 cascade; create
database x314; use x314; -create table source(s1 int, s2 int); -create table target1(x int, y int, z int); +create table source_n1(s1 int, s2 int); +create table target1_n0(x int, y int, z int); -insert into source(s2,s1) values(2,1); --- expect source to contain 1 row (1,2) -select * from source; -insert into target1(z,x) select * from source; --- expect target1 to contain 1 row (2,NULL,1) -select * from target1; +insert into source_n1(s2,s1) values(2,1); +-- expect source_n1 to contain 1 row (1,2) +select * from source_n1; +insert into target1_n0(z,x) select * from source_n1; +-- expect target1_n0 to contain 1 row (2,NULL,1) +select * from target1_n0; drop database if exists x314 cascade; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/cbo_rp_join1.q b/ql/src/test/queries/clientpositive/cbo_rp_join1.q index 6f637d97e3..2be8c2caef 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_join1.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_join1.q @@ -1,21 +1,21 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join = true; -CREATE TABLE myinput1(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1; +CREATE TABLE myinput1_n0(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1_n0; SET hive.optimize.bucketmapjoin = true; SET hive.optimize.bucketmapjoin.sortedmerge = true; SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND b.key = 40; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND b.key = 40; +EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND b.key = 40; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND b.key = 40; -EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.value = 40 AND a.key = a.value AND b.key = 40; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = a.value AND b.key = 40; +EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.value = 40 AND a.key = a.value AND b.key = 40; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = a.value AND b.key = 40; -EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = b.key AND b.key = 40; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = b.key AND b.key = 40; +EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = b.key AND b.key = 40; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = b.key AND b.key = 40; -EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 
AND b.value > 50 AND b.key = b.value; +EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; diff --git a/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q b/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q index 47c816803f..fc22431603 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_lineage2.q @@ -1,54 +1,54 @@ ---! qt:dataset:src1 ---! qt:dataset:src -set hive.mapred.mode=nonstrict; -set hive.cbo.returnpath.hiveop=true; -set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger; - -drop table if exists src2; -create table src2 as select key key2, value value2 from src1; - -select * from src1 where key is not null and value is not null limit 3; -select * from src1 where key > 10 and value > 'val' order by key limit 5; - -drop table if exists dest1; -create table dest1 as select * from src1; -insert into table dest1 select * from src2; - -select key k, dest1.value from dest1; -select key from src1 union select key2 from src2 order by key; -select key k from src1 union select key2 from src2 order by k; - -select key, count(1) a from dest1 group by key; -select key k, count(*) from dest1 group by key; -select key k, count(value) from dest1 group by key; -select value, max(length(key)) from dest1 group by value; -select value, max(length(key)) from dest1 group by value order by value limit 5; - -select key, length(value) from dest1; -select length(value) + 3 from dest1; -select 5 from dest1; -select 3 * 5 from dest1; - -drop table if exists dest2; -create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2; -insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2; -insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2; -insert into table dest2 - select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1; - -select * from src1 where length(key) > 2; -select * from src1 where length(key) > 2 and value > 'a'; - -drop table if exists dest3; -create table dest3 as - select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1; -insert overwrite table dest2 - select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3; - -drop table if exists dest_l1; -CREATE TABLE dest_l1(key INT, value STRING) STORED AS TEXTFILE; - -INSERT OVERWRITE TABLE dest_l1 +--! qt:dataset:src1 +--!
qt:dataset:src +set hive.mapred.mode=nonstrict; +set hive.cbo.returnpath.hiveop=true; +set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger; + +drop table if exists src2_n1; +create table src2_n1 as select key key2, value value2 from src1; + +select * from src1 where key is not null and value is not null limit 3; +select * from src1 where key > 10 and value > 'val' order by key limit 5; + +drop table if exists dest1_n95; +create table dest1_n95 as select * from src1; +insert into table dest1_n95 select * from src2_n1; + +select key k, dest1_n95.value from dest1_n95; +select key from src1 union select key2 from src2_n1 order by key; +select key k from src1 union select key2 from src2_n1 order by k; + +select key, count(1) a from dest1_n95 group by key; +select key k, count(*) from dest1_n95 group by key; +select key k, count(value) from dest1_n95 group by key; +select value, max(length(key)) from dest1_n95 group by value; +select value, max(length(key)) from dest1_n95 group by value order by value limit 5; + +select key, length(value) from dest1_n95; +select length(value) + 3 from dest1_n95; +select 5 from dest1_n95; +select 3 * 5 from dest1_n95; + +drop table if exists dest2_n25; +create table dest2_n25 as select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2; +insert overwrite table dest2_n25 select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2; +insert into table dest2_n25 select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2; +insert into table dest2_n25 + select * from src1 JOIN src2_n1 ON length(src1.value) = length(src2_n1.value2) + 1; + +select * from src1 where length(key) > 2; +select * from src1 where length(key) > 2 and value > 'a'; + +drop table if exists dest3_n3; +create table dest3_n3 as + select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2 WHERE length(key) > 1; +insert overwrite table dest2_n25 + select * from src1 JOIN src2_n1 ON src1.key = src2_n1.key2 WHERE length(key) > 3; + +drop table if exists dest_l1_n1; +CREATE TABLE dest_l1_n1(key INT, value STRING) STORED AS TEXTFILE; + +INSERT OVERWRITE TABLE dest_l1_n1 SELECT j.* FROM (SELECT t1.key, p1.value FROM src1 t1 @@ -60,61 +60,61 @@ FROM (SELECT t1.key, p1.value LEFT OUTER JOIN src p2 ON (t2.key = p2.key)) j; -drop table if exists emp; -drop table if exists dept; -drop table if exists project; -drop table if exists tgt; -create table emp(emp_id int, name string, mgr_id int, dept_id int); -create table dept(dept_id int, dept_name string); -create table project(project_id int, project_name string); -create table tgt(dept_name string, name string, - emp_id int, mgr_id int, proj_id int, proj_name string); - -INSERT INTO TABLE tgt +drop table if exists emp_n1; +drop table if exists dept_n0; +drop table if exists project_n0; +drop table if exists tgt_n0; +create table emp_n1(emp_id int, name string, mgr_id int, dept_id int); +create table dept_n0(dept_id int, dept_name string); +create table project_n0(project_id int, project_name string); +create table tgt_n0(dept_name string, name string, + emp_id int, mgr_id int, proj_id int, proj_name string); + +INSERT INTO TABLE tgt_n0 SELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name FROM ( SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id FROM ( SELECT e.name, e.dept_id, e.emp_id
emp_id, m.emp_id mgr_id - FROM emp e JOIN emp m ON e.emp_id = m.emp_id + FROM emp_n1 e JOIN emp_n1 m ON e.emp_id = m.emp_id ) em - JOIN dept d ON d.dept_id = em.dept_id - ) emd JOIN project p ON emd.dept_id = p.project_id; + JOIN dept_n0 d ON d.dept_id = em.dept_id + ) emd JOIN project_n0 p ON emd.dept_id = p.project_id; -drop table if exists dest_l2; -create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile; -insert into dest_l2 values(0, 1, 100, 10000); +drop table if exists dest_l2_n0; +create table dest_l2_n0 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile; +insert into dest_l2_n0 values(0, 1, 100, 10000); -select * from ( - select c1 + c2 x from dest_l2 +select * from ( + select c1 + c2 x from dest_l2_n0 union all - select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x; + select sum(c3) y from (select c3 from dest_l2_n0) v1) v2 order by x; -drop table if exists dest_l3; -create table dest_l3 (id int, c1 string, c2 string, c3 int) stored as textfile; -insert into dest_l3 values(0, "s1", "s2", 15); +drop table if exists dest_l3_n0; +create table dest_l3_n0 (id int, c1 string, c2 string, c3 int) stored as textfile; +insert into dest_l3_n0 values(0, "s1", "s2", 15); -select sum(a.c1) over (partition by a.c1 order by a.id) -from dest_l2 a +select sum(a.c1) over (partition by a.c1 order by a.id) +from dest_l2_n0 a where a.c2 != 10 group by a.c1, a.c2, a.id -having count(a.c2) > 0; +having count(a.c2) > 0; -select sum(a.c1), count(b.c1), b.c2, b.c3 -from dest_l2 a join dest_l3 b on (a.id = b.id) +select sum(a.c1), count(b.c1), b.c2, b.c3 +from dest_l2_n0 a join dest_l3_n0 b on (a.id = b.id) where a.c2 != 10 and b.c3 > 0 group by a.c1, a.c2, a.id, b.c1, b.c2, b.c3 -having count(a.c2) > 0 -order by b.c3 limit 5; +having count(a.c2) > 0 +order by b.c3 limit 5; -drop table if exists t; -create table t as -select distinct a.c2, a.c3 from dest_l2 a -inner join dest_l3 b on (a.id = b.id) +drop table if exists t_n16; +create table t_n16 as +select distinct a.c2, a.c3 from dest_l2_n0 a +inner join dest_l3_n0 b on (a.id = b.id) where a.id > 0 and b.c3 = 15; -SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)), -concat(substr(src1.key,1,1),sum(substr(src1.value,5))) +SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)), +concat(substr(src1.key,1,1),sum(substr(src1.value,5))) from src1 GROUP BY substr(src1.key,1,1); diff --git a/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q b/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q index 692bb41a8b..b3e1af5b4b 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_subq_exists.q @@ -30,7 +30,7 @@ having not exists -- 19.
SubQueries Exists -- view test -create view cv1 as +create view cv1_n4 as select * from src_cbo b where exists @@ -39,7 +39,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') ; -select * from cv1 +select * from cv1_n4 ; -- sq in from diff --git a/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q b/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q index b8535cf170..ba198eb6ee 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q @@ -5,96 +5,96 @@ set hive.cbo.returnpath.hiveop=true; -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) -- 0.23 changed input order of data in reducer task, which affects result of percentile_approx -CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket; -load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket; -load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket; -load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket; +CREATE TABLE bucket_n1 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_n1; +load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_n1; +load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_n1; +load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_n1; -create table t1 (result double); -create table t2 (result double); -create table t3 (result double); -create table t4 (result double); -create table t5 (result double); -create table t6 (result double); -create table t7 (result array<double>); -create table t8 (result array<double>); -create table t9 (result array<double>); -create table t10 (result array<double>); -create table t11 (result array<double>); -create table t12 (result array<double>); +create table t1_n132 (result double); +create table t2_n79 (result double); +create table t3_n31 (result double); +create table t4_n18 (result double); +create table t5_n5 (result double); +create table t6_n4 (result double); +create table t7_n5 (result array<double>); +create table t8_n3 (result array<double>); +create table t9_n2 (result array<double>); +create table t10_n1 (result array<double>); +create table t11_n3 (result array<double>); +create table t12_n1 (result array<double>); set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; set hive.map.aggr=false; -- disable map-side aggregation -FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +FROM bucket_n1 +insert overwrite table t1_n132 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n79 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n31 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key
AS int), 0.5, 1000) +insert overwrite table t4_n18 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n5 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n4 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n5 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000); +insert overwrite table t10_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n3 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000); -select * from t1; -select * from t2; -select * from t3; -select * from t4; -select * from t5; -select * from t6; -select * from t7; -select * from t8; -select * from t9; -select * from t10; -select * from t11; -select * from t12; +select * from t1_n132; +select * from t2_n79; +select * from t3_n31; +select * from t4_n18; +select * from t5_n5; +select * from t6_n4; +select * from t7_n5; +select * from t8_n3; +select * from t9_n2; +select * from t10_n1; +select * from t11_n3; +select * from t12_n1; set hive.map.aggr=true; -- enable map-side aggregation -FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +FROM bucket_n1 +insert overwrite table t1_n132 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n79 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n31 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) +insert overwrite table t4_n18 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n5 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n4 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n5 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n3 SELECT 
percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000); +insert overwrite table t10_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n3 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000); -select * from t1; -select * from t2; -select * from t3; -select * from t4; -select * from t5; -select * from t6; -select * from t7; -select * from t8; -select * from t9; -select * from t10; -select * from t11; -select * from t12; +select * from t1_n132; +select * from t2_n79; +select * from t3_n31; +select * from t4_n18; +select * from t5_n5; +select * from t6_n4; +select * from t7_n5; +select * from t8_n3; +select * from t9_n2; +select * from t10_n1; +select * from t11_n3; +select * from t12_n1; -- NaN explain -select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket; -select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket; +select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket_n1; +select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket_n1; -- with CBO explain -select percentile_approx(key, 0.5) from bucket; -select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket; +select percentile_approx(key, 0.5) from bucket_n1; +select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket_n1; diff --git a/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q b/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q index 8f132f27d4..28afea7446 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_unionDistinct_2.q @@ -2,129 +2,129 @@ set hive.cbo.returnpath.hiveop=true; -- SORT_QUERY_RESULTS -CREATE TABLE u1 as select key, value from src order by key limit 5; +CREATE TABLE u1_n0 as select key, value from src order by key limit 5; -CREATE TABLE u2 as select key, value from src order by key limit 3; +CREATE TABLE u2_n0 as select key, value from src order by key limit 3; -CREATE TABLE u3 as select key, value from src order by key desc limit 5; +CREATE TABLE u3_n0 as select key, value from src order by key desc limit 5; -select * from u1; +select * from u1_n0; -select * from u2; +select * from u2_n0; -select * from u3; +select * from u3_n0; select key, value from ( -select key, value from u1 +select key, value from u1_n0 union all -select key, value from u2 +select key, value from u2_n0 union all -select key as key, value from u3 +select key as key, value from u3_n0 ) tab; select key, value from ( -select key, value from u1 +select key, value from u1_n0 union -select key, value from u2 +select key, value from u2_n0 union all -select key, value from u3 +select key, value from u3_n0 ) tab; select key, value from ( -select key, value from u1 +select key, value from u1_n0 union distinct 
-select key, value from u2 +select key, value from u2_n0 union all -select key as key, value from u3 +select key as key, value from u3_n0 ) tab; select key, value from ( -select key, value from u1 +select key, value from u1_n0 union all -select key, value from u2 +select key, value from u2_n0 union -select key, value from u3 +select key, value from u3_n0 ) tab; select key, value from ( -select key, value from u1 +select key, value from u1_n0 union -select key, value from u2 +select key, value from u2_n0 union -select key as key, value from u3 +select key as key, value from u3_n0 ) tab; select distinct * from ( -select key, value from u1 +select key, value from u1_n0 union all -select key, value from u2 +select key, value from u2_n0 union all -select key as key, value from u3 +select key as key, value from u3_n0 ) tab; select distinct * from ( -select distinct * from u1 +select distinct * from u1_n0 union -select key, value from u2 +select key, value from u2_n0 union all -select key as key, value from u3 +select key as key, value from u3_n0 ) tab; -drop view if exists v; +drop view if exists v_n14; set hive.cbo.returnpath.hiveop=false; -create view v as select distinct * from +create view v_n14 as select distinct * from ( -select distinct * from u1 +select distinct * from u1_n0 union -select key, value from u2 +select key, value from u2_n0 union all -select key as key, value from u3 +select key as key, value from u3_n0 ) tab; -describe extended v; +describe extended v_n14; -select * from v; +select * from v_n14; -drop view if exists v; +drop view if exists v_n14; -create view v as select tab.* from +create view v_n14 as select tab.* from ( -select distinct * from u1 +select distinct * from u1_n0 union -select distinct * from u2 +select distinct * from u2_n0 ) tab; -describe extended v; +describe extended v_n14; -select * from v; +select * from v_n14; -drop view if exists v; +drop view if exists v_n14; -create view v as select * from +create view v_n14 as select * from ( -select distinct u1.* from u1 +select distinct u1_n0.* from u1_n0 union all -select distinct * from u2 +select distinct * from u2_n0 ) tab; -describe extended v; +describe extended v_n14; -select * from v; +select * from v_n14; select distinct * from ( -select key, value from u1 +select key, value from u1_n0 union all -select key, value from u2 +select key, value from u2_n0 union -select key as key, value from u3 +select key as key, value from u3_n0 ) tab; diff --git a/ql/src/test/queries/clientpositive/cbo_rp_views.q b/ql/src/test/queries/clientpositive/cbo_rp_views.q index d1f2bcb46d..eb21f2eabd 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_views.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_views.q @@ -7,46 +7,46 @@ set hive.stats.fetch.column.stats=true; set hive.auto.convert.join=false; -- 10. 
Test views -create view v1 as select c_int, value, c_boolean, dt from cbo_t1; -create view v2 as select c_int, value from cbo_t2; +create view v1_n18 as select c_int, value, c_boolean, dt from cbo_t1; +create view v2_n11 as select c_int, value from cbo_t2; set hive.cbo.returnpath.hiveop=true; -select value from v1 where c_boolean=false; -select max(c_int) from v1 group by (c_boolean); +select value from v1_n18 where c_boolean=false; +select max(c_int) from v1_n18 group by (c_boolean); -select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int; -select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int; +select count(v1_n18.c_int) from v1_n18 join cbo_t2 on v1_n18.c_int = cbo_t2.c_int; +select count(v1_n18.c_int) from v1_n18 join v2_n11 on v1_n18.c_int = v2_n11.c_int; -select count(*) from v1 a join v1 b on a.value = b.value; +select count(*) from v1_n18 a join v1_n18 b on a.value = b.value; set hive.cbo.returnpath.hiveop=false; -create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean; +create view v3_n4 as select v1_n18.value val from v1_n18 join cbo_t1 on v1_n18.c_boolean = cbo_t1.c_boolean; set hive.cbo.returnpath.hiveop=true; -select count(val) from v3 where val != '1'; +select count(val) from v3_n4 where val != '1'; with q1 as ( select key from cbo_t1 where key = '1') select count(*) from q1; -with q1 as ( select value from v1 where c_boolean = false) +with q1 as ( select value from v1_n18 where c_boolean = false) select count(value) from q1 ; set hive.cbo.returnpath.hiveop=false; -create view v4 as +create view v4_n4 as with q1 as ( select key,c_int from cbo_t1 where key = '1') select * from q1 ; set hive.cbo.returnpath.hiveop=true; with q1 as ( select c_int from q2 where c_boolean = false), -q2 as ( select c_int,c_boolean from v1 where value = '1') +q2 as ( select c_int,c_boolean from v1_n18 where value = '1') select sum(c_int) from (select c_int from q1) a; with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int and cbo_t1.dt='2014'), -q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') -select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int; +q2 as ( select c_int,c_boolean from v1_n18 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4_n4 on q1.c_int = q2.c_int and v4_n4.c_int = q2.c_int; -drop view v1; -drop view v2; -drop view v3; -drop view v4; +drop view v1_n18; +drop view v2_n11; +drop view v3_n4; +drop view v4_n4; diff --git a/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q b/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q index 39787662a6..9a1afefea2 100644 --- a/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q +++ b/ql/src/test/queries/clientpositive/cbo_rp_windowing_2.q @@ -201,7 +201,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi set hive.cbo.returnpath.hiveop=false; -- 22. 
testViewAsTableInputWithWindowing -create view IF NOT EXISTS mfgr_price_view as +create view IF NOT EXISTS mfgr_price_view_n4 as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part @@ -212,26 +212,26 @@ select * from ( select p_mfgr, p_brand, s, round(sum(s) over w1 , 2) as s1 -from mfgr_price_view +from mfgr_price_view_n4 window w1 as (distribute by p_mfgr sort by p_mfgr ) ) sq order by p_mfgr, p_brand; select p_mfgr, p_brand, s, round(sum(s) over w1 ,2) as s1 -from mfgr_price_view +from mfgr_price_view_n4 window w1 as (distribute by p_mfgr sort by p_brand rows between 2 preceding and current row); set hive.cbo.returnpath.hiveop=false; -- 23. testCreateViewWithWindowingQuery -create view IF NOT EXISTS mfgr_brand_price_view as +create view IF NOT EXISTS mfgr_brand_price_view_n1 as select p_mfgr, p_brand, round(sum(p_retailprice) over w1,2) as s from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and current row); set hive.cbo.returnpath.hiveop=true ; -select * from mfgr_brand_price_view; +select * from mfgr_brand_price_view_n1; -- 24. testLateralViews select p_mfgr, p_name, @@ -241,7 +241,7 @@ lateral view explode(arr) part_lv as lv_col window w1 as (distribute by p_mfgr sort by p_size, lv_col rows between 2 preceding and current row); -- 25. testMultipleInserts3SWQs -CREATE TABLE part_1( +CREATE TABLE part_1_n1( p_mfgr STRING, p_name STRING, p_size INT, @@ -249,7 +249,7 @@ r INT, dr INT, s DOUBLE); -CREATE TABLE part_2( +CREATE TABLE part_2_n1( p_mfgr STRING, p_name STRING, p_size INT, @@ -259,7 +259,7 @@ cud INT, s2 DOUBLE, fv1 INT); -CREATE TABLE part_3( +CREATE TABLE part_3_n1( p_mfgr STRING, p_name STRING, p_size INT, @@ -268,12 +268,12 @@ ca INT, fv INT); from part -INSERT OVERWRITE TABLE part_1 +INSERT OVERWRITE TABLE part_1_n1 select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name ) as r, dense_rank() over(distribute by p_mfgr sort by p_name ) as dr, round(sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row),2) as s -INSERT OVERWRITE TABLE part_2 +INSERT OVERWRITE TABLE part_2_n1 select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -281,18 +281,18 @@ cume_dist() over(distribute by p_mfgr sort by p_name) as cud, round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, first_value(p_size) over w1 as fv1 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) -INSERT OVERWRITE TABLE part_3 +INSERT OVERWRITE TABLE part_3_n1 select p_mfgr,p_name, p_size, count(*) over(distribute by p_mfgr sort by p_name) as c, count(p_size) over(distribute by p_mfgr sort by p_name) as ca, first_value(p_size) over w1 as fv window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following); -select * from part_1; +select * from part_1_n1; -select * from part_2; +select * from part_2_n1; -select * from part_3; +select * from part_3_n1; -- 26. testGroupByHavingWithSWQAndAlias select p_mfgr, p_name, p_size, min(p_retailprice) as mi, diff --git a/ql/src/test/queries/clientpositive/cbo_subq_exists.q b/ql/src/test/queries/clientpositive/cbo_subq_exists.q index bc71840236..45f318493d 100644 --- a/ql/src/test/queries/clientpositive/cbo_subq_exists.q +++ b/ql/src/test/queries/clientpositive/cbo_subq_exists.q @@ -29,7 +29,7 @@ having not exists -- 19. 
SubQueries Exists -- view test -create view cv1 as +create view cv1_n2 as select * from src_cbo b where exists @@ -38,7 +38,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') ; -select * from cv1 +select * from cv1_n2 ; -- sq in from diff --git a/ql/src/test/queries/clientpositive/cbo_views.q b/ql/src/test/queries/clientpositive/cbo_views.q index 650bad6ed3..fc7e5d2733 100644 --- a/ql/src/test/queries/clientpositive/cbo_views.q +++ b/ql/src/test/queries/clientpositive/cbo_views.q @@ -8,41 +8,41 @@ set hive.stats.fetch.column.stats=true; set hive.auto.convert.join=false; -- 10. Test views -create view v1 as select c_int, value, c_boolean, dt from cbo_t1; -create view v2 as select c_int, value from cbo_t2; +create view v1_n12 as select c_int, value, c_boolean, dt from cbo_t1; +create view v2_n6 as select c_int, value from cbo_t2; -select value from v1 where c_boolean=false; -select max(c_int) from v1 group by (c_boolean); +select value from v1_n12 where c_boolean=false; +select max(c_int) from v1_n12 group by (c_boolean); -select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int; -select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int; +select count(v1_n12.c_int) from v1_n12 join cbo_t2 on v1_n12.c_int = cbo_t2.c_int; +select count(v1_n12.c_int) from v1_n12 join v2_n6 on v1_n12.c_int = v2_n6.c_int; -select count(*) from v1 a join v1 b on a.value = b.value; +select count(*) from v1_n12 a join v1_n12 b on a.value = b.value; -create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean; +create view v3_n2 as select v1_n12.value val from v1_n12 join cbo_t1 on v1_n12.c_boolean = cbo_t1.c_boolean; -select count(val) from v3 where val != '1'; +select count(val) from v3_n2 where val != '1'; with q1 as ( select key from cbo_t1 where key = '1') select count(*) from q1; -with q1 as ( select value from v1 where c_boolean = false) +with q1 as ( select value from v1_n12 where c_boolean = false) select count(value) from q1 ; -create view v4 as +create view v4_n2 as with q1 as ( select key,c_int from cbo_t1 where key = '1') select * from q1 ; with q1 as ( select c_int from q2 where c_boolean = false), -q2 as ( select c_int,c_boolean from v1 where value = '1') +q2 as ( select c_int,c_boolean from v1_n12 where value = '1') select sum(c_int) from (select c_int from q1) a; with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int and cbo_t1.dt='2014'), -q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') -select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int; +q2 as ( select c_int,c_boolean from v1_n12 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4_n2 on q1.c_int = q2.c_int and v4_n2.c_int = q2.c_int; -drop view v1; -drop view v2; -drop view v3; -drop view v4; +drop view v1_n12; +drop view v2_n6; +drop view v3_n2; +drop view v4_n2; diff --git a/ql/src/test/queries/clientpositive/char_2.q b/ql/src/test/queries/clientpositive/char_2.q index e50aa1466f..ac743b32e0 100644 --- a/ql/src/test/queries/clientpositive/char_2.q +++ b/ql/src/test/queries/clientpositive/char_2.q @@ -1,12 +1,12 @@ --! 
qt:dataset:src -drop table char_2; +drop table char_2_n1; -create table char_2 ( +create table char_2_n1 ( key char(10), value char(20) ); -insert overwrite table char_2 select * from src; +insert overwrite table char_2_n1 select * from src; select value, sum(cast(key as int)), count(*) numrows from src @@ -16,7 +16,7 @@ limit 5; -- should match the query from src select value, sum(cast(key as int)), count(*) numrows -from char_2 +from char_2_n1 group by value order by value asc limit 5; @@ -29,9 +29,9 @@ limit 5; -- should match the query from src select value, sum(cast(key as int)), count(*) numrows -from char_2 +from char_2_n1 group by value order by value desc limit 5; -drop table char_2; +drop table char_2_n1; diff --git a/ql/src/test/queries/clientpositive/char_join1.q b/ql/src/test/queries/clientpositive/char_join1.q index b8699e367b..2502061375 100644 --- a/ql/src/test/queries/clientpositive/char_join1.q +++ b/ql/src/test/queries/clientpositive/char_join1.q @@ -2,7 +2,7 @@ drop table char_join1_ch1; drop table char_join1_ch2; -drop table char_join1_str; +drop table char_join1_str_n0; create table char_join1_ch1 ( c1 int, @@ -14,14 +14,14 @@ create table char_join1_ch2 ( c2 char(20) ); -create table char_join1_str ( +create table char_join1_str_n0 ( c1 int, c2 string ); load data local inpath '../../data/files/vc1.txt' into table char_join1_ch1; load data local inpath '../../data/files/vc1.txt' into table char_join1_ch2; -load data local inpath '../../data/files/vc1.txt' into table char_join1_str; +load data local inpath '../../data/files/vc1.txt' into table char_join1_str_n0; -- Join char with same length char select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2); @@ -30,8 +30,8 @@ select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2); select * from char_join1_ch1 a join char_join1_ch2 b on (a.c2 = b.c2); -- Join char with string -select * from char_join1_ch1 a join char_join1_str b on (a.c2 = b.c2); +select * from char_join1_ch1 a join char_join1_str_n0 b on (a.c2 = b.c2); drop table char_join1_ch1; drop table char_join1_ch2; -drop table char_join1_str; +drop table char_join1_str_n0; diff --git a/ql/src/test/queries/clientpositive/char_pad_convert.q b/ql/src/test/queries/clientpositive/char_pad_convert.q index a65132ba3c..b3e0a28318 100644 --- a/ql/src/test/queries/clientpositive/char_pad_convert.q +++ b/ql/src/test/queries/clientpositive/char_pad_convert.q @@ -1,5 +1,5 @@ -create table over1k( +create table over1k_n6( t tinyint, si smallint, i int, @@ -14,7 +14,7 @@ create table over1k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over1k' into table over1k; +load data local inpath '../../data/files/over1k' into table over1k_n6; -- Pass non-strings for the first and third arguments to test argument conversion @@ -22,23 +22,23 @@ load data local inpath '../../data/files/over1k' into table over1k; select lpad(t, 4, ' '), lpad(si, 2, ' '), lpad(i, 9, 'z'), - lpad(b, 2, 'a') from over1k limit 5; + lpad(b, 2, 'a') from over1k_n6 limit 5; select lpad("oh", 10, t), lpad("my", 6, si), lpad("other", 14, i), - lpad("one", 12, b) from over1k limit 5; + lpad("one", 12, b) from over1k_n6 limit 5; -- Integers select rpad(t, 4, ' '), rpad(si, 2, ' '), rpad(i, 9, 'z'), - rpad(b, 2, 'a') from over1k limit 5; + rpad(b, 2, 'a') from over1k_n6 limit 5; select rpad("oh", 10, t), rpad("my", 6, si), rpad("other", 14, i), - rpad("one", 12, b) from over1k limit 5; + rpad("one", 12, b) from over1k_n6 limit 5; -- More select lpad(f, 
4, ' '), @@ -46,25 +46,25 @@ select lpad(f, 4, ' '), lpad(bo, 9, 'z'), lpad(ts, 2, 'a'), lpad(`dec`, 7, 'd'), - lpad(bin, 8, 'b') from over1k limit 5; + lpad(bin, 8, 'b') from over1k_n6 limit 5; select lpad("oh", 10, f), lpad("my", 6, d), lpad("other", 14, bo), lpad("one", 12, ts), lpad("two", 7, `dec`), - lpad("three", 8, bin) from over1k limit 5; + lpad("three", 8, bin) from over1k_n6 limit 5; select rpad(f, 4, ' '), rpad(d, 2, ' '), rpad(bo, 9, 'z'), rpad(ts, 2, 'a'), rpad(`dec`, 7, 'd'), - rpad(bin, 8, 'b') from over1k limit 5; + rpad(bin, 8, 'b') from over1k_n6 limit 5; select rpad("oh", 10, f), rpad("my", 6, d), rpad("other", 14, bo), rpad("one", 12, ts), rpad("two", 7, `dec`), - rpad("three", 8, bin) from over1k limit 5; + rpad("three", 8, bin) from over1k_n6 limit 5; diff --git a/ql/src/test/queries/clientpositive/check_constraint.q b/ql/src/test/queries/clientpositive/check_constraint.q index 5671b3e426..8c3831b44f 100644 --- a/ql/src/test/queries/clientpositive/check_constraint.q +++ b/ql/src/test/queries/clientpositive/check_constraint.q @@ -2,27 +2,27 @@ set hive.stats.autogather=false; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -CREATE TABLE table1(i int CHECK -i > -10, +CREATE TABLE table1_n0(i int CHECK -i > -10, j int CHECK +j > 10, ij boolean CHECK ij IS NOT NULL, a int CHECK a BETWEEN i AND j, bb float CHECK bb IN (23.4,56,4), d bigint CHECK d > round(567.6) AND d < round(1000.4)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES('transactional'='true'); -DESC FORMATTED table1; +DESC FORMATTED table1_n0; -EXPLAIN INSERT INTO table1 values(1,100,true, 5, 23.4, 700.5); -INSERT INTO table1 values(1,100,true, 5, 23.4, 700.5); -SELECT * from table1; -DROP TABLE table1; +EXPLAIN INSERT INTO table1_n0 values(1,100,true, 5, 23.4, 700.5); +INSERT INTO table1_n0 values(1,100,true, 5, 23.4, 700.5); +SELECT * from table1_n0; +DROP TABLE table1_n0; -- null check constraint -CREATE TABLE table2(i int CHECK i + NULL > 0); -DESC FORMATTED table2; -EXPLAIN INSERT INTO table2 values(8); -INSERT INTO table2 values(8); -select * from table2; -Drop table table2; +CREATE TABLE table2_n0(i int CHECK i + NULL > 0); +DESC FORMATTED table2_n0; +EXPLAIN INSERT INTO table2_n0 values(8); +INSERT INTO table2_n0 values(8); +select * from table2_n0; +Drop table table2_n0; -- UDF created by users CREATE FUNCTION test_udf2 AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString'; @@ -79,57 +79,57 @@ SELECT * from texpr; DROP TABLE texpr; -- UPDATE -create table acid_uami(i int, +create table acid_uami_n0(i int, de decimal(5,2) constraint nn1 not null enforced, vc varchar(128) constraint ch2 CHECK de >= cast(i as decimal(5,2)) enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -DESC FORMATTED acid_uami; +DESC FORMATTED acid_uami_n0; --! 
qt:dataset:src -- insert as select -explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src; -insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src; +explain insert into table acid_uami_n0 select cast(key as int), cast (key as decimal(5,2)), value from src; +insert into table acid_uami_n0 select cast(key as int), cast (key as decimal(5,2)), value from src; -- insert overwrite -explain insert overwrite table acid_uami select cast(key as int), cast (key as decimal(5,2)), value +explain insert overwrite table acid_uami_n0 select cast(key as int), cast (key as decimal(5,2)), value from src order by cast(key as int) limit 10 ; -insert overwrite table acid_uami select cast(key as int), cast (key as decimal(5,2)), value +insert overwrite table acid_uami_n0 select cast(key as int), cast (key as decimal(5,2)), value from src order by cast(key as int) limit 10 ; -- insert as select cont -explain insert into table acid_uami select cast(s1.key as int) as c1, cast (s2.key as decimal(5,2)) as c2, s1.value from src s1 +explain insert into table acid_uami_n0 select cast(s1.key as int) as c1, cast (s2.key as decimal(5,2)) as c2, s1.value from src s1 left outer join src s2 on s1.key=s2.key where s1.value > 'val' limit 10 ; -insert into table acid_uami select cast(s1.key as int) as c1, cast (s2.key as decimal(5,2)) as c2, s1.value from src s1 +insert into table acid_uami_n0 select cast(s1.key as int) as c1, cast (s2.key as decimal(5,2)) as c2, s1.value from src s1 left outer join src s2 on s1.key=s2.key where s1.value > 'val' limit 10 ; -select * from acid_uami; -truncate table acid_uami; +select * from acid_uami_n0; +truncate table acid_uami_n0; -- insert as select group by + agg -explain insert into table acid_uami select min(cast(key as int)) as c1, max(cast (key as decimal(5,2))) as c2, value +explain insert into table acid_uami_n0 select min(cast(key as int)) as c1, max(cast (key as decimal(5,2))) as c2, value from src group by key, value order by key, value limit 10; -insert into table acid_uami select min(cast(key as int)) as c1, max(cast (key as decimal(5,2))) as c2, value +insert into table acid_uami_n0 select min(cast(key as int)) as c1, max(cast (key as decimal(5,2))) as c2, value from src group by key, value order by key, value limit 10; -select * from acid_uami; -truncate table acid_uami; +select * from acid_uami_n0; +truncate table acid_uami_n0; -- multi insert -create table src_multi2 (i STRING, j STRING NOT NULL ENABLE); +create table src_multi2_n0 (i STRING, j STRING NOT NULL ENABLE); explain from src -insert into table acid_uami select cast(key as int), cast(key as decimal(5,2)), value where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; -drop table src_multi2; +insert into table acid_uami_n0 select cast(key as int), cast(key as decimal(5,2)), value where key < 10 +insert overwrite table src_multi2_n0 select * where key > 10 and key < 20; +drop table src_multi2_n0; -- update -select * from acid_uami order by de desc limit 15; -explain update acid_uami set de = 893.14 where de = 103.00 or de = 119.00; -update acid_uami set de = 893.14 where de = 103.00 or de = 119.00; -select * from acid_uami order by de desc limit 15; -ALTER table acid_uami drop constraint ch2; -explain update acid_uami set vc = 'apache_hive' where de = 893.14 ; -update acid_uami set vc = 'apache_hive' where de = 893.14 ; -select * from acid_uami order by vc limit 15; -DROP TABLE acid_uami; +select * 
from acid_uami_n0 order by de desc limit 15; +explain update acid_uami_n0 set de = 893.14 where de = 103.00 or de = 119.00; +update acid_uami_n0 set de = 893.14 where de = 103.00 or de = 119.00; +select * from acid_uami_n0 order by de desc limit 15; +ALTER table acid_uami_n0 drop constraint ch2; +explain update acid_uami_n0 set vc = 'apache_hive' where de = 893.14 ; +update acid_uami_n0 set vc = 'apache_hive' where de = 893.14 ; +select * from acid_uami_n0 order by vc limit 15; +DROP TABLE acid_uami_n0; -- MERGE create table tmerge(key int CHECK key > 0 AND (key < 100 OR key = 5) enable, a1 string NOT NULL, value string) @@ -202,12 +202,12 @@ DESC FORMATTED trely; DROP TABLE trely; -- table level constraint -create table tbl1(a string, b int, CONSTRAINT check1 CHECK a != '' AND b > 4); -desc formatted tbl1; -explain insert into tbl1 values('a', 69); -insert into tbl1 values('a', 69); -select * from tbl1; -ALTER TABLE tbl1 add constraint chk2 CHECK (b < 100); -desc formatted tbl1; -explain insert into tbl1 values('a', 69); -drop table tbl1; +create table tbl1_n1(a string, b int, CONSTRAINT check1 CHECK a != '' AND b > 4); +desc formatted tbl1_n1; +explain insert into tbl1_n1 values('a', 69); +insert into tbl1_n1 values('a', 69); +select * from tbl1_n1; +ALTER TABLE tbl1_n1 add constraint chk2 CHECK (b < 100); +desc formatted tbl1_n1; +explain insert into tbl1_n1 values('a', 69); +drop table tbl1_n1; diff --git a/ql/src/test/queries/clientpositive/colstats_all_nulls.q b/ql/src/test/queries/clientpositive/colstats_all_nulls.q index ae6b0f51ed..22a9e7126c 100644 --- a/ql/src/test/queries/clientpositive/colstats_all_nulls.q +++ b/ql/src/test/queries/clientpositive/colstats_all_nulls.q @@ -1,11 +1,11 @@ -CREATE TABLE src_null(a bigint) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null; +CREATE TABLE src_null_n2(a bigint) STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null_n2; -create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) as c FROM src_null where a is null limit 5; +create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) as c FROM src_null_n2 where a is null limit 5; analyze table all_nulls compute statistics for columns; describe formatted all_nulls a; describe formatted all_nulls b; drop table all_nulls; -drop table src_null; +drop table src_null_n2; diff --git a/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q b/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q index a11e7f0445..0827e91870 100644 --- a/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q +++ b/ql/src/test/queries/clientpositive/columnStatsUpdateForStatsOptimizer_1.q @@ -1,101 +1,101 @@ -set hive.stats.column.autogather=false; -set hive.stats.fetch.column.stats=true; -set hive.compute.query.using.stats=true; -set hive.mapred.mode=nonstrict; +set hive.stats.column.autogather=false; +set hive.stats.fetch.column.stats=true; +set hive.compute.query.using.stats=true; +set hive.mapred.mode=nonstrict; -drop table calendar; +drop table calendar_n0; -CREATE TABLE calendar (year int, month int); +CREATE TABLE calendar_n0 (year int, month int); -insert into calendar values (2010, 10), (2011, 11), (2012, 12); +insert into calendar_n0 values (2010, 10), (2011, 11), (2012, 12); -desc formatted calendar; +desc formatted calendar_n0; -analyze table calendar compute statistics; +analyze table calendar_n0
compute statistics; -desc formatted calendar; +desc formatted calendar_n0; -explain select count(1) from calendar; +explain select count(1) from calendar_n0; -explain select max(year) from calendar; +explain select max(year) from calendar_n0; -select max(year) from calendar; +select max(year) from calendar_n0; -select max(month) from calendar; +select max(month) from calendar_n0; -analyze table calendar compute statistics for columns; +analyze table calendar_n0 compute statistics for columns; -desc formatted calendar; +desc formatted calendar_n0; -explain select max(year) from calendar; +explain select max(year) from calendar_n0; -select max(year) from calendar; +select max(year) from calendar_n0; -insert into calendar values (2015, 15); +insert into calendar_n0 values (2015, 15); -desc formatted calendar; +desc formatted calendar_n0; -explain select max(year) from calendar; +explain select max(year) from calendar_n0; -select max(year) from calendar; +select max(year) from calendar_n0; -explain select max(month) from calendar; +explain select max(month) from calendar_n0; -select max(month) from calendar; +select max(month) from calendar_n0; -analyze table calendar compute statistics for columns year; +analyze table calendar_n0 compute statistics for columns year; -desc formatted calendar; +desc formatted calendar_n0; -explain select max(year) from calendar; +explain select max(year) from calendar_n0; -select max(year) from calendar; +select max(year) from calendar_n0; -explain select max(month) from calendar; +explain select max(month) from calendar_n0; -select max(month) from calendar; +select max(month) from calendar_n0; -analyze table calendar compute statistics for columns month; +analyze table calendar_n0 compute statistics for columns month; -desc formatted calendar; +desc formatted calendar_n0; -explain select max(month) from calendar; +explain select max(month) from calendar_n0; -select max(month) from calendar; +select max(month) from calendar_n0; -CREATE TABLE calendarp (`year` int) partitioned by (p int); +CREATE TABLE calendarp (`year` int) partitioned by (p int); -insert into table calendarp partition (p=1) values (2010), (2011), (2012); +insert into table calendarp partition (p=1) values (2010), (2011), (2012); desc formatted calendarp partition (p=1); -explain select max(year) from calendarp where p=1; +explain select max(year) from calendarp where p=1; -select max(year) from calendarp where p=1; +select max(year) from calendarp where p=1; analyze table calendarp partition (p=1) compute statistics for columns; desc formatted calendarp partition (p=1); -explain select max(year) from calendarp where p=1; +explain select max(year) from calendarp where p=1; -insert into table calendarp partition (p=1) values (2015); +insert into table calendarp partition (p=1) values (2015); desc formatted calendarp partition (p=1); -explain select max(year) from calendarp where p=1; +explain select max(year) from calendarp where p=1; -select max(year) from calendarp where p=1; +select max(year) from calendarp where p=1; -create table t (key string, value string); +create table t_n31 (key string, value string); -load data local inpath '../../data/files/kv1.txt' into table t; +load data local inpath '../../data/files/kv1.txt' into table t_n31; -desc formatted t; +desc formatted t_n31; -analyze table t compute statistics; +analyze table t_n31 compute statistics; -desc formatted t; +desc
formatted t_n31; diff --git a/ql/src/test/queries/clientpositive/column_access_stats.q b/ql/src/test/queries/clientpositive/column_access_stats.q index 8a7f4761ba..78a3e38ba7 100644 --- a/ql/src/test/queries/clientpositive/column_access_stats.q +++ b/ql/src/test/queries/clientpositive/column_access_stats.q @@ -6,114 +6,114 @@ SET hive.stats.collect.scancols=true; -- SORT_QUERY_RESULTS -- This test is used for testing the ColumnAccessAnalyzer -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +CREATE TABLE T1_n127(key STRING, val STRING) STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n127; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T4(key STRING, val STRING) PARTITIONED BY (p STRING); +CREATE TABLE T2_n75(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n29(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T4_n16(key STRING, val STRING) PARTITIONED BY (p STRING); -- Simple select queries -SELECT key FROM T1; -SELECT key, val FROM T1; -SELECT 1 FROM T1; -SELECT key, val from T4 where p=1; -SELECT val FROM T4 where p=1; -SELECT p, val FROM T4 where p=1; +SELECT key FROM T1_n127; +SELECT key, val FROM T1_n127; +SELECT 1 FROM T1_n127; +SELECT key, val from T4_n16 where p=1; +SELECT val FROM T4_n16 where p=1; +SELECT p, val FROM T4_n16 where p=1; -- More complicated select queries -EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1; -SELECT key FROM (SELECT key, val FROM T1) subq1; -EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1) subq1; -SELECT k FROM (SELECT key as k, val as v FROM T1) subq1; -SELECT key + 1 as k FROM T1; -SELECT key + val as k FROM T1; +EXPLAIN SELECT key FROM (SELECT key, val FROM T1_n127) subq1; +SELECT key FROM (SELECT key, val FROM T1_n127) subq1; +EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1_n127) subq1; +SELECT k FROM (SELECT key as k, val as v FROM T1_n127) subq1; +SELECT key + 1 as k FROM T1_n127; +SELECT key + val as k FROM T1_n127; -- Work with union EXPLAIN SELECT * FROM ( -SELECT key as c FROM T1 +SELECT key as c FROM T1_n127 UNION ALL -SELECT val as c FROM T1 +SELECT val as c FROM T1_n127 ) subq1; SELECT * FROM ( -SELECT key as c FROM T1 +SELECT key as c FROM T1_n127 UNION ALL -SELECT val as c FROM T1 +SELECT val as c FROM T1_n127 ) subq1; EXPLAIN SELECT * FROM ( -SELECT key as c FROM T1 +SELECT key as c FROM T1_n127 UNION ALL -SELECT key as c FROM T1 +SELECT key as c FROM T1_n127 ) subq1; SELECT * FROM ( -SELECT key as c FROM T1 +SELECT key as c FROM T1_n127 UNION ALL -SELECT key as c FROM T1 +SELECT key as c FROM T1_n127 ) subq1; -- Work with insert overwrite -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key; +FROM T1_n127 +INSERT OVERWRITE TABLE T2_n75 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE T3_n29 SELECT key, sum(val) GROUP BY key; -- Simple joins SELECT * -FROM T1 JOIN T2 -ON T1.key = T2.key ; +FROM T1_n127 JOIN T2_n75 +ON T1_n127.key = T2_n75.key ; EXPLAIN -SELECT T1.key -FROM T1 JOIN T2 -ON T1.key = T2.key; +SELECT T1_n127.key +FROM T1_n127 JOIN T2_n75 +ON T1_n127.key = T2_n75.key; -SELECT T1.key -FROM T1 JOIN T2 -ON T1.key = T2.key; +SELECT T1_n127.key +FROM T1_n127 JOIN T2_n75 +ON T1_n127.key = T2_n75.key; SELECT * -FROM T1 JOIN T2 -ON T1.key = T2.key AND T1.val = T2.val; +FROM T1_n127 JOIN T2_n75 +ON 
T1_n127.key = T2_n75.key AND T1_n127.val = T2_n75.val; -- Map join SELECT /*+ MAPJOIN(a) */ * -FROM T1 a JOIN T2 b +FROM T1_n127 a JOIN T2_n75 b ON a.key = b.key; -- More joins EXPLAIN SELECT * -FROM T1 JOIN T2 -ON T1.key = T2.key AND T1.val = 3 and T2.val = 3; +FROM T1_n127 JOIN T2_n75 +ON T1_n127.key = T2_n75.key AND T1_n127.val = 3 and T2_n75.val = 3; SELECT * -FROM T1 JOIN T2 -ON T1.key = T2.key AND T1.val = 3 and T2.val = 3; +FROM T1_n127 JOIN T2_n75 +ON T1_n127.key = T2_n75.key AND T1_n127.val = 3 and T2_n75.val = 3; EXPLAIN SELECT subq1.val FROM ( - SELECT val FROM T1 WHERE key = 5 + SELECT val FROM T1_n127 WHERE key = 5 ) subq1 JOIN ( - SELECT val FROM T2 WHERE key = 6 + SELECT val FROM T2_n75 WHERE key = 6 ) subq2 ON subq1.val = subq2.val; SELECT subq1.val FROM ( - SELECT val FROM T1 WHERE key = 5 + SELECT val FROM T1_n127 WHERE key = 5 ) subq1 JOIN ( - SELECT val FROM T2 WHERE key = 6 + SELECT val FROM T2_n75 WHERE key = 6 ) subq2 ON subq1.val = subq2.val; @@ -125,16 +125,16 @@ FROM SELECT subq1.key as key FROM ( - SELECT key, val FROM T1 + SELECT key, val FROM T1_n127 ) subq1 JOIN ( - SELECT key, 'teststring' as val FROM T2 + SELECT key, 'teststring' as val FROM T2_n75 ) subq2 ON subq1.key = subq2.key -) T4 -JOIN T3 -ON T3.key = T4.key; +) T4_n16 +JOIN T3_n29 +ON T3_n29.key = T4_n16.key; SELECT * FROM @@ -142,16 +142,16 @@ FROM SELECT subq1.key as key FROM ( - SELECT key, val FROM T1 + SELECT key, val FROM T1_n127 ) subq1 JOIN ( - SELECT key, 'teststring' as val FROM T2 + SELECT key, 'teststring' as val FROM T2_n75 ) subq2 ON subq1.key = subq2.key -) T4 -JOIN T3 -ON T3.key = T4.key; +) T4_n16 +JOIN T3_n29 +ON T3_n29.key = T4_n16.key; -- for partitioned table SELECT * FROM srcpart TABLESAMPLE (10 ROWS); diff --git a/ql/src/test/queries/clientpositive/column_pruner_multiple_children.q b/ql/src/test/queries/clientpositive/column_pruner_multiple_children.q index cb605a87f8..9001dd29fe 100644 --- a/ql/src/test/queries/clientpositive/column_pruner_multiple_children.q +++ b/ql/src/test/queries/clientpositive/column_pruner_multiple_children.q @@ -2,19 +2,19 @@ set hive.map.aggr=false; set hive.stats.column.autogather=true; -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE DEST1_n52(key INT, value STRING) STORED AS TEXTFILE; -create table s as select * from src where key='10'; +create table s_n129 as select * from src where key='10'; -explain FROM S -INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key +explain FROM S_n129 +INSERT OVERWRITE TABLE DEST1_n52 SELECT key, sum(SUBSTR(value,5)) GROUP BY key ; -FROM S -INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key +FROM S_n129 +INSERT OVERWRITE TABLE DEST1_n52 SELECT key, sum(SUBSTR(value,5)) GROUP BY key ; -desc formatted DEST1; +desc formatted DEST1_n52; -desc formatted DEST1 key; -desc formatted DEST1 value; +desc formatted DEST1_n52 key; +desc formatted DEST1_n52 value; diff --git a/ql/src/test/queries/clientpositive/column_table_stats_orc.q b/ql/src/test/queries/clientpositive/column_table_stats_orc.q index 51fccd20c5..31e8f4625a 100644 --- a/ql/src/test/queries/clientpositive/column_table_stats_orc.q +++ b/ql/src/test/queries/clientpositive/column_table_stats_orc.q @@ -1,57 +1,57 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -DROP TABLE IF EXISTS s; +DROP TABLE IF EXISTS s_n0; -CREATE TABLE s (key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS ORC; +CREATE TABLE s_n0 (key STRING COMMENT 'default', value STRING COMMENT 'default') 
STORED AS ORC; -insert into table s values ('1','2'); +insert into table s_n0 values ('1','2'); -desc formatted s; +desc formatted s_n0; -explain extended analyze table s compute statistics for columns; +explain extended analyze table s_n0 compute statistics for columns; -analyze table s compute statistics for columns; +analyze table s_n0 compute statistics for columns; -desc formatted s; +desc formatted s_n0; -DROP TABLE IF EXISTS spart; +DROP TABLE IF EXISTS spart_n0; -CREATE TABLE spart (key STRING COMMENT 'default', value STRING COMMENT 'default') +CREATE TABLE spart_n0 (key STRING COMMENT 'default', value STRING COMMENT 'default') PARTITIONED BY (ds STRING, hr STRING) STORED AS ORC; -insert into table spart PARTITION (ds="2008-04-08", hr="12") values ('1','2'); -insert into table spart PARTITION (ds="2008-04-08", hr="11") values ('1','2'); +insert into table spart_n0 PARTITION (ds="2008-04-08", hr="12") values ('1','2'); +insert into table spart_n0 PARTITION (ds="2008-04-08", hr="11") values ('1','2'); -desc formatted spart; +desc formatted spart_n0; -explain extended analyze table spart compute statistics for columns; +explain extended analyze table spart_n0 compute statistics for columns; -analyze table spart compute statistics for columns; +analyze table spart_n0 compute statistics for columns; -desc formatted spart; +desc formatted spart_n0; -desc formatted spart PARTITION(ds='2008-04-08', hr=11); -desc formatted spart PARTITION(ds='2008-04-08', hr=12); +desc formatted spart_n0 PARTITION(ds='2008-04-08', hr=11); +desc formatted spart_n0 PARTITION(ds='2008-04-08', hr=12); -DROP TABLE IF EXISTS spart; +DROP TABLE IF EXISTS spart_n0; -CREATE TABLE spart (key STRING COMMENT 'default', value STRING COMMENT 'default') +CREATE TABLE spart_n0 (key STRING COMMENT 'default', value STRING COMMENT 'default') PARTITIONED BY (ds STRING, hr STRING) STORED AS ORC; -insert into table spart PARTITION (ds="2008-04-08", hr="12") values ('1','2'); -insert into table spart PARTITION (ds="2008-04-08", hr="11") values ('1','2'); +insert into table spart_n0 PARTITION (ds="2008-04-08", hr="12") values ('1','2'); +insert into table spart_n0 PARTITION (ds="2008-04-08", hr="11") values ('1','2'); -desc formatted spart; +desc formatted spart_n0; -explain extended analyze table spart partition(hr="11") compute statistics for columns; +explain extended analyze table spart_n0 partition(hr="11") compute statistics for columns; -analyze table spart partition(hr="11") compute statistics for columns; +analyze table spart_n0 partition(hr="11") compute statistics for columns; -desc formatted spart; +desc formatted spart_n0; -desc formatted spart PARTITION(ds='2008-04-08', hr=11); -desc formatted spart PARTITION(ds='2008-04-08', hr=12); +desc formatted spart_n0 PARTITION(ds='2008-04-08', hr=11); +desc formatted spart_n0 PARTITION(ds='2008-04-08', hr=12); diff --git a/ql/src/test/queries/clientpositive/columnstats_infinity.q b/ql/src/test/queries/clientpositive/columnstats_infinity.q index c99a1cb7be..b214fa9849 100644 --- a/ql/src/test/queries/clientpositive/columnstats_infinity.q +++ b/ql/src/test/queries/clientpositive/columnstats_infinity.q @@ -1,44 +1,44 @@ set hive.stats.column.autogather=false; -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18),
float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n45(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n45; -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n13(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n13 SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n45; -desc formatted table_change_numeric_group_string_group_floating_string_group; +desc formatted table_change_numeric_group_string_group_floating_string_group_n13; -analyze table table_change_numeric_group_string_group_floating_string_group compute statistics for columns; +analyze table table_change_numeric_group_string_group_floating_string_group_n13 compute statistics for columns; -desc formatted table_change_numeric_group_string_group_floating_string_group; +desc formatted table_change_numeric_group_string_group_floating_string_group_n13; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n13; set hive.stats.column.autogather=true; -drop table table_change_numeric_group_string_group_floating_string_group; +drop table table_change_numeric_group_string_group_floating_string_group_n13; -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n13(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n13 SELECT 
insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n45; -desc formatted table_change_numeric_group_string_group_floating_string_group; +desc formatted table_change_numeric_group_string_group_floating_string_group_n13; diff --git a/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q b/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q index c065edd3dc..d47e20c1ce 100644 --- a/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q +++ b/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q @@ -1,72 +1,72 @@ set hive.mapred.mode=nonstrict; -DROP TABLE Employee_Part; +DROP TABLE Employee_Part_n0; -CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) +CREATE TABLE Employee_Part_n0(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) row format delimited fields terminated by '|' stored as textfile; -LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK'); -LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='2000.0', country='USA'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='2000.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3000.0', country='USA'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='4000.0', country='USA'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3500.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3000.0', country='UK'); -- dynamic partitioning syntax explain -analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID; -analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID; +analyze table Employee_Part_n0 partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID; +analyze table Employee_Part_n0 partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID; -describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA'); +describe formatted Employee_Part_n0 partition (employeeSalary='4000.0', country='USA'); -describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA') 
employeeName; +describe formatted Employee_Part_n0 partition (employeeSalary='4000.0', country='USA') employeeName; -- don't specify all partitioning keys explain -analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID; -analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID; +analyze table Employee_Part_n0 partition (employeeSalary='2000.0') compute statistics for columns employeeID; +analyze table Employee_Part_n0 partition (employeeSalary='2000.0') compute statistics for columns employeeID; -describe formatted Employee_Part partition (employeeSalary='2000.0', country='USA') employeeID; -describe formatted Employee_Part partition (employeeSalary='2000.0', country='UK') employeeID; +describe formatted Employee_Part_n0 partition (employeeSalary='2000.0', country='USA') employeeID; +describe formatted Employee_Part_n0 partition (employeeSalary='2000.0', country='UK') employeeID; -- don't specify any partitioning keys explain -analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID; -analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID; +analyze table Employee_Part_n0 partition (employeeSalary) compute statistics for columns employeeID; +analyze table Employee_Part_n0 partition (employeeSalary) compute statistics for columns employeeID; -describe formatted Employee_Part partition (employeeSalary='3000.0', country='UK') employeeID; +describe formatted Employee_Part_n0 partition (employeeSalary='3000.0', country='UK') employeeID; explain -analyze table Employee_Part partition (employeeSalary,country) compute statistics for columns; -analyze table Employee_Part partition (employeeSalary,country) compute statistics for columns; +analyze table Employee_Part_n0 partition (employeeSalary,country) compute statistics for columns; +analyze table Employee_Part_n0 partition (employeeSalary,country) compute statistics for columns; -describe formatted Employee_Part partition (employeeSalary='3500.0', country='UK') employeeName; +describe formatted Employee_Part_n0 partition (employeeSalary='3500.0', country='UK') employeeName; -- partially populated stats -drop table Employee; -CREATE TABLE Employee(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) +drop table Employee_n0; +CREATE TABLE Employee_n0(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) row format delimited fields terminated by '|' stored as textfile; -LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee partition(employeeSalary='2000.0', country='USA'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee partition(employeeSalary='2000.0', country='UK'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee partition(employeeSalary='3500.0', country='UK'); -LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee partition(employeeSalary='3000.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_n0 partition(employeeSalary='2000.0', country='USA'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_n0 partition(employeeSalary='2000.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_n0 partition(employeeSalary='3500.0', country='UK'); +LOAD DATA LOCAL INPATH 
"../../data/files/employee.dat" INTO TABLE Employee_n0 partition(employeeSalary='3000.0', country='UK'); -analyze table Employee partition (employeeSalary,country) compute statistics for columns; +analyze table Employee_n0 partition (employeeSalary,country) compute statistics for columns; -describe formatted Employee partition (employeeSalary='3500.0', country='UK') employeeName; -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee partition(employeeSalary='3000.0', country='USA'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee partition(employeeSalary='4000.0', country='USA'); +describe formatted Employee_n0 partition (employeeSalary='3500.0', country='UK') employeeName; +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_n0 partition(employeeSalary='3000.0', country='USA'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_n0 partition(employeeSalary='4000.0', country='USA'); -analyze table Employee partition (employeeSalary) compute statistics for columns; +analyze table Employee_n0 partition (employeeSalary) compute statistics for columns; -describe formatted Employee partition (employeeSalary='3000.0', country='USA') employeeName; +describe formatted Employee_n0 partition (employeeSalary='3000.0', country='USA') employeeName; -- add columns -alter table Employee add columns (c int ,d string); +alter table Employee_n0 add columns (c int ,d string); -LOAD DATA LOCAL INPATH "../../data/files/employee_part.txt" INTO TABLE Employee partition(employeeSalary='6000.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee_part.txt" INTO TABLE Employee_n0 partition(employeeSalary='6000.0', country='UK'); -analyze table Employee partition (employeeSalary='6000.0',country='UK') compute statistics for columns; +analyze table Employee_n0 partition (employeeSalary='6000.0',country='UK') compute statistics for columns; -describe formatted Employee partition (employeeSalary='6000.0', country='UK') employeeName; -describe formatted Employee partition (employeeSalary='6000.0', country='UK') c; -describe formatted Employee partition (employeeSalary='6000.0', country='UK') d; +describe formatted Employee_n0 partition (employeeSalary='6000.0', country='UK') employeeName; +describe formatted Employee_n0 partition (employeeSalary='6000.0', country='UK') c; +describe formatted Employee_n0 partition (employeeSalary='6000.0', country='UK') d; diff --git a/ql/src/test/queries/clientpositive/combine2.q b/ql/src/test/queries/clientpositive/combine2.q index 313d03c866..5b19bc00fb 100644 --- a/ql/src/test/queries/clientpositive/combine2.q +++ b/ql/src/test/queries/clientpositive/combine2.q @@ -15,7 +15,7 @@ set hive.merge.smallfiles.avgsize=0; -- SORT_QUERY_RESULTS -create table combine2(key string) partitioned by (value string); +create table combine2_n0(key string) partitioned by (value string); -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) -- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0 @@ -25,7 +25,7 @@ create table combine2(key string) partitioned by (value string); -- significant impact on the results results of this test. -- This issue was fixed in MAPREDUCE-2046 which is included in 0.22. 
-insert overwrite table combine2 partition(value) +insert overwrite table combine2_n0 partition(value) select * from ( select key, value from src where key < 10 union all @@ -33,17 +33,17 @@ select * from ( union all select key, '2010-04-21 09:45:00' value from src where key = 19) s; -show partitions combine2; +show partitions combine2_n0; explain -select key, value from combine2 where value is not null; +select key, value from combine2_n0 where value is not null; -select key, value from combine2 where value is not null; +select key, value from combine2_n0 where value is not null; explain extended -select count(1) from combine2 where value is not null; +select count(1) from combine2_n0 where value is not null; -select count(1) from combine2 where value is not null; +select count(1) from combine2_n0 where value is not null; explain select ds, count(1) from srcpart where ds is not null group by ds; diff --git a/ql/src/test/queries/clientpositive/comma_in_column_name.q b/ql/src/test/queries/clientpositive/comma_in_column_name.q index cb8823e366..be2b2525cc 100644 --- a/ql/src/test/queries/clientpositive/comma_in_column_name.q +++ b/ql/src/test/queries/clientpositive/comma_in_column_name.q @@ -1,14 +1,14 @@ -create table test (`x,y` int); +create table test_n4 (`x,y` int); -insert into test values (1),(2); +insert into test_n4 values (1),(2); -select `x,y` from test where `x,y` >=2 ; +select `x,y` from test_n4 where `x,y` >=2 ; -drop table test; +drop table test_n4; -create table test (`x,y` int) stored as orc; +create table test_n4 (`x,y` int) stored as orc; -insert into test values (1),(2); +insert into test_n4 values (1),(2); -select `x,y` from test where `x,y` <2 ; +select `x,y` from test_n4 where `x,y` <2 ; diff --git a/ql/src/test/queries/clientpositive/constGby.q b/ql/src/test/queries/clientpositive/constGby.q index bde2e79ad0..d27554ee3a 100644 --- a/ql/src/test/queries/clientpositive/constGby.q +++ b/ql/src/test/queries/clientpositive/constGby.q @@ -1,21 +1,21 @@ set hive.mapred.mode=nonstrict; -create table t1 (a int); -analyze table t1 compute statistics; -analyze table t1 compute statistics for columns; +create table t1_n36 (a int); +analyze table t1_n36 compute statistics; +analyze table t1_n36 compute statistics for columns; -explain select count(1) from t1 group by 1; -select count(1) from t1 group by 1; -select count(1) from t1; -explain select count(*) from t1; -select count(*) from t1; -select count(1) from t1 group by 1=1; -select count(1), max(a) from t1 group by 1=1; +explain select count(1) from t1_n36 group by 1; +select count(1) from t1_n36 group by 1; +select count(1) from t1_n36; +explain select count(*) from t1_n36; +select count(*) from t1_n36; +select count(1) from t1_n36 group by 1=1; +select count(1), max(a) from t1_n36 group by 1=1; set hive.compute.query.using.stats=false; -select count(1) from t1 group by 1; -select count(1) from t1; -select count(*) from t1; -select count(1) from t1 group by 1=1; -select count(1), max(a) from t1 group by 1=1; +select count(1) from t1_n36 group by 1; +select count(1) from t1_n36; +select count(*) from t1_n36; +select count(1) from t1_n36 group by 1=1; +select count(1), max(a) from t1_n36 group by 1=1; diff --git a/ql/src/test/queries/clientpositive/constantPropWhen.q b/ql/src/test/queries/clientpositive/constantPropWhen.q index 03bfd54030..1889664ded 100644 --- a/ql/src/test/queries/clientpositive/constantPropWhen.q +++ b/ql/src/test/queries/clientpositive/constantPropWhen.q @@ -1,45 +1,45 @@ set hive.mapred.mode=nonstrict; 
set hive.optimize.constant.propagation=false; -drop table test_1; +drop table test_1_n4; -create table test_1 (id int, id2 int); +create table test_1_n4 (id int, id2 int); -insert into table test_1 values (123, NULL), (NULL, NULL), (NULL, 123), (123, 123); +insert into table test_1_n4 values (123, NULL), (NULL, NULL), (NULL, 123), (123, 123); -explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1; +explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4; -SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1; +SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4; -explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1; +explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4; -SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1; +SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4; -explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1; +explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4; -SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1; +SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4; -explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1; +explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4; -SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1; +SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4; set hive.cbo.enable=false; set hive.optimize.constant.propagation=true; -explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1; +explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4; -SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1; +SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4; -explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1; +explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4; -SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1; +SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4; -explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1; +explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4; -SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1; +SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4; -explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1; +explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4; -SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1; +SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4; diff --git a/ql/src/test/queries/clientpositive/constant_prop_1.q b/ql/src/test/queries/clientpositive/constant_prop_1.q index 3d3016fdf0..683b7302aa 100644 --- 
a/ql/src/test/queries/clientpositive/constant_prop_1.q +++ b/ql/src/test/queries/clientpositive/constant_prop_1.q @@ -36,7 +36,7 @@ select a.key, b.value from src a join src b where a.key = '238' and b.value = '2 explain select a.key, b.value from src a join src b on a.key=b.key where b.value = '234'; -create table t ( +create table t_n26 ( a int, b int, c int, @@ -47,7 +47,7 @@ e int explain select a2 as a3 from (select a1 as a2, c1 as c2 from -(select a as a1, b as b1, c as c1 from t where a=1 and b=2 and c=3)sub1)sub2; +(select a as a1, b as b1, c as c1 from t_n26 where a=1 and b=2 and c=3)sub1)sub2; diff --git a/ql/src/test/queries/clientpositive/constantfolding.q b/ql/src/test/queries/clientpositive/constantfolding.q index 210d35ebea..d5e491770e 100644 --- a/ql/src/test/queries/clientpositive/constantfolding.q +++ b/ql/src/test/queries/clientpositive/constantfolding.q @@ -11,23 +11,23 @@ union all select * from (select 'k4' as key, ' ' as value from src limit 2)c; -drop table if exists union_all_bug_test_1; -drop table if exists union_all_bug_test_2; -create table if not exists union_all_bug_test_1 +drop table if exists union_all_bug_test_1_n0; +drop table if exists union_all_bug_test_2_n0; +create table if not exists union_all_bug_test_1_n0 ( f1 int, f2 int ); -create table if not exists union_all_bug_test_2 +create table if not exists union_all_bug_test_2_n0 ( f1 int ); -insert into table union_all_bug_test_1 values (1,1); -insert into table union_all_bug_test_2 values (1); -insert into table union_all_bug_test_1 values (0,0); -insert into table union_all_bug_test_2 values (0); +insert into table union_all_bug_test_1_n0 values (1,1); +insert into table union_all_bug_test_2_n0 values (1); +insert into table union_all_bug_test_1_n0 values (0,0); +insert into table union_all_bug_test_2_n0 values (0); @@ -37,14 +37,14 @@ FROM ( SELECT f1 , if('helloworld' like '%hello%' ,f1,f2) as filter -FROM union_all_bug_test_1 +FROM union_all_bug_test_1_n0 union all select f1 , 0 as filter -from union_all_bug_test_2 +from union_all_bug_test_2_n0 ) A WHERE (filter = 1 and f1 = 1); @@ -65,9 +65,9 @@ explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src; -- numRows: 2 rawDataSize: 112 explain select cast("1970-12-31 15:59:58.174" as DATE) from src; -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n127(c1 STRING) STORED AS TEXTFILE; -FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86; +FROM src INSERT OVERWRITE TABLE dest1_n127 SELECT ' abc ' WHERE src.key = 86; EXPLAIN SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), @@ -77,7 +77,7 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), - POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1; + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n127; SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), @@ -86,4 +86,4 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), - POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1; + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n127; diff --git 
a/ql/src/test/queries/clientpositive/constprog_dp.q b/ql/src/test/queries/clientpositive/constprog_dp.q index d7fcb5e4b1..1c36df3dfe 100644 --- a/ql/src/test/queries/clientpositive/constprog_dp.q +++ b/ql/src/test/queries/clientpositive/constprog_dp.q @@ -2,11 +2,11 @@ set hive.optimize.constant.propagation=true; set hive.exec.dynamic.partition.mode=nonstrict; -create table dest(key string, value string) partitioned by (ds string); +create table dest_n1(key string, value string) partitioned by (ds string); EXPLAIN from srcpart -insert overwrite table dest partition (ds) select key, value, ds where ds='2008-04-08'; +insert overwrite table dest_n1 partition (ds) select key, value, ds where ds='2008-04-08'; from srcpart -insert overwrite table dest partition (ds) select key, value, ds where ds='2008-04-08'; +insert overwrite table dest_n1 partition (ds) select key, value, ds where ds='2008-04-08'; diff --git a/ql/src/test/queries/clientpositive/constprog_semijoin.q b/ql/src/test/queries/clientpositive/constprog_semijoin.q index a5546ecf2a..eb92713bb5 100644 --- a/ql/src/test/queries/clientpositive/constprog_semijoin.q +++ b/ql/src/test/queries/clientpositive/constprog_semijoin.q @@ -3,35 +3,35 @@ set hive.explain.user=true; -- SORT_QUERY_RESULTS -create table table1 (id int, val string, val1 string, dimid int); -insert into table1 (id, val, val1, dimid) values (1, 't1val01', 'val101', 100), (2, 't1val02', 'val102', 200), (3, 't1val03', 'val103', 103), (3, 't1val01', 'val104', 100), (2, 't1val05', 'val105', 200), (3, 't1val01', 'val106', 103), (1, 't1val07', 'val107', 200), (2, 't1val01', 'val108', 200), (3, 't1val09', 'val109', 103), (4,'t1val01', 'val110', 200); +create table table1_n10 (id int, val string, val1 string, dimid int); +insert into table1_n10 (id, val, val1, dimid) values (1, 't1val01', 'val101', 100), (2, 't1val02', 'val102', 200), (3, 't1val03', 'val103', 103), (3, 't1val01', 'val104', 100), (2, 't1val05', 'val105', 200), (3, 't1val01', 'val106', 103), (1, 't1val07', 'val107', 200), (2, 't1val01', 'val108', 200), (3, 't1val09', 'val109', 103), (4,'t1val01', 'val110', 200); -create table table2 (id int, val2 string); -insert into table2 (id, val2) values (1, 't2val201'), (2, 't2val202'), (3, 't2val203'); +create table table2_n6 (id int, val2 string); +insert into table2_n6 (id, val2) values (1, 't2val201'), (2, 't2val202'), (3, 't2val203'); -create table table3 (id int); -insert into table3 (id) values (100), (100), (101), (102), (103); +create table table3_n0 (id int); +insert into table3_n0 (id) values (100), (100), (101), (102), (103); -explain select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id where table1.val = 't1val01'; -select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id where table1.val = 't1val01'; +explain select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id where table1_n10.val = 't1val01'; +select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id where table1_n10.val = 't1val01'; -explain select table1.id, table1.val, table2.val2 from table1 inner join table2 on table1.val = 't1val01' and table1.id = table2.id left semi join table3 on table1.dimid = table3.id; -select table1.id, table1.val, table2.val2 from table1 inner join table2 on table1.val = 't1val01' and table1.id = table2.id left semi join table3 on 
table1.dimid = table3.id; +explain select table1_n10.id, table1_n10.val, table2_n6.val2 from table1_n10 inner join table2_n6 on table1_n10.val = 't1val01' and table1_n10.id = table2_n6.id left semi join table3_n0 on table1_n10.dimid = table3_n0.id; +select table1_n10.id, table1_n10.val, table2_n6.val2 from table1_n10 inner join table2_n6 on table1_n10.val = 't1val01' and table1_n10.id = table2_n6.id left semi join table3_n0 on table1_n10.dimid = table3_n0.id; -explain select table1.id, table1.val, table2.val2 from table1 left semi join table3 on table1.dimid = table3.id inner join table2 on table1.val = 't1val01' and table1.id = table2.id; -select table1.id, table1.val, table2.val2 from table1 left semi join table3 on table1.dimid = table3.id inner join table2 on table1.val = 't1val01' and table1.id = table2.id; +explain select table1_n10.id, table1_n10.val, table2_n6.val2 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id inner join table2_n6 on table1_n10.val = 't1val01' and table1_n10.id = table2_n6.id; +select table1_n10.id, table1_n10.val, table2_n6.val2 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id inner join table2_n6 on table1_n10.val = 't1val01' and table1_n10.id = table2_n6.id; -explain select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100 where table1.dimid <> 100; -select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100 where table1.dimid <> 100; +explain select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100 where table1_n10.dimid <> 100; +select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100 where table1_n10.dimid <> 100; -explain select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100 where table1.dimid IN (100,200); -select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100 where table1.dimid IN (100,200); +explain select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100 where table1_n10.dimid IN (100,200); +select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100 where table1_n10.dimid IN (100,200); -explain select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100 where table1.dimid = 200; -select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100 where table1.dimid = 200; +explain select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100 where table1_n10.dimid = 200; +select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100 where table1_n10.dimid = 200; -explain select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100 where table1.dimid = 100; -select table1.id, table1.val, table1.val1 
from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100 where table1.dimid = 100; +explain select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100 where table1_n10.dimid = 100; +select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100 where table1_n10.dimid = 100; -explain select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100; -select table1.id, table1.val, table1.val1 from table1 left semi join table3 on table1.dimid = table3.id and table3.id = 100; +explain select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100; +select table1_n10.id, table1_n10.val, table1_n10.val1 from table1_n10 left semi join table3_n0 on table1_n10.dimid = table3_n0.id and table3_n0.id = 100; diff --git a/ql/src/test/queries/clientpositive/constprog_type.q b/ql/src/test/queries/clientpositive/constprog_type.q index 05e091abb4..c1a79a9f32 100644 --- a/ql/src/test/queries/clientpositive/constprog_type.q +++ b/ql/src/test/queries/clientpositive/constprog_type.q @@ -1,18 +1,18 @@ --! qt:dataset:src set hive.optimize.constant.propagation=true; -CREATE TABLE dest1(d date, t timestamp); +CREATE TABLE dest1_n26(d date, t timestamp); EXPLAIN -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n26 SELECT cast('2013-11-17' as date), cast(cast('1.3041352164485E9' as double) as timestamp) FROM src tablesample (1 rows); -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n26 SELECT cast('2013-11-17' as date), cast(cast('1.3041352164485E9' as double) as timestamp) FROM src tablesample (1 rows); -SELECT * FROM dest1; +SELECT * FROM dest1_n26; SELECT key, value FROM src WHERE key = cast(86 as double); diff --git a/ql/src/test/queries/clientpositive/correlated_join_keys.q b/ql/src/test/queries/clientpositive/correlated_join_keys.q index 4c801de9bf..48ff7ed3bd 100644 --- a/ql/src/test/queries/clientpositive/correlated_join_keys.q +++ b/ql/src/test/queries/clientpositive/correlated_join_keys.q @@ -1,7 +1,7 @@ -drop table customer_address; +drop table customer_address_n0; -create table customer_address +create table customer_address_n0 ( ca_address_sk int, ca_address_id string, @@ -19,16 +19,16 @@ create table customer_address ) row format delimited fields terminated by '|'; -load data local inpath '../../data/files/customer_address.txt' overwrite into table customer_address; -analyze table customer_address compute statistics; -analyze table customer_address compute statistics for columns ca_state, ca_zip; +load data local inpath '../../data/files/customer_address.txt' overwrite into table customer_address_n0; +analyze table customer_address_n0 compute statistics; +analyze table customer_address_n0 compute statistics for columns ca_state, ca_zip; set hive.stats.fetch.column.stats=true; set hive.stats.correlated.multi.key.joins=false; -explain select count(*) from customer_address a join customer_address b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state); +explain select count(*) from customer_address_n0 a join customer_address_n0 b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state); set hive.stats.correlated.multi.key.joins=true; -explain select count(*) from customer_address a join customer_address b on (a.ca_zip = b.ca_zip and 
a.ca_state = b.ca_state); +explain select count(*) from customer_address_n0 a join customer_address_n0 b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state); -drop table customer_address; +drop table customer_address_n0; diff --git a/ql/src/test/queries/clientpositive/correlationoptimizer11.q b/ql/src/test/queries/clientpositive/correlationoptimizer11.q index 8c99141e5b..9f6997e641 100644 --- a/ql/src/test/queries/clientpositive/correlationoptimizer11.q +++ b/ql/src/test/queries/clientpositive/correlationoptimizer11.q @@ -5,26 +5,26 @@ set hive.auto.convert.join=false; -- Tests in this file are used to make sure Correlation Optimizer -- can correctly handle tables with partitions -CREATE TABLE part_table(key string, value string) PARTITIONED BY (partitionId int); -INSERT OVERWRITE TABLE part_table PARTITION (partitionId=1) +CREATE TABLE part_table_n1(key string, value string) PARTITIONED BY (partitionId int); +INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=1) SELECT key, value FROM src ORDER BY key, value LIMIT 100; -INSERT OVERWRITE TABLE part_table PARTITION (partitionId=2) +INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=2) SELECT key, value FROM src1 ORDER BY key, value; set hive.optimize.correlation=false; --- In this case, we should not do shared scan on part_table +-- In this case, we should not do shared scan on part_table_n1 -- because left and right tables of JOIN use different partitions --- of part_table. With Correlation Optimizer we will generate +-- of part_table_n1. With Correlation Optimizer we will generate -- 1 MR job. EXPLAIN SELECT x.key AS key, count(1) AS cnt -FROM part_table x JOIN part_table y ON (x.key = y.key) +FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key) WHERE x.partitionId = 1 AND y.partitionId = 2 GROUP BY x.key; SELECT x.key AS key, count(1) AS cnt -FROM part_table x JOIN part_table y ON (x.key = y.key) +FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key) WHERE x.partitionId = 1 AND y.partitionId = 2 GROUP BY x.key; @@ -32,31 +32,31 @@ GROUP BY x.key; set hive.optimize.correlation=true; EXPLAIN SELECT x.key AS key, count(1) AS cnt -FROM part_table x JOIN part_table y ON (x.key = y.key) +FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key) WHERE x.partitionId = 1 AND y.partitionId = 2 GROUP BY x.key; SELECT x.key AS key, count(1) AS cnt -FROM part_table x JOIN part_table y ON (x.key = y.key) +FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key) WHERE x.partitionId = 1 AND y.partitionId = 2 GROUP BY x.key; set hive.optimize.correlation=false; --- In this case, we should do shared scan on part_table +-- In this case, we should do shared scan on part_table_n1 -- because left and right tables of JOIN use the same partition --- of part_table. With Correlation Optimizer we will generate +-- of part_table_n1. With Correlation Optimizer we will generate -- 1 MR job. 
EXPLAIN SELECT x.key AS key, count(1) AS cnt -FROM part_table x JOIN part_table y ON (x.key = y.key) +FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key) WHERE x.partitionId = 2 AND y.partitionId = 2 GROUP BY x.key; SELECT x.key AS key, count(1) AS cnt -FROM part_table x JOIN part_table y ON (x.key = y.key) +FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key) WHERE x.partitionId = 2 AND y.partitionId = 2 GROUP BY x.key; @@ -64,13 +64,13 @@ GROUP BY x.key; set hive.optimize.correlation=true; EXPLAIN SELECT x.key AS key, count(1) AS cnt -FROM part_table x JOIN part_table y ON (x.key = y.key) +FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key) WHERE x.partitionId = 2 AND y.partitionId = 2 GROUP BY x.key; SELECT x.key AS key, count(1) AS cnt -FROM part_table x JOIN part_table y ON (x.key = y.key) +FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key) WHERE x.partitionId = 2 AND y.partitionId = 2 GROUP BY x.key; diff --git a/ql/src/test/queries/clientpositive/correlationoptimizer4.q b/ql/src/test/queries/clientpositive/correlationoptimizer4.q index 02edeff691..c34ff237e6 100644 --- a/ql/src/test/queries/clientpositive/correlationoptimizer4.q +++ b/ql/src/test/queries/clientpositive/correlationoptimizer4.q @@ -1,10 +1,10 @@ set hive.mapred.mode=nonstrict; -CREATE TABLE T1(key INT, val STRING); -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -CREATE TABLE T2(key INT, val STRING); -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -CREATE TABLE T3(key INT, val STRING); -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T3; +CREATE TABLE T1_n146(key INT, val STRING); +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n146; +CREATE TABLE T2_n86(key INT, val STRING); +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n86; +CREATE TABLE T3_n34(key INT, val STRING); +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T3_n34; set hive.auto.convert.join=false; set hive.optimize.correlation=false; @@ -15,24 +15,24 @@ set hive.optimize.correlation=false; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; set hive.optimize.correlation=true; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; set hive.optimize.correlation=true; @@ -41,12 +41,12 @@ set hive.auto.convert.join=true; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; SELECT 
SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; set hive.auto.convert.join=false; @@ -56,24 +56,24 @@ set hive.optimize.correlation=false; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt - FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY x.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt - FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY x.key) tmp; set hive.optimize.correlation=true; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt - FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY x.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt - FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY x.key) tmp; set hive.optimize.correlation=true; @@ -83,12 +83,12 @@ set hive.optimize.correlation=true; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; set hive.optimize.correlation=false; @@ -97,24 +97,24 @@ set hive.optimize.correlation=false; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT z.key AS key, count(1) AS cnt - FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x RIGHT OUTER JOIN T1_n146 y ON (x.key = y.key) RIGHT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY z.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT z.key AS key, count(1) AS cnt - FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x RIGHT OUTER JOIN T1_n146 y ON (x.key = y.key) RIGHT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY z.key) tmp; set hive.optimize.correlation=true; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT z.key AS key, count(1) AS cnt - FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x RIGHT OUTER JOIN T1_n146 y ON (x.key = y.key) RIGHT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY z.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT z.key AS key, count(1) AS cnt - FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = 
y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x RIGHT OUTER JOIN T1_n146 y ON (x.key = y.key) RIGHT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY z.key) tmp; set hive.optimize.correlation=true; @@ -124,12 +124,12 @@ set hive.optimize.correlation=true; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x RIGHT OUTER JOIN T1_n146 y ON (x.key = y.key) RIGHT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x RIGHT OUTER JOIN T1_n146 y ON (x.key = y.key) RIGHT OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; set hive.optimize.correlation=false; @@ -138,22 +138,22 @@ set hive.optimize.correlation=false; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x FULL OUTER JOIN T1 y ON (x.key = y.key) FULL OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x FULL OUTER JOIN T1_n146 y ON (x.key = y.key) FULL OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x FULL OUTER JOIN T1 y ON (x.key = y.key) FULL OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x FULL OUTER JOIN T1_n146 y ON (x.key = y.key) FULL OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; set hive.optimize.correlation=true; EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x FULL OUTER JOIN T1 y ON (x.key = y.key) FULL OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x FULL OUTER JOIN T1_n146 y ON (x.key = y.key) FULL OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt - FROM T2 x FULL OUTER JOIN T1 y ON (x.key = y.key) FULL OUTER JOIN T3 z ON (y.key = z.key) + FROM T2_n86 x FULL OUTER JOIN T1_n146 y ON (x.key = y.key) FULL OUTER JOIN T3_n34 z ON (y.key = z.key) GROUP BY y.key) tmp; diff --git a/ql/src/test/queries/clientpositive/correlationoptimizer5.q b/ql/src/test/queries/clientpositive/correlationoptimizer5.q index 002fb12e22..d75a48f6c0 100644 --- a/ql/src/test/queries/clientpositive/correlationoptimizer5.q +++ b/ql/src/test/queries/clientpositive/correlationoptimizer5.q @@ -1,14 +1,14 @@ set hive.stats.column.autogather=false; -- Currently, a query with multiple FileSinkOperators are not supported. 
set hive.mapred.mode=nonstrict; -CREATE TABLE T1(key INT, val STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1; -CREATE TABLE T2(key INT, val STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T2; -CREATE TABLE T3(key INT, val STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE T3; -CREATE TABLE T4(key INT, val STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv5.txt' INTO TABLE T4; +CREATE TABLE T1_n19(key INT, val STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n19; +CREATE TABLE T2_n11(key INT, val STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T2_n11; +CREATE TABLE T3_n5(key INT, val STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE T3_n5; +CREATE TABLE T4_n1(key INT, val STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv5.txt' INTO TABLE T4_n1; CREATE TABLE dest_co1(key INT, val STRING); CREATE TABLE dest_co2(key INT, val STRING); @@ -22,17 +22,17 @@ EXPLAIN INSERT OVERWRITE TABLE dest_co1 SELECT b.key, d.val FROM -(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b +(SELECT x.key, x.val FROM T1_n19 x JOIN T2_n11 y ON (x.key = y.key)) b JOIN -(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d +(SELECT m.key, n.val FROM T3_n5 m JOIN T4_n1 n ON (m.key = n.key)) d ON b.key = d.key; INSERT OVERWRITE TABLE dest_co1 SELECT b.key, d.val FROM -(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b +(SELECT x.key, x.val FROM T1_n19 x JOIN T2_n11 y ON (x.key = y.key)) b JOIN -(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d +(SELECT m.key, n.val FROM T3_n5 m JOIN T4_n1 n ON (m.key = n.key)) d ON b.key = d.key; set hive.optimize.correlation=true; @@ -40,17 +40,17 @@ EXPLAIN INSERT OVERWRITE TABLE dest_co2 SELECT b.key, d.val FROM -(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b +(SELECT x.key, x.val FROM T1_n19 x JOIN T2_n11 y ON (x.key = y.key)) b JOIN -(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d +(SELECT m.key, n.val FROM T3_n5 m JOIN T4_n1 n ON (m.key = n.key)) d ON b.key = d.key; INSERT OVERWRITE TABLE dest_co2 SELECT b.key, d.val FROM -(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b +(SELECT x.key, x.val FROM T1_n19 x JOIN T2_n11 y ON (x.key = y.key)) b JOIN -(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d +(SELECT m.key, n.val FROM T3_n5 m JOIN T4_n1 n ON (m.key = n.key)) d ON b.key = d.key; set hive.optimize.correlation=true; @@ -61,17 +61,17 @@ EXPLAIN INSERT OVERWRITE TABLE dest_co3 SELECT b.key, d.val FROM -(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b +(SELECT x.key, x.val FROM T1_n19 x JOIN T2_n11 y ON (x.key = y.key)) b JOIN -(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d +(SELECT m.key, n.val FROM T3_n5 m JOIN T4_n1 n ON (m.key = n.key)) d ON b.key = d.key; INSERT OVERWRITE TABLE dest_co3 SELECT b.key, d.val FROM -(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b +(SELECT x.key, x.val FROM T1_n19 x JOIN T2_n11 y ON (x.key = y.key)) b JOIN -(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d +(SELECT m.key, n.val FROM T3_n5 m JOIN T4_n1 n ON (m.key = n.key)) d ON b.key = d.key; -- dest_co1, dest_co2 and dest_co3 should be same diff --git a/ql/src/test/queries/clientpositive/correlationoptimizer9.q b/ql/src/test/queries/clientpositive/correlationoptimizer9.q index 38f9480737..3f97b4e5eb 100644 --- a/ql/src/test/queries/clientpositive/correlationoptimizer9.q +++ 
b/ql/src/test/queries/clientpositive/correlationoptimizer9.q @@ -2,27 +2,27 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING); +CREATE TABLE tmp_n2(c1 INT, c2 INT, c3 STRING, c4 STRING); set hive.auto.convert.join=false; -INSERT OVERWRITE TABLE tmp +INSERT OVERWRITE TABLE tmp_n2 SELECT x.key, y.key, x.value, y.value FROM src x JOIN src y ON (x.key = y.key); set hive.optimize.correlation=false; EXPLAIN SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key); SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key); set hive.optimize.correlation=true; @@ -30,46 +30,46 @@ set hive.optimize.correlation=true; EXPLAIN SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key); SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key); set hive.optimize.correlation=false; EXPLAIN SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2); SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2); set hive.optimize.correlation=true; EXPLAIN SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS 
cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2); SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2); diff --git a/ql/src/test/queries/clientpositive/count.q b/ql/src/test/queries/clientpositive/count.q index 2849d9a8ba..a369f50165 100644 --- a/ql/src/test/queries/clientpositive/count.q +++ b/ql/src/test/queries/clientpositive/count.q @@ -1,54 +1,54 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; -- SORT_QUERY_RESULTS -create table abcd (a int, b int, c int, d int); -LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd; +create table abcd_n2 (a int, b int, c int, d int); +LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd_n2; -select * from abcd; +select * from abcd_n2; set hive.map.aggr=true; -explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a; -select a, count(distinct b), count(distinct c), sum(d) from abcd group by a; +explain select a, count(distinct b), count(distinct c), sum(d) from abcd_n2 group by a; +select a, count(distinct b), count(distinct c), sum(d) from abcd_n2 group by a; -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd; -select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd; +explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd_n2; +select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), 
count(distinct a,b,c,d) from abcd_n2; set hive.map.aggr=false; -explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a; -select a, count(distinct b), count(distinct c), sum(d) from abcd group by a; +explain select a, count(distinct b), count(distinct c), sum(d) from abcd_n2 group by a; +select a, count(distinct b), count(distinct c), sum(d) from abcd_n2 group by a; -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd; -select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd; +explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd_n2; +select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd_n2; set hive.cbo.returnpath.hiveop=true; set hive.map.aggr=true; --first aggregation with literal. 
gbinfo was generating wrong expression -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd; -select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd; +explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd_n2; +select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd_n2; set hive.map.aggr=false; -explain select count(distinct b) from abcd group by a; -select count(distinct b) from abcd group by a; +explain select count(distinct b) from abcd_n2 group by a; +select count(distinct b) from abcd_n2 group by a; -explain select count(distinct b) from abcd group by b; -select count(distinct b) from abcd group by b; +explain select count(distinct b) from abcd_n2 group by b; +select count(distinct b) from abcd_n2 group by b; -explain select count(distinct b) from abcd group by c; -select count(distinct b) from abcd group by c; +explain select count(distinct b) from abcd_n2 group by c; +select count(distinct b) from abcd_n2 group by c; -explain select count(b), count(distinct c) from abcd group by d; -select count(b), count(distinct c) from abcd group by d; +explain select count(b), count(distinct c) from abcd_n2 group by d; +select count(b), count(distinct c) from abcd_n2 group by d; --non distinct aggregate with same column as group by key -explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a; -select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a; +explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd_n2 group by a; +select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd_n2 group by a; --non distinct aggregate with same column as distinct aggregate -explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a; -select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a; +explain select a, count(distinct 
b), count(distinct c), sum(d), sum(c) from abcd_n2 group by a; +select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd_n2 group by a; --aggregation with literal -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd; -select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd; +explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd_n2; +select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd_n2; set hive.cbo.returnpath.hiveop=false; diff --git a/ql/src/test/queries/clientpositive/create_1.q b/ql/src/test/queries/clientpositive/create_1.q index f348e59022..d1b89d29b8 100644 --- a/ql/src/test/queries/clientpositive/create_1.q +++ b/ql/src/test/queries/clientpositive/create_1.q @@ -1,14 +1,14 @@ set fs.default.name=invalidscheme:///; -CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE; -DESCRIBE table1; -DESCRIBE EXTENDED table1; +CREATE TABLE table1_n5 (a STRING, b STRING) STORED AS TEXTFILE; +DESCRIBE table1_n5; +DESCRIBE EXTENDED table1_n5; -CREATE TABLE IF NOT EXISTS table1 (a STRING, b STRING) STORED AS TEXTFILE; +CREATE TABLE IF NOT EXISTS table1_n5 (a STRING, b STRING) STORED AS TEXTFILE; -CREATE TABLE IF NOT EXISTS table2 (a STRING, b INT) STORED AS TEXTFILE; -DESCRIBE table2; -DESCRIBE EXTENDED table2; +CREATE TABLE IF NOT EXISTS table2_n2 (a STRING, b INT) STORED AS TEXTFILE; +DESCRIBE table2_n2; +DESCRIBE EXTENDED table2_n2; CREATE TABLE table3 (a STRING, b STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' @@ -22,8 +22,8 @@ STORED AS SEQUENCEFILE; DESCRIBE table4; DESCRIBE EXTENDED table4; -CREATE TABLE table5 (a STRING, b STRING) +CREATE TABLE table5_n1 (a STRING, b STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS RCFILE; -DESCRIBE table5; -DESCRIBE EXTENDED table5; +DESCRIBE table5_n1; +DESCRIBE EXTENDED table5_n1; diff --git a/ql/src/test/queries/clientpositive/create_escape.q b/ql/src/test/queries/clientpositive/create_escape.q index d30b0dab80..a52d23002d 100644 --- a/ql/src/test/queries/clientpositive/create_escape.q +++ b/ql/src/test/queries/clientpositive/create_escape.q @@ -1,11 +1,11 @@ --! 
qt:dataset:src -CREATE TABLE table1 (a STRING, b STRING) +CREATE TABLE table1_n6 (a STRING, b STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' STORED AS TEXTFILE; -DESCRIBE table1; -DESCRIBE EXTENDED table1; +DESCRIBE table1_n6; +DESCRIBE EXTENDED table1_n6; -INSERT OVERWRITE TABLE table1 SELECT key, '\\\t\\' FROM src WHERE key = 86; +INSERT OVERWRITE TABLE table1_n6 SELECT key, '\\\t\\' FROM src WHERE key = 86; -SELECT * FROM table1; +SELECT * FROM table1_n6; diff --git a/ql/src/test/queries/clientpositive/create_genericudf.q b/ql/src/test/queries/clientpositive/create_genericudf.q index ad67027f2f..4e63724843 100644 --- a/ql/src/test/queries/clientpositive/create_genericudf.q +++ b/ql/src/test/queries/clientpositive/create_genericudf.q @@ -4,10 +4,10 @@ CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.gener CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate'; -CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING); +CREATE TABLE dest1_n113(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING); FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n113 SELECT test_translate('abc', 'a', 'b'), test_translate('abc', 'ab', 'bc'), @@ -17,6 +17,6 @@ SELECT test_translate('abc', 'ab', 'b'), test_translate('abc', 'a', 'ab'); -SELECT dest1.* FROM dest1 LIMIT 1; +SELECT dest1_n113.* FROM dest1_n113 LIMIT 1; DROP TEMPORARY FUNCTION test_translate; diff --git a/ql/src/test/queries/clientpositive/create_like.q b/ql/src/test/queries/clientpositive/create_like.q index b3626274c3..9ad4425dd4 100644 --- a/ql/src/test/queries/clientpositive/create_like.q +++ b/ql/src/test/queries/clientpositive/create_like.q @@ -3,43 +3,43 @@ -CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE; -DESCRIBE FORMATTED table1; +CREATE TABLE table1_n17 (a STRING, b STRING) STORED AS TEXTFILE; +DESCRIBE FORMATTED table1_n17; -CREATE TABLE table2 LIKE table1; -DESCRIBE FORMATTED table2; +CREATE TABLE table2_n12 LIKE table1_n17; +DESCRIBE FORMATTED table2_n12; -CREATE TABLE IF NOT EXISTS table2 LIKE table1; +CREATE TABLE IF NOT EXISTS table2_n12 LIKE table1_n17; -CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE table1; +CREATE EXTERNAL TABLE IF NOT EXISTS table2_n12 LIKE table1_n17; -CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE table1; -DESCRIBE FORMATTED table3; +CREATE EXTERNAL TABLE IF NOT EXISTS table3_n3 LIKE table1_n17; +DESCRIBE FORMATTED table3_n3; -INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86; -INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100; +INSERT OVERWRITE TABLE table1_n17 SELECT key, value FROM src WHERE key = 86; +INSERT OVERWRITE TABLE table2_n12 SELECT key, value FROM src WHERE key = 100; -SELECT * FROM table1; -SELECT * FROM table2; +SELECT * FROM table1_n17; +SELECT * FROM table2_n12; dfs -cp ${system:hive.root}/data/files/ext_test ${system:test.tmp.dir}/ext_test; -CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:test.tmp.dir}/ext_test'; -CREATE EXTERNAL TABLE table5 LIKE table4 LOCATION '${system:test.tmp.dir}/ext_test'; +CREATE EXTERNAL TABLE table4_n1 (a INT) LOCATION '${system:test.tmp.dir}/ext_test'; +CREATE EXTERNAL TABLE table5_n5 LIKE table4_n1 LOCATION '${system:test.tmp.dir}/ext_test'; -SELECT * FROM table4; -SELECT * FROM table5; +SELECT * FROM table4_n1; +SELECT * FROM table5_n5; -DROP TABLE table5; -SELECT * FROM table4; -DROP TABLE table4; +DROP TABLE 
table5_n5; +SELECT * FROM table4_n1; +DROP TABLE table4_n1; -CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:test.tmp.dir}/ext_test'; -SELECT * FROM table4; +CREATE EXTERNAL TABLE table4_n1 (a INT) LOCATION '${system:test.tmp.dir}/ext_test'; +SELECT * FROM table4_n1; -CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ +CREATE TABLE doctors_n2 STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "doctors", + "name": "doctors_n2", "type": "record", "fields": [ { @@ -60,10 +60,10 @@ CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ ] }'); -alter table doctors set tblproperties ('k1'='v1', 'k2'='v2'); -DESCRIBE FORMATTED doctors; +alter table doctors_n2 set tblproperties ('k1'='v1', 'k2'='v2'); +DESCRIBE FORMATTED doctors_n2; -CREATE TABLE doctors2 like doctors; +CREATE TABLE doctors2 like doctors_n2; DESCRIBE FORMATTED doctors2; CREATE TABLE PropertiedParquetTable(a INT, b STRING) STORED AS PARQUET TBLPROPERTIES("parquet.compression"="LZO"); @@ -71,28 +71,28 @@ CREATE TABLE LikePropertiedParquetTable LIKE PropertiedParquetTable; DESCRIBE FORMATTED LikePropertiedParquetTable; -CREATE TABLE table5(col1 int, col2 string) stored as TEXTFILE; -DESCRIBE FORMATTED table5; +CREATE TABLE table5_n5(col1 int, col2 string) stored as TEXTFILE; +DESCRIBE FORMATTED table5_n5; -CREATE TABLE table6 like table5 stored as RCFILE; -DESCRIBE FORMATTED table6; +CREATE TABLE table6_n4 like table5_n5 stored as RCFILE; +DESCRIBE FORMATTED table6_n4; -drop table table6; +drop table table6_n4; -CREATE TABLE table6 like table5 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' LOCATION '${system:hive.root}/data/files/table6'; -DESCRIBE FORMATTED table6; +CREATE TABLE table6_n4 like table5_n5 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' LOCATION '${system:hive.root}/data/files/table6'; +DESCRIBE FORMATTED table6_n4; -drop table table5; +drop table table5_n5; -create table orc_table ( +create table orc_table_n0 ( `time` string) stored as ORC tblproperties ("orc.compress"="SNAPPY"); -create table orc_table_using_like like orc_table; +create table orc_table_using_like like orc_table_n0; describe formatted orc_table_using_like; drop table orc_table_using_like; -drop table orc_table; +drop table orc_table_n0; diff --git a/ql/src/test/queries/clientpositive/create_like2.q b/ql/src/test/queries/clientpositive/create_like2.q index 146467b2cd..7cc10c088a 100644 --- a/ql/src/test/queries/clientpositive/create_like2.q +++ b/ql/src/test/queries/clientpositive/create_like2.q @@ -1,9 +1,9 @@ -- Tests the copying over of Table Parameters according to a HiveConf setting -- when doing a CREATE TABLE LIKE. 
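-- Aside: a minimal sketch of the whitelist behavior under test here, using
-- hypothetical table names; only parameters named in
-- hive.ddl.createtablelike.properties.whitelist should be copied over by
-- CREATE TABLE LIKE:
--
--   CREATE TABLE src_props(x INT) TBLPROPERTIES ('a'='1', 'b'='2');
--   SET hive.ddl.createtablelike.properties.whitelist=a;
--   CREATE TABLE dst_props LIKE src_props;
--   DESC FORMATTED dst_props;  -- expect parameter 'a' to be copied, not 'b'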
-CREATE TABLE table1(a INT, b STRING); -ALTER TABLE table1 SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd' = '4'); +CREATE TABLE table1_n20(a INT, b STRING); +ALTER TABLE table1_n20 SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd' = '4'); SET hive.ddl.createtablelike.properties.whitelist=a,c,D; -CREATE TABLE table2 LIKE table1; -DESC FORMATTED table2; +CREATE TABLE table2_n14 LIKE table1_n20; +DESC FORMATTED table2_n14; diff --git a/ql/src/test/queries/clientpositive/create_like_tbl_props.q b/ql/src/test/queries/clientpositive/create_like_tbl_props.q index 7e1e115459..3dfef821db 100644 --- a/ql/src/test/queries/clientpositive/create_like_tbl_props.q +++ b/ql/src/test/queries/clientpositive/create_like_tbl_props.q @@ -1,36 +1,36 @@ --! qt:dataset:src -- Test that CREATE TABLE LIKE commands can take explicit table properties -CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value'); +CREATE TABLE test_table_n15 LIKE src TBLPROPERTIES('key'='value'); -DESC FORMATTED test_table; +DESC FORMATTED test_table_n15; set hive.table.parameters.default=key1=value1; --Test that CREATE TABLE LIKE commands can take default table properties -CREATE TABLE test_table1 LIKE src; +CREATE TABLE test_table1_n18 LIKE src; -DESC FORMATTED test_table1; +DESC FORMATTED test_table1_n18; -- Test that CREATE TABLE LIKE commands can take default and explicit table properties -CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2'); +CREATE TABLE test_table2_n17 LIKE src TBLPROPERTIES('key2' = 'value2'); -DESC FORMATTED test_table2; +DESC FORMATTED test_table2_n17; set hive.ddl.createtablelike.properties.whitelist=key2; -- Test that properties inherited are overwritten by explicitly set ones -CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3'); +CREATE TABLE test_table3_n9 LIKE test_table2_n17 TBLPROPERTIES('key2' = 'value3'); -DESC FORMATTED test_table3; +DESC FORMATTED test_table3_n9; --Test that CREATE TABLE LIKE on a view can take explicit table properties CREATE VIEW test_view (key, value) AS SELECT * FROM src; -CREATE TABLE test_table4 LIKE test_view TBLPROPERTIES('key'='value'); +CREATE TABLE test_table4_n2 LIKE test_view TBLPROPERTIES('key'='value'); -DESC FORMATTED test_table4; +DESC FORMATTED test_table4_n2; diff --git a/ql/src/test/queries/clientpositive/create_like_view.q b/ql/src/test/queries/clientpositive/create_like_view.q index 874389ff8d..5db11929ef 100644 --- a/ql/src/test/queries/clientpositive/create_like_view.q +++ b/ql/src/test/queries/clientpositive/create_like_view.q @@ -2,42 +2,42 @@ --!
qt:dataset:src -- SORT_QUERY_RESULTS -DROP TABLE IF EXISTS table1; -DROP TABLE IF EXISTS table2; -DROP TABLE IF EXISTS table3; -DROP VIEW IF EXISTS view1; +DROP TABLE IF EXISTS table1_n14; +DROP TABLE IF EXISTS table2_n9; +DROP TABLE IF EXISTS table3_n2; +DROP VIEW IF EXISTS view1_n1; -CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE; -DESCRIBE table1; -DESCRIBE FORMATTED table1; +CREATE TABLE table1_n14 (a STRING, b STRING) STORED AS TEXTFILE; +DESCRIBE table1_n14; +DESCRIBE FORMATTED table1_n14; -CREATE VIEW view1 AS SELECT * FROM table1; +CREATE VIEW view1_n1 AS SELECT * FROM table1_n14; -CREATE TABLE table2 LIKE view1; -DESCRIBE table2; -DESCRIBE FORMATTED table2; +CREATE TABLE table2_n9 LIKE view1_n1; +DESCRIBE table2_n9; +DESCRIBE FORMATTED table2_n9; -CREATE TABLE IF NOT EXISTS table2 LIKE view1; +CREATE TABLE IF NOT EXISTS table2_n9 LIKE view1_n1; -CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE view1; +CREATE EXTERNAL TABLE IF NOT EXISTS table2_n9 LIKE view1_n1; -CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE view1; -DESCRIBE table3; -DESCRIBE FORMATTED table3; +CREATE EXTERNAL TABLE IF NOT EXISTS table3_n2 LIKE view1_n1; +DESCRIBE table3_n2; +DESCRIBE FORMATTED table3_n2; -INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86; -INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100; +INSERT OVERWRITE TABLE table1_n14 SELECT key, value FROM src WHERE key = 86; +INSERT OVERWRITE TABLE table2_n9 SELECT key, value FROM src WHERE key = 100; -SELECT * FROM table1; -SELECT * FROM table2; +SELECT * FROM table1_n14; +SELECT * FROM table2_n9; -DROP TABLE table1; -DROP TABLE table2; -DROP VIEW view1; +DROP TABLE table1_n14; +DROP TABLE table2_n9; +DROP VIEW view1_n1; -- check partitions -create view view1 partitioned on (ds, hr) as select * from srcpart; -create table table1 like view1; -describe formatted table1; -DROP TABLE table1; -DROP VIEW view1; \ No newline at end of file +create view view1_n1 partitioned on (ds, hr) as select * from srcpart; +create table table1_n14 like view1_n1; +describe formatted table1_n14; +DROP TABLE table1_n14; +DROP VIEW view1_n1; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/create_merge_compressed.q b/ql/src/test/queries/clientpositive/create_merge_compressed.q index 995401bf0c..c4ac823e64 100644 --- a/ql/src/test/queries/clientpositive/create_merge_compressed.q +++ b/ql/src/test/queries/clientpositive/create_merge_compressed.q @@ -1,26 +1,26 @@ set hive.strict.checks.bucketing=false; -create table src_rc_merge_test(key int, value string) stored as rcfile; +create table src_rc_merge_test_n1(key int, value string) stored as rcfile; -load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test; +load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n1; set hive.exec.compress.output = true; -create table tgt_rc_merge_test(key int, value string) stored as rcfile; -insert into table tgt_rc_merge_test select * from src_rc_merge_test; -insert into table tgt_rc_merge_test select * from src_rc_merge_test; +create table tgt_rc_merge_test_n1(key int, value string) stored as rcfile; +insert into table tgt_rc_merge_test_n1 select * from src_rc_merge_test_n1; +insert into table tgt_rc_merge_test_n1 select * from src_rc_merge_test_n1; -show table extended like `tgt_rc_merge_test`; +show table extended like `tgt_rc_merge_test_n1`; -select count(1) from tgt_rc_merge_test; -select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test; 
+select count(1) from tgt_rc_merge_test_n1; +select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n1; -alter table tgt_rc_merge_test concatenate; +alter table tgt_rc_merge_test_n1 concatenate; -show table extended like `tgt_rc_merge_test`; +show table extended like `tgt_rc_merge_test_n1`; -select count(1) from tgt_rc_merge_test; -select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test; +select count(1) from tgt_rc_merge_test_n1; +select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n1; -drop table src_rc_merge_test; -drop table tgt_rc_merge_test; \ No newline at end of file +drop table src_rc_merge_test_n1; +drop table tgt_rc_merge_test_n1; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/create_nested_type.q b/ql/src/test/queries/clientpositive/create_nested_type.q index 735b139719..fc2ee8d339 100644 --- a/ql/src/test/queries/clientpositive/create_nested_type.q +++ b/ql/src/test/queries/clientpositive/create_nested_type.q @@ -1,16 +1,16 @@ -CREATE TABLE table1 ( +CREATE TABLE table1_n2 ( a STRING, b ARRAY<STRING>, c ARRAY<MAP<STRING,STRING>>, d MAP<STRING,ARRAY<STRING>> ) STORED AS TEXTFILE; -DESCRIBE table1; -DESCRIBE EXTENDED table1; +DESCRIBE table1_n2; +DESCRIBE EXTENDED table1_n2; -LOAD DATA LOCAL INPATH '../../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table1; +LOAD DATA LOCAL INPATH '../../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table1_n2; -SELECT * from table1; +SELECT * from table1_n2; diff --git a/ql/src/test/queries/clientpositive/create_table_like_stats.q b/ql/src/test/queries/clientpositive/create_table_like_stats.q index a59c0a6e22..65c48164c8 100644 --- a/ql/src/test/queries/clientpositive/create_table_like_stats.q +++ b/ql/src/test/queries/clientpositive/create_table_like_stats.q @@ -4,29 +4,29 @@ set hive.mapred.mode=nonstrict; dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/t; -drop table a; +drop table a_n13; -create table a like src; +create table a_n13 like src; -desc formatted a; +desc formatted a_n13; -drop table a; +drop table a_n13; -create table a like src location '${system:test.tmp.dir}/t'; +create table a_n13 like src location '${system:test.tmp.dir}/t'; -desc formatted a; +desc formatted a_n13; -drop table a; +drop table a_n13; -create table a (key STRING COMMENT 'default', value STRING COMMENT 'default') +create table a_n13 (key STRING COMMENT 'default', value STRING COMMENT 'default') PARTITIONED BY (ds STRING, hr STRING) STORED AS TEXTFILE; -desc formatted a; +desc formatted a_n13; -drop table a; +drop table a_n13; -create table a like srcpart; +create table a_n13 like srcpart; -desc formatted a; +desc formatted a_n13; diff --git a/ql/src/test/queries/clientpositive/create_udaf.q b/ql/src/test/queries/clientpositive/create_udaf.q index ee310aa074..b8fe5e6326 100644 --- a/ql/src/test/queries/clientpositive/create_udaf.q +++ b/ql/src/test/queries/clientpositive/create_udaf.q @@ -4,11 +4,11 @@ CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax'; -CREATE TABLE dest1(col INT); +CREATE TABLE dest1_n34(col INT); -FROM src INSERT OVERWRITE TABLE dest1 SELECT test_max(length(src.value)); +FROM src INSERT OVERWRITE TABLE dest1_n34 SELECT test_max(length(src.value)); -SELECT dest1.* FROM dest1; +SELECT dest1_n34.* FROM dest1_n34; -- cover all the other value types: SELECT test_max(CAST(length(src.value) AS SMALLINT)) FROM src; diff --git a/ql/src/test/queries/clientpositive/create_view.q
b/ql/src/test/queries/clientpositive/create_view.q index b04b5d8697..b3534e2f0e 100644 --- a/ql/src/test/queries/clientpositive/create_view.q +++ b/ql/src/test/queries/clientpositive/create_view.q @@ -56,28 +56,28 @@ ALTER VIEW view3 SET TBLPROPERTIES ("biggest" = "loser"); DESCRIBE EXTENDED view3; DESCRIBE FORMATTED view3; -CREATE TABLE table1 (key int); +CREATE TABLE table1_n4 (key int); -- use DESCRIBE EXTENDED on a base table and an external table as points -- of comparison for view descriptions -DESCRIBE EXTENDED table1; +DESCRIBE EXTENDED table1_n4; DESCRIBE EXTENDED src1; -- use DESCRIBE EXTENDED on a base table as a point of comparison for -- view descriptions -DESCRIBE EXTENDED table1; +DESCRIBE EXTENDED table1_n4; -INSERT OVERWRITE TABLE table1 SELECT key FROM src WHERE key = 86; +INSERT OVERWRITE TABLE table1_n4 SELECT key FROM src WHERE key = 86; -SELECT * FROM table1; -CREATE VIEW view4 AS SELECT * FROM table1; +SELECT * FROM table1_n4; +CREATE VIEW view4 AS SELECT * FROM table1_n4; SELECT * FROM view4; DESCRIBE view4; -ALTER TABLE table1 ADD COLUMNS (value STRING); -SELECT * FROM table1; +ALTER TABLE table1_n4 ADD COLUMNS (value STRING); +SELECT * FROM table1_n4; SELECT * FROM view4; -DESCRIBE table1; +DESCRIBE table1_n4; DESCRIBE view4; CREATE VIEW view5 AS SELECT v1.key as key1, v2.key as key2 @@ -116,7 +116,7 @@ CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate'; CREATE VIEW view8(c) AS SELECT test_translate('abc', 'a', 'b') -FROM table1; +FROM table1_n4; DESCRIBE EXTENDED view8; DESCRIBE FORMATTED view8; SELECT * FROM view8; @@ -154,7 +154,7 @@ CREATE TEMPORARY FUNCTION test_explode AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDTFExplode'; CREATE VIEW view11 AS SELECT test_explode(array(1,2,3)) AS (boom) -FROM table1; +FROM table1_n4; DESCRIBE EXTENDED view11; DESCRIBE FORMATTED view11; SELECT * FROM view11; @@ -223,8 +223,8 @@ DROP TABLE IF EXISTS view16; DESCRIBE view16; -- Likewise, DROP VIEW IF EXISTS should ignore a matching table name -DROP VIEW IF EXISTS table1; -DESCRIBE table1; +DROP VIEW IF EXISTS table1_n4; +DESCRIBE table1_n4; -- this should work since currently we don't track view->table -- dependencies for implementing RESTRICT diff --git a/ql/src/test/queries/clientpositive/create_view_partitioned.q b/ql/src/test/queries/clientpositive/create_view_partitioned.q index 5116b13bdc..691a0f5b02 100644 --- a/ql/src/test/queries/clientpositive/create_view_partitioned.q +++ b/ql/src/test/queries/clientpositive/create_view_partitioned.q @@ -87,11 +87,11 @@ DROP VIEW vp3; -- HIVE-16828 set hive.security.authorization.enabled=true; -CREATE TABLE table1 (id int) PARTITIONED BY (year int); +CREATE TABLE table1_n12 (id int) PARTITIONED BY (year int); -- create partitioned view -CREATE VIEW view1 partitioned on (year) as select id, year from table1; +CREATE VIEW view1_n0 partitioned on (year) as select id, year from table1_n12; -select year from view1; +select year from view1_n0; -Drop view view1; -drop table table1; \ No newline at end of file +Drop view view1_n0; +drop table table1_n12; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/create_view_translate.q b/ql/src/test/queries/clientpositive/create_view_translate.q index f4704c7fa2..59545c48ff 100644 --- a/ql/src/test/queries/clientpositive/create_view_translate.q +++ b/ql/src/test/queries/clientpositive/create_view_translate.q @@ -1,16 +1,16 @@ --! 
qt:dataset:src -drop view if exists v; +drop view if exists v_n7; drop view if exists w; -create view v as select cast(key as string) from src; -describe formatted v; +create view v_n7 as select cast(key as string) from src; +describe formatted v_n7; create view w as select key, value from ( select key, value from src ) a; describe formatted w; -drop view v; +drop view v_n7; drop view w; diff --git a/ql/src/test/queries/clientpositive/create_with_constraints.q b/ql/src/test/queries/clientpositive/create_with_constraints.q index 10712ca69a..084ca8d8c1 100644 --- a/ql/src/test/queries/clientpositive/create_with_constraints.q +++ b/ql/src/test/queries/clientpositive/create_with_constraints.q @@ -1,22 +1,22 @@ -CREATE TABLE table1 (a STRING, b STRING, PRIMARY KEY (a) DISABLE); -CREATE TABLE table2 (a STRING, b STRING, CONSTRAINT pk1 PRIMARY KEY (a) DISABLE); -CREATE TABLE table3 (x string NOT NULL DISABLE, PRIMARY KEY (x) DISABLE, CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2(a) DISABLE); -CREATE TABLE table4 (x string CONSTRAINT nn4_1 NOT NULL DISABLE, y string CONSTRAINT nn4_2 NOT NULL DISABLE, UNIQUE (x) DISABLE, CONSTRAINT fk2 FOREIGN KEY (x) REFERENCES table2(a) DISABLE, -CONSTRAINT fk3 FOREIGN KEY (y) REFERENCES table2(a) DISABLE); -CREATE TABLE table5 (x string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2(a) DISABLE); -CREATE TABLE table6 (x string, y string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2(a) DISABLE, -CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1(a) DISABLE); -CREATE TABLE table7 (a STRING, b STRING, PRIMARY KEY (a) DISABLE RELY); +CREATE TABLE table1_n13 (a STRING, b STRING, PRIMARY KEY (a) DISABLE); +CREATE TABLE table2_n8 (a STRING, b STRING, CONSTRAINT pk1 PRIMARY KEY (a) DISABLE); +CREATE TABLE table3_n1 (x string NOT NULL DISABLE, PRIMARY KEY (x) DISABLE, CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE); +CREATE TABLE table4_n0 (x string CONSTRAINT nn4_1 NOT NULL DISABLE, y string CONSTRAINT nn4_2 NOT NULL DISABLE, UNIQUE (x) DISABLE, CONSTRAINT fk2 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE, +CONSTRAINT fk3 FOREIGN KEY (y) REFERENCES table2_n8(a) DISABLE); +CREATE TABLE table5_n4 (x string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE); +CREATE TABLE table6_n3 (x string, y string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE, +CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1_n13(a) DISABLE); +CREATE TABLE table7_n3 (a STRING, b STRING, PRIMARY KEY (a) DISABLE RELY); CREATE TABLE table8 (a STRING, b STRING, CONSTRAINT pk8 PRIMARY KEY (a) DISABLE NORELY); CREATE TABLE table9 (a STRING, b STRING, PRIMARY KEY (a, b) DISABLE RELY); CREATE TABLE table10 (a STRING, b STRING, CONSTRAINT pk10 PRIMARY KEY (a) DISABLE NORELY, FOREIGN KEY (a, b) REFERENCES table9(a, b) DISABLE); CREATE TABLE table11 (a STRING, b STRING, c STRING, CONSTRAINT pk11 PRIMARY KEY (a) DISABLE RELY, CONSTRAINT fk11_1 FOREIGN KEY (a, b) REFERENCES table9(a, b) DISABLE, -CONSTRAINT fk11_2 FOREIGN KEY (c) REFERENCES table4(x) DISABLE); +CONSTRAINT fk11_2 FOREIGN KEY (c) REFERENCES table4_n0(x) DISABLE); CREATE TABLE table12 (a STRING CONSTRAINT nn12_1 NOT NULL DISABLE NORELY, b STRING); CREATE TABLE table13 (b STRING) PARTITIONED BY (a STRING NOT NULL DISABLE RELY); CREATE TABLE table14 (a STRING CONSTRAINT nn14_1 NOT NULL DISABLE RELY, b STRING); -CREATE TABLE table15 (a STRING REFERENCES table4(x) DISABLE, b STRING); -CREATE TABLE table16 (a STRING CONSTRAINT nn16_1 REFERENCES table4(x) 
DISABLE RELY, b STRING); +CREATE TABLE table15 (a STRING REFERENCES table4_n0(x) DISABLE, b STRING); +CREATE TABLE table16 (a STRING CONSTRAINT nn16_1 REFERENCES table4_n0(x) DISABLE RELY, b STRING); CREATE TABLE table17 (a STRING CONSTRAINT uk17_1 UNIQUE DISABLE RELY, b STRING); CREATE TABLE table18 (a STRING, CONSTRAINT uk18_1 UNIQUE (b) DISABLE RELY) PARTITIONED BY (b STRING); CREATE TABLE table19 (a STRING, b STRING, CONSTRAINT pk19_1 PRIMARY KEY (b) DISABLE RELY, CONSTRAINT fk19_2 FOREIGN KEY (a) REFERENCES table19(b) DISABLE RELY); @@ -25,13 +25,13 @@ CREATE TABLE table21 (a STRING, CONSTRAINT uk21_1 UNIQUE (a,b) DISABLE) PARTITIO CREATE TABLE table22 (a STRING, b STRING, CONSTRAINT fk22_1 FOREIGN KEY (a,b) REFERENCES table21(a,b) DISABLE); -DESCRIBE EXTENDED table1; -DESCRIBE EXTENDED table2; -DESCRIBE EXTENDED table3; -DESCRIBE EXTENDED table4; -DESCRIBE EXTENDED table5; -DESCRIBE EXTENDED table6; -DESCRIBE EXTENDED table7; +DESCRIBE EXTENDED table1_n13; +DESCRIBE EXTENDED table2_n8; +DESCRIBE EXTENDED table3_n1; +DESCRIBE EXTENDED table4_n0; +DESCRIBE EXTENDED table5_n4; +DESCRIBE EXTENDED table6_n3; +DESCRIBE EXTENDED table7_n3; DESCRIBE EXTENDED table8; DESCRIBE EXTENDED table9; DESCRIBE EXTENDED table10; @@ -48,13 +48,13 @@ DESCRIBE EXTENDED table20; DESCRIBE EXTENDED table21; DESCRIBE EXTENDED table22; -DESCRIBE FORMATTED table1; -DESCRIBE FORMATTED table2; -DESCRIBE FORMATTED table3; -DESCRIBE FORMATTED table4; -DESCRIBE FORMATTED table5; -DESCRIBE FORMATTED table6; -DESCRIBE FORMATTED table7; +DESCRIBE FORMATTED table1_n13; +DESCRIBE FORMATTED table2_n8; +DESCRIBE FORMATTED table3_n1; +DESCRIBE FORMATTED table4_n0; +DESCRIBE FORMATTED table5_n4; +DESCRIBE FORMATTED table6_n3; +DESCRIBE FORMATTED table7_n3; DESCRIBE FORMATTED table8; DESCRIBE FORMATTED table9; DESCRIBE FORMATTED table10; @@ -71,40 +71,40 @@ DESCRIBE FORMATTED table20; DESCRIBE FORMATTED table21; DESCRIBE FORMATTED table22; -ALTER TABLE table2 DROP CONSTRAINT pk1; -ALTER TABLE table3 DROP CONSTRAINT fk1; -ALTER TABLE table4 DROP CONSTRAINT nn4_1; -ALTER TABLE table6 DROP CONSTRAINT fk4; +ALTER TABLE table2_n8 DROP CONSTRAINT pk1; +ALTER TABLE table3_n1 DROP CONSTRAINT fk1; +ALTER TABLE table4_n0 DROP CONSTRAINT nn4_1; +ALTER TABLE table6_n3 DROP CONSTRAINT fk4; ALTER TABLE table8 DROP CONSTRAINT pk8; ALTER TABLE table16 DROP CONSTRAINT nn16_1; ALTER TABLE table18 DROP CONSTRAINT uk18_1; -DESCRIBE EXTENDED table2; -DESCRIBE EXTENDED table3; -DESCRIBE EXTENDED table4; -DESCRIBE EXTENDED table6; +DESCRIBE EXTENDED table2_n8; +DESCRIBE EXTENDED table3_n1; +DESCRIBE EXTENDED table4_n0; +DESCRIBE EXTENDED table6_n3; DESCRIBE EXTENDED table8; DESCRIBE EXTENDED table16; DESCRIBE EXTENDED table18; -DESCRIBE FORMATTED table2; -DESCRIBE FORMATTED table3; -DESCRIBE FORMATTED table4; -DESCRIBE FORMATTED table6; +DESCRIBE FORMATTED table2_n8; +DESCRIBE FORMATTED table3_n1; +DESCRIBE FORMATTED table4_n0; +DESCRIBE FORMATTED table6_n3; DESCRIBE FORMATTED table8; DESCRIBE FORMATTED table16; DESCRIBE FORMATTED table18; -ALTER TABLE table2 ADD CONSTRAINT pkt2 PRIMARY KEY (a) DISABLE NOVALIDATE; -ALTER TABLE table3 ADD CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2(a) DISABLE NOVALIDATE RELY; -ALTER TABLE table6 ADD CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1(a) DISABLE NOVALIDATE; +ALTER TABLE table2_n8 ADD CONSTRAINT pkt2 PRIMARY KEY (a) DISABLE NOVALIDATE; +ALTER TABLE table3_n1 ADD CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE NOVALIDATE RELY; +ALTER TABLE table6_n3 ADD CONSTRAINT fk4 FOREIGN KEY (y) 
REFERENCES table1_n13(a) DISABLE NOVALIDATE; ALTER TABLE table8 ADD CONSTRAINT pk8_2 PRIMARY KEY (a, b) DISABLE NOVALIDATE RELY; -ALTER TABLE table16 CHANGE a a STRING REFERENCES table4(x) DISABLE NOVALIDATE; +ALTER TABLE table16 CHANGE a a STRING REFERENCES table4_n0(x) DISABLE NOVALIDATE; ALTER TABLE table18 ADD CONSTRAINT uk18_2 UNIQUE (a, b) DISABLE NOVALIDATE; -DESCRIBE FORMATTED table2; -DESCRIBE FORMATTED table3; -DESCRIBE FORMATTED table6; +DESCRIBE FORMATTED table2_n8; +DESCRIBE FORMATTED table3_n1; +DESCRIBE FORMATTED table6_n3; DESCRIBE FORMATTED table8; DESCRIBE FORMATTED table16; DESCRIBE FORMATTED table18; @@ -134,7 +134,7 @@ DESCRIBE FORMATTED DbConstraint.Table2; ALTER TABLE DbConstraint.Table2 ADD CONSTRAINT Pk1 PRIMARY KEY (a) DISABLE NOVALIDATE; DESCRIBE FORMATTED DbConstraint.Table2; -ALTER TABLE DbConstraint.Table2 ADD CONSTRAINT fkx FOREIGN KEY (b) REFERENCES table1(a) DISABLE NOVALIDATE; +ALTER TABLE DbConstraint.Table2 ADD CONSTRAINT fkx FOREIGN KEY (b) REFERENCES table1_n13(a) DISABLE NOVALIDATE; DESCRIBE FORMATTED DbConstraint.Table2; CREATE TABLE table23 (a STRING) PARTITIONED BY (b STRING); @@ -145,10 +145,10 @@ ALTER TABLE table23 ADD CONSTRAINT pk23_1 PRIMARY KEY (b) DISABLE RELY; DESCRIBE FORMATTED table23; set hive.metastore.try.direct.sql=false; -CREATE TABLE numericDataType(a TINYINT, b SMALLINT NOT NULL ENABLE, c INT, +CREATE TABLE numericDataType_n0(a TINYINT, b SMALLINT NOT NULL ENABLE, c INT, d BIGINT , e DOUBLE , f DECIMAL(9,2)); -INSERT INTO numericDataType values(2,45,5667,67890,5.6,678.5); +INSERT INTO numericDataType_n0 values(2,45,5667,67890,5.6,678.5); -ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE; -DESC FORMATTED numericDataType; -DROP TABLE numericDataType; +ALTER TABLE numericDataType_n0 ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE; +DESC FORMATTED numericDataType_n0; +DROP TABLE numericDataType_n0; diff --git a/ql/src/test/queries/clientpositive/create_with_constraints2.q b/ql/src/test/queries/clientpositive/create_with_constraints2.q index 22ea23d0f7..f423b6462e 100644 --- a/ql/src/test/queries/clientpositive/create_with_constraints2.q +++ b/ql/src/test/queries/clientpositive/create_with_constraints2.q @@ -1,8 +1,8 @@ -CREATE TABLE table1 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED); -CREATE TABLE table2 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED RELY); +CREATE TABLE table1_n11 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED); +CREATE TABLE table2_n7 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED RELY); -DESCRIBE EXTENDED table1; -DESCRIBE EXTENDED table2; +DESCRIBE EXTENDED table1_n11; +DESCRIBE EXTENDED table2_n7; -DESCRIBE FORMATTED table1; -DESCRIBE FORMATTED table2; +DESCRIBE FORMATTED table1_n11; +DESCRIBE FORMATTED table2_n7; diff --git a/ql/src/test/queries/clientpositive/cross_prod_1.q b/ql/src/test/queries/clientpositive/cross_prod_1.q index 82c80ae825..c88bf7bf80 100644 --- a/ql/src/test/queries/clientpositive/cross_prod_1.q +++ b/ql/src/test/queries/clientpositive/cross_prod_1.q @@ -3,33 +3,33 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; set hive.tez.cartesian-product.enabled=true; -create table X as +create table X_n0 as select distinct * from src order by key limit 10; -explain select * from X as A, X as B order by A.key, B.key; -select * from X as A, X as B order by A.key, B.key; +explain select * from X_n0 as A, X_n0 as B order by A.key, B.key; +select * from X_n0 as A, X_n0 as B order by A.key, B.key; -explain select * from X as A join X as B on 
A.key cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1; -SELECT `dec` * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1; +SELECT `dec` * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION_n0 LIMIT 1; +SELECT * from DECIMAL_PRECISION_n0 WHERE `dec` > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1; +SELECT `dec` * 12345678901234567890.12345678 FROM DECIMAL_PRECISION_n0 LIMIT 1; -SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION; -SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION; +SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION_n0; +SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION_n0; -DROP TABLE DECIMAL_PRECISION; +DROP TABLE DECIMAL_PRECISION_n0; -- Expect overflow and return null as the value -CREATE TABLE DECIMAL_PRECISION(`dec` decimal(38,18)); -INSERT INTO DECIMAL_PRECISION VALUES(98765432109876543210.12345), (98765432109876543210.12345); -SELECT SUM(`dec`) FROM DECIMAL_PRECISION; +CREATE TABLE DECIMAL_PRECISION_n0(`dec` decimal(38,18)); +INSERT INTO DECIMAL_PRECISION_n0 VALUES(98765432109876543210.12345), (98765432109876543210.12345); +SELECT SUM(`dec`) FROM DECIMAL_PRECISION_n0; -DROP TABLE DECIMAL_PRECISION; +DROP TABLE DECIMAL_PRECISION_n0; diff --git a/ql/src/test/queries/clientpositive/decimal_stats.q b/ql/src/test/queries/clientpositive/decimal_stats.q index ab87ad65bc..71676248cb 100644 --- a/ql/src/test/queries/clientpositive/decimal_stats.q +++ b/ql/src/test/queries/clientpositive/decimal_stats.q @@ -1,17 +1,17 @@ --! qt:dataset:src set hive.stats.fetch.column.stats=true; -drop table if exists decimal_1; +drop table if exists decimal_1_n1; -create table decimal_1 (t decimal(4,2), u decimal(5), v decimal); +create table decimal_1_n1 (t decimal(4,2), u decimal(5), v decimal); -desc decimal_1; +desc decimal_1_n1; -insert overwrite table decimal_1 +insert overwrite table decimal_1_n1 select cast('17.29' as decimal(4,2)), 3.1415926BD, null from src; -analyze table decimal_1 compute statistics for columns; +analyze table decimal_1_n1 compute statistics for columns; -desc formatted decimal_1 v; +desc formatted decimal_1_n1 v; -explain select * from decimal_1 order by t limit 100; -drop table decimal_1; +explain select * from decimal_1_n1 order by t limit 100; +drop table decimal_1_n1; diff --git a/ql/src/test/queries/clientpositive/decimal_trailing.q b/ql/src/test/queries/clientpositive/decimal_trailing.q index fca69c488f..5a8e8fcf56 100644 --- a/ql/src/test/queries/clientpositive/decimal_trailing.q +++ b/ql/src/test/queries/clientpositive/decimal_trailing.q @@ -1,7 +1,7 @@ set hive.mapred.mode=nonstrict; -DROP TABLE IF EXISTS DECIMAL_TRAILING; +DROP TABLE IF EXISTS DECIMAL_TRAILING_n0; -CREATE TABLE DECIMAL_TRAILING ( +CREATE TABLE DECIMAL_TRAILING_n0 ( id int, a decimal(10,4), b decimal(15,8) @@ -10,8 +10,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING; +LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING_n0; -SELECT * FROM DECIMAL_TRAILING ORDER BY id; +SELECT * FROM DECIMAL_TRAILING_n0 ORDER BY id; -DROP TABLE DECIMAL_TRAILING; +DROP TABLE DECIMAL_TRAILING_n0; diff --git a/ql/src/test/queries/clientpositive/default_constraint.q b/ql/src/test/queries/clientpositive/default_constraint.q index 
a86622b802..5651e46c2c 100644 --- a/ql/src/test/queries/clientpositive/default_constraint.q +++ b/ql/src/test/queries/clientpositive/default_constraint.q @@ -3,59 +3,59 @@ -- MASK_DATA_SIZE - set hive.stats.autogather=false; - set hive.support.concurrency=true; - set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + set_n30 hive.stats.autogather=false; + set_n30 hive.support_n30.concurrency=true; + set_n30 hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, +CREATE TABLE numericDataType_n1(a TINYINT CONSTRAINT tinyint_constraint_n30 DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -DESC FORMATTED numericDataType; +DESC FORMATTED numericDataType_n1; -EXPLAIN INSERT INTO numericDataType(a) values(3Y); -INSERT INTO numericDataType(a) values(3Y); -SELECT * FROM numericDataType; +EXPLAIN INSERT INTO numericDataType_n1(a) values(3Y); +INSERT INTO numericDataType_n1(a) values(3Y); +SELECT * FROM numericDataType_n1; -EXPLAIN INSERT INTO numericDataType(e,f) values(4.5, 678.4); -INSERT INTO numericDataType(e,f) values(4.5, 678.4); -SELECT * FROM numericDataType; +EXPLAIN INSERT INTO numericDataType_n1(e,f) values(4.5, 678.4); +INSERT INTO numericDataType_n1(e,f) values(4.5, 678.4); +SELECT * FROM numericDataType_n1; -DROP TABLE numericDataType; +DROP TABLE numericDataType_n1; -- Date/time -CREATE TABLE table1(d DATE DEFAULT DATE'2018-02-14', t TIMESTAMP DEFAULT TIMESTAMP'2016-02-22 12:45:07.000000000', +CREATE TABLE table1_n16(d DATE DEFAULT DATE'2018-02-14', t_n30 TIMESTAMP DEFAULT TIMESTAMP'2016-02-22 12:45:07.000000000', tz timestamp with local time zone DEFAULT TIMESTAMPLOCALTZ'2016-01-03 12:26:34 America/Los_Angeles', d1 DATE DEFAULT current_date() ENABLE, t1 TIMESTAMP DEFAULT current_timestamp() DISABLE); -DESC FORMATTED table1; +DESC FORMATTED table1_n16; -EXPLAIN INSERT INTO table1(t) values ("1985-12-31 12:45:07"); -INSERT INTO table1(t) values ("1985-12-31 12:45:07"); -SELECT d, t, tz,d1=current_date(), t1 from table1; +EXPLAIN INSERT INTO table1_n16(t_n30) values ("1985-12-31 12:45:07"); +INSERT INTO table1_n16(t_n30) values ("1985-12-31 12:45:07"); +SELECT d, t_n30, tz,d1=current_date(), t1 from table1_n16; -EXPLAIN INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259'); -INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259'); -SELECT d, t, tz,d1=current_date(), t1=current_timestamp() from table1; +EXPLAIN INSERT INTO table1_n16(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259'); +INSERT INTO table1_n16(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259'); +SELECT d, t_n30, tz,d1=current_date(), t1=current_timestamp() from table1_n16; -DROP TABLE table1; +DROP TABLE table1_n16; -- string type -CREATE TABLE table2(i STRING DEFAULT 'current_database()', j STRING DEFAULT current_user(), - k STRING DEFAULT 'Current_User()', v varchar(350) DEFAULT cast('varchar_default_value' as varchar(350)), - c char(20) DEFAULT cast('char_value' as char(20))) +CREATE TABLE table2_n11(i STRING DEFAULT 'current_database()', j STRING DEFAULT current_user(), + k STRING DEFAULT 'Current_User()', v varchar(350) DEFAULT cast_n30('varchar_default_value' as varchar(350)), + c char(20) 
DEFAULT cast_n30('char_value' as char(20))) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -DESC FORMATTED table2; -EXPLAIN INSERT INTO table2(i) values('default'); -INSERT INTO table2(i) values('default'); -SELECT i,j=current_user(),k,v,c FROM table2; +DESC FORMATTED table2_n11; +EXPLAIN INSERT INTO table2_n11(i) values('default_n30'); +INSERT INTO table2_n11(i) values('default_n30'); +SELECT i,j=current_user(),k,v,c FROM table2_n11; -EXPLAIN INSERT INTO table2(v, c) values('varchar_default2', 'char'); -INSERT INTO table2(v, c) values('varchar_default2', 'char'); -SELECT i,j=current_user(),k,v,c FROM table2; -DROP TABLE table2; +EXPLAIN INSERT INTO table2_n11(v, c) values('varchar_default2', 'char'); +INSERT INTO table2_n11(v, c) values('varchar_default2', 'char'); +SELECT i,j=current_user(),k,v,c FROM table2_n11; +DROP TABLE table2_n11; -- misc type -CREATE TABLE misc(b BOOLEAN DEFAULT true, b1 BINARY DEFAULT cast('bin' as binary)) +CREATE TABLE misc(b BOOLEAN DEFAULT true, b1 BINARY DEFAULT cast_n30('bin' as binary)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); DESC FORMATTED misc; EXPLAIN INSERT INTO misc(b) values(false); @@ -67,117 +67,117 @@ SELECT b, b1 from misc; DROP TABLE misc; -- CAST -CREATE table t11(i int default cast(cast(4 as double) as int), - b1 boolean default cast ('true' as boolean), b2 int default cast (5.67 as int), - b3 tinyint default cast (45 as tinyint), b4 float default cast (45.4 as float), - b5 bigint default cast (567 as bigint), b6 smallint default cast (88 as smallint), - j varchar(50) default cast(current_timestamp() as varchar(50)), - k string default cast(cast(current_user() as varchar(50)) as string), - tz1 timestamp with local time zone DEFAULT cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone), - ts timestamp default cast('2016-01-01 12:01:01' as timestamp), - dc decimal(8,2) default cast(4.5 as decimal(8,2)), - c2 double default cast(5 as double), c4 char(2) default cast(cast(cast('ab' as string) as varchar(2)) as char(2))); -DESC FORMATTED t11; -EXPLAIN INSERT INTO t11(c4) values('vi'); -INSERT INTO t11(c4) values('vi'); -SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11; - -EXPLAIN INSERT INTO t11(b1,c4) values(true,'ga'); -INSERT INTO t11(c4) values('vi'); -SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11; - -DROP TABLE t11; +CREATE table t11_n2(i int_n30 default_n30 cast_n30(cast_n30(4 as double) as int_n30), + b1 boolean default_n30 cast_n30 ('true' as boolean), b2 int_n30 default_n30 cast_n30 (5.67 as int_n30), + b3 tinyint_n30 default_n30 cast_n30 (45 as tinyint_n30), b4 float_n30 default_n30 cast_n30 (45.4 as float_n30), + b5 bigint_n30 default_n30 cast_n30 (567 as bigint_n30), b6 smallint_n30 default_n30 cast_n30 (88 as smallint_n30), + j varchar(50) default_n30 cast_n30(current_timestamp() as varchar(50)), + k string default_n30 cast_n30(cast_n30(current_user() as varchar(50)) as string), + tz1 timestamp with local time zone DEFAULT cast_n30('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone), + ts timestamp default_n30 cast_n30('2016-01-01 12:01:01' as timestamp), + dc decimal(8,2) default_n30 cast_n30(4.5 as decimal(8,2)), + c2 double default_n30 cast_n30(5 as double), c4 char(2) default_n30 cast_n30(cast_n30(cast_n30('ab' as string) as 
varchar(2)) as char(2))); +DESC FORMATTED t11_n2; +EXPLAIN INSERT INTO t11_n2(c4) values('vi'); +INSERT INTO t11_n2(c4) values('vi'); +SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11_n2; + +EXPLAIN INSERT INTO t11_n2(b1,c4) values(true,'ga'); +INSERT INTO t11_n2(c4) values('vi'); +SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11_n2; + +DROP TABLE t11_n2; -- alter table --- drop constraint -CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, +-- drop constraint +CREATE TABLE numericDataType_n1(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint; -DESC FORMATTED numericDataType; - -EXPLAIN INSERT INTO numericDataType(b) values(456); -INSERT INTO numericDataType(b) values(456); -SELECT * from numericDataType; - --- add another constraint on same column -ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE; -DESC FORMATTED numericDataType; -EXPLAIN INSERT INTO numericDataType(b) values(56); -INSERT INTO numericDataType(b) values(456); -SELECT * from numericDataType; - --- alter table change column with constraint to add NOT NULL and then DEFAULT -ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT second_null_constraint NOT NULL ENABLE; -DESC FORMATTED numericDataType; -ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 127Y ENABLE; -DESC FORMATTED numericDataType; -EXPLAIN INSERT INTO numericDataType(f) values(847.45); --plan should have both DEFAULT and NOT NULL -INSERT INTO numericDataType(f) values(847.45); -Select * from numericDataType; -DESC FORMATTED numericDataType; - --- drop constraint and add with same name again -ALTER TABLE numericDataType DROP CONSTRAINT default_constraint; -DESC FORMATTED numericDataType; -ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 108Y ENABLE; -DESC FORMATTED numericDataType; -EXPLAIN INSERT INTO numericDataType(f) values(847.45); -INSERT INTO numericDataType(f) values(847.45); -Select * from numericDataType; -DROP TABLE numericDataType; - --- create default with maximum length allowed for default val (255) -create table t (i int, j string default +ALTER TABLE numericDataType_n1 DROP CONSTRAINT tinyint_constraint; +DESC FORMATTED numericDataType_n1; + +EXPLAIN INSERT INTO numericDataType_n1(b) values(456); +INSERT INTO numericDataType_n1(b) values(456); +SELECT * from numericDataType_n1; + +-- add another constraint on same column +ALTER TABLE numericDataType_n1 ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE; +DESC FORMATTED numericDataType_n1; +EXPLAIN INSERT INTO numericDataType_n1(b) values(56); +INSERT INTO numericDataType_n1(b) values(456); +SELECT * from numericDataType_n1; + +-- alter table change column with constraint to add NOT NULL and then DEFAULT +ALTER TABLE numericDataType_n1 CHANGE a a TINYINT CONSTRAINT second_null_constraint NOT NULL ENABLE; +DESC FORMATTED numericDataType_n1; +ALTER TABLE numericDataType_n1 CHANGE a a TINYINT CONSTRAINT
default_constraint DEFAULT 127Y ENABLE; +DESC FORMATTED numericDataType_n1; +EXPLAIN INSERT INTO numericDataType_n1(f) values(847.45); --plan should have both DEFAULT and NOT NULL +INSERT INTO numericDataType_n1(f) values(847.45); +Select * from numericDataType_n1; +DESC FORMATTED numericDataType_n1; + +-- drop constraint and add with same name again +ALTER TABLE numericDataType_n1 DROP CONSTRAINT default_constraint; +DESC FORMATTED numericDataType_n1; +ALTER TABLE numericDataType_n1 CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 108Y ENABLE; +DESC FORMATTED numericDataType_n1; +EXPLAIN INSERT INTO numericDataType_n1(f) values(847.45); +INSERT INTO numericDataType_n1(f) values(847.45); +Select * from numericDataType_n1; +DROP TABLE numericDataType_n1; + +-- create default with maximum length allowed for default val (255) +create table t_n30 (i int, j string default '1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123'); -desc formatted t; -explain insert into t(i) values(3); -insert into t(i) values(3); -select * from t; -drop table t; +desc formatted t_n30; +explain insert into t_n30(i) values(3); +insert into t_n30(i) values(3); +select * from t_n30; +drop table t_n30; -- partitioned table -set hive.exec.dynamic.partition.mode=nonstrict; +set hive.exec.dynamic.partition.mode=nonstrict; -- Table with partition -CREATE TABLE tablePartitioned (a STRING NOT NULL ENFORCED, url STRING constraint bdc1 default 'http://localhost', +CREATE TABLE tablePartitioned_n0 (a STRING NOT NULL ENFORCED, url STRING constraint bdc1 default 'http://localhost', c STRING NOT NULL ENFORCED) PARTITIONED BY (p1 STRING, p2 INT); --- Insert into -explain INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint'); -INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint'); -DROP TABLE tablePartitioned; +-- Insert into +explain INSERT INTO tablePartitioned_n0 partition(p1='today', p2=10) values('not', 'null', 'constraint'); +INSERT INTO tablePartitioned_n0 partition(p1='today', p2=10) values('not', 'null', 'constraint'); +DROP TABLE tablePartitioned_n0; --- try constraint with direct sql as false -set hive.metastore.try.direct.sql=false; -CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, +-- try constraint with direct sql as false +set hive.metastore.try.direct.sql=false; +CREATE TABLE numericDataType_n1(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint; -DESC FORMATTED numericDataType; +ALTER TABLE numericDataType_n1 DROP CONSTRAINT tinyint_constraint; +DESC FORMATTED numericDataType_n1; -EXPLAIN INSERT INTO numericDataType(b) values(456); -INSERT INTO numericDataType(b) values(456); -SELECT * from numericDataType; +EXPLAIN INSERT INTO numericDataType_n1(b) values(456); +INSERT INTO numericDataType_n1(b) values(456); +SELECT *
from numericDataType_n1; --- add another constraint on same column -ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE; -DESC FORMATTED numericDataType; -EXPLAIN INSERT INTO numericDataType(b) values(56); -INSERT INTO numericDataType(b) values(456); -SELECT * from numericDataType; -DROP TABLE numericDataType; +-- add another constraint on same column +ALTER TABLE numericDataType_n1 ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE; +DESC FORMATTED numericDataType_n1; +EXPLAIN INSERT INTO numericDataType_n1(b) values(56); +INSERT INTO numericDataType_n1(b) values(456); +SELECT * from numericDataType_n1; +DROP TABLE numericDataType_n1; -- Following all are existing BUGS --- BUG1: alter table change constraint doesn't work, so following not working --- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint DEFAULT 1Y ENABLE; -- change default val --- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y ENABLE; -- change constraint name --- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y DISABLE; -- DISABLE constraint --- BUG2: ADD column not working --- ALTER TABLE numericDataType add columns (dd double); +-- BUG1: alter table change constraint doesn't work, so following not working +-- ALTER TABLE numericDataType_n1 change a a TINYINT CONSTRAINT default_constraint DEFAULT 1Y ENABLE; -- change default val +-- ALTER TABLE numericDataType_n1 change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y ENABLE; -- change constraint name +-- ALTER TABLE numericDataType_n1 change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y DISABLE; -- DISABLE constraint +-- BUG2: ADD column not working +-- ALTER TABLE numericDataType_n1 add columns (dd double); --BUG3: Following add multiple constraints ---ALTER TABLE numericDataType CHANGE c c INT DEFAULT cast(4.5 as INT); --- BUG4 Replace column doesn't work, so following not workiing --- alter table numericDataType replace columns (a TINYINT); --- BUG5: select current_database() as default doesn't work +--ALTER TABLE numericDataType_n1 CHANGE c c INT DEFAULT cast(4.5 as INT); +-- BUG4 Replace column doesn't work, so following not working +-- alter table numericDataType_n1 replace columns (a TINYINT); +-- BUG5: select current_database() as default doesn't work diff --git a/ql/src/test/queries/clientpositive/default_file_format.q b/ql/src/test/queries/clientpositive/default_file_format.q index 24f4c1796b..40021e0c08 100644 --- a/ql/src/test/queries/clientpositive/default_file_format.q +++ b/ql/src/test/queries/clientpositive/default_file_format.q @@ -1,62 +1,62 @@ -create table t (c int); +create table t_n2 (c int); set hive.default.fileformat.managed=orc; create table o (c int); -create external table e (c int) location 'pfile://${system:test.tmp.dir}/foo'; +create external table e_n1 (c int) location 'pfile://${system:test.tmp.dir}/foo'; -create table i (c int) location 'pfile://${system:test.tmp.dir}/bar'; +create table i_n0 (c int) location 'pfile://${system:test.tmp.dir}/bar'; set hive.default.fileformat=orc; create table io (c int); create external table e2 (c int) location 'pfile://${system:test.tmp.dir}/bar'; -describe formatted t; +describe formatted t_n2; describe formatted o; describe formatted io; -describe formatted e; -describe formatted i; +describe formatted e_n1; +describe formatted i_n0; describe formatted e2; -drop table t;
+drop table t_n2; drop table o; drop table io; -drop table e; -drop table i; +drop table e_n1; +drop table i_n0; drop table e2; set hive.default.fileformat=TextFile; set hive.default.fileformat.managed=none; -create table t (c int); +create table t_n2 (c int); set hive.default.fileformat.managed=parquet; create table o (c int); -create external table e (c int) location 'pfile://${system:test.tmp.dir}/foo'; +create external table e_n1 (c int) location 'pfile://${system:test.tmp.dir}/foo'; -create table i (c int) location 'pfile://${system:test.tmp.dir}/bar'; +create table i_n0 (c int) location 'pfile://${system:test.tmp.dir}/bar'; set hive.default.fileformat=parquet; create table io (c int); create external table e2 (c int) location 'pfile://${system:test.tmp.dir}/bar'; -describe formatted t; +describe formatted t_n2; describe formatted o; describe formatted io; -describe formatted e; -describe formatted i; +describe formatted e_n1; +describe formatted i_n0; describe formatted e2; -drop table t; +drop table t_n2; drop table o; drop table io; -drop table e; -drop table i; +drop table e_n1; +drop table i_n0; drop table e2; set hive.default.fileformat=TextFile; diff --git a/ql/src/test/queries/clientpositive/deleteAnalyze.q b/ql/src/test/queries/clientpositive/deleteAnalyze.q index 5293ddfb36..c60da972a9 100644 --- a/ql/src/test/queries/clientpositive/deleteAnalyze.q +++ b/ql/src/test/queries/clientpositive/deleteAnalyze.q @@ -3,32 +3,32 @@ set hive.explain.user=true; dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/testdeci2; -create table testdeci2( +create table testdeci2_n0( id int, amount decimal(10,3), sales_tax decimal(10,3), item string) stored as orc location '${system:test.tmp.dir}/testdeci2'; -insert into table testdeci2 values(1,12.123,12345.123,'desk1'),(2,123.123,1234.123,'desk2'); +insert into table testdeci2_n0 values(1,12.123,12345.123,'desk1'),(2,123.123,1234.123,'desk2'); -describe formatted testdeci2; +describe formatted testdeci2_n0; dfs -rmr ${system:test.tmp.dir}/testdeci2/000000_0; -describe formatted testdeci2 amount; +describe formatted testdeci2_n0 amount; -analyze table testdeci2 compute statistics for columns; +analyze table testdeci2_n0 compute statistics for columns; -describe formatted testdeci2; +describe formatted testdeci2_n0; set hive.stats.fetch.column.stats=true; -analyze table testdeci2 compute statistics for columns; +analyze table testdeci2_n0 compute statistics for columns; explain select s.id, coalesce(d.amount,0) as sales, coalesce(d.sales_tax,0) as tax -from testdeci2 s join testdeci2 d +from testdeci2_n0 s join testdeci2_n0 d on s.item=d.item and d.id=2; diff --git a/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q b/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q index 89e49311fa..7723af0067 100644 --- a/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q +++ b/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q @@ -1,7 +1,7 @@ -create table t1 (a int, b string) partitioned by (c int, d string); -describe t1; +create table t1_n62 (a int, b string) partitioned by (c int, d string); +describe t1_n62; set hive.display.partition.cols.separately=false; -describe t1; +describe t1_n62; set hive.display.partition.cols.separately=true; diff --git a/ql/src/test/queries/clientpositive/describe_comment_indent.q b/ql/src/test/queries/clientpositive/describe_comment_indent.q index 310b694874..0458298685 100644 --- a/ql/src/test/queries/clientpositive/describe_comment_indent.q +++ 
b/ql/src/test/queries/clientpositive/describe_comment_indent.q @@ -1,6 +1,6 @@ -- test comment indent processing for multi-line comments -CREATE TABLE test_table( +CREATE TABLE test_table_n13( col1 INT COMMENT 'col1 one line comment', col2 STRING COMMENT 'col2 two lines comment', @@ -10,5 +10,5 @@ comment') COMMENT 'table comment two lines'; -DESCRIBE test_table; -DESCRIBE FORMATTED test_table; +DESCRIBE test_table_n13; +DESCRIBE FORMATTED test_table_n13; diff --git a/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q b/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q index 97fc0cd2a1..c168bd6115 100644 --- a/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q +++ b/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q @@ -1,18 +1,18 @@ --! qt:dataset:src set hive.ddl.output.format=json; -DROP VIEW view_partitioned; +DROP VIEW view_partitioned_n0; -CREATE VIEW view_partitioned +CREATE VIEW view_partitioned_n0 PARTITIONED ON (value) AS SELECT key, value FROM src WHERE key=86; -ALTER VIEW view_partitioned +ALTER VIEW view_partitioned_n0 ADD PARTITION (value='val_86'); -DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86'); +DESCRIBE FORMATTED view_partitioned_n0 PARTITION (value='val_86'); -DROP VIEW view_partitioned; +DROP VIEW view_partitioned_n0; diff --git a/ql/src/test/queries/clientpositive/describe_table.q b/ql/src/test/queries/clientpositive/describe_table.q index 4016236083..69fec9281d 100644 --- a/ql/src/test/queries/clientpositive/describe_table.q +++ b/ql/src/test/queries/clientpositive/describe_table.q @@ -42,7 +42,7 @@ CREATE TABLE IF NOT EXISTS name1 (name1 int, name2 string) PARTITIONED BY (name3 ALTER TABLE name1 ADD PARTITION (name3=1); CREATE TABLE IF NOT EXISTS name2 (name3 int, name4 string); use name2; -CREATE TABLE IF NOT EXISTS table1 (col1 int, col2 string); +CREATE TABLE IF NOT EXISTS table1_n18 (col1 int, col2 string); use default; DESCRIBE name1.name1; @@ -66,23 +66,23 @@ DESCRIBE name1.name2; DESCRIBE name1.name2 name3; DESCRIBE name1.name2 name4; -DESCRIBE name2.table1; -DESCRIBE name2.table1 col1; -DESCRIBE name2.table1 col2; +DESCRIBE name2.table1_n18; +DESCRIBE name2.table1_n18 col1; +DESCRIBE name2.table1_n18 col2; use name2; -DESCRIBE table1; -DESCRIBE table1 col1; -DESCRIBE table1 col2; +DESCRIBE table1_n18; +DESCRIBE table1_n18 col1; +DESCRIBE table1_n18 col2; -DESCRIBE name2.table1; -DESCRIBE name2.table1 col1; -DESCRIBE name2.table1 col2; +DESCRIBE name2.table1_n18; +DESCRIBE name2.table1_n18 col1; +DESCRIBE name2.table1_n18 col2; -DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table1_n18; use name1; DROP TABLE IF EXISTS name1; DROP TABLE IF EXISTS name2; use name2; -DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table1_n18; DROP DATABASE IF EXISTS name1; DROP DATABASE IF EXISTS name2; diff --git a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q index 4b1467344f..84ed8f3028 100644 --- a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q +++ b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q @@ -8,18 +8,18 @@ set hive.merge.mapredfiles=true; set hive.merge.sparkfiles=true; -CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS; +CREATE TABLE bucket2_1_n0(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS; explain extended -insert overwrite table bucket2_1 +insert overwrite table 
bucket2_1_n0 select * from src; -insert overwrite table bucket2_1 +insert overwrite table bucket2_1_n0 select * from src; explain -select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key; +select * from bucket2_1_n0 tablesample (bucket 1 out of 2) s order by key; -select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key; +select * from bucket2_1_n0 tablesample (bucket 1 out of 2) s order by key; diff --git a/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q b/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q index debd8a9d5e..7ef651ceb1 100644 --- a/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q +++ b/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q @@ -1,6 +1,6 @@ -DROP TABLE IF EXISTS UserVisits_web_text_none; +DROP TABLE IF EXISTS UserVisits_web_text_none_n0; -CREATE TABLE UserVisits_web_text_none ( +CREATE TABLE UserVisits_web_text_none_n0 ( sourceIP string, destURL string, visitDate string, @@ -12,23 +12,23 @@ CREATE TABLE UserVisits_web_text_none ( avgTimeOnSite int) row format delimited fields terminated by '|' stored as textfile; -LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none; +LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none_n0; -desc extended UserVisits_web_text_none sourceIP; -desc formatted UserVisits_web_text_none sourceIP; +desc extended UserVisits_web_text_none_n0 sourceIP; +desc formatted UserVisits_web_text_none_n0 sourceIP; explain -analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue; +analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue; explain extended -analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue; +analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue; -analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue; -desc formatted UserVisits_web_text_none sourceIP; -desc formatted UserVisits_web_text_none avgTimeOnSite; -desc formatted UserVisits_web_text_none adRevenue; +analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue; +desc formatted UserVisits_web_text_none_n0 sourceIP; +desc formatted UserVisits_web_text_none_n0 avgTimeOnSite; +desc formatted UserVisits_web_text_none_n0 adRevenue; -CREATE TABLE empty_tab( +CREATE TABLE empty_tab_n0( a int, b double, c string, @@ -36,18 +36,18 @@ CREATE TABLE empty_tab( e binary) row format delimited fields terminated by '|' stored as textfile; -desc formatted empty_tab a; +desc formatted empty_tab_n0 a; explain -analyze table empty_tab compute statistics for columns a,b,c,d,e; +analyze table empty_tab_n0 compute statistics for columns a,b,c,d,e; -analyze table empty_tab compute statistics for columns a,b,c,d,e; -desc formatted empty_tab a; -desc formatted empty_tab b; +analyze table empty_tab_n0 compute statistics for columns a,b,c,d,e; +desc formatted empty_tab_n0 a; +desc formatted empty_tab_n0 b; CREATE DATABASE test; USE test; -CREATE TABLE UserVisits_web_text_none ( +CREATE TABLE UserVisits_web_text_none_n0 ( sourceIP string, destURL string, visitDate string, @@ -59,17 +59,17 @@ CREATE TABLE UserVisits_web_text_none ( avgTimeOnSite int) row format delimited fields terminated by '|' stored as textfile; -LOAD DATA LOCAL INPATH 
"../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none; +LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none_n0; -desc extended UserVisits_web_text_none sourceIP; -desc extended test.UserVisits_web_text_none sourceIP; -desc extended default.UserVisits_web_text_none sourceIP; -desc formatted UserVisits_web_text_none sourceIP; -desc formatted test.UserVisits_web_text_none sourceIP; -desc formatted default.UserVisits_web_text_none sourceIP; +desc extended UserVisits_web_text_none_n0 sourceIP; +desc extended test.UserVisits_web_text_none_n0 sourceIP; +desc extended default.UserVisits_web_text_none_n0 sourceIP; +desc formatted UserVisits_web_text_none_n0 sourceIP; +desc formatted test.UserVisits_web_text_none_n0 sourceIP; +desc formatted default.UserVisits_web_text_none_n0 sourceIP; -analyze table UserVisits_web_text_none compute statistics for columns sKeyword; -desc extended UserVisits_web_text_none sKeyword; -desc formatted UserVisits_web_text_none sKeyword; -desc formatted test.UserVisits_web_text_none sKeyword; +analyze table UserVisits_web_text_none_n0 compute statistics for columns sKeyword; +desc extended UserVisits_web_text_none_n0 sKeyword; +desc formatted UserVisits_web_text_none_n0 sKeyword; +desc formatted test.UserVisits_web_text_none_n0 sKeyword; diff --git a/ql/src/test/queries/clientpositive/distinct_stats.q b/ql/src/test/queries/clientpositive/distinct_stats.q index 0cc795b339..1a95caaa3a 100644 --- a/ql/src/test/queries/clientpositive/distinct_stats.q +++ b/ql/src/test/queries/clientpositive/distinct_stats.q @@ -2,20 +2,20 @@ set hive.stats.autogather=true; set hive.compute.query.using.stats=true; -create table t1 (a string, b string); +create table t1_n11 (a string, b string); -insert into table t1 select * from src; +insert into table t1_n11 select * from src; -analyze table t1 compute statistics for columns a,b; +analyze table t1_n11 compute statistics for columns a,b; explain -select count(distinct b) from t1 group by a; +select count(distinct b) from t1_n11 group by a; explain -select distinct(b) from t1; +select distinct(b) from t1_n11; explain -select a, count(*) from t1 group by a; +select a, count(*) from t1_n11 group by a; -drop table t1; +drop table t1_n11; set hive.compute.query.using.stats = false; diff --git a/ql/src/test/queries/clientpositive/distinct_windowing.q b/ql/src/test/queries/clientpositive/distinct_windowing.q index ca863cb235..2ce1aca02f 100644 --- a/ql/src/test/queries/clientpositive/distinct_windowing.q +++ b/ql/src/test/queries/clientpositive/distinct_windowing.q @@ -1,6 +1,6 @@ -drop table over10k; +drop table over10k_n15; -create table over10k( +create table over10k_n15( t tinyint, si smallint, i int, @@ -15,25 +15,25 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n15; explain -select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10; +select distinct first_value(t) over ( partition by si order by i ) from over10k_n15 limit 10; -select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10; +select distinct first_value(t) over ( partition by si order by i ) from over10k_n15 limit 10; explain select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10; +from over10k_n15 limit 10; select distinct last_value(i) over ( partition by si order by 
i ) -from over10k limit 10; +from over10k_n15 limit 10; explain select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50; +from over10k_n15 limit 50; select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50; +from over10k_n15 limit 50; diff --git a/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q b/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q index 36f071f141..0748b80792 100644 --- a/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q +++ b/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q @@ -1,8 +1,8 @@ set hive.cbo.enable=false; -drop table over10k; +drop table over10k_n14; -create table over10k( +create table over10k_n14( t tinyint, si smallint, i int, @@ -17,47 +17,47 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n14; explain -select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10; +select distinct first_value(t) over ( partition by si order by i ) from over10k_n14 limit 10; -select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10; +select distinct first_value(t) over ( partition by si order by i ) from over10k_n14 limit 10; explain select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10; +from over10k_n14 limit 10; select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10; +from over10k_n14 limit 10; explain select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50; +from over10k_n14 limit 50; select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50; +from over10k_n14 limit 50; explain select si, max(f) mf, rank() over ( partition by si order by mf ) -FROM over10k +FROM over10k_n14 GROUP BY si HAVING max(f) > 0 limit 50; select si, max(f) mf, rank() over ( partition by si order by mf ) -FROM over10k +FROM over10k_n14 GROUP BY si HAVING max(f) > 0 limit 50; explain select distinct si, rank() over ( partition by si order by i ) -FROM over10k +FROM over10k_n14 limit 50; select distinct si, rank() over ( partition by si order by i ) -FROM over10k +FROM over10k_n14 limit 50; diff --git a/ql/src/test/queries/clientpositive/dp_counter_mm.q b/ql/src/test/queries/clientpositive/dp_counter_mm.q index 8f1afc1ce7..91c4f4293c 100644 --- a/ql/src/test/queries/clientpositive/dp_counter_mm.q +++ b/ql/src/test/queries/clientpositive/dp_counter_mm.q @@ -5,48 +5,48 @@ set hive.exec.max.dynamic.partitions=200; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -drop table src2; -create table src2 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +drop table src2_n5; +create table src2_n5 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); -- regular insert overwrite + insert into SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter; -insert overwrite table src2 partition (value) 
select * from src where key < 100; -insert into table src2 partition (value) select * from src where key < 200; +insert overwrite table src2_n5 partition (value) select * from src where key < 100; +insert into table src2_n5 partition (value) select * from src where key < 200; -drop table src2; -create table src2 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +drop table src2_n5; +create table src2_n5 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); -insert overwrite table src2 partition (value) select * from src where key < 200; -insert into table src2 partition (value) select * from src where key < 300; +insert overwrite table src2_n5 partition (value) select * from src where key < 200; +insert into table src2_n5 partition (value) select * from src where key < 300; -- multi insert overwrite + insert into -drop table src2; -drop table src3; -create table src2 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); -create table src3 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +drop table src2_n5; +drop table src3_n1; +create table src2_n5 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +create table src3_n1 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); from src -insert overwrite table src2 partition (value) select * where key < 100 -insert overwrite table src3 partition (value) select * where key >= 100 and key < 200; +insert overwrite table src2_n5 partition (value) select * where key < 100 +insert overwrite table src3_n1 partition (value) select * where key >= 100 and key < 200; from src -insert into table src2 partition (value) select * where key < 100 -insert into table src3 partition (value) select * where key >= 100 and key < 300; +insert into table src2_n5 partition (value) select * where key < 100 +insert into table src3_n1 partition (value) select * where key >= 100 and key < 300; -- union all insert overwrite + insert into -drop table src2; -create table src2 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +drop table src2_n5; +create table src2_n5 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); -insert overwrite table src2 partition (value) +insert overwrite table src2_n5 partition (value) select temps.* from ( select * from src where key < 100 union all select * from src where key >= 100 and key < 200) temps; -insert into table src2 partition (value) +insert into table src2_n5 partition (value) select temps.* from ( select * from src where key < 100 union all diff --git a/ql/src/test/queries/clientpositive/dp_counter_non_mm.q b/ql/src/test/queries/clientpositive/dp_counter_non_mm.q index 960f7fc24c..561ae6e8b3 100644 --- a/ql/src/test/queries/clientpositive/dp_counter_non_mm.q +++ b/ql/src/test/queries/clientpositive/dp_counter_non_mm.q @@ -3,48 +3,48 @@ set hive.exec.dynamic.partition.mode=nonstrict; set hive.exec.max.dynamic.partitions.pernode=200; set 
hive.exec.max.dynamic.partitions=200; -drop table src2; -create table src2 (key int) partitioned by (value string); +drop table src2_n3; +create table src2_n3 (key int) partitioned by (value string); -- regular insert overwrite + insert into SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter; -insert overwrite table src2 partition (value) select * from src where key < 100; -insert into table src2 partition (value) select * from src where key < 200; +insert overwrite table src2_n3 partition (value) select * from src where key < 100; +insert into table src2_n3 partition (value) select * from src where key < 200; -drop table src2; -create table src2 (key int) partitioned by (value string); +drop table src2_n3; +create table src2_n3 (key int) partitioned by (value string); -insert overwrite table src2 partition (value) select * from src where key < 200; -insert into table src2 partition (value) select * from src where key < 300; +insert overwrite table src2_n3 partition (value) select * from src where key < 200; +insert into table src2_n3 partition (value) select * from src where key < 300; -- multi insert overwrite + insert into -drop table src2; -drop table src3; -create table src2 (key int) partitioned by (value string); -create table src3 (key int) partitioned by (value string); +drop table src2_n3; +drop table src3_n0; +create table src2_n3 (key int) partitioned by (value string); +create table src3_n0 (key int) partitioned by (value string); from src -insert overwrite table src2 partition (value) select * where key < 100 -insert overwrite table src3 partition (value) select * where key >= 100 and key < 200; +insert overwrite table src2_n3 partition (value) select * where key < 100 +insert overwrite table src3_n0 partition (value) select * where key >= 100 and key < 200; from src -insert into table src2 partition (value) select * where key < 100 -insert into table src3 partition (value) select * where key >= 100 and key < 300; +insert into table src2_n3 partition (value) select * where key < 100 +insert into table src3_n0 partition (value) select * where key >= 100 and key < 300; -- union all insert overwrite + insert into -drop table src2; -create table src2 (key int) partitioned by (value string); +drop table src2_n3; +create table src2_n3 (key int) partitioned by (value string); -insert overwrite table src2 partition (value) +insert overwrite table src2_n3 partition (value) select temps.* from ( select * from src where key < 100 union all select * from src where key >= 100 and key < 200) temps; -insert into table src2 partition (value) +insert into table src2_n3 partition (value) select temps.* from ( select * from src where key < 100 union all diff --git a/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q b/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q index e46460fbee..ba67fd7d95 100644 --- a/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q +++ b/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q @@ -7,15 +7,15 @@ CREATE DATABASE test_database; USE test_database; -CREATE TABLE test_table (key STRING, value STRING) +CREATE TABLE test_table_n12 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table'; -ALTER TABLE test_table ADD PARTITION (part = '1') +ALTER TABLE test_table_n12 ADD PARTITION (part = '1') LOCATION 
'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table2/part=1'; -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +INSERT OVERWRITE TABLE test_table_n12 PARTITION (part = '1') SELECT * FROM default.src; dfs -ls ${system:test.tmp.dir}/drop_database_removes_partition_dirs_table2; diff --git a/ql/src/test/queries/clientpositive/drop_multi_partitions.q b/ql/src/test/queries/clientpositive/drop_multi_partitions.q index 7ee7ae7eed..65c60af782 100644 --- a/ql/src/test/queries/clientpositive/drop_multi_partitions.q +++ b/ql/src/test/queries/clientpositive/drop_multi_partitions.q @@ -1,23 +1,23 @@ create database dmp; -create table dmp.mp (a string) partitioned by (b string, c string); +create table dmp.mp_n0 (a string) partitioned by (b string, c string); -alter table dmp.mp add partition (b='1', c='1'); -alter table dmp.mp add partition (b='1', c='2'); -alter table dmp.mp add partition (b='2', c='2'); +alter table dmp.mp_n0 add partition (b='1', c='1'); +alter table dmp.mp_n0 add partition (b='1', c='2'); +alter table dmp.mp_n0 add partition (b='2', c='2'); -show partitions dmp.mp; +show partitions dmp.mp_n0; -explain extended alter table dmp.mp drop partition (b='1'); -alter table dmp.mp drop partition (b='1'); +explain extended alter table dmp.mp_n0 drop partition (b='1'); +alter table dmp.mp_n0 drop partition (b='1'); -show partitions dmp.mp; +show partitions dmp.mp_n0; set hive.exec.drop.ignorenonexistent=false; -alter table dmp.mp drop if exists partition (b='3'); +alter table dmp.mp_n0 drop if exists partition (b='3'); -show partitions dmp.mp; +show partitions dmp.mp_n0; -drop table dmp.mp; +drop table dmp.mp_n0; drop database dmp; diff --git a/ql/src/test/queries/clientpositive/drop_partition_with_stats.q b/ql/src/test/queries/clientpositive/drop_partition_with_stats.q index 2211840d2f..48a1f68b71 100644 --- a/ql/src/test/queries/clientpositive/drop_partition_with_stats.q +++ b/ql/src/test/queries/clientpositive/drop_partition_with_stats.q @@ -3,67 +3,67 @@ set hive.mapred.mode=nonstrict; -- The column stats for a partitioned table will go to PART_COL_STATS CREATE DATABASE IF NOT EXISTS partstatsdb1; USE partstatsdb1; -CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p11', Part2='P12'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p21', Part2='P22'); -ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key; -ANALYZE TABLE testtable PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS testtable_n0 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable_n0 PARTITION (part1='p11', Part2='P12'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable_n0 PARTITION (part1='p21', Part2='P22'); +ANALYZE TABLE testtable_n0 COMPUTE STATISTICS FOR COLUMNS key; +ANALYZE TABLE testtable_n0 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; -CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P11'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', 
Part2='P12'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p21', Part2='P22'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p31', Part2='P32'); -ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key; -ANALYZE TABLE TestTable1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key; -ANALYZE TABLE TestTable1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS TestTable1_n1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P11'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P12'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p21', Part2='P22'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p31', Part2='P32'); +ANALYZE TABLE TestTable1_n1 COMPUTE STATISTICS FOR COLUMNS key; +ANALYZE TABLE TestTable1_n1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key; +ANALYZE TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; -CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p21', Part2='P22'); -ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key; -ANALYZE TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS TESTTABLE2_n1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n1 PARTITION (part1='p11', Part2='P12'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n1 PARTITION (part1='p21', Part2='P22'); +ANALYZE TABLE TESTTABLE2_n1 COMPUTE STATISTICS FOR COLUMNS key; +ANALYZE TABLE TESTTABLE2_n1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; -ALTER TABLE partstatsdb1.testtable DROP PARTITION (part1='p11', Part2='P12'); -ALTER TABLE partstatsdb1.TestTable1 DROP PARTITION (part1='p11', Part2='P12'); -ALTER TABLE partstatsdb1.TESTTABLE2 DROP PARTITION (part1='p11', Part2='P12'); +ALTER TABLE partstatsdb1.testtable_n0 DROP PARTITION (part1='p11', Part2='P12'); +ALTER TABLE partstatsdb1.TestTable1_n1 DROP PARTITION (part1='p11', Part2='P12'); +ALTER TABLE partstatsdb1.TESTTABLE2_n1 DROP PARTITION (part1='p11', Part2='P12'); -DROP TABLE partstatsdb1.testtable; -DROP TABLE partstatsdb1.TestTable1; -DROP TABLE partstatsdb1.TESTTABLE2; +DROP TABLE partstatsdb1.testtable_n0; +DROP TABLE partstatsdb1.TestTable1_n1; +DROP TABLE partstatsdb1.TESTTABLE2_n1; DROP DATABASE partstatsdb1; CREATE DATABASE IF NOT EXISTS PARTSTATSDB2; USE PARTSTATSDB2; -CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p11', Part2='P12'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p21', 
Part2='P22'); -ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key; -ANALYZE TABLE testtable PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS testtable_n0 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable_n0 PARTITION (part1='p11', Part2='P12'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable_n0 PARTITION (part1='p21', Part2='P22'); +ANALYZE TABLE testtable_n0 COMPUTE STATISTICS FOR COLUMNS key; +ANALYZE TABLE testtable_n0 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; -CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P11'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P12'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p21', Part2='P22'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p31', Part2='P32'); -ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key; -ANALYZE TABLE TestTable1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key; -ANALYZE TABLE TestTable1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS TestTable1_n1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P11'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P12'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p21', Part2='P22'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p31', Part2='P32'); +ANALYZE TABLE TestTable1_n1 COMPUTE STATISTICS FOR COLUMNS key; +ANALYZE TABLE TestTable1_n1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key; +ANALYZE TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; -CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12'); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p21', Part2='P22'); -ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key; -ANALYZE TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS TESTTABLE2_n1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n1 PARTITION (part1='p11', Part2='P12'); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n1 PARTITION (part1='p21', Part2='P22'); +ANALYZE TABLE TESTTABLE2_n1 COMPUTE STATISTICS FOR COLUMNS key; +ANALYZE TABLE TESTTABLE2_n1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key; -ALTER TABLE PARTSTATSDB2.testtable DROP PARTITION (part1='p11', Part2='P12'); -ALTER TABLE 
PARTSTATSDB2.TestTable1 DROP PARTITION (part1='p11', Part2='P12'); -ALTER TABLE PARTSTATSDB2.TESTTABLE2 DROP PARTITION (part1='p11', Part2='P12'); +ALTER TABLE PARTSTATSDB2.testtable_n0 DROP PARTITION (part1='p11', Part2='P12'); +ALTER TABLE PARTSTATSDB2.TestTable1_n1 DROP PARTITION (part1='p11', Part2='P12'); +ALTER TABLE PARTSTATSDB2.TESTTABLE2_n1 DROP PARTITION (part1='p11', Part2='P12'); -DROP TABLE PARTSTATSDB2.testtable; -DROP TABLE PARTSTATSDB2.TestTable1; -DROP TABLE PARTSTATSDB2.TESTTABLE2; +DROP TABLE PARTSTATSDB2.testtable_n0; +DROP TABLE PARTSTATSDB2.TestTable1_n1; +DROP TABLE PARTSTATSDB2.TESTTABLE2_n1; DROP DATABASE PARTSTATSDB2; diff --git a/ql/src/test/queries/clientpositive/drop_partitions_filter.q b/ql/src/test/queries/clientpositive/drop_partitions_filter.q index 04fbcae0a6..5862753b23 100644 --- a/ql/src/test/queries/clientpositive/drop_partitions_filter.q +++ b/ql/src/test/queries/clientpositive/drop_partitions_filter.q @@ -1,37 +1,37 @@ -create table ptestfilter (a string, b int) partitioned by (c string, d string); -describe ptestfilter; - -alter table ptestfilter add partition (c='US', d=1); -alter table ptestfilter add partition (c='US', d=2); -alter table ptestFilter add partition (c='Uganda', d=2); -alter table ptestfilter add partition (c='Germany', d=2); -alter table ptestfilter add partition (c='Canada', d=3); -alter table ptestfilter add partition (c='Russia', d=3); -alter table ptestfilter add partition (c='Greece', d=2); -alter table ptestfilter add partition (c='India', d=3); -alter table ptestfilter add partition (c='France', d=4); -show partitions ptestfilter; - -alter table ptestfilter drop partition (c='US', d<'2'); -show partitions ptestfilter; - -alter table ptestfilter drop partition (c>='US', d<='2'); -show partitions ptestfilter; - -alter table ptestfilter drop partition (c >'India'); -show partitions ptestfilter; - -alter table ptestfilter drop partition (c >='India'), +create table ptestfilter_n1 (a string, b int) partitioned by (c string, d string); +describe ptestfilter_n1; + +alter table ptestfilter_n1 add partition (c='US', d=1); +alter table ptestfilter_n1 add partition (c='US', d=2); +alter table ptestFilter_n1 add partition (c='Uganda', d=2); +alter table ptestfilter_n1 add partition (c='Germany', d=2); +alter table ptestfilter_n1 add partition (c='Canada', d=3); +alter table ptestfilter_n1 add partition (c='Russia', d=3); +alter table ptestfilter_n1 add partition (c='Greece', d=2); +alter table ptestfilter_n1 add partition (c='India', d=3); +alter table ptestfilter_n1 add partition (c='France', d=4); +show partitions ptestfilter_n1; + +alter table ptestfilter_n1 drop partition (c='US', d<'2'); +show partitions ptestfilter_n1; + +alter table ptestfilter_n1 drop partition (c>='US', d<='2'); +show partitions ptestfilter_n1; + +alter table ptestfilter_n1 drop partition (c >'India'); +show partitions ptestfilter_n1; + +alter table ptestfilter_n1 drop partition (c >='India'), partition (c='Greece', d='2'); -show partitions ptestfilter; +show partitions ptestfilter_n1; -alter table ptestfilter drop partition (c != 'France'); -show partitions ptestfilter; +alter table ptestfilter_n1 drop partition (c != 'France'); +show partitions ptestfilter_n1; set hive.exec.drop.ignorenonexistent=false; -alter table ptestfilter drop if exists partition (c='US'); -show partitions ptestfilter; +alter table ptestfilter_n1 drop if exists partition (c='US'); +show partitions ptestfilter_n1; -drop table ptestfilter; +drop table ptestfilter_n1; diff --git 
a/ql/src/test/queries/clientpositive/drop_partitions_filter2.q b/ql/src/test/queries/clientpositive/drop_partitions_filter2.q index 54e6a35b5a..9b7bd7a396 100644 --- a/ql/src/test/queries/clientpositive/drop_partitions_filter2.q +++ b/ql/src/test/queries/clientpositive/drop_partitions_filter2.q @@ -1,23 +1,23 @@ -create table ptestfilter (a string, b int) partitioned by (c int, d int); -describe ptestfilter; +create table ptestfilter_n0 (a string, b int) partitioned by (c int, d int); +describe ptestfilter_n0; -alter table ptestfilter add partition (c=1, d=1); -alter table ptestfilter add partition (c=1, d=2); -alter table ptestFilter add partition (c=2, d=1); -alter table ptestfilter add partition (c=2, d=2); -alter table ptestfilter add partition (c=3, d=1); -alter table ptestfilter add partition (c=30, d=2); -show partitions ptestfilter; +alter table ptestfilter_n0 add partition (c=1, d=1); +alter table ptestfilter_n0 add partition (c=1, d=2); +alter table ptestFilter_n0 add partition (c=2, d=1); +alter table ptestfilter_n0 add partition (c=2, d=2); +alter table ptestfilter_n0 add partition (c=3, d=1); +alter table ptestfilter_n0 add partition (c=30, d=2); +show partitions ptestfilter_n0; -alter table ptestfilter drop partition (c=1, d=1); -show partitions ptestfilter; +alter table ptestfilter_n0 drop partition (c=1, d=1); +show partitions ptestfilter_n0; -alter table ptestfilter drop partition (c=2); -show partitions ptestfilter; +alter table ptestfilter_n0 drop partition (c=2); +show partitions ptestfilter_n0; -alter table ptestfilter drop partition (c<4); -show partitions ptestfilter; +alter table ptestfilter_n0 drop partition (c<4); +show partitions ptestfilter_n0; -drop table ptestfilter; +drop table ptestfilter_n0; diff --git a/ql/src/test/queries/clientpositive/drop_partitions_filter3.q b/ql/src/test/queries/clientpositive/drop_partitions_filter3.q index 2d41da4627..55485a93b8 100644 --- a/ql/src/test/queries/clientpositive/drop_partitions_filter3.q +++ b/ql/src/test/queries/clientpositive/drop_partitions_filter3.q @@ -1,20 +1,20 @@ -create table ptestfilter (a string, b int) partitioned by (c string, d int); -describe ptestfilter; +create table ptestfilter_n3 (a string, b int) partitioned by (c string, d int); +describe ptestfilter_n3; -alter table ptestfilter add partition (c='1', d=1); -alter table ptestfilter add partition (c='1', d=2); -alter table ptestFilter add partition (c='2', d=1); -alter table ptestfilter add partition (c='2', d=2); -alter table ptestfilter add partition (c='3', d=1); -alter table ptestfilter add partition (c='3', d=2); -show partitions ptestfilter; +alter table ptestfilter_n3 add partition (c='1', d=1); +alter table ptestfilter_n3 add partition (c='1', d=2); +alter table ptestFilter_n3 add partition (c='2', d=1); +alter table ptestfilter_n3 add partition (c='2', d=2); +alter table ptestfilter_n3 add partition (c='3', d=1); +alter table ptestfilter_n3 add partition (c='3', d=2); +show partitions ptestfilter_n3; -alter table ptestfilter drop partition (c='1', d=1); -show partitions ptestfilter; +alter table ptestfilter_n3 drop partition (c='1', d=1); +show partitions ptestfilter_n3; -alter table ptestfilter drop partition (c='2'); -show partitions ptestfilter; +alter table ptestfilter_n3 drop partition (c='2'); +show partitions ptestfilter_n3; -drop table ptestfilter; +drop table ptestfilter_n3; diff --git a/ql/src/test/queries/clientpositive/drop_partitions_filter4.q b/ql/src/test/queries/clientpositive/drop_partitions_filter4.q index 
ee6d46d85c..1b065ca68d 100644 --- a/ql/src/test/queries/clientpositive/drop_partitions_filter4.q +++ b/ql/src/test/queries/clientpositive/drop_partitions_filter4.q @@ -1,39 +1,39 @@ SET hive.exec.dynamic.partition.mode=nonstrict; -create table ptestfilter (a string, b int) partitioned by (c double); -INSERT OVERWRITE TABLE ptestfilter PARTITION (c) select 'Col1', 1, null; -alter table ptestfilter add partition (c=3.4); -alter table ptestfilter add partition (c=5.55); -show partitions ptestfilter; - -alter table ptestfilter drop partition(c = '__HIVE_DEFAULT_PARTITION__'); -alter table ptestfilter drop partition(c = 3.40); -show partitions ptestfilter; - -INSERT OVERWRITE TABLE ptestfilter PARTITION (c) select 'Col1', 1, null; -alter table ptestfilter drop partition(c != '__HIVE_DEFAULT_PARTITION__'); -show partitions ptestfilter; - -drop table ptestfilter; - -create table ptestfilter (a string, b int) partitioned by (c string, d int); -INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col1', 1, null, null; -INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 2; -INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col3', 3, 'Uganda', null; -alter table ptestfilter add partition (c='Germany', d=2); -show partitions ptestfilter; - -alter table ptestfilter drop partition (c='__HIVE_DEFAULT_PARTITION__'); -alter table ptestfilter drop partition (c='Uganda', d='__HIVE_DEFAULT_PARTITION__'); -alter table ptestfilter drop partition (c='Germany', d=2); -show partitions ptestfilter; - -INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 2; -INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 3; -INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col3', 3, 'Uganda', null; -alter table ptestfilter drop partition (d != 3); -show partitions ptestfilter; - -drop table ptestfilter; +create table ptestfilter_n2 (a string, b int) partitioned by (c double); +INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c) select 'Col1', 1, null; +alter table ptestfilter_n2 add partition (c=3.4); +alter table ptestfilter_n2 add partition (c=5.55); +show partitions ptestfilter_n2; + +alter table ptestfilter_n2 drop partition(c = '__HIVE_DEFAULT_PARTITION__'); +alter table ptestfilter_n2 drop partition(c = 3.40); +show partitions ptestfilter_n2; + +INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c) select 'Col1', 1, null; +alter table ptestfilter_n2 drop partition(c != '__HIVE_DEFAULT_PARTITION__'); +show partitions ptestfilter_n2; + +drop table ptestfilter_n2; + +create table ptestfilter_n2 (a string, b int) partitioned by (c string, d int); +INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col1', 1, null, null; +INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 2; +INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col3', 3, 'Uganda', null; +alter table ptestfilter_n2 add partition (c='Germany', d=2); +show partitions ptestfilter_n2; + +alter table ptestfilter_n2 drop partition (c='__HIVE_DEFAULT_PARTITION__'); +alter table ptestfilter_n2 drop partition (c='Uganda', d='__HIVE_DEFAULT_PARTITION__'); +alter table ptestfilter_n2 drop partition (c='Germany', d=2); +show partitions ptestfilter_n2; + +INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 2; +INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 3; +INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col3', 3, 'Uganda', null; +alter table ptestfilter_n2 drop 
partition (d != 3); +show partitions ptestfilter_n2; + +drop table ptestfilter_n2; diff --git a/ql/src/test/queries/clientpositive/drop_table2.q b/ql/src/test/queries/clientpositive/drop_table2.q index a3e8c5c3bd..68cc28e7e4 100644 --- a/ql/src/test/queries/clientpositive/drop_table2.q +++ b/ql/src/test/queries/clientpositive/drop_table2.q @@ -1,15 +1,15 @@ SET hive.metastore.batch.retrieve.max=1; -create table if not exists temp(col STRING) partitioned by (p STRING); -alter table temp add if not exists partition (p ='p1'); -alter table temp add if not exists partition (p ='p2'); -alter table temp add if not exists partition (p ='p3'); +create table if not exists temp_n0(col STRING) partitioned by (p STRING); +alter table temp_n0 add if not exists partition (p ='p1'); +alter table temp_n0 add if not exists partition (p ='p2'); +alter table temp_n0 add if not exists partition (p ='p3'); -show partitions temp; +show partitions temp_n0; -drop table temp; +drop table temp_n0; -create table if not exists temp(col STRING) partitioned by (p STRING); +create table if not exists temp_n0(col STRING) partitioned by (p STRING); -show partitions temp; +show partitions temp_n0; -drop table temp; +drop table temp_n0; diff --git a/ql/src/test/queries/clientpositive/drop_table_purge.q b/ql/src/test/queries/clientpositive/drop_table_purge.q index f094a5bbcd..47c5310610 100644 --- a/ql/src/test/queries/clientpositive/drop_table_purge.q +++ b/ql/src/test/queries/clientpositive/drop_table_purge.q @@ -1,4 +1,4 @@ SET hive.metastore.batch.retrieve.max=1; -CREATE TABLE IF NOT EXISTS temp(col STRING); +CREATE TABLE IF NOT EXISTS temp_n1(col STRING); -DROP TABLE temp PURGE; +DROP TABLE temp_n1 PURGE; diff --git a/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q b/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q index 1ba3f85fdd..ff4034034f 100644 --- a/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q +++ b/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q @@ -3,20 +3,20 @@ -- This test verifies that if a partition exists outside the table's current location when the -- table is dropped the partition's location is dropped as well. 
-CREATE TABLE test_table (key STRING, value STRING) +CREATE TABLE test_table_n3 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table'; -ALTER TABLE test_table ADD PARTITION (part = '1') +ALTER TABLE test_table_n3 ADD PARTITION (part = '1') LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2/part=1'; -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +INSERT OVERWRITE TABLE test_table_n3 PARTITION (part = '1') SELECT * FROM src; dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2; -DROP TABLE test_table; +DROP TABLE test_table_n3; dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2; diff --git a/ql/src/test/queries/clientpositive/drop_table_with_stats.q b/ql/src/test/queries/clientpositive/drop_table_with_stats.q index b655b53aad..ccf3d57a6e 100644 --- a/ql/src/test/queries/clientpositive/drop_table_with_stats.q +++ b/ql/src/test/queries/clientpositive/drop_table_with_stats.q @@ -6,17 +6,17 @@ CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING); LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable; ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key; -CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1; -ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS TestTable1_n0 (key STRING, value STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n0; +ANALYZE TABLE TestTable1_n0 COMPUTE STATISTICS FOR COLUMNS key; -CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2; -ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS TESTTABLE2_n0 (key STRING, value STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n0; +ANALYZE TABLE TESTTABLE2_n0 COMPUTE STATISTICS FOR COLUMNS key; DROP TABLE tblstatsdb1.testtable; -DROP TABLE tblstatsdb1.TestTable1; -DROP TABLE tblstatsdb1.TESTTABLE2; +DROP TABLE tblstatsdb1.TestTable1_n0; +DROP TABLE tblstatsdb1.TESTTABLE2_n0; DROP DATABASE tblstatsdb1; CREATE DATABASE IF NOT EXISTS TBLSTATSDB2; @@ -26,18 +26,18 @@ LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key; -CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1; -ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS TestTable1_n0 (key STRING, value STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n0; +ANALYZE TABLE TestTable1_n0 COMPUTE STATISTICS FOR COLUMNS key; -CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING); -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2; -ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key; +CREATE TABLE IF NOT EXISTS TESTTABLE2_n0 (key STRING, value STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n0; +ANALYZE TABLE TESTTABLE2_n0 COMPUTE STATISTICS FOR COLUMNS key; DROP TABLE TBLSTATSDB2.testtable; -DROP TABLE TBLSTATSDB2.TestTable1; -DROP TABLE TBLSTATSDB2.TESTTABLE2; +DROP 
TABLE TBLSTATSDB2.TestTable1_n0; +DROP TABLE TBLSTATSDB2.TESTTABLE2_n0; DROP DATABASE TBLSTATSDB2; diff --git a/ql/src/test/queries/clientpositive/druid_basic2.q b/ql/src/test/queries/clientpositive/druid_basic2.q index 3c17bc5412..f1d215aa8a 100644 --- a/ql/src/test/queries/clientpositive/druid_basic2.q +++ b/ql/src/test/queries/clientpositive/druid_basic2.q @@ -1,28 +1,28 @@ set hive.strict.checks.cartesian.product=false; set hive.druid.broker.address.default=localhost.test; -CREATE EXTERNAL TABLE druid_table_1 +CREATE EXTERNAL TABLE druid_table_1_n2 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia"); -DESCRIBE FORMATTED druid_table_1; +DESCRIBE FORMATTED druid_table_1_n2; -- dimension EXPLAIN EXTENDED -SELECT robot FROM druid_table_1; +SELECT robot FROM druid_table_1_n2; -- metric EXPLAIN EXTENDED -SELECT delta FROM druid_table_1; +SELECT delta FROM druid_table_1_n2; EXPLAIN EXTENDED SELECT robot -FROM druid_table_1 +FROM druid_table_1_n2 WHERE language = 'en'; EXPLAIN EXTENDED SELECT DISTINCT robot -FROM druid_table_1 +FROM druid_table_1_n2 WHERE language = 'en'; -- TODO: currently nothing is pushed - ISNOTNULL @@ -31,10 +31,10 @@ SELECT a.robot, b.language FROM ( (SELECT robot, language - FROM druid_table_1) a + FROM druid_table_1_n2) a JOIN (SELECT language - FROM druid_table_1) b + FROM druid_table_1_n2) b ON a.language = b.language ); @@ -43,28 +43,28 @@ SELECT a.robot, b.language FROM ( (SELECT robot, language - FROM druid_table_1 + FROM druid_table_1_n2 WHERE language = 'en') a JOIN (SELECT language - FROM druid_table_1) b + FROM druid_table_1_n2) b ON a.language = b.language ); EXPLAIN EXTENDED SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s -FROM druid_table_1 +FROM druid_table_1_n2 GROUP BY robot, language, floor_day(`__time`) ORDER BY CAST(robot AS INTEGER) ASC, m DESC LIMIT 10; EXPLAIN SELECT substring(namespace, CAST(deleted AS INT), 4) -FROM druid_table_1; +FROM druid_table_1_n2; EXPLAIN SELECT robot, floor_day(`__time`) -FROM druid_table_1 +FROM druid_table_1_n2 WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' GROUP BY robot, floor_day(`__time`) ORDER BY robot @@ -72,7 +72,7 @@ LIMIT 10; EXPLAIN SELECT robot, `__time` -FROM druid_table_1 +FROM druid_table_1_n2 WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' GROUP BY robot, `__time` ORDER BY robot @@ -80,7 +80,7 @@ LIMIT 10; EXPLAIN SELECT robot, floor_day(`__time`) -FROM druid_table_1 +FROM druid_table_1_n2 WHERE `__time` BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00' GROUP BY robot, floor_day(`__time`) ORDER BY robot @@ -90,7 +90,7 @@ LIMIT 10; set hive.cbo.enable=false; EXPLAIN EXTENDED SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s -FROM druid_table_1 +FROM druid_table_1_n2 GROUP BY robot, language, floor_day(`__time`) ORDER BY CAST(robot AS INTEGER) ASC, m DESC LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/druid_basic3.q b/ql/src/test/queries/clientpositive/druid_basic3.q index 624beeba5d..f43b6365fb 100644 --- a/ql/src/test/queries/clientpositive/druid_basic3.q +++ b/ql/src/test/queries/clientpositive/druid_basic3.q @@ -1,43 +1,43 @@ set hive.strict.checks.cartesian.product=false; set hive.druid.broker.address.default=localhost.test; -CREATE EXTERNAL TABLE druid_table_1 +CREATE EXTERNAL TABLE druid_table_1_n4 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia"); 
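-- A hedged sketch of the plan shape the EXPLAINs below probe (aggregate names are
-- illustrative, not taken from actual EXPLAIN output): each SUM over a Druid metric
-- is pushed down as a Druid aggregation, and arithmetic on top of aggregates is
-- expected to surface as a Druid post-aggregation, e.g. for the first query:
--   aggregations:     sum(added) AS $f1, sum(delta) AS $f2
--   postAggregations: ($f1 + $f2) AS a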
EXPLAIN SELECT sum(added) + sum(delta) as a, language -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC; EXPLAIN SELECT sum(delta), sum(added) + sum(delta) AS a, language -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC; EXPLAIN SELECT language, sum(added) / sum(delta) AS a -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC; EXPLAIN SELECT language, sum(added) * sum(delta) AS a -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC; EXPLAIN SELECT language, sum(added) - sum(delta) AS a -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC; EXPLAIN SELECT language, sum(added) + 100 AS a -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC; @@ -45,26 +45,26 @@ EXPLAIN SELECT language, -1 * (a + b) AS c FROM ( SELECT (sum(added)-sum(delta)) / (count(*) * 3) AS a, sum(deleted) AS b, language - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language) subq ORDER BY c DESC; EXPLAIN SELECT language, robot, sum(added) - sum(delta) AS a -FROM druid_table_1 +FROM druid_table_1_n4 WHERE extract (week from `__time`) IN (10,11) GROUP BY language, robot; EXPLAIN SELECT language, sum(delta) / count(*) AS a -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC; EXPLAIN SELECT language, sum(added) / sum(delta) AS a, CASE WHEN sum(deleted)=0 THEN 1.0 ELSE sum(deleted) END AS b -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC; @@ -72,7 +72,7 @@ EXPLAIN SELECT language, a, a - b as c FROM ( SELECT language, sum(added) + 100 AS a, sum(delta) AS b - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language) subq ORDER BY a DESC; @@ -80,7 +80,7 @@ EXPLAIN SELECT language, robot, "A" FROM ( SELECT sum(added) - sum(delta) AS a, language, robot - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language, robot ) subq ORDER BY "A" LIMIT 5; @@ -89,7 +89,7 @@ EXPLAIN SELECT language, robot, "A" FROM ( SELECT language, sum(added) + sum(delta) AS a, robot - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language, robot) subq ORDER BY robot, language LIMIT 5; diff --git a/ql/src/test/queries/clientpositive/druid_intervals.q b/ql/src/test/queries/clientpositive/druid_intervals.q index 140ff82151..a7ee052ab5 100644 --- a/ql/src/test/queries/clientpositive/druid_intervals.q +++ b/ql/src/test/queries/clientpositive/druid_intervals.q @@ -1,67 +1,67 @@ set hive.druid.broker.address.default=localhost.test; -CREATE EXTERNAL TABLE druid_table_1 +CREATE EXTERNAL TABLE druid_table_1_n0 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia"); -DESCRIBE FORMATTED druid_table_1; +DESCRIBE FORMATTED druid_table_1_n0; -- (-∞‥+∞) EXPLAIN SELECT `__time` -FROM druid_table_1; +FROM druid_table_1_n0; -- (-∞‥2012-03-01 00:00:00) EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` < '2012-03-01 00:00:00'; -- [2010-01-01 00:00:00‥2012-03-01 00:00:00) EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00'; -- [2010-01-01 00:00:00‥2011-01-01 00:00:00) EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' AND `__time` < '2011-01-01 00:00:00'; -- [2010-01-01 00:00:00‥2011-01-01 00:00:00] EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` 
BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00'; -- [2010-01-01 00:00:00‥2011-01-01 00:00:00],[2012-01-01 00:00:00‥2013-01-01 00:00:00] EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00'); -- OVERLAP [2010-01-01 00:00:00‥2012-01-01 00:00:00] EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00'); -- IN: MULTIPLE INTERVALS [2010-01-01 00:00:00‥2010-01-01 00:00:00),[2011-01-01 00:00:00‥2011-01-01 00:00:00) EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00'); EXPLAIN SELECT `__time`, robot -FROM druid_table_1 +FROM druid_table_1_n0 WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00'); EXPLAIN SELECT `__time`, robot -FROM druid_table_1 +FROM druid_table_1_n0 WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00'); diff --git a/ql/src/test/queries/clientpositive/druid_timeseries.q b/ql/src/test/queries/clientpositive/druid_timeseries.q index a330ade385..6ff7d590ea 100644 --- a/ql/src/test/queries/clientpositive/druid_timeseries.q +++ b/ql/src/test/queries/clientpositive/druid_timeseries.q @@ -1,30 +1,30 @@ set hive.druid.broker.address.default=localhost.test; -CREATE EXTERNAL TABLE druid_table_1 +CREATE EXTERNAL TABLE druid_table_1_n3 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia"); --- DESCRIBE FORMATTED druid_table_1; +-- DESCRIBE FORMATTED druid_table_1_n3; -- GRANULARITY: ALL -EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` >= '2010-01-01 00:00:00 UTC' AND `__time` <= '2012-03-01 00:00:00 UTC' OR added <= 0; +EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00 UTC' AND `__time` <= '2012-03-01 00:00:00 UTC' OR added <= 0; -EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00 UTC'; +EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC'; EXPLAIN SELECT max(added), sum(variation) -FROM druid_table_1; +FROM druid_table_1_n3; -- GRANULARITY: NONE EXPLAIN SELECT `__time`, max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY `__time`; -- GRANULARITY: YEAR EXPLAIN SELECT floor_year(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_year(`__time`); -- @TODO FIXME https://issues.apache.org/jira/browse/CALCITE-2222 @@ -32,56 +32,56 @@ GROUP BY floor_year(`__time`); -- GRANULARITY: QUARTER EXPLAIN SELECT floor_quarter(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_quarter(`__time`); -- GRANULARITY: MONTH EXPLAIN SELECT floor_month(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_month(`__time`); -- GRANULARITY: WEEK EXPLAIN SELECT floor_week(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_week(`__time`); -- GRANULARITY: DAY EXPLAIN SELECT floor_day(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_day(`__time`); -- GRANULARITY: HOUR EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) 
-FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_hour(`__time`); -- GRANULARITY: MINUTE EXPLAIN SELECT floor_minute(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_minute(`__time`); -- GRANULARITY: SECOND EXPLAIN SELECT floor_second(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_second(`__time`); -- WITH FILTER ON DIMENSION EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 WHERE robot='1' GROUP BY floor_hour(`__time`); -- WITH FILTER ON TIME EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 WHERE floor_hour(`__time`) BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) @@ -93,19 +93,19 @@ SELECT subq.h, subq.m, subq.s FROM ( SELECT floor_hour(`__time`) as h, max(added) as m, sum(variation) as s - FROM druid_table_1 + FROM druid_table_1_n3 GROUP BY floor_hour(`__time`) ) subq WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE); -- Simplification of count(__time) as count(*) since time column is not null -EXPLAIN SELECT count(`__time`) from druid_table_1; +EXPLAIN SELECT count(`__time`) from druid_table_1_n3; -EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00 UTC'; +EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC'; -EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` >= '2010-01-01 00:00:00'; +EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00'; -EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00' OR `__time` <= '2012-03-01 00:00:00'; +EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00' OR `__time` <= '2012-03-01 00:00:00'; diff --git a/ql/src/test/queries/clientpositive/druid_timestamptz.q b/ql/src/test/queries/clientpositive/druid_timestamptz.q index 63c6e4e211..483004402f 100644 --- a/ql/src/test/queries/clientpositive/druid_timestamptz.q +++ b/ql/src/test/queries/clientpositive/druid_timestamptz.q @@ -1,51 +1,51 @@ set hive.fetch.task.conversion=more; -drop table tstz1; +drop table tstz1_n0; -create table tstz1(`__time` timestamp with local time zone, n string, v integer) +create table tstz1_n0(`__time` timestamp with local time zone, n string, v integer) STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR"); -insert into table tstz1 +insert into table tstz1_n0 values(cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone), 'Bill', 10); -EXPLAIN select `__time` from tstz1; -select `__time` from tstz1; +EXPLAIN select `__time` from tstz1_n0; +select `__time` from tstz1_n0; -EXPLAIN select cast(`__time` as timestamp) from tstz1; -select cast(`__time` as timestamp) from tstz1; +EXPLAIN select cast(`__time` as timestamp) from tstz1_n0; +select cast(`__time` as timestamp) from tstz1_n0; -EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); -select cast(`__time` as timestamp) from tstz1 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); +EXPLAIN select 
cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); +select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); -EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1; -SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1; +EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0; +SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0; -EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1; -SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1; +EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0; +SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0; set time zone UTC; -EXPLAIN select `__time` from tstz1; -select `__time` from tstz1; -EXPLAIN select cast(`__time` as timestamp) from tstz1; -select cast(`__time` as timestamp) from tstz1; -EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); -select cast(`__time` as timestamp) from tstz1 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); +EXPLAIN select `__time` from tstz1_n0; +select `__time` from tstz1_n0; +EXPLAIN select cast(`__time` as timestamp) from tstz1_n0; +select cast(`__time` as timestamp) from tstz1_n0; +EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); +select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); -- THIS is failing; explore why ---EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` = cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); ---select cast(`__time` as timestamp) from tstz1 where `__time` = cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); +--EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` = cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); +--select cast(`__time` as timestamp) from tstz1_n0 where `__time` = cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); -EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` >= cast('2016-01-03 20:26:34' as timestamp); -select cast(`__time` as timestamp) from tstz1 where `__time` >= cast('2016-01-03 20:26:34' as timestamp); +EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 20:26:34' as timestamp); +select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 20:26:34' as timestamp); -EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); -select cast(`__time` as timestamp) from tstz1 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); +EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 
America/Los_Angeles' as timestamp with local time zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); +select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone); -EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1; -SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1; +EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0; +SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0; -EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1; -SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1; +EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0; +SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0; diff --git a/ql/src/test/queries/clientpositive/druid_topn.q b/ql/src/test/queries/clientpositive/druid_topn.q index 24d505173f..2aa6aff954 100644 --- a/ql/src/test/queries/clientpositive/druid_topn.q +++ b/ql/src/test/queries/clientpositive/druid_topn.q @@ -1,15 +1,15 @@ set hive.druid.broker.address.default=localhost.test; -CREATE EXTERNAL TABLE druid_table_1 +CREATE EXTERNAL TABLE druid_table_1_n1 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia"); -DESCRIBE FORMATTED druid_table_1; +DESCRIBE FORMATTED druid_table_1_n1; -- GRANULARITY: ALL EXPLAIN SELECT robot, max(added) as m, sum(variation) -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot ORDER BY m DESC LIMIT 100; @@ -17,7 +17,7 @@ LIMIT 100; -- GRANULARITY: NONE EXPLAIN SELECT robot, `__time`, max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, `__time` ORDER BY s DESC LIMIT 100; @@ -25,7 +25,7 @@ LIMIT 100; -- GRANULARITY: YEAR EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, floor_year(`__time`) ORDER BY s DESC LIMIT 10; @@ -33,7 +33,7 @@ LIMIT 10; -- ASC: TRANSFORM INTO GROUP BY EXPLAIN SELECT robot, floor_month(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, floor_month(`__time`) ORDER BY s LIMIT 10; @@ -41,7 +41,7 @@ LIMIT 10; -- MULTIPLE ORDER: TRANSFORM INTO GROUP BY EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, namespace, floor_month(`__time`) ORDER BY s DESC, m DESC LIMIT 10; @@ -49,7 +49,7 @@ LIMIT 10; -- MULTIPLE ORDER MIXED: TRANSFORM INTO GROUP BY EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, namespace, floor_month(`__time`) ORDER BY robot ASC, m DESC LIMIT 10; @@ -57,7 +57,7 @@ LIMIT 10; -- WITH FILTER ON DIMENSION: TRANSFORM INTO GROUP BY EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 WHERE robot='1' GROUP BY robot, floor_year(`__time`) ORDER BY s @@ -66,7 +66,7 @@ LIMIT 10; -- WITH FILTER ON TIME EXPLAIN SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation) -FROM druid_table_1 +FROM druid_table_1_n1 WHERE floor_hour(`__time`) BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL 
TIME ZONE) diff --git a/ql/src/test/queries/clientpositive/druidmini_expressions.q b/ql/src/test/queries/clientpositive/druidmini_expressions.q index 882d7afacd..02024e1ec1 100644 --- a/ql/src/test/queries/clientpositive/druidmini_expressions.q +++ b/ql/src/test/queries/clientpositive/druidmini_expressions.q @@ -1,5 +1,5 @@ SET hive.vectorized.execution.enabled=false; -CREATE TABLE druid_table +CREATE TABLE druid_table_n0 STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE") AS @@ -18,36 +18,36 @@ SELECT cast (`ctimestamp1` as timestamp with local time zone) as `__time`, -- MATH AND STRING functions -SELECT count(*) FROM druid_table WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3; +SELECT count(*) FROM druid_table_n0 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3; -SELECT count(*) FROM druid_table WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10; +SELECT count(*) FROM druid_table_n0 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10; -SELECT count(*) FROM druid_table WHERE power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3; +SELECT count(*) FROM druid_table_n0 WHERE power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3; SELECT SUM(cfloat + 1), CAST(SUM(cdouble + ctinyint) AS INTEGER), SUM(ctinyint) + 1 , CAST(SUM(csmallint) + SUM(cint) AS DOUBLE), SUM(cint), SUM(cbigint) -FROM druid_table WHERE ceil(cfloat) > 0 AND floor(cdouble) * 2 < 1000 OR ln(cdouble) / log10(10) > 0 AND COS(cint) > 0 OR SIN(cdouble) > 1; +FROM druid_table_n0 WHERE ceil(cfloat) > 0 AND floor(cdouble) * 2 < 1000 OR ln(cdouble) / log10(10) > 0 AND COS(cint) > 0 OR SIN(cdouble) > 1; SELECT SUM(cfloat + 1), CAST(SUM(cdouble + ctinyint) AS INTEGER), SUM(ctinyint) + 1 , CAST(SUM(csmallint) + SUM(cint) AS DOUBLE), SUM(cint), SUM(cbigint) -FROM druid_table WHERE ceil(cfloat) > 0 AND floor(cdouble) * 2 < 1000; +FROM druid_table_n0 WHERE ceil(cfloat) > 0 AND floor(cdouble) * 2 < 1000; SELECT SUM(cfloat + 1), CAST(SUM(cdouble + ctinyint) AS INTEGER), SUM(ctinyint) + 1 , CAST(SUM(csmallint) + SUM(cint) AS DOUBLE), SUM(cint), SUM(cbigint) -FROM druid_table WHERE ln(cdouble) / log10(10) > 0 AND COS(cint) > 0 OR SIN(cdouble) > 1; +FROM druid_table_n0 WHERE ln(cdouble) / log10(10) > 0 AND COS(cint) > 0 OR SIN(cdouble) > 1; SELECT SUM(cfloat + 1), CAST(SUM(cdouble + ctinyint) AS INTEGER), SUM(ctinyint) + 1 , CAST(SUM(csmallint) + SUM(cint) AS DOUBLE), SUM(cint), SUM(cbigint) -FROM druid_table WHERE SIN(cdouble) > 1; +FROM druid_table_n0 WHERE SIN(cdouble) > 1; -SELECT cstring1 || '_'|| cstring2, substring(cstring2, 2, 3) as concat , upper(cstring2), lower(cstring1), SUM(cdouble) as s FROM druid_table WHERE cstring1 IS NOT NULL AND cstring2 IS NOT NULL AND cstring2 like 'Y%' +SELECT cstring1 || '_'|| cstring2, substring(cstring2, 2, 3) as concat , upper(cstring2), lower(cstring1), SUM(cdouble) as s FROM druid_table_n0 WHERE cstring1 IS NOT NULL AND cstring2 IS NOT NULL AND cstring2 like 'Y%' GROUP BY cstring1 || '_'|| cstring2, substring(cstring2, 2, 3), upper(cstring2), lower(cstring1) ORDER BY concat DESC LIMIT 10; -EXPLAIN SELECT count(*) FROM druid_table WHERE 
character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3; +EXPLAIN SELECT count(*) FROM druid_table_n0 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3; EXPLAIN SELECT SUM(cfloat + 1), CAST(SUM(cdouble + ctinyint) AS INTEGER), SUM(ctinyint) + 1 , CAST(SUM(csmallint) + SUM(cint) AS DOUBLE), SUM(cint), SUM(cbigint) - FROM druid_table WHERE ceil(cfloat) > 0 AND floor(cdouble) * 2 < 1000 OR ln(cdouble) / log10(10) > 0 AND COS(cint) > 0 OR SIN(cdouble) > 1; + FROM druid_table_n0 WHERE ceil(cfloat) > 0 AND floor(cdouble) * 2 < 1000 OR ln(cdouble) / log10(10) > 0 AND COS(cint) > 0 OR SIN(cdouble) > 1; -EXPLAIN SELECT cstring1 || '_'|| cstring2, substring(cstring2, 2, 3) as concat , upper(cstring2), lower(cstring1), SUM(cdouble) as s FROM druid_table WHERE cstring1 IS NOT NULL AND cstring2 IS NOT NULL AND cstring2 like 'Y%' +EXPLAIN SELECT cstring1 || '_'|| cstring2, substring(cstring2, 2, 3) as concat , upper(cstring2), lower(cstring1), SUM(cdouble) as s FROM druid_table_n0 WHERE cstring1 IS NOT NULL AND cstring2 IS NOT NULL AND cstring2 like 'Y%' GROUP BY cstring1 || '_'|| cstring2, substring(cstring2, 2, 3), upper(cstring2), lower(cstring1) ORDER BY concat DESC LIMIT 10; -DROP TABLE druid_table; \ No newline at end of file +DROP TABLE druid_table_n0; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/druidmini_floorTime.q b/ql/src/test/queries/clientpositive/druidmini_floorTime.q index 4e14855b17..cb7f15bbe2 100644 --- a/ql/src/test/queries/clientpositive/druidmini_floorTime.q +++ b/ql/src/test/queries/clientpositive/druidmini_floorTime.q @@ -1,5 +1,5 @@ SET hive.vectorized.execution.enabled=false; -CREATE TABLE druid_table +CREATE TABLE druid_table_n2 STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE") AS @@ -19,78 +19,78 @@ SELECT cast (`ctimestamp1` as timestamp with local time zone) as `__time`, -- GROUP BY TIME EXTRACT --SECONDS -SELECT floor(`__time` to SECOND) FROM druid_table +SELECT floor(`__time` to SECOND) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY floor(`__time` to SECOND); -EXPLAIN SELECT floor(`__time` to SECOND) FROM druid_table +EXPLAIN SELECT floor(`__time` to SECOND) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY floor(`__time` to SECOND); -- MINUTES -SELECT floor(`__time` to MINUTE) FROM druid_table +SELECT floor(`__time` to MINUTE) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY floor(`__time` to MINUTE); -EXPLAIN SELECT floor(`__time` to MINUTE) FROM druid_table +EXPLAIN SELECT floor(`__time` to MINUTE) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY floor(`__time` to MINUTE); -- HOUR -SELECT floor(`__time` to 
HOUR) FROM druid_table +SELECT floor(`__time` to HOUR) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY floor(`__time` to HOUR); -EXPLAIN SELECT floor(`__time` to HOUR) FROM druid_table +EXPLAIN SELECT floor(`__time` to HOUR) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY floor(`__time` to HOUR); -- DAY -SELECT EXTRACT(DAY from `__time`) FROM druid_table +SELECT EXTRACT(DAY from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(DAY from `__time`); -EXPLAIN SELECT EXTRACT(DAY from `__time`) FROM druid_table +EXPLAIN SELECT EXTRACT(DAY from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(DAY from `__time`); --WEEK -SELECT EXTRACT(WEEK from `__time`) FROM druid_table +SELECT EXTRACT(WEEK from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(WEEK from `__time`); -EXPLAIN SELECT EXTRACT(WEEK from `__time`) FROM druid_table +EXPLAIN SELECT EXTRACT(WEEK from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(WEEK from `__time`); --MONTH -SELECT EXTRACT(MONTH from `__time`) FROM druid_table +SELECT EXTRACT(MONTH from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(MONTH from `__time`); -EXPLAIN SELECT EXTRACT(MONTH from `__time`) FROM druid_table +EXPLAIN SELECT EXTRACT(MONTH from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(MONTH from `__time`); --QUARTER -SELECT EXTRACT(QUARTER from `__time`) FROM druid_table +SELECT EXTRACT(QUARTER from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(QUARTER from `__time`); -EXPLAIN SELECT EXTRACT(QUARTER from `__time`) FROM druid_table +EXPLAIN SELECT EXTRACT(QUARTER from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(QUARTER from `__time`); -- YEAR -SELECT EXTRACT(YEAR from `__time`) FROM druid_table +SELECT EXTRACT(YEAR from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND 
SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(YEAR from `__time`); -EXPLAIN SELECT EXTRACT(YEAR from `__time`) FROM druid_table +EXPLAIN SELECT EXTRACT(YEAR from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP BY EXTRACT(YEAR from `__time`); @@ -98,67 +98,67 @@ AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 GROUP B -- SECOND -EXPLAIN SELECT EXTRACT(SECOND from `__time`) FROM druid_table WHERE EXTRACT(SECOND from `__time`) = 0 LIMIT 1; +EXPLAIN SELECT EXTRACT(SECOND from `__time`) FROM druid_table_n2 WHERE EXTRACT(SECOND from `__time`) = 0 LIMIT 1; -SELECT EXTRACT(SECOND from `__time`) FROM druid_table WHERE EXTRACT(SECOND from `__time`) = 0 LIMIT 1; +SELECT EXTRACT(SECOND from `__time`) FROM druid_table_n2 WHERE EXTRACT(SECOND from `__time`) = 0 LIMIT 1; -- MINUTE -EXPLAIN SELECT EXTRACT(MINUTE from `__time`) FROM druid_table +EXPLAIN SELECT EXTRACT(MINUTE from `__time`) FROM druid_table_n2 WHERE EXTRACT(MINUTE from `__time`) >= 0 LIMIT 2; -SELECT EXTRACT(MINUTE from `__time`) as minute FROM druid_table +SELECT EXTRACT(MINUTE from `__time`) as minute FROM druid_table_n2 WHERE EXTRACT(MINUTE from `__time`) >= 0 order by minute LIMIT 2; -- HOUR -EXPLAIN SELECT EXTRACT(HOUR from `__time`) FROM druid_table +EXPLAIN SELECT EXTRACT(HOUR from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 LIMIT 1; -SELECT EXTRACT(HOUR from `__time`) FROM druid_table +SELECT EXTRACT(HOUR from `__time`) FROM druid_table_n2 WHERE character_length(CAST(ctinyint AS STRING)) > 1 AND char_length(CAST(ctinyint AS STRING)) < 10 AND power(cfloat, 2) * pow(csmallint, 3) > 1 AND SQRT(ABS(ctinyint)) > 3 LIMIT 1; --DAY EXPLAIN SELECT EXTRACT(DAY from `__time`), EXTRACT(DAY from `__time`) DIV 7 AS WEEK, SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 9, 2) AS day_str -FROM druid_table WHERE SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 9, 2) = 31 LIMIT 1; +FROM druid_table_n2 WHERE SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 9, 2) = 31 LIMIT 1; SELECT EXTRACT(DAY from `__time`) , EXTRACT(DAY from `__time`) DIV 7 AS WEEK, SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 9, 2) AS dar_str -FROM druid_table WHERE SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 9, 2) = 31 LIMIT 1 ; +FROM druid_table_n2 WHERE SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 9, 2) = 31 LIMIT 1 ; -- WEEK -EXPLAIN SELECT EXTRACT(WEEK from `__time`) FROM druid_table WHERE EXTRACT(WEEK from `__time`) >= 1 +EXPLAIN SELECT EXTRACT(WEEK from `__time`) FROM druid_table_n2 WHERE EXTRACT(WEEK from `__time`) >= 1 AND EXTRACT(WEEK from `__time`) DIV 4 + 1 = 1 LIMIT 1; -SELECT EXTRACT(WEEK from `__time`) FROM druid_table WHERE EXTRACT(WEEK from `__time`) >= 1 +SELECT EXTRACT(WEEK from `__time`) FROM druid_table_n2 WHERE EXTRACT(WEEK from `__time`) >= 1 AND EXTRACT(WEEK from `__time`) DIV 4 + 1 = 1 LIMIT 1 ; --MONTH -EXPLAIN SELECT EXTRACT(MONTH FROM `__time`) / 4 + 1, EXTRACT(MONTH FROM `__time`), SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 6, 2) as month_str FROM druid_table +EXPLAIN SELECT EXTRACT(MONTH FROM `__time`) / 4 + 1, EXTRACT(MONTH FROM `__time`), SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 6, 2) as month_str FROM druid_table_n2 WHERE EXTRACT(MONTH FROM `__time`) / 4 + 1 = 4 AND EXTRACT(MONTH 
FROM `__time`) BETWEEN 11 AND 12 LIMIT 1; -SELECT EXTRACT(MONTH FROM `__time`) / 4 + 1, EXTRACT(MONTH FROM `__time`), SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 6, 2) as month_str FROM druid_table +SELECT EXTRACT(MONTH FROM `__time`) / 4 + 1, EXTRACT(MONTH FROM `__time`), SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 6, 2) as month_str FROM druid_table_n2 WHERE EXTRACT(MONTH FROM `__time`) / 4 + 1 = 4 AND EXTRACT(MONTH FROM `__time`) BETWEEN 11 AND 12 LIMIT 1; --QUARTER -EXPLAIN SELECT EXTRACT(QUARTER from `__time`), EXTRACT(MONTH FROM `__time`) / 4 + 1 as q_number FROM druid_table WHERE EXTRACT(QUARTER from `__time`) >= 4 +EXPLAIN SELECT EXTRACT(QUARTER from `__time`), EXTRACT(MONTH FROM `__time`) / 4 + 1 as q_number FROM druid_table_n2 WHERE EXTRACT(QUARTER from `__time`) >= 4 AND EXTRACT(MONTH FROM `__time`) / 4 + 1 = 4 LIMIT 1; -SELECT EXTRACT(QUARTER from `__time`), EXTRACT(MONTH FROM `__time`) / 4 + 1 as q_number FROM druid_table WHERE EXTRACT(QUARTER from `__time`) >= 4 +SELECT EXTRACT(QUARTER from `__time`), EXTRACT(MONTH FROM `__time`) / 4 + 1 as q_number FROM druid_table_n2 WHERE EXTRACT(QUARTER from `__time`) >= 4 AND EXTRACT(MONTH FROM `__time`) / 4 + 1 = 4 LIMIT 1; --YEAR -EXPLAIN SELECT EXTRACT(YEAR from `__time`), SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 1, 4) AS year_str FROM druid_table WHERE EXTRACT(YEAR from `__time`) >= 1969 +EXPLAIN SELECT EXTRACT(YEAR from `__time`), SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 1, 4) AS year_str FROM druid_table_n2 WHERE EXTRACT(YEAR from `__time`) >= 1969 AND CAST(EXTRACT(YEAR from `__time`) as STRING) = '1969' LIMIT 1; -SELECT EXTRACT(YEAR from `__time`), SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 1, 4) as year_str FROM druid_table WHERE EXTRACT(YEAR from `__time`) >= 1969 +SELECT EXTRACT(YEAR from `__time`), SUBSTRING(CAST(CAST(`__time` AS DATE) AS STRING), 1, 4) as year_str FROM druid_table_n2 WHERE EXTRACT(YEAR from `__time`) >= 1969 AND CAST(EXTRACT(YEAR from `__time`) as STRING) = '1969' LIMIT 1; -DROP TABLE druid_table; \ No newline at end of file +DROP TABLE druid_table_n2; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/druidmini_mv.q b/ql/src/test/queries/clientpositive/druidmini_mv.q index 8cf3d03c0e..46b204ecc6 100644 --- a/ql/src/test/queries/clientpositive/druidmini_mv.q +++ b/ql/src/test/queries/clientpositive/druidmini_mv.q @@ -6,7 +6,7 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -CREATE TABLE cmv_basetable +CREATE TABLE cmv_basetable_n2 STORED AS orc TBLPROPERTIES ('transactional'='true') AS @@ -25,91 +25,91 @@ FROM TABLE ( (3, 'charlie', 'charlie_a', 9.8, 1), (3, 'charlie', 'charlie_b', 15.8, 1)) as q (a, b, userid, c, d); -CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE +CREATE MATERIALIZED VIEW cmv_mat_view_n2 ENABLE REWRITE STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR") AS SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c, userid -FROM cmv_basetable +FROM cmv_basetable_n2 WHERE a = 2; -SELECT a, b, c FROM cmv_mat_view; +SELECT a, b, c FROM cmv_mat_view_n2; -SHOW TBLPROPERTIES cmv_mat_view; +SHOW TBLPROPERTIES cmv_mat_view_n2; -CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2 ENABLE REWRITE +CREATE MATERIALIZED VIEW IF NOT EXISTS cmv_mat_view2_n0 ENABLE REWRITE STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES 
("druid.segment.granularity" = "HOUR") AS SELECT cast(t AS timestamp with local time zone) as `__time`, a, b, c, userid -FROM cmv_basetable +FROM cmv_basetable_n2 WHERE a = 3; -SELECT a, c FROM cmv_mat_view2; +SELECT a, c FROM cmv_mat_view2_n0; -SHOW TBLPROPERTIES cmv_mat_view2; +SHOW TBLPROPERTIES cmv_mat_view2_n0; EXPLAIN SELECT a, c -FROM cmv_basetable +FROM cmv_basetable_n2 WHERE a = 3; SELECT a, c -FROM cmv_basetable +FROM cmv_basetable_n2 WHERE a = 3; EXPLAIN SELECT * FROM ( - (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 + (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 JOIN - (SELECT a, c FROM cmv_basetable WHERE d = 3) table2 + (SELECT a, c FROM cmv_basetable_n2 WHERE d = 3) table2 ON table1.a = table2.a); SELECT * FROM ( - (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 + (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 JOIN - (SELECT a, c FROM cmv_basetable WHERE d = 3) table2 + (SELECT a, c FROM cmv_basetable_n2 WHERE d = 3) table2 ON table1.a = table2.a); -INSERT INTO cmv_basetable VALUES +INSERT INTO cmv_basetable_n2 VALUES (cast(current_timestamp() AS timestamp), 3, 'charlie', 'charlie_c', 15.8, 1); -- TODO: CANNOT USE THE VIEW, IT IS OUTDATED EXPLAIN SELECT * FROM ( - (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 + (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 JOIN - (SELECT a, c FROM cmv_basetable WHERE d = 3) table2 + (SELECT a, c FROM cmv_basetable_n2 WHERE d = 3) table2 ON table1.a = table2.a); SELECT * FROM ( - (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 + (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 JOIN - (SELECT a, c FROM cmv_basetable WHERE d = 3) table2 + (SELECT a, c FROM cmv_basetable_n2 WHERE d = 3) table2 ON table1.a = table2.a); -- REBUILD EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view2_n0 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view2_n0 REBUILD; -SHOW TBLPROPERTIES cmv_mat_view2; +SHOW TBLPROPERTIES cmv_mat_view2_n0; -- NOW IT CAN BE USED AGAIN EXPLAIN SELECT * FROM ( - (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 + (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 JOIN - (SELECT a, c FROM cmv_basetable WHERE d = 3) table2 + (SELECT a, c FROM cmv_basetable_n2 WHERE d = 3) table2 ON table1.a = table2.a); SELECT * FROM ( - (SELECT a, c FROM cmv_basetable WHERE a = 3) table1 + (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1 JOIN - (SELECT a, c FROM cmv_basetable WHERE d = 3) table2 + (SELECT a, c FROM cmv_basetable_n2 WHERE d = 3) table2 ON table1.a = table2.a); -DROP MATERIALIZED VIEW cmv_mat_view; -DROP MATERIALIZED VIEW cmv_mat_view2; -DROP TABLE cmv_basetable; +DROP MATERIALIZED VIEW cmv_mat_view_n2; +DROP MATERIALIZED VIEW cmv_mat_view2_n0; +DROP TABLE cmv_basetable_n2; diff --git a/ql/src/test/queries/clientpositive/druidmini_test1.q b/ql/src/test/queries/clientpositive/druidmini_test1.q index ad9546f6dc..4a1bdc5982 100644 --- a/ql/src/test/queries/clientpositive/druidmini_test1.q +++ b/ql/src/test/queries/clientpositive/druidmini_test1.q @@ -1,5 +1,5 @@ --! 
qt:dataset:alltypesorc -CREATE TABLE druid_table +CREATE TABLE druid_table_n3 STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE") AS @@ -17,106 +17,106 @@ SELECT cast (`ctimestamp1` as timestamp with local time zone) as `__time`, FROM alltypesorc where ctimestamp1 IS NOT NULL; -- Time Series Query -explain select count(*) FROM druid_table; -SELECT count(*) FROM druid_table; +explain select count(*) FROM druid_table_n3; +SELECT count(*) FROM druid_table_n3; EXPLAIN SELECT floor_year(`__time`), SUM(cfloat), SUM(cdouble), SUM(ctinyint), SUM(csmallint),SUM(cint), SUM(cbigint) -FROM druid_table GROUP BY floor_year(`__time`); +FROM druid_table_n3 GROUP BY floor_year(`__time`); SELECT floor_year(`__time`), SUM(cfloat), SUM(cdouble), SUM(ctinyint), SUM(csmallint),SUM(cint), SUM(cbigint) -FROM druid_table GROUP BY floor_year(`__time`); +FROM druid_table_n3 GROUP BY floor_year(`__time`); EXPLAIN SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint) -FROM druid_table GROUP BY floor_year(`__time`); +FROM druid_table_n3 GROUP BY floor_year(`__time`); SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint) -FROM druid_table GROUP BY floor_year(`__time`); +FROM druid_table_n3 GROUP BY floor_year(`__time`); EXPLAIN SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint) -FROM druid_table GROUP BY floor_year(`__time`); +FROM druid_table_n3 GROUP BY floor_year(`__time`); SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint) -FROM druid_table GROUP BY floor_year(`__time`); +FROM druid_table_n3 GROUP BY floor_year(`__time`); -- Group By -EXPLAIN SELECT cstring1, SUM(cdouble) as s FROM druid_table GROUP BY cstring1 ORDER BY s ASC LIMIT 10; +EXPLAIN SELECT cstring1, SUM(cdouble) as s FROM druid_table_n3 GROUP BY cstring1 ORDER BY s ASC LIMIT 10; -SELECT cstring1, SUM(cdouble) as s FROM druid_table GROUP BY cstring1 ORDER BY s ASC LIMIT 10; +SELECT cstring1, SUM(cdouble) as s FROM druid_table_n3 GROUP BY cstring1 ORDER BY s ASC LIMIT 10; -EXPLAIN SELECT cstring2, MAX(cdouble) FROM druid_table GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10; +EXPLAIN SELECT cstring2, MAX(cdouble) FROM druid_table_n3 GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10; -SELECT cstring2, MAX(cdouble) FROM druid_table GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10; +SELECT cstring2, MAX(cdouble) FROM druid_table_n3 GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10; -- TIME STUFF EXPLAIN SELECT `__time` -FROM druid_table ORDER BY `__time` ASC LIMIT 10; +FROM druid_table_n3 ORDER BY `__time` ASC LIMIT 10; SELECT `__time` -FROM druid_table ORDER BY `__time` ASC LIMIT 10; +FROM druid_table_n3 ORDER BY `__time` ASC LIMIT 10; EXPLAIN SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE `__time` < '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10; SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE `__time` < '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10; EXPLAIN SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10; SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10; EXPLAIN 
SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' AND `__time` < '2011-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10; SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' AND `__time` < '2011-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10; EXPLAIN SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE `__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;; SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE `__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;; EXPLAIN SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE (`__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00') OR (`__time` BETWEEN '1968-02-01 00:00:00' AND '1970-04-01 00:00:00') ORDER BY `__time` ASC LIMIT 10; SELECT `__time` -FROM druid_table +FROM druid_table_n3 WHERE (`__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00') OR (`__time` BETWEEN '1968-02-01 00:00:00' AND '1970-04-01 00:00:00') ORDER BY `__time` ASC LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/druidmini_test_alter.q b/ql/src/test/queries/clientpositive/druidmini_test_alter.q index 15ae952d6a..e19a00637b 100644 --- a/ql/src/test/queries/clientpositive/druidmini_test_alter.q +++ b/ql/src/test/queries/clientpositive/druidmini_test_alter.q @@ -1,4 +1,4 @@ -CREATE TABLE druid_alltypesorc +CREATE TABLE druid_alltypesorc_n0 STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler' TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE") AS @@ -13,21 +13,21 @@ cbigint, cboolean1 FROM alltypesorc where ctimestamp2 IS NOT NULL; -DESCRIBE druid_alltypesorc; +DESCRIBE druid_alltypesorc_n0; -DESCRIBE extended druid_alltypesorc; +DESCRIBE extended druid_alltypesorc_n0; -SELECT COUNT(*) FROM druid_alltypesorc; +SELECT COUNT(*) FROM druid_alltypesorc_n0; -ALTER TABLE druid_alltypesorc ADD COLUMNS (cstring2 string, cboolean2 boolean, cint2 int); +ALTER TABLE druid_alltypesorc_n0 ADD COLUMNS (cstring2 string, cboolean2 boolean, cint2 int); -DESCRIBE druid_alltypesorc; +DESCRIBE druid_alltypesorc_n0; -DESCRIBE extended druid_alltypesorc; +DESCRIBE extended druid_alltypesorc_n0; -SELECT COUNT(*) FROM druid_alltypesorc WHERE cstring2 IS NOT NULL; +SELECT COUNT(*) FROM druid_alltypesorc_n0 WHERE cstring2 IS NOT NULL; -INSERT INTO TABLE druid_alltypesorc +INSERT INTO TABLE druid_alltypesorc_n0 SELECT cast (`ctimestamp1` as timestamp with local time zone) as `__time`, cstring1, cdouble, @@ -43,10 +43,10 @@ cint FROM alltypesorc where ctimestamp1 IS NOT NULL; -SELECT COUNT(*) FROM druid_alltypesorc; +SELECT COUNT(*) FROM druid_alltypesorc_n0; -SELECT COUNT(*) FROM druid_alltypesorc WHERE cstring2 IS NULL; +SELECT COUNT(*) FROM druid_alltypesorc_n0 WHERE cstring2 IS NULL; -SELECT COUNT(*) FROM druid_alltypesorc WHERE cstring2 IS NOT NULL; +SELECT COUNT(*) FROM druid_alltypesorc_n0 WHERE cstring2 IS NOT NULL; -DROP TABLE druid_alltypesorc; +DROP TABLE druid_alltypesorc_n0; diff --git a/ql/src/test/queries/clientpositive/druidmini_test_insert.q b/ql/src/test/queries/clientpositive/druidmini_test_insert.q index c14a1b6f7e..03657fd7d7 100644 --- a/ql/src/test/queries/clientpositive/druidmini_test_insert.q +++ b/ql/src/test/queries/clientpositive/druidmini_test_insert.q @@ -58,23 +58,23 @@ DROP TABLE druid_alltypesorc; create database 
druid_test_create_then_insert;
use druid_test_create_then_insert;
- create table test_table(`timecolumn` timestamp, `userid` string, `num_l` float);
+ create table test_table_n9(`timecolumn` timestamp, `userid` string, `num_l` float);
- insert into test_table values ('2015-01-08 00:00:00', 'i1-start', 4);
- insert into test_table values ('2015-01-08 23:59:59', 'i1-end', 1);
+ insert into test_table_n9 values ('2015-01-08 00:00:00', 'i1-start', 4);
+ insert into test_table_n9 values ('2015-01-08 23:59:59', 'i1-end', 1);
- CREATE TABLE druid_table (`__time` timestamp with local time zone, `userid` string, `num_l` float)
+ CREATE TABLE druid_table_n1 (`__time` timestamp with local time zone, `userid` string, `num_l` float)
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "DAY");
- INSERT INTO TABLE druid_table
- select cast(`timecolumn` as timestamp with local time zone) as `__time`, `userid`, `num_l` FROM test_table;
+ INSERT INTO TABLE druid_table_n1
+ select cast(`timecolumn` as timestamp with local time zone) as `__time`, `userid`, `num_l` FROM test_table_n9;
- select count(*) FROM druid_table;
+ select count(*) FROM druid_table_n1;
- DROP TABLE test_table;
- DROP TABLE druid_table;
+ DROP TABLE test_table_n9;
+ DROP TABLE druid_table_n1;
DROP DATABASE druid_test_create_then_insert;
-- Day light saving time test
insert into test
@@ -90,54 +90,54 @@ insert into test_base_table values ('2015-03-09 23:59:59', 'i2-end', 1);
insert into test_base_table values ('2015-03-10 00:00:00', 'i3-start', 2);
insert into test_base_table values ('2015-03-10 23:59:59', 'i3-end', 2);
-CREATE TABLE druid_test_table
+CREATE TABLE druid_test_table_n9
STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
TBLPROPERTIES ("druid.segment.granularity" = "DAY")
AS
select cast(`timecolumn` as timestamp with local time zone) as `__time`, `userid`, `num_l` FROM test_base_table;
-select * FROM druid_test_table;
+select * FROM druid_test_table_n9;
-select * from druid_test_table where `__time` = cast('2015-03-08 00:00:00' as timestamp with local time zone);
-select * from druid_test_table where `__time` = cast('2015-03-08 23:59:59' as timestamp with local time zone);
+select * from druid_test_table_n9 where `__time` = cast('2015-03-08 00:00:00' as timestamp with local time zone);
+select * from druid_test_table_n9 where `__time` = cast('2015-03-08 23:59:59' as timestamp with local time zone);
-select * from druid_test_table where `__time` = cast('2015-03-09 00:00:00' as timestamp with local time zone);
-select * from druid_test_table where `__time` = cast('2015-03-09 23:59:59' as timestamp with local time zone);
+select * from druid_test_table_n9 where `__time` = cast('2015-03-09 00:00:00' as timestamp with local time zone);
+select * from druid_test_table_n9 where `__time` = cast('2015-03-09 23:59:59' as timestamp with local time zone);
-select * from druid_test_table where `__time` = cast('2015-03-10 00:00:00' as timestamp with local time zone);
-select * from druid_test_table where `__time` = cast('2015-03-10 23:59:59' as timestamp with local time zone);
+select * from druid_test_table_n9 where `__time` = cast('2015-03-10 00:00:00' as timestamp with local time zone);
+select * from druid_test_table_n9 where `__time` = cast('2015-03-10 23:59:59' as timestamp with local time zone);
-explain select * from druid_test_table where `__time` = cast('2015-03-08 00:00:00' as timestamp with local time zone);
-explain select * from druid_test_table where `__time` = cast('2015-03-08 23:59:59' as timestamp with local time zone);
+explain select * from druid_test_table_n9 where `__time` = cast('2015-03-08 00:00:00' as timestamp with local time zone);
+explain select * from druid_test_table_n9 where `__time` = cast('2015-03-08 23:59:59' as timestamp with local time zone);
-explain select * from druid_test_table where `__time` = cast('2015-03-09 00:00:00' as timestamp with local time zone);
-explain select * from druid_test_table where `__time` = cast('2015-03-09 23:59:59' as timestamp with local time zone);
+explain select * from druid_test_table_n9 where `__time` = cast('2015-03-09 00:00:00' as timestamp with local time zone);
+explain select * from druid_test_table_n9 where `__time` = cast('2015-03-09 23:59:59' as timestamp with local time zone);
-explain select * from druid_test_table where `__time` = cast('2015-03-10 00:00:00' as timestamp with local time zone);
-explain select * from druid_test_table where `__time` = cast('2015-03-10 23:59:59' as timestamp with local time zone);
+explain select * from druid_test_table_n9 where `__time` = cast('2015-03-10 00:00:00' as timestamp with local time zone);
+explain select * from druid_test_table_n9 where `__time` = cast('2015-03-10 23:59:59' as timestamp with local time zone);
-select * from druid_test_table where `__time` = cast('2015-03-08 00:00:00' as timestamp );
-select * from druid_test_table where `__time` = cast('2015-03-08 23:59:59' as timestamp );
+select * from druid_test_table_n9 where `__time` = cast('2015-03-08 00:00:00' as timestamp );
+select * from druid_test_table_n9 where `__time` = cast('2015-03-08 23:59:59' as timestamp );
-select * from druid_test_table where `__time` = cast('2015-03-09 00:00:00' as timestamp );
-select * from druid_test_table where `__time` = cast('2015-03-09 23:59:59' as timestamp );
+select * from druid_test_table_n9 where `__time` = cast('2015-03-09 00:00:00' as timestamp );
+select * from druid_test_table_n9 where `__time` = cast('2015-03-09 23:59:59' as timestamp );
-select * from druid_test_table where `__time` = cast('2015-03-10 00:00:00' as timestamp );
-select * from druid_test_table where `__time` = cast('2015-03-10 23:59:59' as timestamp );
+select * from druid_test_table_n9 where `__time` = cast('2015-03-10 00:00:00' as timestamp );
+select * from druid_test_table_n9 where `__time` = cast('2015-03-10 23:59:59' as timestamp );
-EXPLAIN select * from druid_test_table where `__time` = cast('2015-03-08 00:00:00' as timestamp );
-EXPLAIN select * from druid_test_table where `__time` = cast('2015-03-08 23:59:59' as timestamp );
+EXPLAIN select * from druid_test_table_n9 where `__time` = cast('2015-03-08 00:00:00' as timestamp );
+EXPLAIN select * from druid_test_table_n9 where `__time` = cast('2015-03-08 23:59:59' as timestamp );
-EXPLAIN select * from druid_test_table where `__time` = cast('2015-03-09 00:00:00' as timestamp );
-EXPLAIN select * from druid_test_table where `__time` = cast('2015-03-09 23:59:59' as timestamp );
+EXPLAIN select * from druid_test_table_n9 where `__time` = cast('2015-03-09 00:00:00' as timestamp );
+EXPLAIN select * from druid_test_table_n9 where `__time` = cast('2015-03-09 23:59:59' as timestamp );
-EXPLAIN select * from druid_test_table where `__time` = cast('2015-03-10 00:00:00' as timestamp );
-EXPLAIN select * from druid_test_table where `__time` = cast('2015-03-10 23:59:59' as timestamp );
+EXPLAIN select * from druid_test_table_n9 where `__time` = cast('2015-03-10 00:00:00' as timestamp );
+EXPLAIN select * from druid_test_table_n9 where `__time` = cast('2015-03-10 23:59:59' as timestamp );
DROP TABLE test_base_table;
-DROP TABLE druid_test_table;
+DROP TABLE druid_test_table_n9;
drop database druid_test_dst;
diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_insert.q b/ql/src/test/queries/clientpositive/dynamic_partition_insert.q
index ab70ff1639..ee13bd5480 100644
--- a/ql/src/test/queries/clientpositive/dynamic_partition_insert.q
+++ b/ql/src/test/queries/clientpositive/dynamic_partition_insert.q
@@ -1,57 +1,57 @@
SET hive.vectorized.execution.enabled=false;
set hive.mapred.mode=nonstrict;
-CREATE TABLE t1 (c1 BIGINT, c2 STRING);
+CREATE TABLE t1_n131 (c1 BIGINT, c2 STRING);
-CREATE TABLE t2 (c1 INT, c2 STRING)
+CREATE TABLE t2_n78 (c1 INT, c2 STRING)
PARTITIONED BY (p1 STRING);
-LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1;
-LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1;
-LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1;
-LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1;
-LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1;
+LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n131;
+LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n131;
+LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n131;
+LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n131;
+LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n131;
SET hive.exec.dynamic.partition.mode=nonstrict;
-INSERT OVERWRITE TABLE t2 partition(p1) SELECT *,c1 AS p1 FROM t1 DISTRIBUTE BY p1;
-SELECT * FROM t2;
+INSERT OVERWRITE TABLE t2_n78 partition(p1) SELECT *,c1 AS p1 FROM t1_n131 DISTRIBUTE BY p1;
+SELECT * FROM t2_n78;
-- no partition spec
-TRUNCATE TABLE t2;
-INSERT OVERWRITE TABLE t2 SELECT *,c1 AS p1 FROM t1 DISTRIBUTE BY p1;
-SHOW PARTITIONS t2;
-SELECT * FROM t2;
+TRUNCATE TABLE t2_n78;
+INSERT OVERWRITE TABLE t2_n78 SELECT *,c1 AS p1 FROM t1_n131 DISTRIBUTE BY p1;
+SHOW PARTITIONS t2_n78;
+SELECT * FROM t2_n78;
-DROP TABLE t1;
-DROP TABLE t2;
+DROP TABLE t1_n131;
+DROP TABLE t2_n78;
-- Single partition with buckets
-CREATE TABLE table1 (id int) partitioned by (key string) clustered by (id) into 2 buckets ;
+CREATE TABLE table1_n15 (id int) partitioned by (key string) clustered by (id) into 2 buckets ;
-- without partition schema
-INSERT INTO TABLE table1 VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505');
-SHOW PARTITIONS table1;
-SELECT * FROM table1;
-DROP TABLE table1;
+INSERT INTO TABLE table1_n15 VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505');
+SHOW PARTITIONS table1_n15;
+SELECT * FROM table1_n15;
+DROP TABLE table1_n15;
-- Multiple partitions
-CREATE TABLE table1 (name string, age int) PARTITIONED BY (country string, state string);
-INSERT INTO table1 values ('John Doe', 23, 'USA', 'CA'), ('Jane Doe', 22, 'USA', 'TX');
-SHOW PARTITIONS table1;
-
-CREATE TABLE table2 (name string, age int) PARTITIONED BY (country string, state string);
-INSERT INTO TABLE table2 SELECT * FROM table1;
-SHOW PARTITIONS table2;
-SELECT * FROM table2;
-DROP TABLE table2;
-DROP TABLE table1;
-
-CREATE TABLE dest1(key string) partitioned by (value string);
-CREATE TABLE dest2(key string) partitioned by (value string);
+CREATE TABLE table1_n15 (name string, age int) PARTITIONED BY (country string, state string);
+INSERT INTO table1_n15 values ('John Doe', 23, 'USA', 'CA'), ('Jane Doe', 22, 'USA', 'TX');
+SHOW PARTITIONS table1_n15;
+
+CREATE TABLE table2_n10 (name string, age int) PARTITIONED BY (country string, state string);
+INSERT INTO TABLE table2_n10 SELECT * FROM table1_n15;
+SHOW PARTITIONS table2_n10;
+SELECT * FROM table2_n10;
+DROP TABLE table2_n10;
+DROP TABLE table1_n15;
+
+CREATE TABLE dest1_n143(key string) partitioned by (value string);
+CREATE TABLE dest2_n37(key string) partitioned by (value string);
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200;
+INSERT OVERWRITE TABLE dest1_n143 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n37 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200;
SELECT distinct value FROM SRC WHERE src.key < 100;
-SHOW PARTITIONS dest1;
+SHOW PARTITIONS dest1_n143;
SELECT distinct value FROM SRC WHERE src.key >= 100 and src.key < 200;
-SHOW PARTITIONS dest2;
-DROP TABLE dest1;
-DROP TABLE dest2;
+SHOW PARTITIONS dest2_n37;
+DROP TABLE dest1_n143;
+DROP TABLE dest2_n37;
diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
index 52d1c26a4e..de339f892a 100644
--- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
+++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
@@ -14,74 +14,74 @@ set hive.tez.bigtable.minsize.semijoin.reduction=1;
select distinct ds from srcpart;
select distinct hr from srcpart;
-EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
-create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
-create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr;
-create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr;
-create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr;
+EXPLAIN create table srcpart_date_n2 as select ds as ds, ds as `date` from srcpart group by ds;
+create table srcpart_date_n2 as select ds as ds, ds as `date` from srcpart group by ds;
+create table srcpart_hour_n0 as select hr as hr, hr as hour from srcpart group by hr;
+create table srcpart_date_hour_n0 as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr;
+create table srcpart_double_hour_n0 as select (hr*2) as hr, hr as hour from srcpart group by hr;
-- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
set hive.tez.dynamic.partition.pruning=true;
select count(*) from srcpart where ds = '2008-04-08';
-- single column, single key, udf with typechange
-EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (day(srcpart.ds) = day(srcpart_date_n2.ds)) where srcpart_date_n2.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n2 on (day(srcpart.ds) = day(srcpart_date_n2.ds)) where srcpart_date_n2.`date` = '2008-04-08';
set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (day(srcpart.ds) = day(srcpart_date_n2.ds)) where srcpart_date_n2.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n2 on (day(srcpart.ds) = day(srcpart_date_n2.ds)) where srcpart_date_n2.`date` = '2008-04-08';
set hive.tez.dynamic.partition.pruning=true;
-- multiple sources, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11;
set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11;
set hive.tez.dynamic.partition.pruning=true;
select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
-- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour_n0 on (srcpart.ds = srcpart_date_hour_n0.ds and srcpart.hr = srcpart_date_hour_n0.hr) where srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_date_hour_n0 on (srcpart.ds = srcpart_date_hour_n0.ds and srcpart.hr = srcpart_date_hour_n0.hr) where srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11;
set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour_n0 on (srcpart.ds = srcpart_date_hour_n0.ds and srcpart.hr = srcpart_date_hour_n0.hr) where srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_date_hour_n0 on (srcpart.ds = srcpart_date_hour_n0.ds and srcpart.hr = srcpart_date_hour_n0.hr) where srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11;
set hive.tez.dynamic.partition.pruning=true;
select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
-- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = 'I DONT EXIST';
set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = 'I DONT EXIST';
set hive.tez.dynamic.partition.pruning=true;
select count(*) from srcpart where ds = 'I DONT EXIST';
-- expressions
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr = cast(srcpart_double_hour_n0.hr/2 as int)) where srcpart_double_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr = cast(srcpart_double_hour_n0.hr/2 as int)) where srcpart_double_hour_n0.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr*2 = srcpart_double_hour_n0.hr) where srcpart_double_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr*2 = srcpart_double_hour_n0.hr) where srcpart_double_hour_n0.hour = 11;
set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr = cast(srcpart_double_hour_n0.hr/2 as int)) where srcpart_double_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr = cast(srcpart_double_hour_n0.hr/2 as int)) where srcpart_double_hour_n0.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr*2 = srcpart_double_hour_n0.hr) where srcpart_double_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr*2 = srcpart_double_hour_n0.hr) where srcpart_double_hour_n0.hour = 11;
set hive.tez.dynamic.partition.pruning=true;
select count(*) from srcpart where hr = 11;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n0 on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour_n0.hr as string)) where srcpart_double_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n0 on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour_n0.hr as string)) where srcpart_double_hour_n0.hour = 11;
set hive.tez.dynamic.partition.pruning=true;
select count(*) from srcpart where cast(hr as string) = 11;
@@ -92,29 +92,29 @@ select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart gr
select count(*) from srcpart where ds = '2008-04-08';
-- non-equi join
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
-select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+EXPLAIN select count(*) from srcpart, srcpart_date_hour_n0 where (srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11) and (srcpart.ds = srcpart_date_hour_n0.ds or srcpart.hr = srcpart_date_hour_n0.hr);
+select count(*) from srcpart, srcpart_date_hour_n0 where (srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11) and (srcpart.ds = srcpart_date_hour_n0.ds or srcpart.hr = srcpart_date_hour_n0.hr);
-- old style join syntax
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
-select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+EXPLAIN select count(*) from srcpart, srcpart_date_hour_n0 where srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11 and srcpart.ds = srcpart_date_hour_n0.ds and srcpart.hr = srcpart_date_hour_n0.hr;
+select count(*) from srcpart, srcpart_date_hour_n0 where srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11 and srcpart.ds = srcpart_date_hour_n0.ds and srcpart.hr = srcpart_date_hour_n0.hr;
-- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date_n2 left join srcpart on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
-- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
-- with static pruning
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11 and srcpart.hr = 11;
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11 and srcpart.hr = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart.hr = 13;
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart.hr = 13;
-- union + subquery
EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
@@ -129,35 +129,35 @@ set hive.auto.convert.join.noconditionaltask = true;
set hive.auto.convert.join.noconditionaltask.size = 10000000;
-- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
select count(*) from srcpart where ds = '2008-04-08';
-- single column, single key, udf with typechange
-EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (day(srcpart.ds) = day(srcpart_date_n2.ds)) where srcpart_date_n2.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n2 on (day(srcpart.ds) = day(srcpart_date_n2.ds)) where srcpart_date_n2.`date` = '2008-04-08';
-- multiple sources, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11;
select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
-- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour_n0 on (srcpart.ds = srcpart_date_hour_n0.ds and srcpart.hr = srcpart_date_hour_n0.hr) where srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_date_hour_n0 on (srcpart.ds = srcpart_date_hour_n0.ds and srcpart.hr = srcpart_date_hour_n0.hr) where srcpart_date_hour_n0.`date` = '2008-04-08' and srcpart_date_hour_n0.hour = 11;
select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
-- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = 'I DONT EXIST';
-- expressions
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr = cast(srcpart_double_hour_n0.hr/2 as int)) where srcpart_double_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr = cast(srcpart_double_hour_n0.hr/2 as int)) where srcpart_double_hour_n0.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr*2 = srcpart_double_hour_n0.hr) where srcpart_double_hour_n0.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n0 on (srcpart.hr*2 = srcpart_double_hour_n0.hr) where srcpart_double_hour_n0.hour = 11;
select count(*) from srcpart where hr = 11;
@@ -170,22 +170,22 @@ select count(*) from srcpart where ds = '2008-04-08';
set hive.stats.fetch.column.stats=true;
-- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date_n2 left join srcpart on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
-- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) where srcpart_date_n2.`date` = '2008-04-08';
-- with static pruning
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11 and srcpart.hr = 11;
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart_hour_n0.hour = 11 and srcpart.hr = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart.hr = 13;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
+select count(*) from srcpart join srcpart_date_n2 on (srcpart.ds = srcpart_date_n2.ds) join srcpart_hour_n0 on (srcpart.hr = srcpart_hour_n0.hr)
+where srcpart_date_n2.`date` = '2008-04-08' and srcpart.hr = 13;
-- union + subquery
EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
@@ -201,12 +201,12 @@ set hive.vectorized.execution.enabled=false;
set hive.exec.max.dynamic.partitions=1000;
insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart;
-EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
-select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
+EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour_n0 on (srcpart_orc.ds = srcpart_date_hour_n0.ds and srcpart_orc.hr = srcpart_date_hour_n0.hr) where srcpart_date_hour_n0.hour = 11 and (srcpart_date_hour_n0.`date` = '2008-04-08' or srcpart_date_hour_n0.`date` = '2008-04-09');
+select count(*) from srcpart_orc join srcpart_date_hour_n0 on (srcpart_orc.ds = srcpart_date_hour_n0.ds and srcpart_orc.hr = srcpart_date_hour_n0.hr) where srcpart_date_hour_n0.hour = 11 and (srcpart_date_hour_n0.`date` = '2008-04-08' or srcpart_date_hour_n0.`date` = '2008-04-09');
select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11;
drop table srcpart_orc;
-drop table srcpart_date;
-drop table srcpart_hour;
-drop table srcpart_date_hour;
-drop table srcpart_double_hour;
+drop table srcpart_date_n2;
+drop table srcpart_hour_n0;
+drop table srcpart_date_hour_n0;
+drop table srcpart_double_hour_n0;
diff --git a/ql/src/test/queries/clientpositive/dynamic_rdd_cache.q b/ql/src/test/queries/clientpositive/dynamic_rdd_cache.q
index 729464ed83..de3eba298c 100644
--- a/ql/src/test/queries/clientpositive/dynamic_rdd_cache.q
+++ b/ql/src/test/queries/clientpositive/dynamic_rdd_cache.q
@@ -21,33 +21,33 @@ ON (X.key = Z.key)
SELECT sum(hash(Y.key,Y.value)) GROUP BY Y.key;
-CREATE TABLE dest1(key INT, value STRING);
-CREATE TABLE dest2(key INT, value STRING);
+CREATE TABLE dest1_n90(key INT, value STRING);
+CREATE TABLE dest2_n24(key INT, value STRING);
EXPLAIN
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(SUBSTR(src.value,5)) GROUP BY src.key
-INSERT OVERWRITE TABLE dest2 SELECT src.key, sum(SUBSTR(src.value,5)) GROUP BY src.key;
+INSERT OVERWRITE TABLE dest1_n90 SELECT src.key, sum(SUBSTR(src.value,5)) GROUP BY src.key
+INSERT OVERWRITE TABLE dest2_n24 SELECT src.key, sum(SUBSTR(src.value,5)) GROUP BY src.key;
-SELECT dest1.* FROM dest1;
-SELECT dest2.* FROM dest2;
+SELECT dest1_n90.* FROM dest1_n90;
+SELECT dest2_n24.* FROM dest2_n24;
-DROP TABLE dest1;
-DROP TABLE dest2;
+DROP TABLE dest1_n90;
+DROP TABLE dest2_n24;
-- UNION TEST
-CREATE TABLE tmptable(key STRING, value INT);
+CREATE TABLE tmptable_n8(key STRING, value INT);
EXPLAIN
-INSERT OVERWRITE TABLE tmptable
+INSERT OVERWRITE TABLE tmptable_n8
SELECT unionsrc.key, unionsrc.value FROM (SELECT 'tst1' AS key, count(1) AS value FROM src s1
UNION ALL
SELECT 'tst2' AS key, count(1) AS value FROM src s2
UNION ALL
SELECT 'tst3' AS key, count(1) AS value FROM src s3) unionsrc;
-SELECT * FROM tmptable x SORT BY x.key;
+SELECT * FROM tmptable_n8 x SORT BY x.key;
-DROP TABLE tmtable;
+DROP TABLE tmptable_n8;
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
index 705dfdc382..32c28540ad 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
@@ -16,67 +16,67 @@ set hive.stats.fetch.column.stats=true;
set hive.tez.bloom.filter.factor=1.0f;
-- Create Tables
-create table alltypesorc_int ( cint int, cstring string ) stored as ORC;
-create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC;
-CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
+create table alltypesorc_int_n1 ( cint int, cstring string ) stored as ORC;
+create table srcpart_date_n7 (key string, value string) partitioned by (ds string ) stored as ORC;
+CREATE TABLE srcpart_small_n3(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
-- Add Partitions
-alter table srcpart_date add partition (ds = "2008-04-08");
-alter table srcpart_date add partition (ds = "2008-04-09");
+alter table srcpart_date_n7 add partition (ds = "2008-04-08");
+alter table srcpart_date_n7 add partition (ds = "2008-04-09");
-alter table srcpart_small add partition (ds = "2008-04-08");
-alter table srcpart_small add partition (ds = "2008-04-09");
+alter table srcpart_small_n3 add partition (ds = "2008-04-08");
+alter table srcpart_small_n3 add partition (ds = "2008-04-09");
-- Load
-insert overwrite table alltypesorc_int select cint, cstring1 from alltypesorc;
-insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
-insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
-insert overwrite table srcpart_small partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;
+insert overwrite table alltypesorc_int_n1 select cint, cstring1 from alltypesorc;
+insert overwrite table srcpart_date_n7 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
+insert overwrite table srcpart_date_n7 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
+insert overwrite table srcpart_small_n3 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;
set hive.tez.dynamic.semijoin.reduction=false;
-analyze table alltypesorc_int compute statistics for columns;
-analyze table srcpart_date compute statistics for columns;
-analyze table srcpart_small compute statistics for columns;
+analyze table alltypesorc_int_n1 compute statistics for columns;
+analyze table srcpart_date_n7 compute statistics for columns;
+analyze table srcpart_small_n3 compute statistics for columns;
-- single column, single key
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction=true;
-- Mix dynamic partition pruning(DPP) and min/max bloom filter optimizations. Should pick the DPP.
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.ds);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.ds);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.ds);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.ds);
set hive.tez.dynamic.semijoin.reduction=false;
--multiple sources, single key
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_small_n3.key1 = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_small_n3.key1 = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_small_n3.key1 = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_small_n3.key1 = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction=false;
-- single source, multiple keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1 and srcpart_date_n7.value = srcpart_small_n3.value1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1 and srcpart_date_n7.value = srcpart_small_n3.value1);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1 and srcpart_date_n7.value = srcpart_small_n3.value1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1 and srcpart_date_n7.value = srcpart_small_n3.value1);
set hive.tez.dynamic.semijoin.reduction=false;
-- multiple sources, different keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
-- Explain extended to verify fast start for Reducer in semijoin branch
-EXPLAIN extended select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN extended select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction=false;
-- With Mapjoins, there shouldn't be any semijoin parallel to mapjoin.
@@ -84,64 +84,64 @@ set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=100000000000;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction.for.mapjoin=true; -- Enable semijoin parallel to mapjoins.
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1);
set hive.tez.dynamic.semijoin.reduction.for.mapjoin=false;
set hive.tez.dynamic.semijoin.reduction=false;
-- multiple sources, different keys
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction.for.mapjoin=true; -- Enable semijoin parallel to mapjoins.
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.key = srcpart_small_n3.key1) join alltypesorc_int_n1 on (srcpart_date_n7.value = alltypesorc_int_n1.cstring);
set hive.tez.dynamic.semijoin.reduction.for.mapjoin=false;
-- HIVE-17323 - with DPP, the 1st mapjoin is on a map with DPP and 2nd mapjoin is on a map which had semijoin but still removed.
-create table alltypesorc_int40 as select * from alltypesorc_int limit 40;
+create table alltypesorc_int40 as select * from alltypesorc_int_n1 limit 40;
set hive.tez.dynamic.semijoin.reduction=false;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.ds = srcpart_small.ds) join alltypesorc_int40 on (srcpart_date.value = alltypesorc_int40.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.ds = srcpart_small.ds) join alltypesorc_int40 on (srcpart_date.value = alltypesorc_int40.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.ds = srcpart_small_n3.ds) join alltypesorc_int40 on (srcpart_date_n7.value = alltypesorc_int40.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.ds = srcpart_small_n3.ds) join alltypesorc_int40 on (srcpart_date_n7.value = alltypesorc_int40.cstring);
set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.ds = srcpart_small.ds) join alltypesorc_int40 on (srcpart_date.value = alltypesorc_int40.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.ds = srcpart_small.ds) join alltypesorc_int40 on (srcpart_date.value = alltypesorc_int40.cstring);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.ds = srcpart_small_n3.ds) join alltypesorc_int40 on (srcpart_date_n7.value = alltypesorc_int40.cstring);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on (srcpart_date_n7.ds = srcpart_small_n3.ds) join alltypesorc_int40 on (srcpart_date_n7.value = alltypesorc_int40.cstring);
-- HIVE-17399
-create table srcpart_small10 as select * from srcpart_small limit 10;
+create table srcpart_small10 as select * from srcpart_small_n3 limit 10;
analyze table srcpart_small10 compute statistics for columns;
set hive.tez.dynamic.semijoin.reduction=false;
-EXPLAIN select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
-select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
+EXPLAIN select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
+select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
set hive.tez.dynamic.semijoin.reduction=true;
set hive.llap.object.cache.enabled=false;
-EXPLAIN select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
-select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
+EXPLAIN select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
+select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
-- HIVE-17936
set hive.tez.dynamic.semijoin.reduction.for.dpp.factor = 0.75;
-EXPLAIN select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
+EXPLAIN select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
-- semijoin branch should be removed.
set hive.tez.dynamic.semijoin.reduction.for.dpp.factor = 0.4;
-EXPLAIN select count(*) from srcpart_small10, srcpart_small, srcpart_date where srcpart_small.key1 = srcpart_small10.key1 and srcpart_date.ds = srcpart_small.ds;
+EXPLAIN select count(*) from srcpart_small10, srcpart_small_n3, srcpart_date_n7 where srcpart_small_n3.key1 = srcpart_small10.key1 and srcpart_date_n7.ds = srcpart_small_n3.ds;
-- With unions
-explain select * from alltypesorc_int join
- (select srcpart_date.key as key from srcpart_date
+explain select * from alltypesorc_int_n1 join
+ (select srcpart_date_n7.key as key from srcpart_date_n7
union all
- select srcpart_small.key1 as key from srcpart_small) unionsrc on (alltypesorc_int.cstring = unionsrc.key);
+ select srcpart_small_n3.key1 as key from srcpart_small_n3) unionsrc on (alltypesorc_int_n1.cstring = unionsrc.key);
-drop table srcpart_date;
-drop table srcpart_small;
-drop table alltypesorc_int;
+drop table srcpart_date_n7;
+drop table srcpart_small_n3;
+drop table alltypesorc_int_n1;
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q
index 044ce5a772..20b657e78f 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_2.q
@@ -12,14 +12,14 @@ set hive.tez.bigtable.minsize.semijoin.reduction=1;
set hive.tez.min.bloom.filter.entries=1;
set hive.tez.dynamic.semijoin.reduction.threshold=-999999999999;
-CREATE TABLE `table_1`(
+CREATE TABLE `table_1_n2`(
`bigint_col_7` bigint,
`decimal2016_col_26` decimal(20,16),
`tinyint_col_3` tinyint,
`decimal2612_col_77` decimal(26,12),
`timestamp_col_9` timestamp);
-CREATE TABLE `table_18`(
+CREATE TABLE `table_18_n2`(
`tinyint_col_15` tinyint,
`decimal2709_col_9` decimal(27,9),
`tinyint_col_20` tinyint,
@@ -31,29 +31,29 @@ CREATE TABLE `table_18`(
EXPLAIN
SELECT COUNT(*)
-FROM table_1 t1
+FROM table_1_n2 t1
-INNER JOIN table_18 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND
+INNER JOIN table_18_n2 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND
((t2.decimal2709_col_9) = (t1.decimal2016_col_26))) AND ((t2.tinyint_col_20) = (t1.tinyint_col_3))
WHERE (t2.smallint_col_19) IN (SELECT COALESCE(-92, -994) AS int_col
-FROM table_1 tt1
-INNER JOIN table_18 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77)
+FROM table_1_n2 tt1
+INNER JOIN table_18_n2 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77)
WHERE (t1.timestamp_col_9) = (tt2.timestamp_col_18));
-drop table table_1;
-drop table table_18;
+drop table table_1_n2;
+drop table table_18_n2;
-- Hive 15699
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n20(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE src2 as select * from src1;
-insert into src2 select * from src2;
-insert into src2 select * from src2;
+CREATE TABLE src2_n7 as select * from src1;
+insert into src2_n7 select * from src2_n7;
+insert into src2_n7 select * from src2_n7;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n20 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n20 partition(ds='2008-04-08');
set hive.strict.checks.bucketing=false;
set hive.join.emit.interval=2;
@@ -61,9 +61,9 @@ set hive.stats.fetch.column.stats=true;
set hive.optimize.bucketingsorting=false;
set hive.stats.autogather=true;
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n12(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n12 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n20;
set hive.convert.join.bucket.mapjoin.tez = true;
set hive.auto.convert.sortmerge.join = true;
@@ -74,20 +74,20 @@ set hive.mapjoin.hybridgrace.minnumpartitions=4;
set hive.llap.memory.oversubscription.max.executors.per.query=3;
-CREATE TABLE tab2 (key int, value string, ds string);
+CREATE TABLE tab2_n6 (key int, value string, ds string);
-set hive.exec.dynamic.partition.mode=nonstrict
-insert into tab2select key, value, ds from tab;
-analyze table tab2 compute statistics;
-analyze table tab2 compute statistics for columns;
+set hive.exec.dynamic.partition.mode=nonstrict;
+insert into tab2_n6 select key, value, ds from tab_n12;
+analyze table tab2_n6 compute statistics;
+analyze table tab2_n6 compute statistics for columns;
explain
select count(*) from
- (select x.key as key, min(x.value) as value from tab2 x group by x.key) a
+ (select x.key as key, min(x.value) as value from tab2_n6 x group by x.key) a
join
- (select x.key as key, min(x.value) as value from tab2 x group by x.key) b
+ (select x.key as key, min(x.value) as value from tab2_n6 x group by x.key) b
on
a.key = b.key join src1 c on a.value = c.value where c.key < 0;
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q
index 67bf7c86a1..a04ab666e0 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_4.q
@@ -16,17 +16,17 @@ set hive.tez.bloom.filter.factor=1.0f;
set hive.disable.unsafe.external.table.operations=true;
-- Create Tables
-create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC;
-CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
+create table srcpart_date_n1 (key string, value string) partitioned by (ds string ) stored as ORC;
+CREATE TABLE srcpart_small_n0(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
create external table srcpart_date_ext (key string, value string) partitioned by (ds string ) stored as ORC;
CREATE external TABLE srcpart_small_ext(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC;
-- Add Partitions
-alter table srcpart_date add partition (ds = "2008-04-08");
-alter table srcpart_date add partition (ds = "2008-04-09");
+alter table srcpart_date_n1 add partition (ds = "2008-04-08");
+alter table srcpart_date_n1 add partition (ds = "2008-04-09");
-alter table srcpart_small add partition (ds = "2008-04-08");
partition (ds = "2008-04-08"); -alter table srcpart_small add partition (ds = "2008-04-09"); +alter table srcpart_small_n0 add partition (ds = "2008-04-08"); +alter table srcpart_small_n0 add partition (ds = "2008-04-09"); alter table srcpart_date_ext add partition (ds = "2008-04-08"); alter table srcpart_date_ext add partition (ds = "2008-04-09"); @@ -35,16 +35,16 @@ alter table srcpart_small_ext add partition (ds = "2008-04-08"); alter table srcpart_small_ext add partition (ds = "2008-04-09"); -- Load -insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; -insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; -insert overwrite table srcpart_small partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; +insert overwrite table srcpart_date_n1 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; +insert overwrite table srcpart_date_n1 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; +insert overwrite table srcpart_small_n0 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; insert overwrite table srcpart_date_ext partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; insert overwrite table srcpart_date_ext partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; insert overwrite table srcpart_small_ext partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; -analyze table srcpart_date compute statistics for columns; -analyze table srcpart_small compute statistics for columns; +analyze table srcpart_date_n1 compute statistics for columns; +analyze table srcpart_small_n0 compute statistics for columns; analyze table srcpart_date_ext compute statistics for columns; analyze table srcpart_small_ext compute statistics for columns; @@ -53,13 +53,13 @@ analyze table srcpart_small_ext compute statistics for columns; -- single column, single key set test.comment=This query should use semijoin reduction optimization; set test.comment; -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); +EXPLAIN select count(*) from srcpart_date_n1 join srcpart_small_n0 on (srcpart_date_n1.key = srcpart_small_n0.key1); set test.comment=Big table is external table - no semijoin reduction opt; set test.comment; -EXPLAIN select count(*) from srcpart_date_ext join srcpart_small on (srcpart_date_ext.key = srcpart_small.key1); +EXPLAIN select count(*) from srcpart_date_ext join srcpart_small_n0 on (srcpart_date_ext.key = srcpart_small_n0.key1); set test.comment=Small table is external table - no semijoin reduction opt; set test.comment; -EXPLAIN select count(*) from srcpart_date join srcpart_small_ext on (srcpart_date.key = srcpart_small_ext.key1); +EXPLAIN select count(*) from srcpart_date_n1 join srcpart_small_ext on (srcpart_date_n1.key = srcpart_small_ext.key1); diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q index 3372cee299..e4ca184362 100644 --- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q +++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw.q @@ -15,48 +15,48 @@ set hive.tez.min.bloom.filter.entries=1; set hive.stats.fetch.column.stats=true; 
-- Create Tables -create table alltypesorc_int ( cint int, cstring string ) stored as ORC; -create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC; -CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds1 string) STORED as ORC; +create table alltypesorc_int_n0 ( cint int, cstring string ) stored as ORC; +create table srcpart_date_n6 (key string, value string) partitioned by (ds string ) stored as ORC; +CREATE TABLE srcpart_small_n2(key1 STRING, value1 STRING) partitioned by (ds1 string) STORED as ORC; -- Add Partitions -alter table srcpart_date add partition (ds = "2008-04-08"); -alter table srcpart_date add partition (ds = "2008-04-09"); +alter table srcpart_date_n6 add partition (ds = "2008-04-08"); +alter table srcpart_date_n6 add partition (ds = "2008-04-09"); -alter table srcpart_small add partition (ds1 = "2008-04-08"); -alter table srcpart_small add partition (ds1 = "2008-04-09"); +alter table srcpart_small_n2 add partition (ds1 = "2008-04-08"); +alter table srcpart_small_n2 add partition (ds1 = "2008-04-09"); -- Load -insert overwrite table alltypesorc_int select cint, cstring1 from alltypesorc; -insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; -insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; -insert overwrite table srcpart_small partition (ds1 = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; +insert overwrite table alltypesorc_int_n0 select cint, cstring1 from alltypesorc; +insert overwrite table srcpart_date_n6 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; +insert overwrite table srcpart_date_n6 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; +insert overwrite table srcpart_small_n2 partition (ds1 = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; set hive.tez.dynamic.semijoin.reduction=false; -analyze table alltypesorc_int compute statistics for columns; -analyze table srcpart_date compute statistics for columns; -analyze table srcpart_small compute statistics for columns; +analyze table alltypesorc_int_n0 compute statistics for columns; +analyze table srcpart_date_n6 compute statistics for columns; +analyze table srcpart_small_n2 compute statistics for columns; set hive.tez.dynamic.semijoin.reduction=true; EXPLAIN SELECT count(*) FROM ( SELECT * - FROM (SELECT * FROM srcpart_date WHERE ds = "2008-04-09") `srcpart_date` - JOIN (SELECT * FROM srcpart_small WHERE ds1 = "2008-04-08") `srcpart_small` - ON (srcpart_date.key = srcpart_small.key1) - JOIN alltypesorc_int - ON (srcpart_small.key1 = alltypesorc_int.cstring)) a + FROM (SELECT * FROM srcpart_date_n6 WHERE ds = "2008-04-09") `srcpart_date_n6` + JOIN (SELECT * FROM srcpart_small_n2 WHERE ds1 = "2008-04-08") `srcpart_small_n2` + ON (srcpart_date_n6.key = srcpart_small_n2.key1) + JOIN alltypesorc_int_n0 + ON (srcpart_small_n2.key1 = alltypesorc_int_n0.cstring)) a JOIN ( SELECT * - FROM (SELECT * FROM srcpart_date WHERE ds = "2008-04-08") `srcpart_date` - JOIN (SELECT * FROM srcpart_small WHERE ds1 = "2008-04-08") `srcpart_small` - ON (srcpart_date.key = srcpart_small.key1) - JOIN alltypesorc_int - ON (srcpart_small.key1 = alltypesorc_int.cstring)) b + FROM (SELECT * FROM srcpart_date_n6 WHERE ds = "2008-04-08") `srcpart_date_n6` + JOIN (SELECT * FROM srcpart_small_n2 WHERE ds1 = 
"2008-04-08") `srcpart_small_n2` + ON (srcpart_date_n6.key = srcpart_small_n2.key1) + JOIN alltypesorc_int_n0 + ON (srcpart_small_n2.key1 = alltypesorc_int_n0.cstring)) b ON ('1' = '1'); -drop table srcpart_date; -drop table srcpart_small; -drop table alltypesorc_int; +drop table srcpart_date_n6; +drop table srcpart_small_n2; +drop table alltypesorc_int_n0; diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q index e9d0109306..5e3a1b84fb 100644 --- a/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q +++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_user_level.q @@ -17,65 +17,65 @@ set hive.stats.fetch.column.stats=true; set hive.llap.memory.oversubscription.max.executors.per.query=0; -- Create Tables -create table alltypesorc_int ( cint int, cstring string ) stored as ORC; -create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC; -CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC; +create table alltypesorc_int_n2 ( cint int, cstring string ) stored as ORC; +create table srcpart_date_n9 (key string, value string) partitioned by (ds string ) stored as ORC; +CREATE TABLE srcpart_small_n4(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC; -- Add Partitions -alter table srcpart_date add partition (ds = "2008-04-08"); -alter table srcpart_date add partition (ds = "2008-04-09"); +alter table srcpart_date_n9 add partition (ds = "2008-04-08"); +alter table srcpart_date_n9 add partition (ds = "2008-04-09"); -alter table srcpart_small add partition (ds = "2008-04-08"); -alter table srcpart_small add partition (ds = "2008-04-09"); +alter table srcpart_small_n4 add partition (ds = "2008-04-08"); +alter table srcpart_small_n4 add partition (ds = "2008-04-09"); -- Load -insert overwrite table alltypesorc_int select cint, cstring1 from alltypesorc; -insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; -insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; -insert overwrite table srcpart_small partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; +insert overwrite table alltypesorc_int_n2 select cint, cstring1 from alltypesorc; +insert overwrite table srcpart_date_n9 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; +insert overwrite table srcpart_date_n9 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; +insert overwrite table srcpart_small_n4 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; set hive.tez.dynamic.semijoin.reduction=false; -analyze table alltypesorc_int compute statistics for columns; -analyze table srcpart_date compute statistics for columns; -analyze table srcpart_small compute statistics for columns; +analyze table alltypesorc_int_n2 compute statistics for columns; +analyze table srcpart_date_n9 compute statistics for columns; +analyze table srcpart_small_n4 compute statistics for columns; -- single column, single key set hive.tez.dynamic.semijoin.reduction=true; -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); +EXPLAIN select 
count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1); set hive.tez.dynamic.semijoin.reduction=true; -- Mix dynamic partition pruning(DPP) and min/max bloom filter optimizations. Should pick the DPP. -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.ds); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.ds); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.ds); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.ds); set hive.tez.dynamic.semijoin.reduction=false; --multiple sources, single key -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_small_n4.key1 = alltypesorc_int_n2.cstring); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_small_n4.key1 = alltypesorc_int_n2.cstring); set hive.tez.dynamic.semijoin.reduction=true; -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_small.key1 = alltypesorc_int.cstring); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_small_n4.key1 = alltypesorc_int_n2.cstring); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_small_n4.key1 = alltypesorc_int_n2.cstring); set hive.tez.dynamic.semijoin.reduction=false; -- single source, multiple keys -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1 and srcpart_date_n9.value = srcpart_small_n4.value1); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1 and srcpart_date_n9.value = srcpart_small_n4.value1); set hive.tez.dynamic.semijoin.reduction=true; -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1 and srcpart_date.value = srcpart_small.value1); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1 and 
srcpart_date_n9.value = srcpart_small_n4.value1); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1 and srcpart_date_n9.value = srcpart_small_n4.value1); set hive.tez.dynamic.semijoin.reduction=false; -- multiple sources, different keys -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring); set hive.tez.dynamic.semijoin.reduction=true; -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring); -- Explain extended to verify fast start for Reducer in semijoin branch -EXPLAIN extended select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); +EXPLAIN extended select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1); set hive.tez.dynamic.semijoin.reduction=false; -- With Mapjoins. 
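-- Editor's note (illustration, not original test input): the hunk below enables
-- hive.auto.convert.join with an effectively unlimited noconditionaltask.size, so the
-- joins become broadcast map joins. Once the small table already ships as a hash
-- table, a semijoin bloom-filter branch built from that same table prunes nothing
-- extra, so the planner may drop it; running the same EXPLAIN with
-- hive.tez.dynamic.semijoin.reduction first false and then true, e.g.
--   EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4
--           on (srcpart_date_n9.key = srcpart_small_n4.key1);
-- makes that decision visible in the captured plans.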
@@ -83,28 +83,28 @@ set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=100000000000; -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1); set hive.tez.dynamic.semijoin.reduction=true; -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1); set hive.tez.dynamic.semijoin.reduction=false; -- multiple sources, different keys -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring); set hive.tez.dynamic.semijoin.reduction=true; -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring); -select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1) join alltypesorc_int on (srcpart_date.value = alltypesorc_int.cstring); +EXPLAIN select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring); +select count(*) from srcpart_date_n9 join srcpart_small_n4 on (srcpart_date_n9.key = srcpart_small_n4.key1) join alltypesorc_int_n2 on (srcpart_date_n9.value = alltypesorc_int_n2.cstring); --set hive.tez.dynamic.semijoin.reduction=false; -- With unions -explain select * from alltypesorc_int join - (select srcpart_date.key as key from srcpart_date +explain select * from alltypesorc_int_n2 join + (select srcpart_date_n9.key as key from srcpart_date_n9 union all - select srcpart_small.key1 as key from srcpart_small) unionsrc on (alltypesorc_int.cstring = unionsrc.key); + select srcpart_small_n4.key1 as key from srcpart_small_n4) unionsrc on (alltypesorc_int_n2.cstring = unionsrc.key); -drop table srcpart_date; -drop table srcpart_small; -drop table alltypesorc_int; +drop table srcpart_date_n9; +drop table srcpart_small_n4; +drop table alltypesorc_int_n2; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q index af9853cb84..f2f36d1b6d 100644 --- 
a/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q @@ -19,9 +19,9 @@ set hive.exec.dynamic.partition.mode=nonstrict; -drop table t1; +drop table t1_n147; -create table t1( +create table t1_n147( a string, b int, c int, @@ -30,18 +30,18 @@ partitioned by (e string) clustered by(a) sorted by(a desc) into 10 buckets stored as textfile; -insert overwrite table t1 partition(e) select a,b,c,d,'epart' from t1_staging; +insert overwrite table t1_n147 partition(e) select a,b,c,d,'epart' from t1_staging; select 'bucket_0'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000000_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000000_0; select 'bucket_2'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000002_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000002_0; select 'bucket_4'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000004_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000004_0; select 'bucket_6'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000006_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000006_0; select 'bucket_8'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000008_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000008_0; set hive.optimize.sort.dynamic.partition=false; set hive.exec.dynamic.partition.mode=nonstrict; @@ -49,9 +49,9 @@ set hive.exec.dynamic.partition.mode=nonstrict; -- disable sorted dynamic partition optimization to make sure the results are correct -drop table t1; +drop table t1_n147; -create table t1( +create table t1_n147( a string, b int, c int, @@ -60,15 +60,15 @@ partitioned by (e string) clustered by(a) sorted by(a desc) into 10 buckets stored as textfile; -insert overwrite table t1 partition(e) select a,b,c,d,'epart' from t1_staging; +insert overwrite table t1_n147 partition(e) select a,b,c,d,'epart' from t1_staging; select 'bucket_0'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000000_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000000_0; select 'bucket_2'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000002_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000002_0; select 'bucket_4'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000004_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000004_0; select 'bucket_6'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000006_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000006_0; select 'bucket_8'; -dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000008_0; +dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1_n147/e=epart/000008_0; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q index 6a90f830ba..435cdaddd0 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q @@ -10,7 +10,7 @@ set hive.vectorized.execution.enabled=true; -create table over1k( +create table over1k_n1( t tinyint, si smallint, i int, @@ -25,11 +25,11 @@ create table over1k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over1k' into table over1k; +load data local inpath 
'../../data/files/over1k' into table over1k_n1; -create table over1k_orc like over1k; +create table over1k_orc like over1k_n1; alter table over1k_orc set fileformat orc; -insert overwrite table over1k_orc select * from over1k; +insert overwrite table over1k_orc select * from over1k_n1; create table over1k_part_orc( si smallint, diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q index 3f35b81a77..436c0edc77 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q @@ -10,7 +10,7 @@ set hive.exec.dynamic.partition.mode=nonstrict; -create table over1k( +create table over1k_n3( t tinyint, si smallint, i int, @@ -25,7 +25,7 @@ create table over1k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over1k' into table over1k; +load data local inpath '../../data/files/over1k' into table over1k_n3; create table over1k_part( si smallint, @@ -54,29 +54,29 @@ create table over1k_part_buck_sort( sorted by (f) into 4 buckets; -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization -explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27; -explain insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10; -explain insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27; -explain insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27; +explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27; +explain insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27 limit 10; +explain insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; +explain insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; -insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27; -insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10; -insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27; -insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27; +insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27; +insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27 limit 10; +insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; +insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization -explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27; -explain insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10; -explain insert into table 
over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27; -explain insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27; +explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27; +explain insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27 limit 10; +explain insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; +explain insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; -insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27; -insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10; -insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27; -insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27; +insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27; +insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k_n3 where t is null or t=27 limit 10; +insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; +insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; desc formatted over1k_part partition(ds="foo",t=27); desc formatted over1k_part partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__"); @@ -101,19 +101,19 @@ create table over1k_part2( partitioned by (ds string, t tinyint); set hive.optimize.sort.dynamic.partition=false; -explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 order by i; set hive.optimize.sort.dynamic.partition=true; -explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; -explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 order by i; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_n3 order by i limit 10) tmp where t is null or t=27; set hive.optimize.sort.dynamic.partition=false; -explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 group by si,i,b,f,t; set hive.optimize.sort.dynamic.partition=true; -- tests for HIVE-8162, only partition column 't' should be in last RS operator -explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 group by si,i,b,f,t; set 
hive.optimize.sort.dynamic.partition=false; -insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; +insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 order by i; desc formatted over1k_part2 partition(ds="foo",t=27); desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__"); @@ -123,7 +123,7 @@ select * from over1k_part2; select count(*) from over1k_part2; set hive.optimize.sort.dynamic.partition=true; -insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; +insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k_n3 where t is null or t=27 order by i; desc formatted over1k_part2 partition(ds="foo",t=27); desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__"); @@ -145,12 +145,12 @@ create table over1k_part_buck_sort2( sorted by (f) into 1 buckets; set hive.optimize.sort.dynamic.partition=false; -explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27; +explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; set hive.optimize.sort.dynamic.partition=true; -explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27; +explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; set hive.optimize.sort.dynamic.partition=false; -insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27; +insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; desc formatted over1k_part_buck_sort2 partition(t=27); desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__"); @@ -159,7 +159,7 @@ select * from over1k_part_buck_sort2; select count(*) from over1k_part_buck_sort2; set hive.optimize.sort.dynamic.partition=true; -insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27; +insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k_n3 where t is null or t=27; desc formatted over1k_part_buck_sort2 partition(t=27); desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__"); @@ -174,21 +174,21 @@ create table over1k_part3( partitioned by (s string, t tinyint, i int); set hive.optimize.sort.dynamic.partition=true; -explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where s="foo"; -explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27; -explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100; -explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27; -explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and s="foo"; -explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27 and s="foo"; -explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27 and s="foo"; - -insert overwrite table over1k_part3 
partition(s,t,i) select si,b,f,s,t,i from over1k where s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27 and s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27 and s="foo"; +explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where s="foo"; +explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27; +explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100; +explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27; +explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and s="foo"; +explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27 and s="foo"; +explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27 and s="foo"; + +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27 and s="foo"; select sum(hash(*)) from over1k_part3; @@ -200,12 +200,12 @@ create table over1k_part3( f float) partitioned by (s string, t tinyint, i int); set hive.optimize.sort.dynamic.partition=false; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27 and s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100; 
-insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27 and s="foo"; -insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100;
+insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where t=27 and s="foo"; +insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k_n3 where i=100 and t=27 and s="foo"; select sum(hash(*)) from over1k_part3; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q index 9a52fb4f71..6dfb51a7da 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q @@ -11,12 +11,12 @@ set hive.exec.dynamic.partition.mode=nonstrict; -- SORT_QUERY_RESULTS -drop table ss; +drop table ss_n0; drop table ss_orc; drop table ss_part; drop table ss_part_orc; -create table ss ( +create table ss_n0 ( ss_sold_date_sk int, ss_net_paid_inc_tax float, ss_net_profit float); @@ -26,13 +26,13 @@ ss_net_paid_inc_tax float, ss_net_profit float) partitioned by (ss_sold_date_sk int); -load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss; +load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss_n0; explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 group by ss_sold_date_sk, ss_net_paid_inc_tax, @@ -43,7 +43,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 group by ss_sold_date_sk, ss_net_paid_inc_tax, @@ -60,7 +60,7 @@ explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 distribute by ss_sold_date_sk; @@ -68,7 +68,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 distribute by ss_sold_date_sk; @@ -85,7 +85,7 @@ explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 group by ss_sold_date_sk, ss_net_paid_inc_tax, @@ -96,7 +96,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 group by ss_sold_date_sk, ss_net_paid_inc_tax, @@ -113,7 +113,7 @@ explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 distribute by ss_sold_date_sk; @@ -121,7 +121,7 @@ insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk - from ss + from ss_n0 where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 distribute by ss_sold_date_sk; @@ -144,9 +144,9 @@ ss_net_paid_inc_tax float, ss_net_profit float) 
partitioned by (ss_sold_date_sk int) stored as orc; -insert overwrite table ss_orc select * from ss; +insert overwrite table ss_orc select * from ss_n0; -drop table ss; +drop table ss_n0; drop table ss_part; explain insert overwrite table ss_part_orc partition (ss_sold_date_sk) diff --git a/ql/src/test/queries/clientpositive/empty_join.q b/ql/src/test/queries/clientpositive/empty_join.q index 088b66a75b..cac2a6a2b2 100644 --- a/ql/src/test/queries/clientpositive/empty_join.q +++ b/ql/src/test/queries/clientpositive/empty_join.q @@ -2,23 +2,23 @@ set hive.auto.convert.join=true; set hive.mapjoin.hybridgrace.hashtable=false; set hive.explain.user=true; -DROP TABLE IF EXISTS test_1; -CREATE TABLE test_1 AS SELECT 1 AS id; +DROP TABLE IF EXISTS test_1_n2; +CREATE TABLE test_1_n2 AS SELECT 1 AS id; -DROP TABLE IF EXISTS test_2; -CREATE TABLE test_2 (id INT); +DROP TABLE IF EXISTS test_2_n2; +CREATE TABLE test_2_n2 (id INT); -DROP TABLE IF EXISTS test_3; -CREATE TABLE test_3 AS SELECT 1 AS id; +DROP TABLE IF EXISTS test_3_n0; +CREATE TABLE test_3_n0 AS SELECT 1 AS id; explain SELECT t1.id, t2.id, t3.id -FROM test_1 t1 -LEFT JOIN test_2 t2 ON t1.id = t2.id -INNER JOIN test_3 t3 ON t1.id = t3.id; +FROM test_1_n2 t1 +LEFT JOIN test_2_n2 t2 ON t1.id = t2.id +INNER JOIN test_3_n0 t3 ON t1.id = t3.id; SELECT t1.id, t2.id, t3.id -FROM test_1 t1 -LEFT JOIN test_2 t2 ON t1.id = t2.id -INNER JOIN test_3 t3 ON t1.id = t3.id +FROM test_1_n2 t1 +LEFT JOIN test_2_n2 t2 ON t1.id = t2.id +INNER JOIN test_3_n0 t3 ON t1.id = t3.id ; diff --git a/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q b/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q index b095557cf3..14c7f7eb96 100644 --- a/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q +++ b/ql/src/test/queries/clientpositive/encryption_auto_purge_tables.q @@ -3,37 +3,37 @@ -- we're setting this so that TestNegativeCliDriver.vm doesn't stop processing after DROP TABLE fails; -DROP TABLE IF EXISTS encrypted_table PURGE; -DROP TABLE IF EXISTS encrypted_ext_table PURGE; +DROP TABLE IF EXISTS encrypted_table_n5 PURGE; +DROP TABLE IF EXISTS encrypted_ext_table_n0 PURGE; -CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; +CREATE TABLE encrypted_table_n5 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; CRYPTO CREATE_KEY --keyName key_128 --bitLength 128; CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table; SHOW TABLES LIKE "encrypted_*"; -ALTER TABLE encrypted_table SET TBLPROPERTIES("auto.purge"="true"); +ALTER TABLE encrypted_table_n5 SET TBLPROPERTIES("auto.purge"="true"); -INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src; -SELECT COUNT(*) from encrypted_table; +INSERT OVERWRITE TABLE encrypted_table_n5 SELECT * FROM src; +SELECT COUNT(*) from encrypted_table_n5; -TRUNCATE TABLE encrypted_table; -SELECT COUNT(*) FROM encrypted_table; +TRUNCATE TABLE encrypted_table_n5; +SELECT COUNT(*) FROM encrypted_table_n5; -INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src; -SELECT COUNT(*) FROM encrypted_table; +INSERT OVERWRITE TABLE encrypted_table_n5 SELECT * FROM src; +SELECT COUNT(*) FROM encrypted_table_n5; -CREATE EXTERNAL TABLE encrypted_ext_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; -ALTER TABLE encrypted_ext_table SET TBLPROPERTIES("auto.purge"="true");
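-- Editor's note: in this file the CRYPTO commands turn the table directory into an
-- HDFS encryption zone, and "auto.purge"="true" makes TRUNCATE / DROP / INSERT
-- OVERWRITE delete data directly instead of moving it to the HDFS trash; that matters
-- here because renames out of an encryption zone (e.g. into an unencrypted .Trash)
-- can fail. The recurring setup pattern, sketched with a placeholder key and table
-- name, is:
--   CRYPTO CREATE_KEY --keyName some_key --bitLength 128;
--   CRYPTO CREATE_ZONE --keyName some_key --path ${hiveconf:hive.metastore.warehouse.dir}/default/some_table;
--   ALTER TABLE some_table SET TBLPROPERTIES("auto.purge"="true");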
+CREATE EXTERNAL TABLE encrypted_ext_table_n0 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; +ALTER TABLE encrypted_ext_table_n0 SET TBLPROPERTIES("auto.purge"="true"); -INSERT OVERWRITE TABLE encrypted_ext_table SELECT * FROM src; -SELECT COUNT(*) from encrypted_ext_table; +INSERT OVERWRITE TABLE encrypted_ext_table_n0 SELECT * FROM src; +SELECT COUNT(*) from encrypted_ext_table_n0; -DROP TABLE encrypted_table; -DROP TABLE encrypted_ext_table; +DROP TABLE encrypted_table_n5; +DROP TABLE encrypted_ext_table_n0; SHOW TABLES LIKE "encrypted_*"; -- cleanup -DROP TABLE IF EXISTS encrypted_table PURGE; -DROP TABLE IF EXISTS encrypted_ext_table PURGE; +DROP TABLE IF EXISTS encrypted_table_n5 PURGE; +DROP TABLE IF EXISTS encrypted_ext_table_n0 PURGE; CRYPTO DELETE_KEY --keyName key_128; diff --git a/ql/src/test/queries/clientpositive/encryption_drop_table.q b/ql/src/test/queries/clientpositive/encryption_drop_table.q index 2212e7d857..884e510a92 100644 --- a/ql/src/test/queries/clientpositive/encryption_drop_table.q +++ b/ql/src/test/queries/clientpositive/encryption_drop_table.q @@ -5,14 +5,14 @@ set hive.cli.errors.ignore=true; -DROP TABLE IF EXISTS encrypted_table; +DROP TABLE IF EXISTS encrypted_table_n2; DROP TABLE IF EXISTS encrypted_ext_table; -CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; +CREATE TABLE encrypted_table_n2 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; CRYPTO CREATE_KEY --keyName key_128 --bitLength 128; CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table; -INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src; +INSERT OVERWRITE TABLE encrypted_table_n2 SELECT * FROM src; CREATE EXTERNAL TABLE encrypted_ext_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; SHOW TABLES LIKE "encrypted_*"; @@ -20,7 +20,7 @@ SHOW TABLES LIKE "encrypted_*"; DROP TABLE default.encrypted_ext_table; SHOW TABLES LIKE "encrypted_*"; -DROP TABLE default.encrypted_table; +DROP TABLE default.encrypted_table_n2; SHOW TABLES LIKE "encrypted_*"; DROP TABLE IF EXISTS encrypted_table1; diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q index 6cfb3826ef..b5ea78bdbd 100644 --- a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q +++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q @@ -8,40 +8,40 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -- SORT_QUERY_RESULTS -- init -drop table IF EXISTS encryptedTable PURGE; -drop table IF EXISTS unencryptedTable PURGE; +drop table IF EXISTS encryptedTable_n0 PURGE; +drop table IF EXISTS unencryptedTable_n0 PURGE; -create table encryptedTable(value string) +create table encryptedTable_n0(value string) partitioned by (key string) clustered by (value) into 2 buckets stored as orc LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encryptedTable' TBLPROPERTIES ('transactional'='true'); CRYPTO CREATE_KEY --keyName key_1 --bitLength 128; CRYPTO CREATE_ZONE --keyName key_1 --path ${hiveconf:hive.metastore.warehouse.dir}/encryptedTable; -create table unencryptedTable(value string) +create table unencryptedTable_n0(value string) partitioned by (key string) clustered by (value) into 2 buckets 
stored as orc TBLPROPERTIES ('transactional'='true'); -- insert encrypted table from values -insert into table encryptedTable partition (key) values +insert into table encryptedTable_n0 partition (key) values ('val_501', '501'), ('val_502', '502'); -select * from encryptedTable order by key; +select * from encryptedTable_n0 order by key; -- insert encrypted table from unencrypted source from src -insert into table encryptedTable partition (key) +insert into table encryptedTable_n0 partition (key) select value, key limit 2; -select * from encryptedTable order by key; +select * from encryptedTable_n0 order by key; -- insert unencrypted table from encrypted source -from encryptedTable -insert into table unencryptedTable partition (key) +from encryptedTable_n0 +insert into table unencryptedTable_n0 partition (key) select value, key; -select * from unencryptedTable order by key; +select * from unencryptedTable_n0 order by key; -- clean up -drop table encryptedTable PURGE; +drop table encryptedTable_n0 PURGE; CRYPTO DELETE_KEY --keyName key_1; -drop table unencryptedTable PURGE; +drop table unencryptedTable_n0 PURGE; diff --git a/ql/src/test/queries/clientpositive/encryption_insert_values.q b/ql/src/test/queries/clientpositive/encryption_insert_values.q index a712f35f3c..242515e705 100644 --- a/ql/src/test/queries/clientpositive/encryption_insert_values.q +++ b/ql/src/test/queries/clientpositive/encryption_insert_values.q @@ -1,13 +1,13 @@ -- SORT_QUERY_RESULTS; set hive.stats.column.autogather=false; -DROP TABLE IF EXISTS encrypted_table PURGE; -CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; +DROP TABLE IF EXISTS encrypted_table_n3 PURGE; +CREATE TABLE encrypted_table_n3 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; CRYPTO CREATE_KEY --keyName key_128 --bitLength 128; CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table; -INSERT INTO encrypted_table values(1,'foo'),(2,'bar'); +INSERT INTO encrypted_table_n3 values(1,'foo'),(2,'bar'); -select * from encrypted_table; +select * from encrypted_table_n3; CRYPTO DELETE_KEY --keyName key_128; diff --git a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q index a96807e2f0..834bfbcc2b 100644 --- a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q +++ b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q @@ -1,20 +1,20 @@ -DROP TABLE IF EXISTS encrypted_table PURGE; +DROP TABLE IF EXISTS encrypted_table_n0 PURGE; -CREATE TABLE encrypted_table (key STRING, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table'; +CREATE TABLE encrypted_table_n0 (key STRING, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table'; -- Create encryption key and zone; crypto create_key --keyName key1; crypto create_zone --keyName key1 --path ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table; -- Test loading data from the local filesystem; -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted_table; -SELECT * FROM encrypted_table; +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted_table_n0; +SELECT * FROM encrypted_table_n0; -- Test loading data from the hdfs filesystem; dfs -copyFromLocal 
../../data/files/kv1.txt hdfs:///tmp/kv1.txt;
-LOAD DATA INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE encrypted_table;
-SELECT * FROM encrypted_table;
+LOAD DATA INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE encrypted_table_n0;
+SELECT * FROM encrypted_table_n0;
-DROP TABLE encrypted_table PURGE;
+DROP TABLE encrypted_table_n0 PURGE;
 crypto delete_key --keyName key1;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/encryption_move_tbl.q b/ql/src/test/queries/clientpositive/encryption_move_tbl.q
index ff1d57ccb0..b63607641a 100644
--- a/ql/src/test/queries/clientpositive/encryption_move_tbl.q
+++ b/ql/src/test/queries/clientpositive/encryption_move_tbl.q
@@ -6,11 +6,11 @@ set hive.stats.column.autogather=false;
 set hive.cli.errors.ignore=true;
-DROP TABLE IF EXISTS encrypted_table PURGE;
+DROP TABLE IF EXISTS encrypted_table_n1 PURGE;
 DROP DATABASE IF EXISTS encrypted_db;
--- create table default.encrypted_table in its default warehouse location ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table
-CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table';
+-- create table default.encrypted_table_n1 in its default warehouse location ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table
+CREATE TABLE encrypted_table_n1 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table';
 CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table;
@@ -19,20 +19,20 @@ CREATE DATABASE encrypted_db LOCATION '${hiveconf:hive.metastore.warehouse.dir}/
 CRYPTO CREATE_KEY --keyName key_128_2 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128_2 --path ${hiveconf:hive.metastore.warehouse.dir}/encrypted_db.db;
-INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src;
+INSERT OVERWRITE TABLE encrypted_table_n1 SELECT * FROM src;
 SHOW TABLES LIKE "encrypted_*";
-ANALYZE TABLE encrypted_table COMPUTE STATISTICS FOR COLUMNS;
-DESCRIBE FORMATTED encrypted_table key;
-DESCRIBE FORMATTED encrypted_table value;
+ANALYZE TABLE encrypted_table_n1 COMPUTE STATISTICS FOR COLUMNS;
+DESCRIBE FORMATTED encrypted_table_n1 key;
+DESCRIBE FORMATTED encrypted_table_n1 value;
 -- should fail, since they are in different encryption zones, but table columns statistics should not change
-ALTER TABLE default.encrypted_table RENAME TO encrypted_db.encrypted_table_2;
+ALTER TABLE default.encrypted_table_n1 RENAME TO encrypted_db.encrypted_table_2;
 SHOW TABLES;
-DESCRIBE FORMATTED encrypted_table key;
-DESCRIBE FORMATTED encrypted_table value;
+DESCRIBE FORMATTED encrypted_table_n1 key;
+DESCRIBE FORMATTED encrypted_table_n1 value;
 -- should succeed in Hadoop 2.7 but fail in 2.6 (HDFS-7530)
-ALTER TABLE default.encrypted_table RENAME TO default.plain_table;
+ALTER TABLE default.encrypted_table_n1 RENAME TO default.plain_table;
 SHOW TABLES;
 -- create table encrypted_table_outloc under default database but in a specified location other than the default db location in the warehouse
@@ -49,20 +49,20 @@ CRYPTO CREATE_KEY --keyName key_128_4 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128_4 --path ${hiveconf:hive.metastore.warehouse.dir}/../specified_db_location;
 USE encrypted_db_outloc;
-CREATE TABLE encrypted_table (key INT, value STRING);
-INSERT OVERWRITE TABLE encrypted_table SELECT * FROM default.src;
-ALTER TABLE encrypted_table RENAME TO renamed_encrypted_table;
+CREATE TABLE encrypted_table_n1 (key INT, value STRING);
+INSERT OVERWRITE TABLE encrypted_table_n1 SELECT * FROM default.src;
+ALTER TABLE encrypted_table_n1 RENAME TO renamed_encrypted_table_n1;
 -- should succeed since data moves within specified_db_location
 SHOW TABLES;
 -- should fail, since they are in different encryption zones
-ALTER TABLE encrypted_db_outloc.renamed_encrypted_table RENAME TO default.plain_table_2;
+ALTER TABLE encrypted_db_outloc.renamed_encrypted_table_n1 RENAME TO default.plain_table_2;
 SHOW TABLES;
-DROP TABLE default.encrypted_table PURGE;
+DROP TABLE default.encrypted_table_n1 PURGE;
 DROP TABLE default.plain_table PURGE;
 DROP TABLE default.renamed_encrypted_table_outloc PURGE;
 DROP DATABASE encrypted_db;
-DROP TABLE encrypted_db_outloc.renamed_encrypted_table PURGE;
+DROP TABLE encrypted_db_outloc.renamed_encrypted_table_n1 PURGE;
 DROP DATABASE encrypted_db_outloc;
 CRYPTO DELETE_KEY --keyName key_128;
 CRYPTO DELETE_KEY --keyName key_128_2;
diff --git a/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q b/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
index c9ac035c49..94052a5c3a 100644
--- a/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
+++ b/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
@@ -1,16 +1,16 @@
 -- SORT_QUERY_RESULTS
-DROP TABLE IF EXISTS encrypted_table PURGE;
-CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+DROP TABLE IF EXISTS encrypted_table_n4 PURGE;
+CREATE TABLE encrypted_table_n4 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
 CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE encrypted_table;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE encrypted_table_n4;
 dfs -chmod -R 555 ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
-SELECT count(*) FROM encrypted_table;
+SELECT count(*) FROM encrypted_table_n4;
-drop table encrypted_table PURGE;
+drop table encrypted_table_n4 PURGE;
 CRYPTO DELETE_KEY --keyName key_128;
diff --git a/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q b/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
index d29f213dce..c6eba88ac8 100644
--- a/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
+++ b/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
@@ -1,109 +1,109 @@
 --! qt:dataset:src
 -- SIMPLE TABLE
 -- create table with first and last column with not null
-CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED);
+CREATE TABLE table1_n7 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL ENFORCED);
 -- insert value tuples
-explain INSERT INTO table1 values('not', 'null', 'constraint');
-INSERT INTO table1 values('not', 'null', 'constraint');
-SELECT * FROM table1;
+explain INSERT INTO table1_n7 values('not', 'null', 'constraint');
+INSERT INTO table1_n7 values('not', 'null', 'constraint');
+SELECT * FROM table1_n7;
 -- insert with column specified
-explain insert into table1(a,c) values('1','2');
-insert into table1(a,c) values('1','2');
+explain insert into table1_n7(a,c) values('1','2');
+insert into table1_n7(a,c) values('1','2');
 -- insert from select
-explain INSERT INTO table1 select key, src.value, value from src;
-INSERT INTO table1 select key, src.value, value from src;
-SELECT * FROM table1;
+explain INSERT INTO table1_n7 select key, src.value, value from src;
+INSERT INTO table1_n7 select key, src.value, value from src;
+SELECT * FROM table1_n7;
 -- insert overwrite
-explain INSERT OVERWRITE TABLE table1 select src.*, value from src;
-INSERT OVERWRITE TABLE table1 select src.*, value from src;
-SELECT * FROM table1;
+explain INSERT OVERWRITE TABLE table1_n7 select src.*, value from src;
+INSERT OVERWRITE TABLE table1_n7 select src.*, value from src;
+SELECT * FROM table1_n7;
 -- insert overwrite with if not exists
-explain INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src;
-INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value from src;
-SELECT * FROM table1;
+explain INSERT OVERWRITE TABLE table1_n7 if not exists select src.key, src.key, src.value from src;
+INSERT OVERWRITE TABLE table1_n7 if not exists select src.key, src.key, src.value from src;
+SELECT * FROM table1_n7;
-DROP TABLE table1;
+DROP TABLE table1_n7;
 -- multi insert
-create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING);
-create table src_multi2 (i STRING, j STRING NOT NULL ENABLE);
+create table src_multi1_n0 (a STRING NOT NULL ENFORCED, b STRING);
+create table src_multi2_n1 (i STRING, j STRING NOT NULL ENABLE);
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n0 select * where key < 10
+insert overwrite table src_multi2_n1 select * where key > 10 and key < 20;
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n0 select * where key < 10
+insert overwrite table src_multi2_n1 select * where key > 10 and key < 20;
 explain
 from src
-insert into table src_multi1 select * where src.key < 10
-insert into table src_multi2 select src.* where key > 10 and key < 20;
+insert into table src_multi1_n0 select * where src.key < 10
+insert into table src_multi2_n1 select src.* where key > 10 and key < 20;
 from src
-insert into table src_multi1 select * where src.key < 10
-insert into table src_multi2 select src.* where key > 10 and key < 20;
+insert into table src_multi1_n0 select * where src.key < 10
+insert into table src_multi2_n1 select src.* where key > 10 and key < 20;
 -- ACID TABLE
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 -- SORT_QUERY_RESULTS
-create table acid_uami(i int,
+create table acid_uami_n1(i int,
 de decimal(5,2) constraint nn1 not null enforced,
 vc varchar(128) constraint nn2 not null enforced) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 -- insert into values
-explain insert into table acid_uami values
+explain insert into table acid_uami_n1 values
 (1, 109.23, 'mary had a little lamb'),
 (6553, 923.19, 'its fleece was white as snow');
-insert into table acid_uami values
+insert into table acid_uami_n1 values
 (1, 109.23, 'mary had a little lamb'),
 (6553, 923.19, 'its fleece was white as snow');
-select * from acid_uami;
+select * from acid_uami_n1;
 --insert into select
-explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src;
-insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src;
+explain insert into table acid_uami_n1 select cast(key as int), cast (key as decimal(5,2)), value from src;
+insert into table acid_uami_n1 select cast(key as int), cast (key as decimal(5,2)), value from src;
 -- select with limit
-explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src limit 2;
+explain insert into table acid_uami_n1 select cast(key as int), cast (key as decimal(5,2)), value from src limit 2;
 -- select with order by
-explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src
+explain insert into table acid_uami_n1 select cast(key as int), cast (key as decimal(5,2)), value from src
 order by key limit 2;
 -- select with group by
-explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src
+explain insert into table acid_uami_n1 select cast(key as int), cast (key as decimal(5,2)), value from src
 group by key, value order by key limit 2;
 --overwrite
-explain insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src;
-insert into table acid_uami select cast(key as int), cast (key as decimal(5,2)), value from src;
+explain insert into table acid_uami_n1 select cast(key as int), cast (key as decimal(5,2)), value from src;
+insert into table acid_uami_n1 select cast(key as int), cast (key as decimal(5,2)), value from src;
 -- update
-explain update acid_uami set de = 3.14 where de = 109.23 or de = 119.23;
-update acid_uami set de = 3.14 where de = 109.23 or de = 119.23;
+explain update acid_uami_n1 set de = 3.14 where de = 109.23 or de = 119.23;
+update acid_uami_n1 set de = 3.14 where de = 109.23 or de = 119.23;
-ALTER table acid_uami drop constraint nn1;
-ALTER table acid_uami CHANGE i i int constraint nn0 not null enforced;
+ALTER table acid_uami_n1 drop constraint nn1;
+ALTER table acid_uami_n1 CHANGE i i int constraint nn0 not null enforced;
-explain update acid_uami set de = 3.14159 where de = 3.14 ;
-update acid_uami set de = 3.14159 where de = 3.14 ;
+explain update acid_uami_n1 set de = 3.14159 where de = 3.14 ;
+update acid_uami_n1 set de = 3.14159 where de = 3.14 ;
 -- multi insert
 explain
 from src
-insert overwrite table acid_uami select cast(key as int), cast(key as decimal(5,2)), value where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table acid_uami_n1 select cast(key as int), cast(key as decimal(5,2)), value where key < 10
+insert overwrite table src_multi2_n1 select * where key > 10 and key < 20;
 set hive.exec.dynamic.partition.mode=nonstrict;
 -- Table with partition
@@ -124,58 +124,58 @@ select * from tablePartitioned;
 explain
 from src
 INSERT INTO tablePartitioned partition(p1, p2) select key, value, value, 'yesterday' as p1, 3 as p2
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi2_n1 select * where key > 10 and key < 20;
-DROP TABLE src_multi1;
-DROP TABLE src_multi2;
-DROP TABLE acid_uami;
+DROP TABLE src_multi1_n0;
+DROP TABLE src_multi2_n1;
+DROP TABLE acid_uami_n1;
 -- MERGE statements
 set hive.mapred.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-create table nonacid (key int, a1 string, value string) stored as orc;
+create table nonacid_n2 (key int, a1 string, value string) stored as orc;
-create table masking_test (key int NOT NULL enable, a1 string, value string)
+create table masking_test_n4 (key int NOT NULL enable, a1 string, value string)
 clustered by (value) into 2 buckets stored as orc
 tblproperties ("transactional"="true");
 -- with cardinality check off
 set hive.merge.cardinality.check=false;
-explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+explain MERGE INTO masking_test_n4 as t using nonacid_n2 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 5 THEN DELETE
 WHEN MATCHED AND s.key < 3 THEN UPDATE set a1 = '1'
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
 -- with cardinality check on
 set hive.merge.cardinality.check=true;
-explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+explain MERGE INTO masking_test_n4 as t using nonacid_n2 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 5 THEN DELETE
 WHEN MATCHED AND s.key < 3 THEN UPDATE set a1 = '1'
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
-explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+explain MERGE INTO masking_test_n4 as t using nonacid_n2 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 5 THEN DELETE
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
-explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+explain MERGE INTO masking_test_n4 as t using nonacid_n2 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 3 THEN UPDATE set a1 = '1'
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, s.value);
 -- shouldn't have constraint enforcement
-explain MERGE INTO masking_test as t using nonacid as s ON t.key = s.key
+explain MERGE INTO masking_test_n4 as t using nonacid_n2 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 5 THEN DELETE;
-DROP TABLE masking_test;
-DROP TABLE nonacid;
+DROP TABLE masking_test_n4;
+DROP TABLE nonacid_n2;
 -- Test drop constraint
-create table table2(i int constraint nn5 not null enforced, j int);
-explain insert into table2 values(2, 3);
-alter table table2 drop constraint nn5;
-explain insert into table2 values(2, 3);
-DROP TABLE table2;
+create table table2_n3(i int constraint nn5 not null enforced, j int);
+explain insert into table2_n3 values(2, 3);
+alter table table2_n3 drop constraint nn5;
+explain insert into table2_n3 values(2, 3);
+DROP TABLE table2_n3;
 -- temporary table
 create temporary table tttemp(i int not null enforced);
@@ -188,10 +188,10 @@ set hive.create.as.insert.only=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-create table part_mm(key int not null enforced) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
-explain insert into table part_mm partition(key_mm=455) select key from src order by value limit 3;
-insert into table part_mm partition(key_mm=455) select key from src order by value limit 3;
+create table part_mm_n1(key int not null enforced) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+explain insert into table part_mm_n1 partition(key_mm=455) select key from src order by value limit 3;
+insert into table part_mm_n1 partition(key_mm=455) select key from src order by value limit 3;
 select key from src order by value limit 3;
-select * from part_mm;
-drop table part_mm;
+select * from part_mm_n1;
+drop table part_mm_n1;
diff --git a/ql/src/test/queries/clientpositive/escape1.q b/ql/src/test/queries/clientpositive/escape1.q
index bda2088071..604e9d92ac 100644
--- a/ql/src/test/queries/clientpositive/escape1.q
+++ b/ql/src/test/queries/clientpositive/escape1.q
@@ -5,13 +5,13 @@ set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions.pernode=200;
 DROP TABLE escape1;
-DROP TABLE escape_raw;
+DROP TABLE escape_raw_n0;
-CREATE TABLE escape_raw (s STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/escapetest.txt' INTO TABLE escape_raw;
+CREATE TABLE escape_raw_n0 (s STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/escapetest.txt' INTO TABLE escape_raw_n0;
-SELECT count(*) from escape_raw;
-SELECT * from escape_raw;
+SELECT count(*) from escape_raw_n0;
+SELECT * from escape_raw_n0;
 CREATE TABLE escape1 (a STRING) PARTITIONED BY (ds STRING, part STRING);
 INSERT OVERWRITE TABLE escape1 PARTITION (ds='1', part) SELECT '1', s from
@@ -25,4 +25,4 @@ ALTER TABLE escape1 DROP PARTITION (ds='1');
 SHOW PARTITIONS escape1;
 DROP TABLE escape1;
-DROP TABLE escape_raw;
+DROP TABLE escape_raw_n0;
diff --git a/ql/src/test/queries/clientpositive/escape_crlf.q b/ql/src/test/queries/clientpositive/escape_crlf.q
index 46c3605100..0b7b77cd2f 100644
--- a/ql/src/test/queries/clientpositive/escape_crlf.q
+++ b/ql/src/test/queries/clientpositive/escape_crlf.q
@@ -2,22 +2,22 @@ set hive.vectorized.execution.enabled=false;
 set hive.test.vectorized.execution.enabled.override=disable;
-DROP TABLE IF EXISTS base_tab;
-CREATE TABLE base_tab(a STRING, b STRING)
+DROP TABLE IF EXISTS base_tab_n0;
+CREATE TABLE base_tab_n0(a STRING, b STRING)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|';
-DESCRIBE EXTENDED base_tab;
+DESCRIBE EXTENDED base_tab_n0;
-LOAD DATA LOCAL INPATH '../../data/files/escape_crlf.txt' OVERWRITE INTO TABLE base_tab;
+LOAD DATA LOCAL INPATH '../../data/files/escape_crlf.txt' OVERWRITE INTO TABLE base_tab_n0;
 -- No crlf escaping
-SELECT * FROM base_tab;
+SELECT * FROM base_tab_n0;
 -- Crlf escaping
-ALTER TABLE base_tab SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true');
-SELECT * FROM base_tab;
+ALTER TABLE base_tab_n0 SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true');
+SELECT * FROM base_tab_n0;
 SET hive.fetch.task.conversion=none;
 -- Make sure intermediate serde works correctly
-SELECT * FROM base_tab;
+SELECT * FROM base_tab_n0;
-DROP TABLE base_tab;
+DROP TABLE base_tab_n0;
diff --git a/ql/src/test/queries/clientpositive/except_all.q b/ql/src/test/queries/clientpositive/except_all.q
index 13bb7fb315..a87c524d8b 100644
--- a/ql/src/test/queries/clientpositive/except_all.q
+++ b/ql/src/test/queries/clientpositive/except_all.q
@@ -2,49 +2,49 @@ set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
-create table a(key int);
+create table a_n15(key int);
-insert into table a values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL);
+insert into table a_n15 values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL);
-create table b(key bigint);
+create table b_n11(key bigint);
-insert into table b values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL);
+insert into table b_n11 values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL);
-select * from a except all select * from b;
+select * from a_n15 except all select * from b_n11;
-drop table a;
+drop table a_n15;
-drop table b;
+drop table b_n11;
-create table a(key int, value int);
+create table a_n15(key int, value int);
-insert into table a values (1,2),(1,2),(1,3),(2,3),(2,2);
+insert into table a_n15 values (1,2),(1,2),(1,3),(2,3),(2,2);
-create table b(key int, value int);
+create table b_n11(key int, value int);
-insert into table b values (1,2),(2,3),(2,2),(2,2),(2,20);
+insert into table b_n11 values (1,2),(2,3),(2,2),(2,2),(2,20);
-select * from a except all select * from b;
+select * from a_n15 except all select * from b_n11;
-select * from b except all select * from a;
+select * from b_n11 except all select * from a_n15;
-select * from b except all select * from a intersect distinct select * from b;
+select * from b_n11 except all select * from a_n15 intersect distinct select * from b_n11;
-select * from b except all select * from a except distinct select * from b;
+select * from b_n11 except all select * from a_n15 except distinct select * from b_n11;
-select * from a except all select * from b union all select * from a except distinct select * from b;
+select * from a_n15 except all select * from b_n11 union all select * from a_n15 except distinct select * from b_n11;
-select * from a except all select * from b union select * from a except distinct select * from b;
+select * from a_n15 except all select * from b_n11 union select * from a_n15 except distinct select * from b_n11;
-select * from a except all select * from b except distinct select * from a except distinct select * from b;
+select * from a_n15 except all select * from b_n11 except distinct select * from a_n15 except distinct select * from b_n11;
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n15.key, b_n11.value from a_n15 join b_n11 on a_n15.key=b_n11.key)sub1
 except all
-select * from (select a.key, b.value from a join b on a.key=b.key)sub2;
+select * from (select a_n15.key, b_n11.value from a_n15 join b_n11 on a_n15.key=b_n11.key)sub2;
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n15.key, b_n11.value from a_n15 join b_n11 on a_n15.key=b_n11.key)sub1
 except all
-select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2;
+select * from (select b_n11.value as key, a_n15.key as value from a_n15 join b_n11 on a_n15.key=b_n11.key)sub2;
 explain select * from src except all select * from src;
@@ -54,6 +54,6 @@ explain select * from src except all select * from src except distinct select *
 select * from src except all select * from src except distinct select * from src except distinct select * from src;
-explain select value from a group by value except distinct select key from b group by key;
+explain select value from a_n15 group by value except distinct select key from b_n11 group by key;
-select value from a group by value except distinct select key from b group by key;
+select value from a_n15 group by value except distinct select key from b_n11 group by key;
diff --git a/ql/src/test/queries/clientpositive/except_distinct.q b/ql/src/test/queries/clientpositive/except_distinct.q
index 6fb0071462..a4a7af04dc 100644
--- a/ql/src/test/queries/clientpositive/except_distinct.q
+++ b/ql/src/test/queries/clientpositive/except_distinct.q
@@ -2,49 +2,49 @@ set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
-create table a(key int);
+create table a_n16(key int);
-insert into table a values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL);
+insert into table a_n16 values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL);
-create table b(key bigint);
+create table b_n12(key bigint);
-insert into table b values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL);
+insert into table b_n12 values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL);
-select * from a except distinct select * from b;
+select * from a_n16 except distinct select * from b_n12;
-drop table a;
+drop table a_n16;
-drop table b;
+drop table b_n12;
-create table a(key int, value int);
+create table a_n16(key int, value int);
-insert into table a values (1,2),(1,2),(1,3),(2,3),(2,2);
+insert into table a_n16 values (1,2),(1,2),(1,3),(2,3),(2,2);
-create table b(key int, value int);
+create table b_n12(key int, value int);
-insert into table b values (1,2),(2,3),(2,2),(2,2),(2,20);
+insert into table b_n12 values (1,2),(2,3),(2,2),(2,2),(2,20);
-select * from a except distinct select * from b;
+select * from a_n16 except distinct select * from b_n12;
-select * from b except distinct select * from a;
+select * from b_n12 except distinct select * from a_n16;
-select * from b except distinct select * from a intersect distinct select * from b;
+select * from b_n12 except distinct select * from a_n16 intersect distinct select * from b_n12;
-select * from b except distinct select * from a except distinct select * from b;
+select * from b_n12 except distinct select * from a_n16 except distinct select * from b_n12;
-select * from a except distinct select * from b union all select * from a except distinct select * from b;
+select * from a_n16 except distinct select * from b_n12 union all select * from a_n16 except distinct select * from b_n12;
-select * from a except distinct select * from b union select * from a except distinct select * from b;
+select * from a_n16 except distinct select * from b_n12 union select * from a_n16 except distinct select * from b_n12;
-select * from a except distinct select * from b except distinct select * from a except distinct select * from b;
+select * from a_n16 except distinct select * from b_n12 except distinct select * from a_n16 except distinct select * from b_n12;
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n16.key, b_n12.value from a_n16 join b_n12 on a_n16.key=b_n12.key)sub1
 except distinct
-select * from (select a.key, b.value from a join b on a.key=b.key)sub2;
+select * from (select a_n16.key, b_n12.value from a_n16 join b_n12 on a_n16.key=b_n12.key)sub2;
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n16.key, b_n12.value from a_n16 join b_n12 on a_n16.key=b_n12.key)sub1
 except distinct
-select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2;
+select * from (select b_n12.value as key, a_n16.key as value from a_n16 join b_n12 on a_n16.key=b_n12.key)sub2;
 explain select * from src except distinct select * from src;
@@ -54,6 +54,6 @@ explain select * from src except distinct select * from src except distinct sele
 select * from src except distinct select * from src except distinct select * from src;
-explain select value from a group by value except distinct select key from b group by key;
+explain select value from a_n16 group by value except distinct select key from b_n12 group by key;
-select value from a group by value except distinct select key from b group by key;
+select value from a_n16 group by value except distinct select key from b_n12 group by key;
diff --git a/ql/src/test/queries/clientpositive/exchange_partition3.q b/ql/src/test/queries/clientpositive/exchange_partition3.q
index 7c076cebe8..26af541a08 100644
--- a/ql/src/test/queries/clientpositive/exchange_partition3.q
+++ b/ql/src/test/queries/clientpositive/exchange_partition3.q
@@ -1,15 +1,15 @@
-CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
-CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
-SHOW PARTITIONS exchange_part_test1;
-SHOW PARTITIONS exchange_part_test2;
+CREATE TABLE exchange_part_test1_n0 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
+CREATE TABLE exchange_part_test2_n0 (f1 string) PARTITIONED BY (ds STRING, hr STRING);
+SHOW PARTITIONS exchange_part_test1_n0;
+SHOW PARTITIONS exchange_part_test2_n0;
-ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2014-01-03', hr='1');
-ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='1');
-ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='2');
-SHOW PARTITIONS exchange_part_test1;
-SHOW PARTITIONS exchange_part_test2;
+ALTER TABLE exchange_part_test1_n0 ADD PARTITION (ds='2014-01-03', hr='1');
+ALTER TABLE exchange_part_test2_n0 ADD PARTITION (ds='2013-04-05', hr='1');
+ALTER TABLE exchange_part_test2_n0 ADD PARTITION (ds='2013-04-05', hr='2');
+SHOW PARTITIONS exchange_part_test1_n0;
+SHOW PARTITIONS exchange_part_test2_n0;
 -- This will exchange both partitions hr=1 and hr=2
-ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2;
-SHOW PARTITIONS exchange_part_test1;
-SHOW PARTITIONS exchange_part_test2;
+ALTER TABLE exchange_part_test1_n0 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2_n0;
+SHOW PARTITIONS exchange_part_test1_n0;
+SHOW PARTITIONS exchange_part_test2_n0;
diff --git a/ql/src/test/queries/clientpositive/exchgpartition2lel.q b/ql/src/test/queries/clientpositive/exchgpartition2lel.q
index 96e99bc041..567ff8a0bc 100644
--- a/ql/src/test/queries/clientpositive/exchgpartition2lel.q
+++ b/ql/src/test/queries/clientpositive/exchgpartition2lel.q
@@ -1,33 +1,33 @@
 --! qt:dataset:src
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-DROP TABLE IF EXISTS t3;
-DROP TABLE IF EXISTS t4;
+DROP TABLE IF EXISTS t1_n72;
+DROP TABLE IF EXISTS t2_n44;
+DROP TABLE IF EXISTS t3_n15;
+DROP TABLE IF EXISTS t4_n7;
-CREATE TABLE t1 (a int) PARTITIONED BY (d1 int);
-CREATE TABLE t2 (a int) PARTITIONED BY (d1 int);
-CREATE TABLE t3 (a int) PARTITIONED BY (d1 int, d2 int);
-CREATE TABLE t4 (a int) PARTITIONED BY (d1 int, d2 int);
-CREATE TABLE t5 (a int) PARTITIONED BY (d1 int, d2 int, d3 int);
-CREATE TABLE t6 (a int) PARTITIONED BY (d1 int, d2 int, d3 int);
+CREATE TABLE t1_n72 (a int) PARTITIONED BY (d1 int);
+CREATE TABLE t2_n44 (a int) PARTITIONED BY (d1 int);
+CREATE TABLE t3_n15 (a int) PARTITIONED BY (d1 int, d2 int);
+CREATE TABLE t4_n7 (a int) PARTITIONED BY (d1 int, d2 int);
+CREATE TABLE t5_n3 (a int) PARTITIONED BY (d1 int, d2 int, d3 int);
+CREATE TABLE t6_n2 (a int) PARTITIONED BY (d1 int, d2 int, d3 int);
 set hive.mapred.mode=nonstrict;
-INSERT OVERWRITE TABLE t1 PARTITION (d1 = 1) SELECT key FROM src where key = 100 limit 1;
-INSERT OVERWRITE TABLE t3 PARTITION (d1 = 1, d2 = 1) SELECT key FROM src where key = 100 limit 1;
-INSERT OVERWRITE TABLE t5 PARTITION (d1 = 1, d2 = 1, d3=1) SELECT key FROM src where key = 100 limit 1;
+INSERT OVERWRITE TABLE t1_n72 PARTITION (d1 = 1) SELECT key FROM src where key = 100 limit 1;
+INSERT OVERWRITE TABLE t3_n15 PARTITION (d1 = 1, d2 = 1) SELECT key FROM src where key = 100 limit 1;
+INSERT OVERWRITE TABLE t5_n3 PARTITION (d1 = 1, d2 = 1, d3=1) SELECT key FROM src where key = 100 limit 1;
-SELECT * FROM t1;
+SELECT * FROM t1_n72;
-SELECT * FROM t3;
+SELECT * FROM t3_n15;
-ALTER TABLE t2 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1;
-SELECT * FROM t1;
-SELECT * FROM t2;
+ALTER TABLE t2_n44 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1_n72;
+SELECT * FROM t1_n72;
+SELECT * FROM t2_n44;
-ALTER TABLE t4 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3;
-SELECT * FROM t3;
-SELECT * FROM t4;
+ALTER TABLE t4_n7 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3_n15;
+SELECT * FROM t3_n15;
+SELECT * FROM t4_n7;
-ALTER TABLE t6 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5;
-SELECT * FROM t5;
-SELECT * FROM t6;
+ALTER TABLE t6_n2 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5_n3;
+SELECT * FROM t5_n3;
+SELECT * FROM t6_n2;
diff --git a/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q b/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q
index 75e6e71399..7c5e7e98da 100644
--- a/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q
+++ b/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q
@@ -1,8 +1,8 @@
 --! qt:dataset:src
 set hive.exec.parallel=true;
-create table t as select * from src;
+create table t_n25 as select * from src;
-explain analyze table t compute statistics for columns;
+explain analyze table t_n25 compute statistics for columns;
-analyze table t compute statistics for columns;
+analyze table t_n25 compute statistics for columns;
diff --git a/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q b/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q
index 8288bbfd86..e2c1a3f385 100644
--- a/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q
+++ b/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q
@@ -3,24 +3,24 @@ set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n0,exim_employee;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n0 ( dep_id int comment "department id")
 stored as textfile
 tblproperties("creator"="krishna");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n0 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n0;
 create database importer;
 use importer;
 import from 'ql/test/data/exports/exim_department';
-describe extended exim_department;
-show table extended like exim_department;
+describe extended exim_department_n0;
+show table extended like exim_department_n0;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-select * from exim_department;
-drop table exim_department;
+select * from exim_department_n0;
+drop table exim_department_n0;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_01_nonpart.q b/ql/src/test/queries/clientpositive/exim_01_nonpart.q
index 1e2eed803a..85ff81114a 100644
--- a/ql/src/test/queries/clientpositive/exim_01_nonpart.q
+++ b/ql/src/test/queries/clientpositive/exim_01_nonpart.q
@@ -1,24 +1,24 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n7,exim_employee;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n7 ( dep_id int comment "department id")
 stored as textfile
 tblproperties("creator"="krishna");
-load data local inpath "../../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department_n7;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n7 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n7;
 create database importer;
 use importer;
 import from 'ql/test/data/exports/exim_department';
-describe extended exim_department;
-show table extended like exim_department;
+describe extended exim_department_n7;
+show table extended like exim_department_n7;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-select * from exim_department;
-drop table exim_department;
+select * from exim_department_n7;
+drop table exim_department_n7;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q b/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q
index 495306b778..4eac16e89f 100644
--- a/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q
+++ b/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q
@@ -1,26 +1,26 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n9;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n9 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n9 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n9;
 create database importer;
 use importer;
 import from 'ql/test/data/exports/exim_employee';
-describe extended exim_employee;
-show table extended like exim_employee;
+describe extended exim_employee_n9;
+show table extended like exim_employee_n9;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+select * from exim_employee_n9;
+drop table exim_employee_n9;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q b/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q
index 47d949aa36..90e7b1ffdc 100644
--- a/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q
+++ b/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q
@@ -1,26 +1,26 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n5,exim_employee;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n5 ( dep_id int comment "department id")
 stored as textfile
 tblproperties("creator"="krishna");
-load data local inpath "../../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department_n5;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n5 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n5;
 create database importer;
 use importer;
-create table exim_department ( dep_id int comment "department identifier")
+create table exim_department_n5 ( dep_id int comment "department identifier")
 stored as textfile
 tblproperties("maker"="krishna");
 import from 'ql/test/data/exports/exim_department';
-describe extended exim_department;
-select * from exim_department;
-drop table exim_department;
+describe extended exim_department_n5;
+select * from exim_department_n5;
+drop table exim_department_n5;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_04_all_part.q b/ql/src/test/queries/clientpositive/exim_04_all_part.q
index 8fb7aa0783..a46c712aa1 100644
--- a/ql/src/test/queries/clientpositive/exim_04_all_part.q
+++ b/ql/src/test/queries/clientpositive/exim_04_all_part.q
@@ -1,34 +1,34 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n5;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n5 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn");
+ into table exim_employee_n5 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka");
+ into table exim_employee_n5 partition (emp_country="in", emp_state="ka");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn");
+ into table exim_employee_n5 partition (emp_country="us", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka");
+ into table exim_employee_n5 partition (emp_country="us", emp_state="ka");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n5 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n5;
 create database importer;
 use importer;
 import from 'ql/test/data/exports/exim_employee';
-describe extended exim_employee;
-show table extended like exim_employee;
+describe extended exim_employee_n5;
+show table extended like exim_employee_n5;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+select * from exim_employee_n5;
+drop table exim_employee_n5;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q b/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q
index 6acac7b56b..aa989c726a 100644
--- a/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q
+++ b/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q
@@ -1,43 +1,43 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n12;
-create table exim_employee (emp_id int comment 'employee id', emp_name string, emp_dob string comment 'employee date of birth', emp_sex string comment 'M/F')
+create table exim_employee_n12 (emp_id int comment 'employee id', emp_name string, emp_dob string comment 'employee date of birth', emp_sex string comment 'M/F')
 comment 'employee table'
 partitioned by (emp_country string comment '2-char code', emp_state string comment '2-char code')
 clustered by (emp_sex) sorted by (emp_id ASC) into 10 buckets
 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" with serdeproperties ('serialization.format'='1')
 stored as rcfile;
-alter table exim_employee add columns (emp_dept int);
-alter table exim_employee clustered by (emp_sex, emp_dept) sorted by (emp_id desc) into 5 buckets;
-alter table exim_employee add partition (emp_country='in', emp_state='tn');
+alter table exim_employee_n12 add columns (emp_dept int);
+alter table exim_employee_n12 clustered by (emp_sex, emp_dept) sorted by (emp_id desc) into 5 buckets;
+alter table exim_employee_n12 add partition (emp_country='in', emp_state='tn');
-alter table exim_employee set fileformat
+alter table exim_employee_n12 set fileformat
 inputformat "org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat"
 outputformat "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"
 serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe";
 ;
-alter table exim_employee set serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe" with serdeproperties ('serialization.format'='2');
+alter table exim_employee_n12 set serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe" with serdeproperties ('serialization.format'='2');
-alter table exim_employee add partition (emp_country='in', emp_state='ka');
+alter table exim_employee_n12 add partition (emp_country='in', emp_state='ka');
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n12 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n12;
 create database importer;
 use importer;
 import from 'ql/test/data/exports/exim_employee';
-describe extended exim_employee;
-describe extended exim_employee partition (emp_country='in', emp_state='tn');
-describe extended exim_employee partition (emp_country='in', emp_state='ka');
-show table extended like exim_employee;
+describe extended exim_employee_n12;
+describe extended exim_employee_n12 partition (emp_country='in', emp_state='tn');
+describe extended exim_employee_n12 partition (emp_country='in', emp_state='ka');
+show table extended like exim_employee_n12;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+select * from exim_employee_n12;
+drop table exim_employee_n12;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_05_some_part.q b/ql/src/test/queries/clientpositive/exim_05_some_part.q
index 9e07a9e2b9..235ab8118f 100644
--- a/ql/src/test/queries/clientpositive/exim_05_some_part.q
+++ b/ql/src/test/queries/clientpositive/exim_05_some_part.q
@@ -1,34 +1,34 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n15;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n15 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn");
+ into table exim_employee_n15 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka");
+ into table exim_employee_n15 partition (emp_country="in", emp_state="ka");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn");
+ into table exim_employee_n15 partition (emp_country="us", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka");
+ into table exim_employee_n15 partition (emp_country="us", emp_state="ka");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee partition (emp_state="ka") to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n15 partition (emp_state="ka") to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n15;
 create database importer;
 use importer;
 import from 'ql/test/data/exports/exim_employee';
-describe extended exim_employee;
-show table extended like exim_employee;
+describe extended exim_employee_n15;
+show table extended like exim_employee_n15;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+select * from exim_employee_n15;
+drop table exim_employee_n15;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_06_one_part.q b/ql/src/test/queries/clientpositive/exim_06_one_part.q
index 0dcdaa1a3f..d83dfe831f 100644
--- a/ql/src/test/queries/clientpositive/exim_06_one_part.q
+++ b/ql/src/test/queries/clientpositive/exim_06_one_part.q
@@ -1,34 +1,34 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n3;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n3 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn");
+ into table exim_employee_n3 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka");
+ into table exim_employee_n3 partition (emp_country="in", emp_state="ka");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn");
+ into table exim_employee_n3 partition (emp_country="us", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka");
+ into table exim_employee_n3 partition (emp_country="us", emp_state="ka");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee partition (emp_country="in",emp_state="ka") to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n3 partition (emp_country="in",emp_state="ka") to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n3;
 create database importer;
 use importer;
 import from 'ql/test/data/exports/exim_employee';
-describe extended exim_employee;
-show table extended like exim_employee;
+describe extended exim_employee_n3;
+show table extended like exim_employee_n3;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+select * from exim_employee_n3;
+drop table exim_employee_n3;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q b/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q
index e897ee7bcb..04f68d4e9a 100644
--- a/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q
+++ b/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q
@@ -1,40 +1,40 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n8;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n8 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn");
+ into table exim_employee_n8 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka");
+ into table exim_employee_n8 partition (emp_country="in", emp_state="ka");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn");
+ into table exim_employee_n8 partition (emp_country="us", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka");
+ into table exim_employee_n8 partition (emp_country="us", emp_state="ka");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n8 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n8;
 create database importer;
 use importer;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n8 ( emp_id int comment "employee id")
 comment "table of employees"
 partitioned by (emp_country string comment "iso code", emp_state string comment "free-form text")
 stored as textfile
 tblproperties("maker"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="al");
+ into table exim_employee_n8 partition (emp_country="us", emp_state="al");
 import from 'ql/test/data/exports/exim_employee';
-describe extended exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+describe extended exim_employee_n8;
+select * from exim_employee_n8;
+drop table exim_employee_n8;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q b/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q
index 8a1d945476..c8e075df12 100644
--- a/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q
+++ b/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q
@@ -1,28 +1,28 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee,exim_imported_dept;
+set hive.test.mode.nosamplelist=exim_department_n9,exim_employee,exim_imported_dept;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n9 ( dep_id int comment "department id")
 stored as textfile
 tblproperties("creator"="krishna");
-load data local inpath "../../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department_n9;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n9 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n9;
 create database importer;
 use importer;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n9 ( dep_id int comment "department id")
 partitioned by (emp_org string)
 stored as textfile
 tblproperties("creator"="krishna");
-load data local inpath "../../data/files/test.dat" into table exim_department partition (emp_org="hr");
+load data local inpath "../../data/files/test.dat" into table exim_department_n9 partition (emp_org="hr");
 import table exim_imported_dept from 'ql/test/data/exports/exim_department';
 describe extended exim_imported_dept;
 select * from exim_imported_dept;
 drop table exim_imported_dept;
-drop table exim_department;
+drop table exim_department_n9;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q b/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q
index 4a9441828c..1f8cc345a1 100644
--- a/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q
+++ b/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q
@@ -1,41 +1,41 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n10;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n10 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn");
+ into table exim_employee_n10 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka");
+ into table exim_employee_n10 partition (emp_country="in", emp_state="ka");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn");
+ into table exim_employee_n10 partition (emp_country="us", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka");
+ into table exim_employee_n10 partition (emp_country="us", emp_state="ka");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n10 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n10;
 create database importer;
 use importer;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n10 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn");
+ into table exim_employee_n10 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka");
-import table exim_employee partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee';
-describe extended exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+ into table exim_employee_n10 partition (emp_country="in", emp_state="ka");
+import table exim_employee_n10 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee';
+describe extended exim_employee_n10;
+select * from exim_employee_n10;
+drop table exim_employee_n10;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_10_external_managed.q b/ql/src/test/queries/clientpositive/exim_10_external_managed.q
index 54859eed19..5aadbb3e05 100644
--- a/ql/src/test/queries/clientpositive/exim_10_external_managed.q
+++ b/ql/src/test/queries/clientpositive/exim_10_external_managed.q
@@ -1,27 +1,27 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n4,exim_employee;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
-create external table exim_department ( dep_id int comment "department id")
+create external table exim_department_n4 ( dep_id int comment "department id")
 stored as textfile
 location 'ql/test/data/tablestore/exim_department'
 tblproperties("creator"="krishna");
-load data local inpath "../../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department_n4;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n4 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n4;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
 create database importer;
 use importer;
 import from 'ql/test/data/exports/exim_department';
-describe extended exim_department;
-select * from exim_department;
-drop table exim_department;
+describe extended exim_department_n4;
+select * from exim_department_n4;
+drop table exim_department_n4;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_12_external_location.q b/ql/src/test/queries/clientpositive/exim_12_external_location.q
index e4d50ffe5b..5c6ed08000 100644
--- a/ql/src/test/queries/clientpositive/exim_12_external_location.q
+++ b/ql/src/test/queries/clientpositive/exim_12_external_location.q
@@ -1,15 +1,15 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n8,exim_employee;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n8 ( dep_id int comment "department id")
 stored as textfile
 tblproperties("creator"="krishna");
-load data local inpath "../../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department_n8;
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ql/test/data/exports/exim_department/temp;
 dfs -rmr ${system:test.tmp.dir}/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n8 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n8;
 create database importer;
 use importer;
@@ -17,13 +17,13 @@ use importer;
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ql/test/data/tablestore/exim_department/temp;
 dfs -rmr ${system:test.tmp.dir}/ql/test/data/tablestore/exim_department;
-import external table exim_department from 'ql/test/data/exports/exim_department'
+import external table exim_department_n8 from 'ql/test/data/exports/exim_department'
 location 'ql/test/data/tablestore/exim_department';
-describe extended exim_department;
+describe extended exim_department_n8;
 dfs -rmr ${system:test.tmp.dir}/ql/test/data/exports/exim_department;
-select * from exim_department;
+select * from exim_department_n8;
 dfs -rmr ${system:test.tmp.dir}/ql/test/data/tablestore/exim_department;
-select * from exim_department;
-drop table exim_department;
+select * from exim_department_n8;
+drop table exim_department_n8;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_13_managed_location.q b/ql/src/test/queries/clientpositive/exim_13_managed_location.q
index 909d23794b..ae3e045dcd 100644
--- a/ql/src/test/queries/clientpositive/exim_13_managed_location.q
+++ b/ql/src/test/queries/clientpositive/exim_13_managed_location.q
@@ -1,15 +1,15 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n2,exim_employee;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n2 ( dep_id int comment "department id")
 stored as textfile
 tblproperties("creator"="krishna");
-load data local inpath "../../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department_n2;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n2 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n2;
 create database importer;
 use importer;
@@ -17,13 +17,13 @@ use importer;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
-import table exim_department from 'ql/test/data/exports/exim_department'
+import table exim_department_n2 from 'ql/test/data/exports/exim_department'
 location 'ql/test/data/tablestore/exim_department';
-describe extended exim_department;
+describe extended exim_department_n2;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-select * from exim_department;
+select * from exim_department_n2;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
-select * from exim_department;
-drop table exim_department;
+select * from exim_department_n2;
+drop table exim_department_n2;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q b/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q
index dbb5fd9343..3d16880259 100644
--- a/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q
+++ b/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q
@@ -1,15 +1,15 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n10,exim_employee;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n10 ( dep_id int comment "department id")
 stored as textfile
 tblproperties("creator"="krishna");
-load data local inpath "../../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department_n10;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n10 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n10;
 create database importer;
 use importer;
@@ -17,17 +17,17 @@ use importer;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
-create table exim_department ( dep_id int comment "department id")
+create table exim_department_n10 ( dep_id int comment "department id")
 stored as textfile
 location 'ql/test/data/tablestore/exim_department'
 tblproperties("creator"="krishna");
-import table exim_department from 'ql/test/data/exports/exim_department'
+import table exim_department_n10 from 'ql/test/data/exports/exim_department'
 location 'ql/test/data/tablestore/exim_department';
-describe extended exim_department;
+describe extended exim_department_n10;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-select * from exim_department;
+select * from exim_department_n10;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
-select * from exim_department;
-drop table exim_department;
+select * from exim_department_n10;
+drop table exim_department_n10;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_15_external_part.q b/ql/src/test/queries/clientpositive/exim_15_external_part.q
index f1f2c380fa..a4fd56aebf 100644
--- a/ql/src/test/queries/clientpositive/exim_15_external_part.q
+++ b/ql/src/test/queries/clientpositive/exim_15_external_part.q
@@ -1,25 +1,25 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n0;
-create table exim_employee ( emp_id int comment "employee id")
+create table exim_employee_n0 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn");
+ into table exim_employee_n0 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka");
+ into table exim_employee_n0 partition (emp_country="in", emp_state="ka");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn");
+ into table exim_employee_n0 partition (emp_country="us", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka");
+ into table exim_employee_n0 partition (emp_country="us", emp_state="ka");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n0 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n0;
 create database importer;
 use importer;
@@ -27,25 +27,25 @@ use importer;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
-create external table exim_employee ( emp_id int comment "employee id")
+create external table exim_employee_n0 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 location 'ql/test/data/tablestore/exim_employee'
 tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn");
+ into table exim_employee_n0 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka");
-import external table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n0 partition (emp_country="in", emp_state="ka");
+import external table exim_employee_n0 partition (emp_country="us", emp_state="tn")
 from 'ql/test/data/exports/exim_employee';
-describe extended exim_employee;
-select * from exim_employee;
+describe extended exim_employee_n0;
+select * from exim_employee_n0;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-select * from exim_employee;
+select * from exim_employee_n0;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+select * from exim_employee_n0;
+drop table exim_employee_n0;
 drop database importer;
diff --git a/ql/src/test/queries/clientpositive/exim_16_part_external.q b/ql/src/test/queries/clientpositive/exim_16_part_external.q
index 60dbe9ebcb..b56832a984 100644
--- a/ql/src/test/queries/clientpositive/exim_16_part_external.q
+++ b/ql/src/test/queries/clientpositive/exim_16_part_external.q
@@ -1,25 +1,25 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n11;
-create table exim_employee ( emp_id int comment 
"employee id") +create table exim_employee_n11 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn"); + into table exim_employee_n11 partition (emp_country="in", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka"); + into table exim_employee_n11 partition (emp_country="in", emp_state="ka"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn"); + into table exim_employee_n11 partition (emp_country="us", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka"); + into table exim_employee_n11 partition (emp_country="us", emp_state="ka"); dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -export table exim_employee to 'ql/test/data/exports/exim_employee'; -drop table exim_employee; +export table exim_employee_n11 to 'ql/test/data/exports/exim_employee'; +drop table exim_employee_n11; create database importer; use importer; @@ -29,22 +29,22 @@ dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore2/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/tablestore2/exim_employee; -create external table exim_employee ( emp_id int comment "employee id") +create external table exim_employee_n11 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile location 'ql/test/data/tablestore2/exim_employee' tblproperties("creator"="krishna"); -import table exim_employee partition (emp_country="us", emp_state="tn") +import table exim_employee_n11 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee'; -show table extended like exim_employee; -show table extended like exim_employee partition (emp_country="us", emp_state="tn"); +show table extended like exim_employee_n11; +show table extended like exim_employee_n11 partition (emp_country="us", emp_state="tn"); dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -select * from exim_employee; +select * from exim_employee_n11; dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; -select * from exim_employee; -drop table exim_employee; +select * from exim_employee_n11; +drop table exim_employee_n11; dfs -rmr target/tmp/ql/test/data/tablestore2/exim_employee; drop database importer; diff --git a/ql/src/test/queries/clientpositive/exim_17_part_managed.q b/ql/src/test/queries/clientpositive/exim_17_part_managed.q index 6cb912f05e..0c6036b4cc 100644 --- a/ql/src/test/queries/clientpositive/exim_17_part_managed.q +++ b/ql/src/test/queries/clientpositive/exim_17_part_managed.q @@ -1,25 +1,25 @@ set hive.mapred.mode=nonstrict; set hive.test.mode=true; set hive.test.mode.prefix=; -set hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department,exim_employee_n4; -create table exim_employee ( emp_id int comment "employee id") +create table 
exim_employee_n4 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn"); + into table exim_employee_n4 partition (emp_country="in", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka"); + into table exim_employee_n4 partition (emp_country="in", emp_state="ka"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn"); + into table exim_employee_n4 partition (emp_country="us", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka"); + into table exim_employee_n4 partition (emp_country="us", emp_state="ka"); dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -export table exim_employee to 'ql/test/data/exports/exim_employee'; -drop table exim_employee; +export table exim_employee_n4 to 'ql/test/data/exports/exim_employee'; +drop table exim_employee_n4; create database importer; use importer; @@ -27,23 +27,23 @@ use importer; dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; -create table exim_employee ( emp_id int comment "employee id") +create table exim_employee_n4 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); -import table exim_employee partition (emp_country="us", emp_state="tn") +import table exim_employee_n4 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee'; -alter table exim_employee add partition (emp_country="us", emp_state="ap") +alter table exim_employee_n4 add partition (emp_country="us", emp_state="ap") location 'ql/test/data/tablestore2/exim_employee'; -show table extended like exim_employee; -show table extended like exim_employee partition (emp_country="us", emp_state="tn"); -show table extended like exim_employee partition (emp_country="us", emp_state="ap"); +show table extended like exim_employee_n4; +show table extended like exim_employee_n4 partition (emp_country="us", emp_state="tn"); +show table extended like exim_employee_n4 partition (emp_country="us", emp_state="ap"); dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -select * from exim_employee; +select * from exim_employee_n4; dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; -select * from exim_employee; -drop table exim_employee; +select * from exim_employee_n4; +drop table exim_employee_n4; drop database importer; diff --git a/ql/src/test/queries/clientpositive/exim_18_part_external.q b/ql/src/test/queries/clientpositive/exim_18_part_external.q index 4e6974cf67..1a259daa9a 100644 --- a/ql/src/test/queries/clientpositive/exim_18_part_external.q +++ b/ql/src/test/queries/clientpositive/exim_18_part_external.q @@ -1,37 +1,37 @@ set hive.mapred.mode=nonstrict; set hive.test.mode=true; set hive.test.mode.prefix=; -set 
hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department,exim_employee_n14; -create table exim_employee ( emp_id int comment "employee id") +create table exim_employee_n14 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn"); + into table exim_employee_n14 partition (emp_country="in", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka"); + into table exim_employee_n14 partition (emp_country="in", emp_state="ka"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn"); + into table exim_employee_n14 partition (emp_country="us", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka"); + into table exim_employee_n14 partition (emp_country="us", emp_state="ka"); dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -export table exim_employee to 'ql/test/data/exports/exim_employee'; -drop table exim_employee; +export table exim_employee_n14 to 'ql/test/data/exports/exim_employee'; +drop table exim_employee_n14; create database importer; use importer; -import external table exim_employee partition (emp_country="us", emp_state="tn") +import external table exim_employee_n14 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee'; -describe extended exim_employee; -show table extended like exim_employee; -show table extended like exim_employee partition (emp_country="us", emp_state="tn"); -select * from exim_employee; +describe extended exim_employee_n14; +show table extended like exim_employee_n14; +show table extended like exim_employee_n14 partition (emp_country="us", emp_state="tn"); +select * from exim_employee_n14; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -select * from exim_employee; -drop table exim_employee; +select * from exim_employee_n14; +drop table exim_employee_n14; drop database importer; diff --git a/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q b/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q index e7bfdcf114..eecfd70d27 100644 --- a/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q +++ b/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q @@ -1,21 +1,21 @@ set hive.mapred.mode=nonstrict; set hive.test.mode=true; set hive.test.mode.prefix=; -set hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department,exim_employee_n2; -create table exim_employee ( emp_id int comment "employee id") +create table exim_employee_n2 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn"); + into table exim_employee_n2 partition (emp_country="in", emp_state="tn"); load data local inpath 
"../../data/files/test2.dat" - into table exim_employee partition (emp_country="in", emp_state="ka"); + into table exim_employee_n2 partition (emp_country="in", emp_state="ka"); dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -export table exim_employee to 'ql/test/data/exports/exim_employee'; -drop table exim_employee; +export table exim_employee_n2 to 'ql/test/data/exports/exim_employee'; +drop table exim_employee_n2; create database importer; use importer; @@ -23,17 +23,17 @@ use importer; dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; -import external table exim_employee +import external table exim_employee_n2 from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee'; -describe extended exim_employee; -show table extended like exim_employee; -show table extended like exim_employee partition (emp_country="in", emp_state="tn"); -show table extended like exim_employee partition (emp_country="in", emp_state="ka"); +describe extended exim_employee_n2; +show table extended like exim_employee_n2; +show table extended like exim_employee_n2 partition (emp_country="in", emp_state="tn"); +show table extended like exim_employee_n2 partition (emp_country="in", emp_state="ka"); dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -select * from exim_employee; +select * from exim_employee_n2; dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; -select * from exim_employee; -drop table exim_employee; +select * from exim_employee_n2; +drop table exim_employee_n2; drop database importer; diff --git a/ql/src/test/queries/clientpositive/exim_19_part_external_location.q b/ql/src/test/queries/clientpositive/exim_19_part_external_location.q index 389b983330..8f8d7eb117 100644 --- a/ql/src/test/queries/clientpositive/exim_19_part_external_location.q +++ b/ql/src/test/queries/clientpositive/exim_19_part_external_location.q @@ -1,25 +1,25 @@ set hive.mapred.mode=nonstrict; set hive.test.mode=true; set hive.test.mode.prefix=; -set hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department,exim_employee_n13; -create table exim_employee ( emp_id int comment "employee id") +create table exim_employee_n13 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn"); + into table exim_employee_n13 partition (emp_country="in", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka"); + into table exim_employee_n13 partition (emp_country="in", emp_state="ka"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn"); + into table exim_employee_n13 partition (emp_country="us", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka"); + into table exim_employee_n13 partition (emp_country="us", emp_state="ka"); dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; 
-export table exim_employee to 'ql/test/data/exports/exim_employee'; -drop table exim_employee; +export table exim_employee_n13 to 'ql/test/data/exports/exim_employee'; +drop table exim_employee_n13; create database importer; use importer; @@ -27,16 +27,16 @@ use importer; dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; -import external table exim_employee partition (emp_country="us", emp_state="tn") +import external table exim_employee_n13 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee'; -describe extended exim_employee; -show table extended like exim_employee; -show table extended like exim_employee partition (emp_country="us", emp_state="tn"); +describe extended exim_employee_n13; +show table extended like exim_employee_n13; +show table extended like exim_employee_n13 partition (emp_country="us", emp_state="tn"); dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -select * from exim_employee; +select * from exim_employee_n13; dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; -select * from exim_employee; -drop table exim_employee; +select * from exim_employee_n13; +drop table exim_employee_n13; drop database importer; diff --git a/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q b/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q index 5b29ebb143..644a0cef0d 100644 --- a/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q +++ b/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q @@ -1,25 +1,25 @@ set hive.mapred.mode=nonstrict; set hive.test.mode=true; set hive.test.mode.prefix=; -set hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department,exim_employee_n1; -create table exim_employee ( emp_id int comment "employee id") +create table exim_employee_n1 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn"); + into table exim_employee_n1 partition (emp_country="in", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka"); + into table exim_employee_n1 partition (emp_country="in", emp_state="ka"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn"); + into table exim_employee_n1 partition (emp_country="us", emp_state="tn"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka"); + into table exim_employee_n1 partition (emp_country="us", emp_state="ka"); dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -export table exim_employee to 'ql/test/data/exports/exim_employee'; -drop table exim_employee; +export table exim_employee_n1 to 'ql/test/data/exports/exim_employee'; +drop table exim_employee_n1; create database importer; use importer; @@ -27,16 +27,16 @@ use importer; dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp; dfs -rmr 
target/tmp/ql/test/data/tablestore/exim_employee; -import table exim_employee partition (emp_country="us", emp_state="tn") +import table exim_employee_n1 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee'; -describe extended exim_employee; -show table extended like exim_employee; -show table extended like exim_employee partition (emp_country="us", emp_state="tn"); +describe extended exim_employee_n1; +show table extended like exim_employee_n1; +show table extended like exim_employee_n1 partition (emp_country="us", emp_state="tn"); dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -select * from exim_employee; +select * from exim_employee_n1; dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee; -select * from exim_employee; -drop table exim_employee; +select * from exim_employee_n1; +drop table exim_employee_n1; drop database importer; diff --git a/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q b/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q index 1e3eaeed61..9d4cd042c2 100644 --- a/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q +++ b/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q @@ -2,15 +2,15 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho set hive.test.mode=true; set hive.test.mode.prefix=; -create table exim_department ( dep_id int) stored as textfile; -load data local inpath "../../data/files/test.dat" into table exim_department; +create table exim_department_n3 ( dep_id int) stored as textfile; +load data local inpath "../../data/files/test.dat" into table exim_department_n3; set hive.security.authorization.enabled=true; -grant Select on table exim_department to user hive_test_user; +grant Select on table exim_department_n3 to user hive_test_user; dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_department; -export table exim_department to 'ql/test/data/exports/exim_department'; +export table exim_department_n3 to 'ql/test/data/exports/exim_department'; set hive.security.authorization.enabled=false; -drop table exim_department; +drop table exim_department_n3; diff --git a/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q b/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q index 606f9af261..a27ef458b8 100644 --- a/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q +++ b/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q @@ -1,27 +1,27 @@ set hive.test.mode=true; set hive.test.mode.prefix=; -set hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department_n1,exim_employee; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table exim_department ( dep_id int) stored as textfile; -load data local inpath "../../data/files/test.dat" into table exim_department; +create table exim_department_n1 ( dep_id int) stored as textfile; +load data local inpath "../../data/files/test.dat" into table exim_department_n1; dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_department; -export table exim_department to 'ql/test/data/exports/exim_department'; -drop table exim_department; +export table exim_department_n1 to 
'ql/test/data/exports/exim_department'; +drop table exim_department_n1; create database importer; use importer; -create table exim_department ( dep_id int) stored as textfile; +create table exim_department_n1 ( dep_id int) stored as textfile; set hive.security.authorization.enabled=true; -grant Alter on table exim_department to user hive_test_user; -grant Update on table exim_department to user hive_test_user; +grant Alter on table exim_department_n1 to user hive_test_user; +grant Update on table exim_department_n1 to user hive_test_user; import from 'ql/test/data/exports/exim_department'; set hive.security.authorization.enabled=false; -select * from exim_department; -drop table exim_department; +select * from exim_department_n1; +drop table exim_department_n1; drop database importer; dfs -rmr target/tmp/ql/test/data/exports/exim_department; diff --git a/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q b/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q index 316f2e0b98..888448d648 100644 --- a/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q +++ b/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q @@ -1,36 +1,36 @@ set hive.mapred.mode=nonstrict; set hive.test.mode=true; set hive.test.mode.prefix=; -set hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department,exim_employee_n7; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table exim_employee ( emp_id int comment "employee id") +create table exim_employee_n7 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn"); + into table exim_employee_n7 partition (emp_country="in", emp_state="tn"); dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -export table exim_employee to 'ql/test/data/exports/exim_employee'; -drop table exim_employee; +export table exim_employee_n7 to 'ql/test/data/exports/exim_employee'; +drop table exim_employee_n7; create database importer; use importer; -create table exim_employee ( emp_id int comment "employee id") +create table exim_employee_n7 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna"); set hive.security.authorization.enabled=true; -grant Alter on table exim_employee to user hive_test_user; -grant Update on table exim_employee to user hive_test_user; +grant Alter on table exim_employee_n7 to user hive_test_user; +grant Update on table exim_employee_n7 to user hive_test_user; import from 'ql/test/data/exports/exim_employee'; set hive.security.authorization.enabled=false; -select * from exim_employee; +select * from exim_employee_n7; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -drop table exim_employee; +drop table exim_employee_n7; drop database importer; diff --git a/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q b/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q index 8ded70b648..e403418640 100644 --- 
a/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q +++ b/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q @@ -1,14 +1,14 @@ set hive.test.mode=true; set hive.test.mode.prefix=; -set hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department_n6,exim_employee; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table exim_department ( dep_id int) stored as textfile; -load data local inpath "../../data/files/test.dat" into table exim_department; +create table exim_department_n6 ( dep_id int) stored as textfile; +load data local inpath "../../data/files/test.dat" into table exim_department_n6; dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/test; dfs -rmr target/tmp/ql/test/data/exports/exim_department; -export table exim_department to 'ql/test/data/exports/exim_department'; -drop table exim_department; +export table exim_department_n6 to 'ql/test/data/exports/exim_department'; +drop table exim_department_n6; create database importer; use importer; @@ -18,8 +18,8 @@ grant Create on database importer to user hive_test_user; import from 'ql/test/data/exports/exim_department'; set hive.security.authorization.enabled=false; -select * from exim_department; -drop table exim_department; +select * from exim_department_n6; +drop table exim_department_n6; drop database importer; dfs -rmr target/tmp/ql/test/data/exports/exim_department; diff --git a/ql/src/test/queries/clientpositive/exim_hidden_files.q b/ql/src/test/queries/clientpositive/exim_hidden_files.q index 18bf3ad3ed..f046411266 100644 --- a/ql/src/test/queries/clientpositive/exim_hidden_files.q +++ b/ql/src/test/queries/clientpositive/exim_hidden_files.q @@ -1,23 +1,23 @@ set hive.mapred.mode=nonstrict; set hive.test.mode=true; set hive.test.mode.prefix=; -set hive.test.mode.nosamplelist=exim_department,exim_employee; +set hive.test.mode.nosamplelist=exim_department,exim_employee_n6; -create table exim_employee ( emp_id int) partitioned by (emp_country string); -load data local inpath "../../data/files/test.dat" into table exim_employee partition (emp_country="in"); +create table exim_employee_n6 ( emp_id int) partitioned by (emp_country string); +load data local inpath "../../data/files/test.dat" into table exim_employee_n6 partition (emp_country="in"); dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/exim_employee/emp_country=in/_logs; dfs -touchz ${system:test.warehouse.dir}/exim_employee/emp_country=in/_logs/job.xml; -export table exim_employee to 'ql/test/data/exports/exim_employee'; -drop table exim_employee; +export table exim_employee_n6 to 'ql/test/data/exports/exim_employee'; +drop table exim_employee_n6; create database importer; use importer; import from 'ql/test/data/exports/exim_employee'; -describe formatted exim_employee; -select * from exim_employee; +describe formatted exim_employee_n6; +select * from exim_employee_n6; dfs -rmr target/tmp/ql/test/data/exports/exim_employee; -drop table exim_employee; +drop table exim_employee_n6; drop database importer; use default; diff --git a/ql/src/test/queries/clientpositive/explain_ddl.q b/ql/src/test/queries/clientpositive/explain_ddl.q index b873088832..16f72017b0 100644 --- a/ql/src/test/queries/clientpositive/explain_ddl.q +++ b/ql/src/test/queries/clientpositive/explain_ddl.q @@ -2,18 +2,18 @@ -- This test is used for testing explain for DDL/DML statements -- Create some 
views and tabels -CREATE VIEW V1 AS SELECT key, value from src; -select count(*) from V1 where key > 0; +CREATE VIEW V1_n0 AS SELECT key, value from src; +select count(*) from V1_n0 where key > 0; CREATE TABLE M1 AS SELECT key, value from src; select count(*) from M1 where key > 0; EXPLAIN CREATE TABLE M1 AS select * from src; EXPLAIN CREATE TABLE M1 AS select * from M1; -EXPLAIN CREATE TABLE M1 AS select * from V1; +EXPLAIN CREATE TABLE M1 AS select * from V1_n0; -EXPLAIN CREATE TABLE V1 AS select * from M1; -EXPLAIN CREATE VIEW V1 AS select * from M1; +EXPLAIN CREATE TABLE V1_n0 AS select * from M1; +EXPLAIN CREATE VIEW V1_n0 AS select * from M1; EXPLAIN CREATE TABLE M1 LIKE src; EXPLAIN CREATE TABLE M1 LIKE M1; diff --git a/ql/src/test/queries/clientpositive/explain_dependency.q b/ql/src/test/queries/clientpositive/explain_dependency.q index df3cb7a746..33c31eb25d 100644 --- a/ql/src/test/queries/clientpositive/explain_dependency.q +++ b/ql/src/test/queries/clientpositive/explain_dependency.q @@ -5,14 +5,14 @@ set hive.mapred.mode=nonstrict; -- This test is used for testing EXPLAIN DEPENDENCY command -- Create some views -CREATE VIEW V1 AS SELECT key, value from src; -CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL; +CREATE VIEW V1_n6 AS SELECT key, value from src; +CREATE VIEW V2_n1 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL; CREATE VIEW V3 AS - SELECT src1.key, src2.value FROM V2 src1 + SELECT src1.key, src2.value FROM V2_n1 src1 JOIN src src2 ON src1.key = src2.key WHERE src1.ds IS NOT NULL; CREATE VIEW V4 AS SELECT src1.key, src2.value as value1, src3.value as value2 - FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key; + FROM V1_n6 src1 JOIN V2_n1 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key; -- Simple select queries, union queries and join queries EXPLAIN DEPENDENCY @@ -27,8 +27,8 @@ EXPLAIN DEPENDENCY SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL; -- With views -EXPLAIN DEPENDENCY SELECT * FROM V1; -EXPLAIN DEPENDENCY SELECT * FROM V2; +EXPLAIN DEPENDENCY SELECT * FROM V1_n6; +EXPLAIN DEPENDENCY SELECT * FROM V2_n1; EXPLAIN DEPENDENCY SELECT * FROM V3; EXPLAIN DEPENDENCY SELECT * FROM V4; diff --git a/ql/src/test/queries/clientpositive/explain_logical.q b/ql/src/test/queries/clientpositive/explain_logical.q index 261b50a19d..96b867238d 100644 --- a/ql/src/test/queries/clientpositive/explain_logical.q +++ b/ql/src/test/queries/clientpositive/explain_logical.q @@ -5,14 +5,14 @@ set hive.mapred.mode=nonstrict; -- This test is used for testing EXPLAIN LOGICAL command -- Create some views -CREATE VIEW V1 AS SELECT key, value from src; -CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL; -CREATE VIEW V3 AS - SELECT src1.key, src2.value FROM V2 src1 +CREATE VIEW V1_n8 AS SELECT key, value from src; +CREATE VIEW V2_n3 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL; +CREATE VIEW V3_n1 AS + SELECT src1.key, src2.value FROM V2_n3 src1 JOIN src src2 ON src1.key = src2.key WHERE src1.ds IS NOT NULL; -CREATE VIEW V4 AS +CREATE VIEW V4_n1 AS SELECT src1.key, src2.value as value1, src3.value as value2 - FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key; + FROM V1_n8 src1 JOIN V2_n3 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key; -- Simple select queries, union queries and join queries EXPLAIN LOGICAL @@ -27,14 +27,14 @@ EXPLAIN LOGICAL SELECT S1.key, S2.value FROM src 
S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL; -- With views -EXPLAIN LOGICAL SELECT * FROM V1; -EXPLAIN LOGICAL SELECT * FROM V2; -EXPLAIN LOGICAL SELECT * FROM V3; -EXPLAIN LOGICAL SELECT * FROM V4; +EXPLAIN LOGICAL SELECT * FROM V1_n8; +EXPLAIN LOGICAL SELECT * FROM V2_n3; +EXPLAIN LOGICAL SELECT * FROM V3_n1; +EXPLAIN LOGICAL SELECT * FROM V4_n1; -- The table should show up in the explain logical even if none -- of the partitions are selected. -CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10'; -EXPLAIN LOGICAL SELECT * FROM V5; +CREATE VIEW V5_n0 as SELECT * FROM srcpart where ds = '10'; +EXPLAIN LOGICAL SELECT * FROM V5_n0; EXPLAIN LOGICAL SELECT s1.key, s1.cnt, s2.value FROM (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 JOIN src s2 ON (s1.key = s2.key) ORDER BY s1.key; diff --git a/ql/src/test/queries/clientpositive/explain_outputs.q b/ql/src/test/queries/clientpositive/explain_outputs.q index d53b66edf8..d42847db35 100644 --- a/ql/src/test/queries/clientpositive/explain_outputs.q +++ b/ql/src/test/queries/clientpositive/explain_outputs.q @@ -1,15 +1,15 @@ -create table t1 (id int); -create table t2 (id int); +create table t1_n22 (id int); +create table t2_n14 (id int); -insert into t1 values (1),(10); -insert into t2 values (1),(2),(3),(4),(5); +insert into t1_n22 values (1),(10); +insert into t2_n14 values (1),(2),(3),(4),(5); explain -select sum(t1.id) from t1 join t2 on (t1.id=t2.id); +select sum(t1_n22.id) from t1_n22 join t2_n14 on (t1_n22.id=t2_n14.id); explain analyze -select sum(t1.id) from t1 join t2 on (t1.id=t2.id); +select sum(t1_n22.id) from t1_n22 join t2_n14 on (t1_n22.id=t2_n14.id); explain reoptimization -select sum(t1.id) from t1 join t2 on (t1.id=t2.id); +select sum(t1_n22.id) from t1_n22 join t2_n14 on (t1_n22.id=t2_n14.id); diff --git a/ql/src/test/queries/clientpositive/explain_rearrange.q b/ql/src/test/queries/clientpositive/explain_rearrange.q index 51644c166d..5c1cc094cc 100644 --- a/ql/src/test/queries/clientpositive/explain_rearrange.q +++ b/ql/src/test/queries/clientpositive/explain_rearrange.q @@ -2,8 +2,8 @@ set hive.mapred.mode=nonstrict; -- query from auto_sortmerge_join_9.q -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl1_n9(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl2_n8(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; set hive.auto.convert.join=true; set hive.optimize.bucketmapjoin = true; @@ -20,14 +20,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -42,14 +42,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = 
b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -64,14 +64,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -86,14 +86,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key diff --git a/ql/src/test/queries/clientpositive/explainanalyze_1.q b/ql/src/test/queries/clientpositive/explainanalyze_1.q index 9a2b9c5a4b..e16cec2d01 100644 --- a/ql/src/test/queries/clientpositive/explainanalyze_1.q +++ b/ql/src/test/queries/clientpositive/explainanalyze_1.q @@ -1,49 +1,49 @@ --! qt:dataset:src
SET hive.vectorized.execution.enabled=false; set hive.map.aggr=false; set hive.mapred.mode=nonstrict; explain analyze select * from src a union all select * from src b limit 10; explain analyze select key from src; -explain analyze create table t as select key from src; +explain analyze create table t_n28 as select key from src; -create table t as select key from src; +create table t_n28 as select key from src; -explain analyze insert overwrite table t select key from src; +explain analyze insert overwrite table t_n28 select key from src; explain analyze select key from src limit 10; explain analyze select key from src where value < 10; explain analyze select key from src where key < 10; select count(*) from (select key from src where key < 10)subq; explain analyze select key, count(key) from src group by key; select count(*) from (select key, count(key) from src group by key)subq; explain analyze select count(*) from src a join src b on a.key = b.value where a.key > 0; explain analyze select count(*) from src a join src b on a.key = b.key where a.key > 0; select count(*) from src a join src b on a.key = b.key where a.key > 0; explain analyze select * from src a union all select * from src b; select count(*) from (select * from src a union all select * from src b)subq; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000; EXPLAIN analyze SELECT x.key, y.value FROM src x JOIN src y ON (x.key = y.key); set hive.entity.capture.transform=true; explain analyze SELECT TRANSFORM(a.key, a.value) USING 'cat' AS (tkey, tvalue) FROM src a join src b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/explainanalyze_2.q b/ql/src/test/queries/clientpositive/explainanalyze_2.q index 10b605f264..b54be2f9c9 100644 --- a/ql/src/test/queries/clientpositive/explainanalyze_2.q +++ b/ql/src/test/queries/clientpositive/explainanalyze_2.q
@@ -35,31 +35,31 @@ set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000; set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ; -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_n11(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +CREATE TABLE tab_part_n7 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_part_n11 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n11 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part
partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n11 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n11 partition(ds='2008-04-08'); set hive.optimize.bucketingsorting=false; -insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part; +insert overwrite table tab_part_n7 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n11; -CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE tab_n6(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab_n6 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n11; -CREATE TABLE tab2(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab2 partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE tab2_n3(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab2_n3 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n11; set hive.convert.join.bucket.mapjoin.tez = false; set hive.auto.convert.sortmerge.join = true; @@ -67,101 +67,101 @@ set hive.auto.convert.sortmerge.join = true; set hive.auto.convert.join.noconditionaltask.size=500; explain analyze -select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key; +select s1.key as key, s1.value as value from tab_n6 s1 join tab_n6 s3 on s1.key=s3.key; explain analyze -select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value; +select s1.key as key, s1.value as value from tab_n6 s1 join tab_n6 s3 on s1.key=s3.key join tab_n6 s2 on s1.value=s2.value; explain analyze -select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key; +select s1.key as key, s1.value as value from tab_n6 s1 join tab2_n3 s3 on s1.key=s3.key; explain analyze -select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key join tab2 s2 on s1.value=s2.value; +select s1.key as key, s1.value as value from tab_n6 s1 join tab2_n3 s3 on s1.key=s3.key join tab2_n3 s2 on s1.value=s2.value; explain analyze -select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key +select count(*) from (select s1.key as key, s1.value as value from tab_n6 s1 join tab_n6 s3 on s1.key=s3.key UNION ALL -select s2.key as key, s2.value as value from tab s2 -) a join
tab_part b on (a.key = b.key); +select s2.key as key, s2.value as value from tab_n6 s2 +) a_n14 join tab_part_n7 b_n10 on (a_n14.key = b_n10.key); explain analyze -select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value +select count(*) from (select s1.key as key, s1.value as value from tab_n6 s1 join tab_n6 s3 on s1.key=s3.key join tab_n6 s2 on s1.value=s2.value UNION ALL -select s2.key as key, s2.value as value from tab s2 -) a join tab_part b on (a.key = b.key); +select s2.key as key, s2.value as value from tab_n6 s2 +) a_n14 join tab_part_n7 b_n10 on (a_n14.key = b_n10.key); -CREATE TABLE a(key STRING, value STRING) STORED AS TEXTFILE; -CREATE TABLE b(key STRING, value STRING) STORED AS TEXTFILE; -CREATE TABLE c(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE a_n14(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE b_n10(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE c_n3(key STRING, value STRING) STORED AS TEXTFILE; explain analyze from ( SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union all select * from src)z ON (x.value = z.value) union all SELECT x.key, y.value FROM src x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union all select key, value from src union all select key, value from src)z ON (x.value = z.value) union all SELECT x.key, y.value FROM src1 x JOIN src1 y ON (x.key = y.key) JOIN (select key, value from src1 union all select key, value from src union all select key, value from src union all select key, value from src)z ON (x.value = z.value) ) tmp -INSERT OVERWRITE TABLE a SELECT tmp.key, tmp.value -INSERT OVERWRITE TABLE b SELECT tmp.key, tmp.value -INSERT OVERWRITE TABLE c SELECT tmp.key, tmp.value; +INSERT OVERWRITE TABLE a_n14 SELECT tmp.key, tmp.value +INSERT OVERWRITE TABLE b_n10 SELECT tmp.key, tmp.value +INSERT OVERWRITE TABLE c_n3 SELECT tmp.key, tmp.value; explain analyze FROM ( SELECT x.key as key, y.value as value from src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value = z.value) union SELECT x.key as key, y.value as value from src x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src)z ON (x.value = z.value) union SELECT x.key as key, y.value as value from src1 x JOIN src1 y ON (x.key = y.key) JOIN (select key, value from src1 union
select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value) ) tmp -INSERT OVERWRITE TABLE a SELECT tmp.key, tmp.value -INSERT OVERWRITE TABLE b SELECT tmp.key, tmp.value -INSERT OVERWRITE TABLE c SELECT tmp.key, tmp.value; +INSERT OVERWRITE TABLE a_n14 SELECT tmp.key, tmp.value +INSERT OVERWRITE TABLE b_n10 SELECT tmp.key, tmp.value +INSERT OVERWRITE TABLE c_n3 SELECT tmp.key, tmp.value; -CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE; -CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE DEST1_n105(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE DEST2_n29(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE; explain analyze FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT - select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value; + select s2.key as key, s2.value as value from src s2) unionsrc_n3 +INSERT OVERWRITE TABLE DEST1_n105 SELECT unionsrc_n3.key, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) GROUP BY unionsrc_n3.key +INSERT OVERWRITE TABLE DEST2_n29 SELECT unionsrc_n3.key, unionsrc_n3.value, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) GROUP BY unionsrc_n3.key, unionsrc_n3.value; -explain analyze FROM UNIQUEJOIN PRESERVE src a (a.key), PRESERVE src1 b (b.key), PRESERVE srcpart c (c.key) SELECT a.key, b.key, c.key; +explain analyze FROM UNIQUEJOIN PRESERVE src a_n14 (a_n14.key), PRESERVE src1 b_n10 (b_n10.key), PRESERVE srcpart c_n3 (c_n3.key) SELECT a_n14.key, b_n10.key, c_n3.key; explain analyze FROM ( select key, value from ( select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1 UNION all - select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub + select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub_n10 UNION all select key, value from src s0 - ) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) -GROUP BY unionsrc.key, unionsrc.value; + ) unionsrc_n3 +INSERT OVERWRITE TABLE DEST1_n105 SELECT unionsrc_n3.key, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) GROUP BY unionsrc_n3.key +INSERT OVERWRITE TABLE DEST2_n29 SELECT unionsrc_n3.key, unionsrc_n3.value, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) +GROUP BY unionsrc_n3.key, unionsrc_n3.value; explain analyze FROM ( select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1 UNION all select s2.key as key, s2.value as value, 'tst1' as value2 from src s2 - ) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) -GROUP BY unionsrc.key,
unionsrc.value; + ) unionsrc_n3 +INSERT OVERWRITE TABLE DEST1_n105 SELECT unionsrc_n3.key, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) GROUP BY unionsrc_n3.key +INSERT OVERWRITE TABLE DEST2_n29 SELECT unionsrc_n3.key, unionsrc_n3.value, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) +GROUP BY unionsrc_n3.key, unionsrc_n3.value; diff --git a/ql/src/test/queries/clientpositive/explainanalyze_3.q b/ql/src/test/queries/clientpositive/explainanalyze_3.q index d5583db2bb..3d5b3a829d 100644 --- a/ql/src/test/queries/clientpositive/explainanalyze_3.q +++ b/ql/src/test/queries/clientpositive/explainanalyze_3.q @@ -34,13 +34,13 @@ explain analyze use newDB; use newDB; -create table tab (name string); +create table tab_n2 (name string); -explain analyze alter table tab rename to newName; +explain analyze alter table tab_n2 rename to newName; -explain analyze drop table tab; +explain analyze drop table tab_n2; -drop table tab; +drop table tab_n2; explain analyze use default; @@ -65,24 +65,24 @@ explain analyze SELECT SIGMOID(2) FROM src LIMIT 1; explain analyze DROP TEMPORARY MACRO SIGMOID; DROP TEMPORARY MACRO SIGMOID; -explain analyze create table src_autho_test as select * from src; -create table src_autho_test as select * from src; +explain analyze create table src_autho_test_n4 as select * from src; +create table src_autho_test_n4 as select * from src; set hive.security.authorization.enabled=true; -explain analyze grant select on table src_autho_test to user hive_test_user; -grant select on table src_autho_test to user hive_test_user; +explain analyze grant select on table src_autho_test_n4 to user hive_test_user; +grant select on table src_autho_test_n4 to user hive_test_user; -explain analyze show grant user hive_test_user on table src_autho_test; -explain analyze show grant user hive_test_user on table src_autho_test(key); +explain analyze show grant user hive_test_user on table src_autho_test_n4; +explain analyze show grant user hive_test_user on table src_autho_test_n4(key); -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n4 order by key limit 20; -explain analyze revoke select on table src_autho_test from user hive_test_user; +explain analyze revoke select on table src_autho_test_n4 from user hive_test_user; -explain analyze grant select(key) on table src_autho_test to user hive_test_user; +explain analyze grant select(key) on table src_autho_test_n4 to user hive_test_user; -explain analyze revoke select(key) on table src_autho_test from user hive_test_user; +explain analyze revoke select(key) on table src_autho_test_n4 from user hive_test_user; explain analyze create role sRc_roLE; @@ -100,19 +100,19 @@ explain analyze drop role sRc_roLE; drop role sRc_roLE; set hive.security.authorization.enabled=false; -drop table src_autho_test; +drop table src_autho_test_n4; -explain analyze drop view v; +explain analyze drop view v_n5; -explain analyze create view v as with cte as (select * from src order by key limit 5) +explain analyze create view v_n5 as with cte as (select * from src order by key limit 5) select * from cte; explain analyze with cte as (select * from src order by key limit 5) select * from cte; -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; +create table orc_merge5_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; -load data local inpath
'../../data/files/orc_split_elim.orc' into table orc_merge5; +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n1; SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; SET mapred.min.split.size=1000; @@ -131,40 +131,40 @@ set hive.merge.tezfiles=true; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; -explain analyze insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +explain analyze insert overwrite table orc_merge5_n1 select userid,string1,subtype,decimal1,ts from orc_merge5_n1 where userid<=13; -drop table orc_merge5; +drop table orc_merge5_n1; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000; -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_n4(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +CREATE TABLE tab_part_n3 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_part_n5 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n4 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n4 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n5 partition(ds='2008-04-08'); set hive.optimize.bucketingsorting=false; -insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part; +insert overwrite table tab_part_n3 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n5; -CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE 
tab_n2(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab_n2 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n4; set hive.convert.join.bucket.mapjoin.tez = true; explain analyze select a.key, a.value, b.value -from tab a join tab_part b on a.key = b.key; +from tab_n2 a join tab_part_n3 b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/explainanalyze_5.q b/ql/src/test/queries/clientpositive/explainanalyze_5.q index 5f2c840097..754d6e2834 100644 --- a/ql/src/test/queries/clientpositive/explainanalyze_5.q +++ b/ql/src/test/queries/clientpositive/explainanalyze_5.q @@ -6,25 +6,25 @@ set hive.map.aggr=false; set hive.stats.column.autogather=true; -drop table src_stats; +drop table src_stats_n0; -create table src_stats as select * from src; +create table src_stats_n0 as select * from src; -explain analyze analyze table src_stats compute statistics; +explain analyze analyze table src_stats_n0 compute statistics; -explain analyze analyze table src_stats compute statistics for columns; +explain analyze analyze table src_stats_n0 compute statistics for columns; -drop table src_multi2; +drop table src_multi2_n7; -create table src_multi2 like src; +create table src_multi2_n7 like src; -explain analyze insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key; +explain analyze insert overwrite table src_multi2_n7 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key; select count(*) from (select * from src union select * from src1)subq; -insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key; +insert overwrite table src_multi2_n7 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key; -describe formatted src_multi2; +describe formatted src_multi2_n7; set hive.mapred.mode=nonstrict; @@ -35,27 +35,27 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -- SORT_QUERY_RESULTS -create table acid_uami(i int, +create table acid_uami_n2(i int, de decimal(5,2), vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -insert into table acid_uami values +insert into table acid_uami_n2 values (1, 109.23, 'mary had a little lamb'), (6553, 923.19, 'its fleece was white as snow'); -insert into table acid_uami values +insert into table acid_uami_n2 values (10, 119.23, 'and everywhere that mary went'), (65530, 823.19, 'the lamb was sure to go'); -select * from acid_uami order by de; +select * from acid_uami_n2 order by de; -explain analyze update acid_uami set de = 3.14 where de = 109.23 or de = 119.23; +explain analyze update acid_uami_n2 set de = 3.14 where de = 109.23 or de = 119.23; -select * from acid_uami order by de; +select * from acid_uami_n2 order by de; -update acid_uami set de = 3.14 where de = 109.23 or de = 119.23; +update acid_uami_n2 set de = 3.14 where de = 109.23 or de = 119.23; -select * from acid_uami order by de; +select * from acid_uami_n2 order by de; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; @@ -64,7 +64,7 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/delete_orig_table; dfs -copyFromLocal 
../../data/files/alltypesorc ${system:test.tmp.dir}/delete_orig_table/00000_0; -create table acid_dot( +create table acid_dot_n0( ctinyint TINYINT, csmallint SMALLINT, cint INT, @@ -78,14 +78,14 @@ create table acid_dot( cboolean1 BOOLEAN, cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/delete_orig_table' TBLPROPERTIES ('transactional'='true'); -select count(*) from acid_dot; +select count(*) from acid_dot_n0; -explain analyze delete from acid_dot where cint < -1070551679; +explain analyze delete from acid_dot_n0 where cint < -1070551679; -select count(*) from acid_dot; +select count(*) from acid_dot_n0; -delete from acid_dot where cint < -1070551679; +delete from acid_dot_n0 where cint < -1070551679; -select count(*) from acid_dot; +select count(*) from acid_dot_n0; dfs -rmr ${system:test.tmp.dir}/delete_orig_table; diff --git a/ql/src/test/queries/clientpositive/explainuser_1.q b/ql/src/test/queries/clientpositive/explainuser_1.q index 0772fb9984..23bdb79196 100644 --- a/ql/src/test/queries/clientpositive/explainuser_1.q +++ b/ql/src/test/queries/clientpositive/explainuser_1.q @@ -13,26 +13,26 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; set hive.explain.user=true; -explain create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc; -create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc; +explain create table src_orc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as orc; +create table src_orc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as orc; -alter table src_orc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31'); -desc extended src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31'); +alter table src_orc_merge_test_part_n1 add partition (ds='2012-01-03', ts='2012-01-03+14:46:31'); +desc extended src_orc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31'); -explain insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src; -insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src; -explain insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100; +explain insert overwrite table src_orc_merge_test_part_n1 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src; +insert overwrite table src_orc_merge_test_part_n1 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src; +explain insert into table src_orc_merge_test_part_n1 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100; -explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'; -explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'; +explain select count(1) from src_orc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31'; +explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31'; -alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate; +alter table src_orc_merge_test_part_n1 partition (ds='2012-01-03', 
ts='2012-01-03+14:46:31') concatenate; -explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'; -explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'; +explain select count(1) from src_orc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31'; +explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31'; -drop table src_orc_merge_test_part; +drop table src_orc_merge_test_part_n1; set hive.auto.convert.join=true; @@ -128,7 +128,7 @@ having not exists ) ; -create view cv1 as +create view cv1_n5 as select * from src_cbo b where exists @@ -137,7 +137,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') ; -explain select * from cv1; +explain select * from cv1_n5; explain select * from (select * @@ -250,59 +250,59 @@ FROM (select x.key AS key, count(1) AS cnt FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key) GROUP BY x.key) tmp; -explain create table abcd (a int, b int, c int, d int); -create table abcd (a int, b int, c int, d int); -LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd; +explain create table abcd_n1 (a int, b int, c int, d int); +create table abcd_n1 (a int, b int, c int, d int); +LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd_n1; set hive.map.aggr=true; -explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a; +explain select a, count(distinct b), count(distinct c), sum(d) from abcd_n1 group by a; set hive.map.aggr=false; -explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a; +explain select a, count(distinct b), count(distinct c), sum(d) from abcd_n1 group by a; -explain create table src_rc_merge_test(key int, value string) stored as rcfile; -create table src_rc_merge_test(key int, value string) stored as rcfile; +explain create table src_rc_merge_test_n0(key int, value string) stored as rcfile; +create table src_rc_merge_test_n0(key int, value string) stored as rcfile; -load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test; +load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n0; set hive.exec.compress.output = true; -explain create table tgt_rc_merge_test(key int, value string) stored as rcfile; -create table tgt_rc_merge_test(key int, value string) stored as rcfile; -insert into table tgt_rc_merge_test select * from src_rc_merge_test; +explain create table tgt_rc_merge_test_n0(key int, value string) stored as rcfile; +create table tgt_rc_merge_test_n0(key int, value string) stored as rcfile; +insert into table tgt_rc_merge_test_n0 select * from src_rc_merge_test_n0; -show table extended like `tgt_rc_merge_test`; +show table extended like `tgt_rc_merge_test_n0`; -explain select count(1) from tgt_rc_merge_test; -explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test; +explain select count(1) from tgt_rc_merge_test_n0; +explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n0; -alter table tgt_rc_merge_test concatenate; +alter table tgt_rc_merge_test_n0 concatenate; -show table extended like `tgt_rc_merge_test`; +show table extended like `tgt_rc_merge_test_n0`; -explain select count(1) from tgt_rc_merge_test; -explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test; +explain select count(1) from tgt_rc_merge_test_n0; +explain select 
sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n0; -drop table src_rc_merge_test; -drop table tgt_rc_merge_test; +drop table src_rc_merge_test_n0; +drop table tgt_rc_merge_test_n0; explain select src.key from src cross join src src2; -explain create table nzhang_Tmp(a int, b string); -create table nzhang_Tmp(a int, b string); +explain create table nzhang_Tmp_n1(a int, b string); +create table nzhang_Tmp_n1(a int, b string); -explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10; -create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10; +explain create table nzhang_CTAS1_n1 as select key k, value from src sort by k, value limit 10; +create table nzhang_CTAS1_n1 as select key k, value from src sort by k, value limit 10; -explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10; +explain create table nzhang_ctas3_n1 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10; -create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10; +create table nzhang_ctas3_n1 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10; -explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2; +explain create table if not exists nzhang_ctas3_n1 as select key, value from src sort by key, value limit 2; -create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2; +create table if not exists nzhang_ctas3_n1 as select key, value from src sort by key, value limit 2; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; @@ -324,70 +324,70 @@ select src1.key as k1, src1.value as v1, SORT BY k1, v1, k2, v2; -CREATE TABLE myinput1(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1; +CREATE TABLE myinput1_n7(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1_n7; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value; +explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key; +explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value join myinput1_n7 c on a.key=c.key; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key; +explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value join myinput1_n7 c on a.key<=>c.key; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value; +explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value AND a.value=b.key join myinput1_n7 c on a.key<=>c.key AND a.value=c.value; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value; +explain select * from 
myinput1_n7 a join myinput1_n7 b on a.key<=>b.value AND a.value<=>b.key join myinput1_n7 c on a.key<=>c.key AND a.value<=>c.value; -explain select * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value; -explain select * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value; -explain select * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value; +explain select * FROM myinput1_n7 a LEFT OUTER JOIN myinput1_n7 b ON a.key<=>b.value; +explain select * FROM myinput1_n7 a RIGHT OUTER JOIN myinput1_n7 b ON a.key<=>b.value; +explain select * FROM myinput1_n7 a FULL OUTER JOIN myinput1_n7 b ON a.key<=>b.value; -explain select /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value; +explain select /*+ MAPJOIN(b) */ * FROM myinput1_n7 a JOIN myinput1_n7 b ON a.key<=>b.value; -CREATE TABLE smb_input(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input; -LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input; +CREATE TABLE smb_input_n0(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input_n0; +LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input_n0; ; -CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +CREATE TABLE smb_input1_n2(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE smb_input2_n2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -from smb_input -insert overwrite table smb_input1 select * -insert overwrite table smb_input2 select *; +from smb_input_n0 +insert overwrite table smb_input1_n2 select * +insert overwrite table smb_input2_n2 select *; SET hive.optimize.bucketmapjoin = true; SET hive.optimize.bucketmapjoin.sortedmerge = true; SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -analyze table smb_input1 compute statistics; +analyze table smb_input1_n2 compute statistics; -explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key; -explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value; -explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key; -explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key; -explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key; +explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n2 a JOIN smb_input1_n2 b ON a.key <=> b.key; +explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n2 a JOIN smb_input1_n2 b ON a.key <=> b.key AND a.value <=> b.value; +explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n2 a RIGHT OUTER JOIN smb_input1_n2 b ON a.key <=> b.key; +explain select /*+ MAPJOIN(b) */ * FROM smb_input1_n2 a JOIN smb_input1_n2 b ON a.key <=> b.key; +explain select /*+ MAPJOIN(b) */ * FROM smb_input1_n2 a LEFT OUTER JOIN smb_input1_n2 b ON a.key <=> b.key; -drop table sales; -drop table things; +drop table sales_n0; +drop table things_n0; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -CREATE TABLE sales (name STRING, id INT) +CREATE TABLE sales_n0 (name STRING, id INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'; -CREATE TABLE things (id INT, name STRING) partitioned by (ds string) +CREATE TABLE things_n0 (id INT, name STRING) partitioned 
by (ds string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'; -load data local inpath '../../data/files/sales.txt' INTO TABLE sales; -load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23'); -load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24'); +load data local inpath '../../data/files/sales.txt' INTO TABLE sales_n0; +load data local inpath '../../data/files/things.txt' INTO TABLE things_n0 partition(ds='2011-10-23'); +load data local inpath '../../data/files/things2.txt' INTO TABLE things_n0 partition(ds='2011-10-24'); -explain select name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id); +explain select name,id FROM sales_n0 LEFT SEMI JOIN things_n0 ON (sales_n0.id = things_n0.id); -drop table sales; -drop table things; +drop table sales_n0; +drop table things_n0; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -525,13 +525,13 @@ order by p_name ; -explain create view IF NOT EXISTS mfgr_price_view as +explain create view IF NOT EXISTS mfgr_price_view_n3 as select p_mfgr, p_brand, sum(p_retailprice) as s from part group by p_mfgr, p_brand; -CREATE TABLE part_4( +CREATE TABLE part_4_n1( p_mfgr STRING, p_name STRING, p_size INT, @@ -539,7 +539,7 @@ r INT, dr INT, s DOUBLE); -CREATE TABLE part_5( +CREATE TABLE part_5_n1( p_mfgr STRING, p_name STRING, p_size INT, @@ -553,11 +553,11 @@ explain from noop(on part partition by p_mfgr order by p_name) -INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, +INSERT OVERWRITE TABLE part_4_n1 select p_mfgr, p_name, p_size, rank() over (distribute by p_mfgr sort by p_name) as r, dense_rank() over (distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s -INSERT OVERWRITE TABLE part_5 select p_mfgr,p_name, p_size, +INSERT OVERWRITE TABLE part_5_n1 select p_mfgr,p_name, p_size, round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as r, dense_rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as dr, @@ -622,41 +622,41 @@ explain select explode(array('a', 'b')); set hive.optimize.skewjoin = true; set hive.skewjoin.key = 2; -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n119(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n70(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n26(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T4_n15(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n16(key INT, value STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n119; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n70; +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n26; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n15; explain 
FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n16 select src1.key, src2.value; FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n16 select src1.key, src2.value; explain select /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key; +FROM T1_n119 a JOIN T2_n70 b ON a.key = b.key + JOIN T3_n26 c ON b.key = c.key + JOIN T4_n15 d ON c.key = d.key; explain select /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key; +FROM T1_n119 a JOIN T2_n70 b ON a.key = b.key + JOIN T3_n26 c ON b.key = c.key + JOIN T4_n15 d ON c.key = d.key; -explain FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); -FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +explain FROM T1_n119 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +FROM T1_n119 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); explain select * FROM @@ -666,16 +666,16 @@ JOIN ON (x.key = Y.key); -explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val; +explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n119 k join T1_n119 v on k.key=v.val; -explain select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key; +explain select sum(hash(k.key)), sum(hash(v.val)) from T1_n119 k join T1_n119 v on k.key=v.key; -explain select count(1) from T1 a join T1 b on a.key = b.key; +explain select count(1) from T1_n119 a join T1_n119 b on a.key = b.key; -explain FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +explain FROM T1_n119 a LEFT OUTER JOIN T2_n70 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); -explain FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +explain FROM T1_n119 a RIGHT OUTER JOIN T2_n70 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); -explain FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +explain FROM T1_n119 a FULL OUTER JOIN T2_n70 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); -explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key; +explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n119 k left outer join T1_n119 v on k.key+1=v.key; diff --git a/ql/src/test/queries/clientpositive/explainuser_2.q b/ql/src/test/queries/clientpositive/explainuser_2.q index 1b32b47f6f..2449572382 100644 --- a/ql/src/test/queries/clientpositive/explainuser_2.q +++ b/ql/src/test/queries/clientpositive/explainuser_2.q @@ -1,6 +1,6 @@ --! qt:dataset:srcpart --! qt:dataset:src1 --! qt:dataset:src
set hive.strict.checks.bucketing=false; set hive.explain.user=true; @@ -8,35 +8,35 @@ set hive.metastore.aggregate.stats.cache.enabled=false; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n25(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; -CREATE TABLE ss(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE; +CREATE TABLE ss_n1(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE; CREATE TABLE sr(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE; CREATE TABLE cs(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE; -INSERT OVERWRITE TABLE ss +INSERT OVERWRITE TABLE ss_n1 SELECT x.key,x.value,y.key,y.value,z.key,z.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); INSERT OVERWRITE TABLE sr SELECT x.key,x.value,y.key,y.value,z.key,z.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=12); INSERT OVERWRITE TABLE cs SELECT x.key,x.value,y.key,y.value,z.key,z.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08'); -ANALYZE TABLE ss COMPUTE STATISTICS; -ANALYZE TABLE ss COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3; +ANALYZE TABLE ss_n1 COMPUTE STATISTICS; +ANALYZE TABLE ss_n1 COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3; ANALYZE TABLE sr COMPUTE STATISTICS; ANALYZE TABLE sr COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3; @@ -48,25 +48,25 @@ set hive.auto.convert.join=false; EXPLAIN SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); EXPLAIN select -ss.k1,sr.k2,cs.k3,count(ss.v1),count(sr.v2),count(cs.v3) +ss_n1.k1,sr.k2,cs.k3,count(ss_n1.v1),count(sr.v2),count(cs.v3) FROM -ss,sr,cs,src d1,src d2,src d3,src1,srcpart +ss_n1,sr,cs,src d1,src d2,src d3,src1,srcpart where - ss.k1 = d1.key + ss_n1.k1 = d1.key and sr.k1 = d2.key and cs.k1 = d3.key -and ss.k2 = sr.k2 -and ss.k3 = sr.k3 -and ss.v1 = src1.value -and ss.v2 = srcpart.value +and ss_n1.k2 = sr.k2 +and ss_n1.k3 = sr.k3 +and ss_n1.v1 = src1.value +and ss_n1.v2 = srcpart.value and sr.v2 = cs.v2 and sr.v3 = cs.v3 -and ss.v3='ssv3' +and ss_n1.v3='ssv3' and sr.v1='srv1' and src1.key = 'src1key' and srcpart.key = 'srcpartkey' @@ -81,25 +81,25 @@ limit 100; explain SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value = z.value) union SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value = z.value); explain SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value = z.value) union SELECT x.key,
y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src)z ON (x.value = z.value) union SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value); set hive.auto.convert.join=true; @@ -110,25 +110,25 @@ set hive.stats.fetch.column.stats=false; EXPLAIN SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); EXPLAIN select -ss.k1,sr.k2,cs.k3,count(ss.v1),count(sr.v2),count(cs.v3) +ss_n1.k1,sr.k2,cs.k3,count(ss_n1.v1),count(sr.v2),count(cs.v3) FROM -ss,sr,cs,src d1,src d2,src d3,src1,srcpart +ss_n1,sr,cs,src d1,src d2,src d3,src1,srcpart where - ss.k1 = d1.key + ss_n1.k1 = d1.key and sr.k1 = d2.key and cs.k1 = d3.key -and ss.k2 = sr.k2 -and ss.k3 = sr.k3 -and ss.v1 = src1.value -and ss.v2 = srcpart.value +and ss_n1.k2 = sr.k2 +and ss_n1.k3 = sr.k3 +and ss_n1.v1 = src1.value +and ss_n1.v2 = srcpart.value and sr.v2 = cs.v2 and sr.v3 = cs.v3 -and ss.v3='ssv3' +and ss_n1.v3='ssv3' and sr.v1='srv1' and src1.key = 'src1key' and srcpart.key = 'srcpartkey' @@ -143,25 +143,25 @@ limit 100; explain SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value = z.value) union SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value = z.value); explain SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value = z.value) union SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src)z ON (x.value = z.value) union SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value); set hive.auto.convert.join=true; @@ -169,31 +169,31 @@ set hive.auto.convert.join.noconditionaltask=true; set
hive.auto.convert.join.noconditionaltask.size=10000; set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ; -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_n22(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +CREATE TABLE tab_part_n14 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_part_n23 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n22 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n22 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n23 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n23 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n23 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n23 partition(ds='2008-04-08'); set hive.optimize.bucketingsorting=false; -insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part; +insert overwrite table tab_part_n14 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n23; -CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE tab_n15(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab_n15 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n22; -CREATE TABLE tab2(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab2 partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE tab2_n7(key int, value string)
PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab2_n7 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n22; set hive.convert.join.bucket.mapjoin.tez = false; set hive.auto.convert.sortmerge.join = true; @@ -201,134 +201,134 @@ set hive.auto.convert.sortmerge.join = true; set hive.auto.convert.join.noconditionaltask.size=500; explain -select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key; +select s1.key as key, s1.value as value from tab_n15 s1 join tab_n15 s3 on s1.key=s3.key; explain -select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value; +select s1.key as key, s1.value as value from tab_n15 s1 join tab_n15 s3 on s1.key=s3.key join tab_n15 s2 on s1.value=s2.value; explain -select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key; +select s1.key as key, s1.value as value from tab_n15 s1 join tab2_n7 s3 on s1.key=s3.key; explain -select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key join tab2 s2 on s1.value=s2.value; +select s1.key as key, s1.value as value from tab_n15 s1 join tab2_n7 s3 on s1.key=s3.key join tab2_n7 s2 on s1.value=s2.value; explain -select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key UNION ALL -select s2.key as key, s2.value as value from tab s2 -) a join tab_part b on (a.key = b.key); +select count(*) from (select s1.key as key, s1.value as value from tab_n15 s1 join tab_n15 s3 on s1.key=s3.key UNION ALL +select s2.key as key, s2.value as value from tab_n15 s2 +) a_n19 join tab_part_n14 b_n15 on (a_n19.key = b_n15.key); explain -select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value UNION ALL -select s2.key as key, s2.value as value from tab s2 -) a join tab_part b on (a.key = b.key);set hive.explain.user=true; +select count(*) from (select s1.key as key, s1.value as value from tab_n15 s1 join tab_n15 s3 on s1.key=s3.key join tab_n15 s2 on s1.value=s2.value UNION ALL +select s2.key as key, s2.value as value from tab_n15 s2 +) a_n19 join tab_part_n14 b_n15 on (a_n19.key = b_n15.key);set hive.explain.user=true; explain SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union all select * from src)z ON (x.value = z.value) union all SELECT x.key, y.value FROM src x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union all select key, value from src union all select key, value from src)z ON (x.value = z.value) union all SELECT x.key, y.value FROM src1 x JOIN src1 y ON (x.key = y.key) JOIN (select key, value from src1 union all select key, value from src union all select key, value from src union all select key, value from src)z ON (x.value = z.value); explain SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value =
z.value) union SELECT x.key, y.value FROM src x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src)z ON (x.value = z.value) union SELECT x.key, y.value FROM src1 x JOIN src1 y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value); -CREATE TABLE a(key STRING, value STRING) STORED AS TEXTFILE; -CREATE TABLE b(key STRING, value STRING) STORED AS TEXTFILE; -CREATE TABLE c(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE a_n19(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE b_n15(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE c_n4(key STRING, value STRING) STORED AS TEXTFILE; explain from ( SELECT x.key, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union all select * from src)z ON (x.value = z.value) union all SELECT x.key, y.value FROM src x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union all select key, value from src union all select key, value from src)z ON (x.value = z.value) union all SELECT x.key, y.value FROM src1 x JOIN src1 y ON (x.key = y.key) JOIN (select key, value from src1 union all select key, value from src union all select key, value from src union all select key, value from src)z ON (x.value = z.value) ) tmp -INSERT OVERWRITE TABLE a SELECT tmp.key, tmp.value -INSERT OVERWRITE TABLE b SELECT tmp.key, tmp.value -INSERT OVERWRITE TABLE c SELECT tmp.key, tmp.value; +INSERT OVERWRITE TABLE a_n19 SELECT tmp.key, tmp.value +INSERT OVERWRITE TABLE b_n15 SELECT tmp.key, tmp.value +INSERT OVERWRITE TABLE c_n4 SELECT tmp.key, tmp.value; explain FROM ( SELECT x.key as key, y.value as value from src1 x JOIN src y ON (x.key = y.key) JOIN (select * from src1 union select * from src)z ON (x.value = z.value) union SELECT x.key as key, y.value as value from src x JOIN src y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src)z ON (x.value = z.value) union SELECT x.key as key, y.value as value from src1 x
JOIN src1 y ON (x.key = y.key) JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value) ) tmp -INSERT OVERWRITE TABLE a SELECT tmp.key, tmp.value -INSERT OVERWRITE TABLE b SELECT tmp.key, tmp.value -INSERT OVERWRITE TABLE c SELECT tmp.key, tmp.value; +INSERT OVERWRITE TABLE a_n19 SELECT tmp.key, tmp.value +INSERT OVERWRITE TABLE b_n15 SELECT tmp.key, tmp.value +INSERT OVERWRITE TABLE c_n4 SELECT tmp.key, tmp.value; -CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE; -CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE DEST1_n172(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE DEST2_n43(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE; explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT - select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value; + select s2.key as key, s2.value as value from src s2) unionsrc_n4 +INSERT OVERWRITE TABLE DEST1_n172 SELECT unionsrc_n4.key, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) GROUP BY unionsrc_n4.key +INSERT OVERWRITE TABLE DEST2_n43 SELECT unionsrc_n4.key, unionsrc_n4.value, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) GROUP BY unionsrc_n4.key, unionsrc_n4.value; -EXPLAIN FROM UNIQUEJOIN PRESERVE src a (a.key), PRESERVE src1 b (b.key), PRESERVE srcpart c (c.key) SELECT a.key, b.key, c.key; +EXPLAIN FROM UNIQUEJOIN PRESERVE src a_n19 (a_n19.key), PRESERVE src1 b_n15 (b_n15.key), PRESERVE srcpart c_n4 (c_n4.key) SELECT a_n19.key, b_n15.key, c_n4.key; set hive.entity.capture.transform=true; EXPLAIN SELECT -TRANSFORM(a.key, a.value) USING 'cat' AS (tkey, tvalue) -FROM src a join src b -on a.key = b.key; +TRANSFORM(a_n19.key, a_n19.value) USING 'cat' AS (tkey, tvalue) +FROM src a_n19 join src b_n15 +on a_n19.key = b_n15.key; explain FROM ( select key, value from ( select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1 UNION all - select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub + select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub_n15 UNION all select key, value from src s0 - ) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) -GROUP BY unionsrc.key, unionsrc.value; + ) unionsrc_n4 +INSERT OVERWRITE TABLE DEST1_n172 SELECT unionsrc_n4.key, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) GROUP BY unionsrc_n4.key +INSERT OVERWRITE TABLE DEST2_n43 SELECT unionsrc_n4.key, unionsrc_n4.value, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) +GROUP BY unionsrc_n4.key, unionsrc_n4.value; explain FROM ( select 'tst1' as key, cast(count(1) as string)
as value, 'tst1' as value2 from src s1 UNION all select s2.key as key, s2.value as value, 'tst1' as value2 from src s2 - ) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) -GROUP BY unionsrc.key, unionsrc.value; + ) unionsrc_n4 +INSERT OVERWRITE TABLE DEST1_n172 SELECT unionsrc_n4.key, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) GROUP BY unionsrc_n4.key +INSERT OVERWRITE TABLE DEST2_n43 SELECT unionsrc_n4.key, unionsrc_n4.value, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) +GROUP BY unionsrc_n4.key, unionsrc_n4.value; diff --git a/ql/src/test/queries/clientpositive/explainuser_3.q b/ql/src/test/queries/clientpositive/explainuser_3.q index 0f63070362..0c110aea1a 100644 --- a/ql/src/test/queries/clientpositive/explainuser_3.q +++ b/ql/src/test/queries/clientpositive/explainuser_3.q @@ -14,10 +14,10 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.exec.dynamic.partition.mode=nonstrict; set hive.vectorized.execution.enabled=true; -CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10; -analyze table acid_vectorized compute statistics for columns; -explain select a, b from acid_vectorized order by a, b; +CREATE TABLE acid_vectorized_n0(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true'); +insert into table acid_vectorized_n0 select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10; +analyze table acid_vectorized_n0 compute statistics for columns; +explain select a, b from acid_vectorized_n0 order by a, b; explain select key, value FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol; @@ -36,13 +36,13 @@ explain use newDB; use newDB; -create table tab (name string); +create table tab_n1 (name string); -explain alter table tab rename to newName; +explain alter table tab_n1 rename to newName; -explain drop table tab; +explain drop table tab_n1; -drop table tab; +drop table tab_n1; explain use default; @@ -63,24 +63,24 @@ EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1; explain DROP TEMPORARY MACRO SIGMOID; DROP TEMPORARY MACRO SIGMOID; -explain create table src_autho_test as select * from src; -create table src_autho_test as select * from src; +explain create table src_autho_test_n3 as select * from src; +create table src_autho_test_n3 as select * from src; set hive.security.authorization.enabled=true; -explain grant select on table src_autho_test to user hive_test_user; -grant select on table src_autho_test to user hive_test_user; +explain grant select on table src_autho_test_n3 to user hive_test_user; +grant select on table src_autho_test_n3 to user hive_test_user; -explain show grant user hive_test_user on table src_autho_test; -explain show grant user hive_test_user on table src_autho_test(key); +explain show grant user hive_test_user on table src_autho_test_n3; +explain show grant user hive_test_user on table src_autho_test_n3(key); -select key from src_autho_test order by key limit 20; +select key from src_autho_test_n3 order by key limit 20;
-explain revoke select on table src_autho_test from user hive_test_user;
+explain revoke select on table src_autho_test_n3 from user hive_test_user;
-explain grant select(key) on table src_autho_test to user hive_test_user;
+explain grant select(key) on table src_autho_test_n3 to user hive_test_user;
-explain revoke select(key) on table src_autho_test from user hive_test_user;
+explain revoke select(key) on table src_autho_test_n3 from user hive_test_user;
 explain create role sRc_roLE;
@@ -98,19 +98,19 @@ explain drop role sRc_roLE;
 drop role sRc_roLE;
 set hive.security.authorization.enabled=false;
-drop table src_autho_test;
+drop table src_autho_test_n3;
-explain drop view v;
+explain drop view v_n1;
-explain create view v as with cte as (select * from src order by key limit 5)
+explain create view v_n1 as with cte as (select * from src order by key limit 5)
 select * from cte;
 explain with cte as (select * from src order by key limit 5)
 select * from cte;
-create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n0;
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
@@ -129,40 +129,40 @@ set hive.merge.tezfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
-explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n0 where userid<=13;
-drop table orc_merge5;
+drop table orc_merge5_n0;
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n3(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n2 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_n3 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n3 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n3 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n3 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n3 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n3 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n3 partition(ds='2008-04-08');
 set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n2 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n3;
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n1(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n1 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n3;
 set hive.convert.join.bucket.mapjoin.tez = true;
 explain
 select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key;
+from tab_n1 a join tab_part_n2 b on a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_date.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_date.q
index 1f38a6526d..ad78071a85 100644
--- a/ql/src/test/queries/clientpositive/extrapolate_part_stats_date.q
+++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_date.q
@@ -1,14 +1,14 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.stats.fetch.column.stats=true;
-create table date_dim (d_date date) partitioned by (d_date_sk bigint) stored as orc;
-insert into date_dim partition(d_date_sk=2416945) values('1905-04-09');
-insert into date_dim partition(d_date_sk=2416946) values('1905-04-10');
-insert into date_dim partition(d_date_sk=2416947) values('1905-04-11');
-analyze table date_dim partition(d_date_sk) compute statistics for columns;
+create table date_dim_n1 (d_date date) partitioned by (d_date_sk bigint) stored as orc;
+insert into date_dim_n1 partition(d_date_sk=2416945) values('1905-04-09');
+insert into date_dim_n1 partition(d_date_sk=2416946) values('1905-04-10');
+insert into date_dim_n1 partition(d_date_sk=2416947) values('1905-04-11');
+analyze table date_dim_n1 partition(d_date_sk) compute statistics for columns;
-explain select count(*) from date_dim where d_date > date "1900-01-02" and d_date_sk= 2416945;
+explain select count(*) from date_dim_n1 where d_date > date "1900-01-02" and d_date_sk= 2416945;
-insert into date_dim partition(d_date_sk=2416948) values('1905-04-12');
+insert into date_dim_n1 partition(d_date_sk=2416948) values('1905-04-12');
-explain extended select d_date from date_dim;
+explain extended select d_date from date_dim_n1;
diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
index 5a412a3589..4298a5ed11 100644
--- a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
+++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
@@ -5,57 +5,57 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.metastore.aggregate.stats.cache.enabled=false;
-create table if not exists ext_loc (
+create table if not exists ext_loc_n1 (
 state string,
 locid int,
 zip int,
 year string
 ) row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc;
+LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc_n1;
-create table if not exists loc_orc_1d (
+create table if not exists loc_orc_1d_n1 (
 state string,
 locid int,
 zip int
 ) partitioned by(year string) stored as orc;
-insert overwrite table loc_orc_1d partition(year) select * from ext_loc;
+insert overwrite table loc_orc_1d_n1 partition(year) select * from ext_loc_n1;
-analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid;
+analyze table loc_orc_1d_n1 partition(year='2001') compute statistics for columns state,locid;
-analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid;
+analyze table loc_orc_1d_n1 partition(year='2002') compute statistics for columns state,locid;
-describe formatted loc_orc_1d PARTITION(year='2001') state;
+describe formatted loc_orc_1d_n1 PARTITION(year='2001') state;
-describe formatted loc_orc_1d PARTITION(year='2002') state;
+describe formatted loc_orc_1d_n1 PARTITION(year='2002') state;
 -- basicStatState: COMPLETE colStatState: PARTIAL
-explain extended select state from loc_orc_1d;
+explain extended select state from loc_orc_1d_n1;
 -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL
 -- basicStatState: COMPLETE colStatState: PARTIAL
-explain extended select state,locid from loc_orc_1d;
+explain extended select state,locid from loc_orc_1d_n1;
-analyze table loc_orc_1d partition(year='2000') compute statistics for columns state;
+analyze table loc_orc_1d_n1 partition(year='2000') compute statistics for columns state;
-analyze table loc_orc_1d partition(year='2003') compute statistics for columns state;
+analyze table loc_orc_1d_n1 partition(year='2003') compute statistics for columns state;
-explain extended select state from loc_orc_1d;
+explain extended select state from loc_orc_1d_n1;
-explain extended select state,locid from loc_orc_1d;
+explain extended select state,locid from loc_orc_1d_n1;
-create table if not exists loc_orc_2d (
+create table if not exists loc_orc_2d_n1 (
 state string,
 locid int
 ) partitioned by(zip int, year string) stored as orc;
-insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc;
+insert overwrite table loc_orc_2d_n1 partition(zip, year) select * from ext_loc_n1;
-analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid;
+analyze table loc_orc_2d_n1 partition(zip=94086, year='2001') compute statistics for columns state,locid;
-analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid;
+analyze table loc_orc_2d_n1 partition(zip=94087, year='2002') compute statistics for columns state,locid;
-explain extended select state from loc_orc_2d;
+explain extended select state from loc_orc_2d_n1;
-explain extended select state,locid from loc_orc_2d;
+explain extended select state,locid from loc_orc_2d_n1;
diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
index a80c86ca3d..12853ef131 100644
--- a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
+++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
@@ -6,9 +6,9 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.metastore.aggregate.stats.cache.enabled=false;
-drop table if exists ext_loc;
+drop table if exists ext_loc_n0;
-create table ext_loc (
+create table ext_loc_n0 (
 state string,
 locid double,
 cnt decimal,
@@ -16,87 +16,87 @@ create table ext_loc (
 year string
 ) row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial_ndv.txt' OVERWRITE INTO TABLE ext_loc;
+LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial_ndv.txt' OVERWRITE INTO TABLE ext_loc_n0;
-drop table if exists loc_orc_1d;
+drop table if exists loc_orc_1d_n0;
-create table loc_orc_1d (
+create table loc_orc_1d_n0 (
 state string,
 locid double,
 cnt decimal,
 zip int
 ) partitioned by(year string) stored as orc;
-insert overwrite table loc_orc_1d partition(year) select * from ext_loc;
+insert overwrite table loc_orc_1d_n0 partition(year) select * from ext_loc_n0;
-analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid,cnt,zip;
+analyze table loc_orc_1d_n0 partition(year='2001') compute statistics for columns state,locid,cnt,zip;
-analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid,cnt,zip;
+analyze table loc_orc_1d_n0 partition(year='2002') compute statistics for columns state,locid,cnt,zip;
-describe formatted loc_orc_1d PARTITION(year='2001') state;
+describe formatted loc_orc_1d_n0 PARTITION(year='2001') state;
-describe formatted loc_orc_1d PARTITION(year='2002') state;
+describe formatted loc_orc_1d_n0 PARTITION(year='2002') state;
-describe formatted loc_orc_1d PARTITION(year='2001') locid;
+describe formatted loc_orc_1d_n0 PARTITION(year='2001') locid;
-describe formatted loc_orc_1d PARTITION(year='2002') locid;
+describe formatted loc_orc_1d_n0 PARTITION(year='2002') locid;
-describe formatted loc_orc_1d PARTITION(year='2001') cnt;
+describe formatted loc_orc_1d_n0 PARTITION(year='2001') cnt;
-describe formatted loc_orc_1d PARTITION(year='2002') cnt;
+describe formatted loc_orc_1d_n0 PARTITION(year='2002') cnt;
-describe formatted loc_orc_1d PARTITION(year='2001') zip;
+describe formatted loc_orc_1d_n0 PARTITION(year='2001') zip;
-describe formatted loc_orc_1d PARTITION(year='2002') zip;
+describe formatted loc_orc_1d_n0 PARTITION(year='2002') zip;
-explain extended select state,locid,cnt,zip from loc_orc_1d;
+explain extended select state,locid,cnt,zip from loc_orc_1d_n0;
-analyze table loc_orc_1d partition(year='2000') compute statistics for columns state,locid,cnt,zip;
+analyze table loc_orc_1d_n0 partition(year='2000') compute statistics for columns state,locid,cnt,zip;
-analyze table loc_orc_1d partition(year='2003') compute statistics for columns state,locid,cnt,zip;
+analyze table loc_orc_1d_n0 partition(year='2003') compute statistics for columns state,locid,cnt,zip;
-describe formatted loc_orc_1d PARTITION(year='2000') state;
+describe formatted loc_orc_1d_n0 PARTITION(year='2000') state;
-describe formatted loc_orc_1d PARTITION(year='2003') state;
+describe formatted loc_orc_1d_n0 PARTITION(year='2003') state;
-describe formatted loc_orc_1d PARTITION(year='2000') locid;
+describe formatted loc_orc_1d_n0 PARTITION(year='2000') locid;
-describe formatted loc_orc_1d PARTITION(year='2003') locid;
+describe formatted loc_orc_1d_n0 PARTITION(year='2003') locid;
-describe formatted loc_orc_1d PARTITION(year='2000') cnt;
+describe formatted loc_orc_1d_n0 PARTITION(year='2000') cnt;
-describe formatted loc_orc_1d PARTITION(year='2003') cnt;
+describe formatted loc_orc_1d_n0 PARTITION(year='2003') cnt;
-describe formatted loc_orc_1d PARTITION(year='2000') zip;
+describe formatted loc_orc_1d_n0 PARTITION(year='2000') zip;
-describe formatted loc_orc_1d PARTITION(year='2003') zip;
+describe formatted loc_orc_1d_n0 PARTITION(year='2003') zip;
-explain extended select state,locid,cnt,zip from loc_orc_1d;
+explain extended select state,locid,cnt,zip from loc_orc_1d_n0;
-drop table if exists loc_orc_2d;
+drop table if exists loc_orc_2d_n0;
-create table loc_orc_2d (
+create table loc_orc_2d_n0 (
 state string,
 locid int,
 cnt decimal
 ) partitioned by(zip int, year string) stored as orc;
-insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc;
+insert overwrite table loc_orc_2d_n0 partition(zip, year) select * from ext_loc_n0;
-analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid,cnt;
+analyze table loc_orc_2d_n0 partition(zip=94086, year='2001') compute statistics for columns state,locid,cnt;
-analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid,cnt;
+analyze table loc_orc_2d_n0 partition(zip=94087, year='2002') compute statistics for columns state,locid,cnt;
-describe formatted loc_orc_2d partition(zip=94086, year='2001') state;
+describe formatted loc_orc_2d_n0 partition(zip=94086, year='2001') state;
-describe formatted loc_orc_2d partition(zip=94087, year='2002') state;
+describe formatted loc_orc_2d_n0 partition(zip=94087, year='2002') state;
-describe formatted loc_orc_2d partition(zip=94086, year='2001') locid;
+describe formatted loc_orc_2d_n0 partition(zip=94086, year='2001') locid;
-describe formatted loc_orc_2d partition(zip=94087, year='2002') locid;
+describe formatted loc_orc_2d_n0 partition(zip=94087, year='2002') locid;
-describe formatted loc_orc_2d partition(zip=94086, year='2001') cnt;
+describe formatted loc_orc_2d_n0 partition(zip=94086, year='2001') cnt;
-describe formatted loc_orc_2d partition(zip=94087, year='2002') cnt;
+describe formatted loc_orc_2d_n0 partition(zip=94087, year='2002') cnt;
-explain extended select state,locid,cnt,zip from loc_orc_2d;
+explain extended select state,locid,cnt,zip from loc_orc_2d_n0;
diff --git a/ql/src/test/queries/clientpositive/fileformat_sequencefile.q b/ql/src/test/queries/clientpositive/fileformat_sequencefile.q
index 29a69da320..0059a13a03 100644
--- a/ql/src/test/queries/clientpositive/fileformat_sequencefile.q
+++ b/ql/src/test/queries/clientpositive/fileformat_sequencefile.q
@@ -1,18 +1,18 @@
 --! qt:dataset:src
 EXPLAIN
-CREATE TABLE dest1(key INT, value STRING) STORED AS
+CREATE TABLE dest1_n85(key INT, value STRING) STORED AS
 INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';
-CREATE TABLE dest1(key INT, value STRING) STORED AS
+CREATE TABLE dest1_n85(key INT, value STRING) STORED AS
 INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';
-DESCRIBE EXTENDED dest1;
+DESCRIBE EXTENDED dest1_n85;
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10;
+INSERT OVERWRITE TABLE dest1_n85 SELECT src.key, src.value WHERE src.key < 10;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n85.* FROM dest1_n85;
diff --git a/ql/src/test/queries/clientpositive/fileformat_text.q b/ql/src/test/queries/clientpositive/fileformat_text.q
index 4b4e557435..d80b9d31ff 100644
--- a/ql/src/test/queries/clientpositive/fileformat_text.q
+++ b/ql/src/test/queries/clientpositive/fileformat_text.q
@@ -1,18 +1,18 @@
 --! qt:dataset:src
 EXPLAIN
-CREATE TABLE dest1(key INT, value STRING) STORED AS
+CREATE TABLE dest1_n107(key INT, value STRING) STORED AS
 INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-CREATE TABLE dest1(key INT, value STRING) STORED AS
+CREATE TABLE dest1_n107(key INT, value STRING) STORED AS
 INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-DESCRIBE EXTENDED dest1;
+DESCRIBE EXTENDED dest1_n107;
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10;
+INSERT OVERWRITE TABLE dest1_n107 SELECT src.key, src.value WHERE src.key < 10;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n107.* FROM dest1_n107;
diff --git a/ql/src/test/queries/clientpositive/filter_join_breaktask2.q b/ql/src/test/queries/clientpositive/filter_join_breaktask2.q
index 78a19ebb5e..9382bc7cca 100644
--- a/ql/src/test/queries/clientpositive/filter_join_breaktask2.q
+++ b/ql/src/test/queries/clientpositive/filter_join_breaktask2.q
@@ -2,35 +2,35 @@ set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
-create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string)
+create table T1_n85(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string)
 partitioned by (ds string);
-create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string);
+create table T2_n53(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string);
-create table T3 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string);
+create table T3_n18 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string);
-create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string);
+create table T4_n8 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string);
-insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows);
+insert overwrite table T1_n85 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows);
-insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows);
+insert overwrite table T2_n53 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows);
-insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows);
+insert overwrite table T3_n18 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows);
-insert overwrite table T4 partition(ds='2010-04-17')
+insert overwrite table T4_n8 partition(ds='2010-04-17')
 select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S', '6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1;
-select * from T2;
-select * from T1;
-select * from T3;
-select * from T4;
+select * from T2_n53;
+select * from T1_n85;
+select * from T3_n18;
+select * from T4_n8;
 SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0
-FROM T1 a JOIN T2 b
+FROM T1_n85 a JOIN T2_n53 b
 ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17')
- JOIN T3 c
+ JOIN T3_n18 c
 ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17')
- JOIN T4 d
+ JOIN T4_n8 d
 ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17');
diff --git a/ql/src/test/queries/clientpositive/floor_time.q b/ql/src/test/queries/clientpositive/floor_time.q
index d73267d702..d239d9b1fe 100644
--- a/ql/src/test/queries/clientpositive/floor_time.q
+++ b/ql/src/test/queries/clientpositive/floor_time.q
@@ -1,47 +1,47 @@
 --! qt:dataset:src
-drop table extract_udf;
+drop table extract_udf_n0;
-create table extract_udf (t timestamp);
+create table extract_udf_n0 (t timestamp);
 from (select * from src tablesample (1 rows)) s
- insert overwrite table extract_udf
+ insert overwrite table extract_udf_n0
 select '2011-05-06 07:08:09.1234567';
 select t
-from extract_udf;
+from extract_udf_n0;
 explain
 select floor_day(t)
-from extract_udf;
+from extract_udf_n0;
 select floor_day(t)
-from extract_udf;
+from extract_udf_n0;
 -- new syntax
 explain
 select floor(t to day)
-from extract_udf;
+from extract_udf_n0;
 select floor(t to day)
-from extract_udf;
+from extract_udf_n0;
 select floor(t to second)
-from extract_udf;
+from extract_udf_n0;
 select floor(t to minute)
-from extract_udf;
+from extract_udf_n0;
 select floor(t to hour)
-from extract_udf;
+from extract_udf_n0;
 select floor(t to week)
-from extract_udf;
+from extract_udf_n0;
 select floor(t to month)
-from extract_udf;
+from extract_udf_n0;
 select floor(t to quarter)
-from extract_udf;
+from extract_udf_n0;
 select floor(t to year)
-from extract_udf;
+from extract_udf_n0;
diff --git a/ql/src/test/queries/clientpositive/fm-sketch.q b/ql/src/test/queries/clientpositive/fm-sketch.q
index 3120517881..e6e647e10e 100644
--- a/ql/src/test/queries/clientpositive/fm-sketch.q
+++ b/ql/src/test/queries/clientpositive/fm-sketch.q
@@ -2,58 +2,58 @@ set hive.mapred.mode=nonstrict;
 set hive.stats.ndv.algo=fm;
-create table n(key int);
+create table n_n0(key int);
-insert overwrite table n select null from src;
+insert overwrite table n_n0 select null from src;
-explain analyze table n compute statistics for columns;
+explain analyze table n_n0 compute statistics for columns;
-analyze table n compute statistics for columns;
+analyze table n_n0 compute statistics for columns;
-desc formatted n key;
+desc formatted n_n0 key;
-create table i(key int);
+create table i_n1(key int);
-insert overwrite table i select key from src;
+insert overwrite table i_n1 select key from src;
-explain analyze table i compute statistics for columns;
+explain analyze table i_n1 compute statistics for columns;
-analyze table i compute statistics for columns;
+analyze table i_n1 compute statistics for columns;
-desc formatted i key;
+desc formatted i_n1 key;
-drop table i;
+drop table i_n1;
-create table i(key double);
+create table i_n1(key double);
-insert overwrite table i select key from src;
+insert overwrite table i_n1 select key from src;
-analyze table i compute statistics for columns;
+analyze table i_n1 compute statistics for columns;
-desc formatted i key;
+desc formatted i_n1 key;
-drop table i;
+drop table i_n1;
-create table i(key decimal);
+create table i_n1(key decimal);
-insert overwrite table i select key from src;
+insert overwrite table i_n1 select key from src;
-analyze table i compute statistics for columns;
+analyze table i_n1 compute statistics for columns;
-desc formatted i key;
+desc formatted i_n1 key;
-drop table i;
+drop table i_n1;
-create table i(key date);
+create table i_n1(key date);
-insert into i values ('2012-08-17');
-insert into i values ('2012-08-17');
-insert into i values ('2013-08-17');
-insert into i values ('2012-03-17');
-insert into i values ('2012-05-17');
+insert into i_n1 values ('2012-08-17');
+insert into i_n1 values ('2012-08-17');
+insert into i_n1 values ('2013-08-17');
+insert into i_n1 values ('2012-03-17');
+insert into i_n1 values ('2012-05-17');
-analyze table i compute statistics for columns;
+analyze table i_n1 compute statistics for columns;
-desc formatted i key;
+desc formatted i_n1 key;
diff --git a/ql/src/test/queries/clientpositive/gen_udf_example_add10.q b/ql/src/test/queries/clientpositive/gen_udf_example_add10.q
index 69178c90b1..6732ac5305 100644
--- a/ql/src/test/queries/clientpositive/gen_udf_example_add10.q
+++ b/ql/src/test/queries/clientpositive/gen_udf_example_add10.q
@@ -2,12 +2,12 @@ add jar ${system:maven.local.repository}/org/apache/hive/hive-contrib/${system:h
 create temporary function example_add10 as 'org.apache.hadoop.hive.contrib.genericudf.example.GenericUDFAdd10';
-create table t1(x int,y double);
-load data local inpath '../../data/files/T1.txt' into table t1;
+create table t1_n102(x int,y double);
+load data local inpath '../../data/files/T1.txt' into table t1_n102;
-explain select example_add10(x) as a,example_add10(y) as b from t1 order by a desc,b limit 10;
+explain select example_add10(x) as a,example_add10(y) as b from t1_n102 order by a desc,b limit 10;
-select example_add10(x) as a,example_add10(y) as b from t1 order by a desc,b limit 10;
+select example_add10(x) as a,example_add10(y) as b from t1_n102 order by a desc,b limit 10;
-drop table t1;
+drop table t1_n102;
 drop temporary function example_add10;
diff --git a/ql/src/test/queries/clientpositive/groupby10.q b/ql/src/test/queries/clientpositive/groupby10.q
index 5e78831dda..d26e0699fa 100644
--- a/ql/src/test/queries/clientpositive/groupby10.q
+++ b/ql/src/test/queries/clientpositive/groupby10.q
@@ -4,7 +4,7 @@ set hive.groupby.skewindata=true;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(key INT, val1 INT, val2 INT);
+CREATE TABLE dest1_n0(key INT, val1 INT, val2 INT);
 CREATE TABLE dest2(key INT, val1 INT, val2 INT);
 CREATE TABLE INPUT(key INT, value STRING) STORED AS TEXTFILE;
@@ -12,28 +12,28 @@ LOAD DATA LOCAL INPATH '../../data/files/kv5.txt' INTO TABLE INPUT;
 EXPLAIN
 FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key;
 FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key;
-SELECT * from dest1;
+SELECT * from dest1_n0;
 SELECT * from dest2;
 set hive.multigroupby.singlereducer=true;
 EXPLAIN
 FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key;
 FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key;
-SELECT * from dest1;
+SELECT * from dest1_n0;
 SELECT * from dest2;
 set hive.groupby.skewindata=false;
@@ -41,12 +41,12 @@ set hive.groupby.skewindata=false;
 EXPLAIN
 FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key;
 FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
 INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key;
-SELECT * from dest1;
+SELECT * from dest1_n0;
 SELECT * from dest2;
diff --git a/ql/src/test/queries/clientpositive/groupby11.q b/ql/src/test/queries/clientpositive/groupby11.q
index 630f6c664e..e0a531422b 100644
--- a/ql/src/test/queries/clientpositive/groupby11.q
+++ b/ql/src/test/queries/clientpositive/groupby11.q
@@ -5,24 +5,24 @@ set hive.groupby.skewindata=true;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string);
-CREATE TABLE dest2(key STRING, val1 INT, val2 INT) partitioned by (ds string);
+CREATE TABLE dest1_n137(key STRING, val1 INT, val2 INT) partitioned by (ds string);
+CREATE TABLE dest2_n36(key STRING, val1 INT, val2 INT) partitioned by (ds string);
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 partition(ds='111')
+INSERT OVERWRITE TABLE dest1_n137 partition(ds='111')
 SELECT src.value, count(src.key), count(distinct src.key) GROUP BY src.value
-INSERT OVERWRITE TABLE dest2 partition(ds='111')
+INSERT OVERWRITE TABLE dest2_n36 partition(ds='111')
 SELECT substr(src.value, 5), count(src.key), count(distinct src.key) GROUP BY substr(src.value, 5);
 FROM src
-INSERT OVERWRITE TABLE dest1 partition(ds='111')
+INSERT OVERWRITE TABLE dest1_n137 partition(ds='111')
 SELECT src.value, count(src.key), count(distinct src.key) GROUP BY src.value
-INSERT OVERWRITE TABLE dest2 partition(ds='111')
+INSERT OVERWRITE TABLE dest2_n36 partition(ds='111')
 SELECT substr(src.value, 5), count(src.key), count(distinct src.key) GROUP BY substr(src.value, 5);
-SELECT * from dest1;
-SELECT * from dest2;
+SELECT * from dest1_n137;
+SELECT * from dest2_n36;
diff --git a/ql/src/test/queries/clientpositive/groupby12.q b/ql/src/test/queries/clientpositive/groupby12.q
index aa6545773e..879b213c65 100644
--- a/ql/src/test/queries/clientpositive/groupby12.q
+++ b/ql/src/test/queries/clientpositive/groupby12.q
@@ -1,14 +1,14 @@
 --! qt:dataset:src
 set hive.map.aggr=false;
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n106(key INT, value STRING) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key;
+INSERT OVERWRITE TABLE dest1_n106 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key;
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key;
+INSERT OVERWRITE TABLE dest1_n106 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n106.* FROM dest1_n106;
diff --git a/ql/src/test/queries/clientpositive/groupby1_limit.q b/ql/src/test/queries/clientpositive/groupby1_limit.q
index 379017d878..8d97f97ad3 100644
--- a/ql/src/test/queries/clientpositive/groupby1_limit.q
+++ b/ql/src/test/queries/clientpositive/groupby1_limit.q
@@ -5,11 +5,11 @@ set mapred.reduce.tasks=31;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n125(key INT, value DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5;
+FROM src INSERT OVERWRITE TABLE dest1_n125 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key ORDER BY src.key LIMIT 5;
+FROM src INSERT OVERWRITE TABLE dest1_n125 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key ORDER BY src.key LIMIT 5;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n125.* FROM dest1_n125;
diff --git a/ql/src/test/queries/clientpositive/groupby1_map.q b/ql/src/test/queries/clientpositive/groupby1_map.q
index 4f0a031013..0e71e87224 100644
--- a/ql/src/test/queries/clientpositive/groupby1_map.q
+++ b/ql/src/test/queries/clientpositive/groupby1_map.q
@@ -6,11 +6,11 @@ set mapred.reduce.tasks=31;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n61(key INT, value DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
+FROM src INSERT OVERWRITE TABLE dest1_n61 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
+FROM src INSERT OVERWRITE TABLE dest1_n61 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n61.* FROM dest1_n61;
diff --git a/ql/src/test/queries/clientpositive/groupby1_map_nomap.q b/ql/src/test/queries/clientpositive/groupby1_map_nomap.q
index 926aed6dde..ad42e75a35 100644
--- a/ql/src/test/queries/clientpositive/groupby1_map_nomap.q
+++ b/ql/src/test/queries/clientpositive/groupby1_map_nomap.q
@@ -6,11 +6,11 @@ set hive.groupby.mapaggr.checkinterval=20;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n67(key INT, value DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
+FROM src INSERT OVERWRITE TABLE dest1_n67 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
+FROM src INSERT OVERWRITE TABLE dest1_n67 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n67.* FROM dest1_n67;
diff --git a/ql/src/test/queries/clientpositive/groupby1_map_skew.q b/ql/src/test/queries/clientpositive/groupby1_map_skew.q
index 64cadb3d42..f0ff6d1516 100644
--- a/ql/src/test/queries/clientpositive/groupby1_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby1_map_skew.q
@@ -6,11 +6,11 @@ set mapred.reduce.tasks=31;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n175(key INT, value DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
+FROM src INSERT OVERWRITE TABLE dest1_n175 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
+FROM src INSERT OVERWRITE TABLE dest1_n175 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n175.* FROM dest1_n175;
diff --git a/ql/src/test/queries/clientpositive/groupby1_noskew.q b/ql/src/test/queries/clientpositive/groupby1_noskew.q
index 6e8382940a..0a519f568f 100644
--- a/ql/src/test/queries/clientpositive/groupby1_noskew.q
+++ b/ql/src/test/queries/clientpositive/groupby1_noskew.q
@@ -6,11 +6,11 @@ set mapred.reduce.tasks=31;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest_g1_n0(key INT, value DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
+FROM src INSERT OVERWRITE TABLE dest_g1_n0 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
+FROM src INSERT OVERWRITE TABLE dest_g1_n0 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-SELECT dest_g1.* FROM dest_g1;
+SELECT dest_g1_n0.* FROM dest_g1_n0;
diff --git a/ql/src/test/queries/clientpositive/groupby2.q b/ql/src/test/queries/clientpositive/groupby2.q
old mode 100755
new mode 100644
index 6a8722aeee..9ab04617fd
--- a/ql/src/test/queries/clientpositive/groupby2.q
+++ b/ql/src/test/queries/clientpositive/groupby2.q
@@ -5,15 +5,15 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.map.aggr=false;
 set hive.groupby.skewindata=true;
-CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_g2_n2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest_g2_n2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
 FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest_g2_n2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
 -- SORT_QUERY_RESULTS
-SELECT dest_g2.* FROM dest_g2;
+SELECT dest_g2_n2.* FROM dest_g2_n2;
diff --git a/ql/src/test/queries/clientpositive/groupby2_map.q b/ql/src/test/queries/clientpositive/groupby2_map.q
index 457e401467..8c3bbbfdfb 100644
--- a/ql/src/test/queries/clientpositive/groupby2_map.q
+++ b/ql/src/test/queries/clientpositive/groupby2_map.q
@@ -6,13 +6,13 @@ set mapred.reduce.tasks=31;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n16(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n16 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n16 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1_n16.* FROM dest1_n16;
diff --git a/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q b/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
index f4f241c712..730027e754 100644
--- a/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
+++ b/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
@@ -6,24 +6,24 @@ set mapred.reduce.tasks=31;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n38(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n38 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n38 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1_n38.* FROM dest1_n38;
 -- HIVE-5560 when group by key is used in distinct funtion, invalid result are returned
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n38 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n38 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1_n38.* FROM dest1_n38;
diff --git a/ql/src/test/queries/clientpositive/groupby2_map_skew.q b/ql/src/test/queries/clientpositive/groupby2_map_skew.q
index 4fef7d1a0e..4130ba6046 100644
--- a/ql/src/test/queries/clientpositive/groupby2_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby2_map_skew.q
@@ -4,13 +4,13 @@ set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
 set mapred.reduce.tasks=31;
-CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n10(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n10 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n10 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1 order by key;
+SELECT dest1_n10.* FROM dest1_n10 order by key;
diff --git a/ql/src/test/queries/clientpositive/groupby2_noskew.q b/ql/src/test/queries/clientpositive/groupby2_noskew.q
index f467fba459..09060464c4 100644
--- a/ql/src/test/queries/clientpositive/groupby2_noskew.q
+++ b/ql/src/test/queries/clientpositive/groupby2_noskew.q
@@ -6,13 +6,13 @@ set mapred.reduce.tasks=31;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_g2_n1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest_g2_n1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
 FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest_g2_n1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-SELECT dest_g2.* FROM dest_g2;
+SELECT dest_g2_n1.* FROM dest_g2_n1;
diff --git a/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q b/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
index 69b617b0ea..dbc96f589b 100644
--- a/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
+++ b/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
@@ -6,13 +6,13 @@ set mapred.reduce.tasks=31;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
+CREATE TABLE dest_g2_n3(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest_g2_n3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
 FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest_g2_n3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-SELECT dest_g2.* FROM dest_g2;
+SELECT dest_g2_n3.* FROM dest_g2_n3;
diff --git a/ql/src/test/queries/clientpositive/groupby3.q b/ql/src/test/queries/clientpositive/groupby3.q
old mode 100755
new mode 100644
index 0d6f00f65e..f399b3a54b
--- a/ql/src/test/queries/clientpositive/groupby3.q
+++ b/ql/src/test/queries/clientpositive/groupby3.q
@@ -5,11 +5,11 @@ set hive.explain.user=false;
 set hive.map.aggr=false;
 set hive.groupby.skewindata=true;
-CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n119(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n119 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -21,7 +21,7 @@ INSERT OVERWRITE TABLE dest1 SELECT
 var_samp(substr(src.value,5));
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n119 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -32,6 +32,6 @@ INSERT OVERWRITE TABLE dest1 SELECT
 variance(substr(src.value,5)),
 var_samp(substr(src.value,5));
-SELECT dest1.* FROM dest1;
+SELECT dest1_n119.* FROM dest1_n119;
diff --git a/ql/src/test/queries/clientpositive/groupby3_map.q b/ql/src/test/queries/clientpositive/groupby3_map.q
index ebfb69650e..0a0a7ce93e 100644
--- a/ql/src/test/queries/clientpositive/groupby3_map.q
+++ b/ql/src/test/queries/clientpositive/groupby3_map.q
@@ -4,11 +4,11 @@ set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
 set mapred.reduce.tasks=31;
-CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n53(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n53 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -20,7 +20,7 @@ INSERT OVERWRITE TABLE dest1 SELECT
 var_samp(substr(src.value,5));
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n53 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -41,6 +41,6 @@ round(c6, 11) c6,
 round(c7, 11) c7,
 round(c8, 5) c8,
 round(c9, 9) c9
-FROM dest1;
+FROM dest1_n53;
diff --git a/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q b/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q
index a72b6fb863..9e8270b328 100644
--- a/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q
+++ b/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q
@@ -4,11 +4,11 @@ set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
 set mapred.reduce.tasks=31;
-CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n68(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n68 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -22,7 +22,7 @@ INSERT OVERWRITE TABLE dest1 SELECT
 count(DISTINCT substr(src.value, 5));
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n68 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -35,4 +35,4 @@ INSERT OVERWRITE TABLE dest1 SELECT
 sum(DISTINCT substr(src.value, 5)),
 count(DISTINCT substr(src.value, 5));
-SELECT dest1.* FROM dest1;
+SELECT dest1_n68.* FROM dest1_n68;
diff --git a/ql/src/test/queries/clientpositive/groupby3_map_skew.q b/ql/src/test/queries/clientpositive/groupby3_map_skew.q
index 01b616c87b..3371582415 100644
--- a/ql/src/test/queries/clientpositive/groupby3_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby3_map_skew.q
@@ -5,11 +5,11 @@ set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
 set mapred.reduce.tasks=31;
-CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n131(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n131 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -21,7 +21,7 @@ INSERT OVERWRITE TABLE dest1 SELECT
 var_samp(substr(src.value,5));
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n131 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -33,4 +33,4 @@ INSERT OVERWRITE TABLE dest1 SELECT
 var_samp(substr(src.value,5));
 SELECT ROUND(c1, 1), ROUND(c2, 3), ROUND(c3, 5), ROUND(c4, 1), ROUND(c5, 1), ROUND(c6, 5),
-ROUND(c7,5), ROUND(c8, 5), ROUND(c9, 5) FROM dest1;
+ROUND(c7,5), ROUND(c8, 5), ROUND(c9, 5) FROM dest1_n131;
diff --git a/ql/src/test/queries/clientpositive/groupby3_noskew.q b/ql/src/test/queries/clientpositive/groupby3_noskew.q
index cd049ba7ff..fead423392 100644
--- a/ql/src/test/queries/clientpositive/groupby3_noskew.q
+++ b/ql/src/test/queries/clientpositive/groupby3_noskew.q
@@ -5,11 +5,11 @@ set hive.map.aggr=false;
 set hive.groupby.skewindata=false;
 set mapred.reduce.tasks=31;
-CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n63(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n63 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -21,7 +21,7 @@ INSERT OVERWRITE TABLE dest1 SELECT
 var_samp(substr(src.value,5));
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n63 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -32,7 +32,7 @@ INSERT OVERWRITE TABLE dest1 SELECT
 variance(substr(src.value,5)),
 var_samp(substr(src.value,5));
-SELECT dest1.* FROM dest1;
+SELECT dest1_n63.* FROM dest1_n63;
diff --git a/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q b/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q
index 2c8cf5de67..4b49abf284 100644
--- a/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q
+++ b/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q
@@ -5,11 +5,11 @@ set hive.map.aggr=false;
 set hive.groupby.skewindata=false;
 set mapred.reduce.tasks=31;
-CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n24(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n24 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -23,7 +23,7 @@ INSERT OVERWRITE TABLE dest1 SELECT
 count(DISTINCT substr(src.value, 5));
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT
+INSERT OVERWRITE TABLE dest1_n24 SELECT
 sum(substr(src.value,5)),
 avg(substr(src.value,5)),
 avg(DISTINCT substr(src.value,5)),
@@ -36,5 +36,5 @@ INSERT OVERWRITE TABLE dest1 SELECT
 sum(DISTINCT substr(src.value, 5)),
 count(DISTINCT substr(src.value, 5));
-SELECT dest1.* FROM dest1;
+SELECT dest1_n24.* FROM dest1_n24;
diff --git a/ql/src/test/queries/clientpositive/groupby4.q b/ql/src/test/queries/clientpositive/groupby4.q
old mode 100755
new mode 100644
index 2660fde151..87efa4ebf2
--- a/ql/src/test/queries/clientpositive/groupby4.q
+++ b/ql/src/test/queries/clientpositive/groupby4.q
@@ -5,14 +5,14 @@ set hive.groupby.skewindata=true;
 -- SORT_QUERY_RESULTS
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n168(c1 STRING) STORED AS TEXTFILE;
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n168 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n168 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1_n168.* FROM dest1_n168;
diff --git a/ql/src/test/queries/clientpositive/groupby4_map.q b/ql/src/test/queries/clientpositive/groupby4_map.q
index 09f223a599..9051beeee8 100644
--- a/ql/src/test/queries/clientpositive/groupby4_map.q
+++ b/ql/src/test/queries/clientpositive/groupby4_map.q
@@ -4,11 +4,11 @@ set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
 set mapred.reduce.tasks=31;
-CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n40(key INT) STORED AS TEXTFILE;
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
+FROM src INSERT OVERWRITE TABLE dest1_n40 SELECT count(1);
-FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
+FROM src INSERT OVERWRITE TABLE dest1_n40 SELECT count(1);
-SELECT dest1.* FROM dest1;
+SELECT dest1_n40.* FROM dest1_n40;
diff --git a/ql/src/test/queries/clientpositive/groupby4_map_skew.q b/ql/src/test/queries/clientpositive/groupby4_map_skew.q
index a8c8a8c331..00269e99d6 100644
--- a/ql/src/test/queries/clientpositive/groupby4_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby4_map_skew.q
@@ -4,11 +4,11 @@ set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
 set mapred.reduce.tasks=31;
 
-CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n141(key INT) STORED AS TEXTFILE;
 
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
+FROM src INSERT OVERWRITE TABLE dest1_n141 SELECT count(1);
 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
+FROM src INSERT OVERWRITE TABLE dest1_n141 SELECT count(1);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n141.* FROM dest1_n141;
diff --git a/ql/src/test/queries/clientpositive/groupby4_noskew.q b/ql/src/test/queries/clientpositive/groupby4_noskew.q
index 69e1597a48..cf2b1633dd 100644
--- a/ql/src/test/queries/clientpositive/groupby4_noskew.q
+++ b/ql/src/test/queries/clientpositive/groupby4_noskew.q
@@ -7,14 +7,14 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n33(c1 STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n33 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
+INSERT OVERWRITE TABLE dest1_n33 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n33.* FROM dest1_n33;
diff --git a/ql/src/test/queries/clientpositive/groupby5.q b/ql/src/test/queries/clientpositive/groupby5.q
old mode 100755
new mode 100644
index 73e68c4227..f6f76c50c0
--- a/ql/src/test/queries/clientpositive/groupby5.q
+++ b/ql/src/test/queries/clientpositive/groupby5.q
@@ -5,18 +5,18 @@ set hive.groupby.skewindata=true;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n36(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n36
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key;
 
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n36
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n36.* FROM dest1_n36;
diff --git a/ql/src/test/queries/clientpositive/groupby5_map.q b/ql/src/test/queries/clientpositive/groupby5_map.q
index 0704b69a76..8651560d75 100644
--- a/ql/src/test/queries/clientpositive/groupby5_map.q
+++ b/ql/src/test/queries/clientpositive/groupby5_map.q
@@ -3,11 +3,11 @@ set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
 set mapred.reduce.tasks=31;
 
-CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n75(key INT) STORED AS TEXTFILE;
 
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);
+FROM src INSERT OVERWRITE TABLE dest1_n75 SELECT sum(src.key);
 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);
+FROM src INSERT OVERWRITE TABLE dest1_n75 SELECT sum(src.key);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n75.* FROM dest1_n75;
diff --git a/ql/src/test/queries/clientpositive/groupby5_map_skew.q b/ql/src/test/queries/clientpositive/groupby5_map_skew.q
index 469c42f280..ecb6976c1b 100644
--- a/ql/src/test/queries/clientpositive/groupby5_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby5_map_skew.q
@@ -3,11 +3,11 @@ set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
 set mapred.reduce.tasks=31;
 
-CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n76(key INT) STORED AS TEXTFILE;
 
 EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);
+FROM src INSERT OVERWRITE TABLE dest1_n76 SELECT sum(src.key);
 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);
+FROM src INSERT OVERWRITE TABLE dest1_n76 SELECT sum(src.key);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n76.* FROM dest1_n76;
diff --git a/ql/src/test/queries/clientpositive/groupby5_noskew.q b/ql/src/test/queries/clientpositive/groupby5_noskew.q
index 392b265cef..4181c5fd59 100644
--- a/ql/src/test/queries/clientpositive/groupby5_noskew.q
+++ b/ql/src/test/queries/clientpositive/groupby5_noskew.q
@@ -7,18 +7,18 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n31(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n31
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key;
 
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n31
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n31.* FROM dest1_n31;
diff --git a/ql/src/test/queries/clientpositive/groupby6.q b/ql/src/test/queries/clientpositive/groupby6.q
old mode 100755
new mode 100644
index fee53e4495..f626640fe9
--- a/ql/src/test/queries/clientpositive/groupby6.q
+++ b/ql/src/test/queries/clientpositive/groupby6.q
@@ -5,15 +5,15 @@ set hive.groupby.skewindata=true;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n92(c1 STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
+INSERT OVERWRITE TABLE dest1_n92 SELECT DISTINCT substr(src.value,5,1);
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
+INSERT OVERWRITE TABLE dest1_n92 SELECT DISTINCT substr(src.value,5,1);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n92.* FROM dest1_n92;
diff --git a/ql/src/test/queries/clientpositive/groupby6_map.q b/ql/src/test/queries/clientpositive/groupby6_map.q
index 9d184e0bd9..68d327decf 100644
--- a/ql/src/test/queries/clientpositive/groupby6_map.q
+++ b/ql/src/test/queries/clientpositive/groupby6_map.q
@@ -6,15 +6,15 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n19(c1 STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
+INSERT OVERWRITE TABLE dest1_n19 SELECT DISTINCT substr(src.value,5,1);
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
+INSERT OVERWRITE TABLE dest1_n19 SELECT DISTINCT substr(src.value,5,1);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n19.* FROM dest1_n19;
diff --git a/ql/src/test/queries/clientpositive/groupby6_map_skew.q b/ql/src/test/queries/clientpositive/groupby6_map_skew.q
index d47750c6f0..704a346281 100644
--- a/ql/src/test/queries/clientpositive/groupby6_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby6_map_skew.q
@@ -6,15 +6,15 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n98(c1 STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
+INSERT OVERWRITE TABLE dest1_n98 SELECT DISTINCT substr(src.value,5,1);
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
+INSERT OVERWRITE TABLE dest1_n98 SELECT DISTINCT substr(src.value,5,1);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n98.* FROM dest1_n98;
diff --git a/ql/src/test/queries/clientpositive/groupby6_noskew.q b/ql/src/test/queries/clientpositive/groupby6_noskew.q
index a40af5f30c..bbf67124c6 100644
--- a/ql/src/test/queries/clientpositive/groupby6_noskew.q
+++ b/ql/src/test/queries/clientpositive/groupby6_noskew.q
@@ -7,15 +7,15 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n100(c1 STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
+INSERT OVERWRITE TABLE dest1_n100 SELECT DISTINCT substr(src.value,5,1);
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
+INSERT OVERWRITE TABLE dest1_n100 SELECT DISTINCT substr(src.value,5,1);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n100.* FROM dest1_n100;
diff --git a/ql/src/test/queries/clientpositive/groupby7.q b/ql/src/test/queries/clientpositive/groupby7.q
index a3ea700904..6d07a07c31 100644
--- a/ql/src/test/queries/clientpositive/groupby7.q
+++ b/ql/src/test/queries/clientpositive/groupby7.q
@@ -4,15 +4,15 @@ set hive.groupby.skewindata=true;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n132(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n34(key INT, value STRING) STORED AS TEXTFILE;
 
 SET hive.exec.compress.intermediate=true;
 SET hive.exec.compress.output=true;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n132 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n34 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n132.* FROM DEST1_n132;
+SELECT DEST2_n34.* FROM DEST2_n34;
diff --git a/ql/src/test/queries/clientpositive/groupby7_map.q b/ql/src/test/queries/clientpositive/groupby7_map.q
index 8164042bf7..95e5386acd 100644
--- a/ql/src/test/queries/clientpositive/groupby7_map.q
+++ b/ql/src/test/queries/clientpositive/groupby7_map.q
@@ -6,20 +6,20 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n82(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n19(key INT, value STRING) STORED AS TEXTFILE;
 
 SET hive.exec.compress.intermediate=true;
 SET hive.exec.compress.output=true;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n82 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n19 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n82 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n19 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n82.* FROM DEST1_n82;
+SELECT DEST2_n19.* FROM DEST2_n19;
diff --git a/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q b/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
index c25d6029cf..36ac4bc4c6 100644
--- a/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
+++ b/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
@@ -5,20 +5,20 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n15(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n3(key INT, value STRING) STORED AS TEXTFILE;
 
 SET hive.exec.compress.intermediate=true;
 SET hive.exec.compress.output=true;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n15 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n3 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n15 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n3 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n15.* FROM DEST1_n15;
+SELECT DEST2_n3.* FROM DEST2_n3;
diff --git a/ql/src/test/queries/clientpositive/groupby7_map_skew.q b/ql/src/test/queries/clientpositive/groupby7_map_skew.q
index afd09fe7e2..8a4c3c1dcc 100644
--- a/ql/src/test/queries/clientpositive/groupby7_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby7_map_skew.q
@@ -5,20 +5,20 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n21(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n5(key INT, value STRING) STORED AS TEXTFILE;
 
 SET hive.exec.compress.intermediate=true;
 SET hive.exec.compress.output=true;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n21 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n5 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n21 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n5 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n21.* FROM DEST1_n21;
+SELECT DEST2_n5.* FROM DEST2_n5;
diff --git a/ql/src/test/queries/clientpositive/groupby7_noskew.q b/ql/src/test/queries/clientpositive/groupby7_noskew.q
index fcdaeb4be2..20a7082a3d 100644
--- a/ql/src/test/queries/clientpositive/groupby7_noskew.q
+++ b/ql/src/test/queries/clientpositive/groupby7_noskew.q
@@ -6,20 +6,20 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n101(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n28(key INT, value STRING) STORED AS TEXTFILE;
 
 SET hive.exec.compress.intermediate=true;
 SET hive.exec.compress.output=true;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n101 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n28 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n101 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n28 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n101.* FROM DEST1_n101;
+SELECT DEST2_n28.* FROM DEST2_n28;
diff --git a/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q b/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
index f8ee474da7..07cef3f0b3 100644
--- a/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
+++ b/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
@@ -5,20 +5,20 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n170(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n42(key INT, value STRING) STORED AS TEXTFILE;
 
 SET hive.exec.compress.intermediate=true;
 SET hive.exec.compress.output=true;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10;
+INSERT OVERWRITE TABLE DEST1_n170 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10
+INSERT OVERWRITE TABLE DEST2_n42 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10;
+INSERT OVERWRITE TABLE DEST1_n170 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10
+INSERT OVERWRITE TABLE DEST2_n42 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n170.* FROM DEST1_n170;
+SELECT DEST2_n42.* FROM DEST2_n42;
diff --git a/ql/src/test/queries/clientpositive/groupby8.q b/ql/src/test/queries/clientpositive/groupby8.q
index cb9f12c928..4f25cf6e74 100644
--- a/ql/src/test/queries/clientpositive/groupby8.q
+++ b/ql/src/test/queries/clientpositive/groupby8.q
@@ -4,31 +4,31 @@ set hive.groupby.skewindata=true;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n71(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n15(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n71 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n15 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n71 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n15 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n71.* FROM DEST1_n71;
+SELECT DEST2_n15.* FROM DEST2_n15;
 
 set hive.multigroupby.singlereducer=false;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n71 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n15 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n71 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n15 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n71.* FROM DEST1_n71;
+SELECT DEST2_n15.* FROM DEST2_n15;
diff --git a/ql/src/test/queries/clientpositive/groupby8_map.q b/ql/src/test/queries/clientpositive/groupby8_map.q
index 96286d3998..1323dfffc5 100644
--- a/ql/src/test/queries/clientpositive/groupby8_map.q
+++ b/ql/src/test/queries/clientpositive/groupby8_map.q
@@ -5,18 +5,18 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n136(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n35(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n136 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n35 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n136 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n35 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n136.* FROM DEST1_n136;
+SELECT DEST2_n35.* FROM DEST2_n35;
diff --git a/ql/src/test/queries/clientpositive/groupby8_map_skew.q b/ql/src/test/queries/clientpositive/groupby8_map_skew.q
index 4938b28fac..5e5a71f206 100644
--- a/ql/src/test/queries/clientpositive/groupby8_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby8_map_skew.q
@@ -5,18 +5,18 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n87(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n22(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n87 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n22 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n87 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n22 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n87.* FROM DEST1_n87;
+SELECT DEST2_n22.* FROM DEST2_n22;
diff --git a/ql/src/test/queries/clientpositive/groupby8_noskew.q b/ql/src/test/queries/clientpositive/groupby8_noskew.q
index 3d2fa39e20..8c0328c54a 100644
--- a/ql/src/test/queries/clientpositive/groupby8_noskew.q
+++ b/ql/src/test/queries/clientpositive/groupby8_noskew.q
@@ -6,17 +6,17 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n48(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n9(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n48 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n9 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n48 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n9 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n48.* FROM DEST1_n48;
+SELECT DEST2_n9.* FROM DEST2_n9;
diff --git a/ql/src/test/queries/clientpositive/groupby9.q b/ql/src/test/queries/clientpositive/groupby9.q
index 5608df5f84..96111b57b7 100644
--- a/ql/src/test/queries/clientpositive/groupby9.q
+++ b/ql/src/test/queries/clientpositive/groupby9.q
@@ -1,69 +1,69 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, val1 STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n117(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n31(key INT, val1 STRING, val2 STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n117.* FROM DEST1_n117;
+SELECT DEST2_n31.* FROM DEST2_n31;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n117.* FROM DEST1_n117;
+SELECT DEST2_n31.* FROM DEST2_n31;
 
 set hive.multigroupby.singlereducer=false;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n117.* FROM DEST1_n117;
+SELECT DEST2_n31.* FROM DEST2_n31;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n117.* FROM DEST1_n117;
+SELECT DEST2_n31.* FROM DEST2_n31;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
+INSERT OVERWRITE TABLE DEST1_n117 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n31 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n117.* FROM DEST1_n117;
+SELECT DEST2_n31.* FROM DEST2_n31;
diff --git a/ql/src/test/queries/clientpositive/groupby_complex_types.q b/ql/src/test/queries/clientpositive/groupby_complex_types.q
index 7ea9f59ff6..87eec2f5d3 100644
--- a/ql/src/test/queries/clientpositive/groupby_complex_types.q
+++ b/ql/src/test/queries/clientpositive/groupby_complex_types.q
@@ -1,22 +1,22 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE;
-CREATE TABLE DEST3(key STRUCT<key:STRING, value:STRING>, value BIGINT) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n163(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n41(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE;
+CREATE TABLE DEST3_n7(key STRUCT<key:STRING, value:STRING>, value BIGINT) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key)
-INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value)
-INSERT OVERWRITE TABLE DEST3 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value);
+INSERT OVERWRITE TABLE DEST1_n163 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key)
+INSERT OVERWRITE TABLE DEST2_n41 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value)
+INSERT OVERWRITE TABLE DEST3_n7 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value);
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key)
-INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value)
-INSERT OVERWRITE TABLE DEST3 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value);
+INSERT OVERWRITE TABLE DEST1_n163 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key)
+INSERT OVERWRITE TABLE DEST2_n41 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value)
+INSERT OVERWRITE TABLE DEST3_n7 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value);
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
-SELECT DEST3.* FROM DEST3;
+SELECT DEST1_n163.* FROM DEST1_n163;
+SELECT DEST2_n41.* FROM DEST2_n41;
+SELECT DEST3_n7.* FROM DEST3_n7;
diff --git a/ql/src/test/queries/clientpositive/groupby_complex_types_multi_single_reducer.q b/ql/src/test/queries/clientpositive/groupby_complex_types_multi_single_reducer.q
index 26236ab009..82ee1584cd 100644
--- a/ql/src/test/queries/clientpositive/groupby_complex_types_multi_single_reducer.q
+++ b/ql/src/test/queries/clientpositive/groupby_complex_types_multi_single_reducer.q
@@ -3,18 +3,18 @@ set hive.multigroupby.singlereducer=true;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n47(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n8(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10
-INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10;
+INSERT OVERWRITE TABLE DEST1_n47 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10
+INSERT OVERWRITE TABLE DEST2_n8 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10
-INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10;
+INSERT OVERWRITE TABLE DEST1_n47 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10
+INSERT OVERWRITE TABLE DEST2_n8 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n47.* FROM DEST1_n47;
+SELECT DEST2_n8.* FROM DEST2_n8;
diff --git a/ql/src/test/queries/clientpositive/groupby_cube1.q b/ql/src/test/queries/clientpositive/groupby_cube1.q
index 92456d0f68..0250c60d2c 100644
--- a/ql/src/test/queries/clientpositive/groupby_cube1.q
+++ b/ql/src/test/queries/clientpositive/groupby_cube1.q
@@ -5,52 +5,52 @@ set hive.groupby.skewindata=false;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n82(key STRING, val STRING) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n82;
 
 EXPLAIN
-SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube;
+SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube;
 
 EXPLAIN
-SELECT key, val, count(1) FROM T1 GROUP BY CUBE(key, val);
+SELECT key, val, count(1) FROM T1_n82 GROUP BY CUBE(key, val);
 
-SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube;
+SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube;
 
 EXPLAIN
-SELECT key, val, GROUPING__ID, count(1) FROM T1 GROUP BY key, val with cube;
+SELECT key, val, GROUPING__ID, count(1) FROM T1_n82 GROUP BY key, val with cube;
 
-SELECT key, val, GROUPING__ID, count(1) FROM T1 GROUP BY key, val with cube;
+SELECT key, val, GROUPING__ID, count(1) FROM T1_n82 GROUP BY key, val with cube;
 
 EXPLAIN
-SELECT key, count(distinct val) FROM T1 GROUP BY key with cube;
+SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube;
 
-SELECT key, count(distinct val) FROM T1 GROUP BY key with cube;
+SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube;
 
 set hive.groupby.skewindata=true;
 
 EXPLAIN
-SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube;
+SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube;
 
-SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube;
+SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube;
 
 EXPLAIN
-SELECT key, count(distinct val) FROM T1 GROUP BY key with cube;
+SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube;
 
-SELECT key, count(distinct val) FROM T1 GROUP BY key with cube;
+SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube;
 
 set hive.multigroupby.singlereducer=true;
 
-CREATE TABLE T2(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE;
-CREATE TABLE T3(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE;
+CREATE TABLE T2_n51(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE;
+CREATE TABLE T3_n16(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE;
 
 EXPLAIN
-FROM T1
-INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube
-INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with cube;
+FROM T1_n82
+INSERT OVERWRITE TABLE T2_n51 SELECT key, val, count(1) group by key, val with cube
+INSERT OVERWRITE TABLE T3_n16 SELECT key, val, sum(1) group by key, val with cube;
 
-FROM T1
-INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube
-INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with cube;
+FROM T1_n82
+INSERT OVERWRITE TABLE T2_n51 SELECT key, val, count(1) group by key, val with cube
+INSERT OVERWRITE TABLE T3_n16 SELECT key, val, sum(1) group by key, val with cube;
diff --git a/ql/src/test/queries/clientpositive/groupby_cube_multi_gby.q b/ql/src/test/queries/clientpositive/groupby_cube_multi_gby.q
index b2a0dfa463..535c3b1b52 100644
--- a/ql/src/test/queries/clientpositive/groupby_cube_multi_gby.q
+++ b/ql/src/test/queries/clientpositive/groupby_cube_multi_gby.q
@@ -1,13 +1,13 @@
 --! qt:dataset:src
 set hive.multigroupby.singlereducer=false;
 
-create table t1 like src;
-create table t2 like src;
+create table t1_n21 like src;
+create table t2_n13 like src;
 
 explain from src
-insert into table t1 select
+insert into table t1_n21 select
 key, GROUPING__ID
 group by cube(key, value)
-insert into table t2 select
+insert into table t2_n13 select
 key, value
 group by key, value grouping sets ((key), (key, value));
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/groupby_distinct_samekey.q b/ql/src/test/queries/clientpositive/groupby_distinct_samekey.q
index 7a40d01038..d923d89f22 100644
--- a/ql/src/test/queries/clientpositive/groupby_distinct_samekey.q
+++ b/ql/src/test/queries/clientpositive/groupby_distinct_samekey.q
@@ -3,16 +3,16 @@ set hive.mapred.mode=nonstrict;
 -- This test covers HIVE-2332
 -- SORT_QUERY_RESULTS
 
-create table t1 (int1 int, int2 int, str1 string, str2 string);
+create table t1_n60 (int1_n60 int, int2 int, str1 string, str2 string);
 
 set hive.optimize.reducededuplication=false; --disabled RS-dedup for keeping intention of test
 
-insert into table t1 select cast(key as int), cast(key as int), value, value from src where key < 6;
-explain select Q1.int1, sum(distinct Q1.int1) from (select * from t1 order by int1) Q1 group by Q1.int1;
-explain select int1, sum(distinct int1) from t1 group by int1;
+insert into table t1_n60 select cast(key as int), cast(key as int), value, value from src where key < 6;
+explain select Q1.int1_n60, sum(distinct Q1.int1_n60) from (select * from t1_n60 order by int1_n60) Q1 group by Q1.int1_n60;
+explain select int1_n60, sum(distinct int1_n60) from t1_n60 group by int1_n60;
 
-select Q1.int1, sum(distinct Q1.int1) from (select * from t1 order by int1) Q1 group by Q1.int1;
-select int1, sum(distinct int1) from t1 group by int1;
+select Q1.int1_n60, sum(distinct Q1.int1_n60) from (select * from t1_n60 order by int1_n60) Q1 group by Q1.int1_n60;
+select int1_n60, sum(distinct int1_n60) from t1_n60 group by int1_n60;
 
-drop table t1;
+drop table t1_n60;
diff --git a/ql/src/test/queries/clientpositive/groupby_duplicate_key.q b/ql/src/test/queries/clientpositive/groupby_duplicate_key.q
index 72e38f43ec..ce5a091785 100644
--- a/ql/src/test/queries/clientpositive/groupby_duplicate_key.q
+++ b/ql/src/test/queries/clientpositive/groupby_duplicate_key.q
@@ -5,13 +5,13 @@ select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows);
 
 select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows);
 
 explain
-create table dummy as
+create table dummy_n6 as
 select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows);
 
-create table dummy as
+create table dummy_n6 as
 select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows);
 
-select key,dummy1,dummy2 from dummy;
+select key,dummy1,dummy2 from dummy_n6;
 
 explain
 select max('pants'), max('pANTS') from src group by key limit 1;
diff --git a/ql/src/test/queries/clientpositive/groupby_empty.q b/ql/src/test/queries/clientpositive/groupby_empty.q
index 2ce33ae165..30604c3da9 100644
--- a/ql/src/test/queries/clientpositive/groupby_empty.q
+++ b/ql/src/test/queries/clientpositive/groupby_empty.q
@@ -1,9 +1,9 @@
-create table t (a int);
+create table t_n34 (a int);
 
-insert into t values (1),(1),(2);
+insert into t_n34 values (1),(1),(2);
 
-explain select count(*) from t group by ();
+explain select count(*) from t_n34 group by ();
 
-select count(*) from t group by ();
+select count(*) from t_n34 group by ();
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_id1.q b/ql/src/test/queries/clientpositive/groupby_grouping_id1.q
index 7068d21994..d4948b9c8f 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_id1.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_id1.q
@@ -1,17 +1,17 @@
 SET hive.vectorized.execution.enabled=false;
 
-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n158(key STRING, val STRING) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n158;
 
 -- SORT_QUERY_RESULTS
 
-SELECT key, val, GROUPING__ID from T1 group by key, val with cube;
-SELECT key, val, GROUPING__ID from T1 group by cube(key, val);
+SELECT key, val, GROUPING__ID from T1_n158 group by key, val with cube;
+SELECT key, val, GROUPING__ID from T1_n158 group by cube(key, val);
 
-SELECT GROUPING__ID, key, val from T1 group by key, val with rollup;
-SELECT GROUPING__ID, key, val from T1 group by rollup (key, val);
+SELECT GROUPING__ID, key, val from T1_n158 group by key, val with rollup;
+SELECT GROUPING__ID, key, val from T1_n158 group by rollup (key, val);
 
-SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by key, val with cube;
-SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by cube(key, val);
+SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n158 group by key, val with cube;
+SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n158 group by cube(key, val);
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_id2.q b/ql/src/test/queries/clientpositive/groupby_grouping_id2.q
index ba755c4ca9..778d4b922c 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_id2.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_id2.q
@@ -3,40 +3,40 @@ SET hive.vectorized.execution.enabled=false;
 set hive.fetch.task.conversion=none;
 set hive.cli.print.header=true;
 
-CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE;
+CREATE TABLE T1_n123(key INT, value INT) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n123;
 
 set hive.groupby.skewindata = true;
 
 -- SORT_QUERY_RESULTS
 
-SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP;
-SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY ROLLUP (key, value);
+SELECT key, value, GROUPING__ID, count(*) from T1_n123 GROUP BY key, value WITH ROLLUP;
+SELECT key, value, GROUPING__ID, count(*) from T1_n123 GROUP BY ROLLUP (key, value);
 
 SELECT GROUPING__ID, count(*)
 FROM
 (
-SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+SELECT key, value, GROUPING__ID, count(*) from T1_n123 GROUP BY key, value WITH ROLLUP
 ) t
 GROUP BY GROUPING__ID;
 
 SELECT GROUPING__ID, count(*)
 FROM
 (
-SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY ROLLUP(key, value)
+SELECT key, value, GROUPING__ID, count(*) from T1_n123 GROUP BY ROLLUP(key, value)
 ) t
 GROUP BY GROUPING__ID;
 
-SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY key,value WITH ROLLUP) t1
+SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n123 GROUP BY key,value WITH ROLLUP) t1
 JOIN
-(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2
+(SELECT GROUPING__ID FROM T1_n123 GROUP BY key, value WITH ROLLUP) t2
 ON t1.GROUPING__ID = t2.GROUPING__ID;
 
-SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY ROLLUP(key,value)) t1
+SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n123 GROUP BY ROLLUP(key,value)) t1
 JOIN
-(SELECT GROUPING__ID FROM T1 GROUP BY ROLLUP(key, value)) t2
+(SELECT GROUPING__ID FROM T1_n123 GROUP BY ROLLUP(key, value)) t2
 ON t1.GROUPING__ID = t2.GROUPING__ID;
 
@@ -45,18 +45,18 @@ ON t1.GROUPING__ID = t2.GROUPING__ID;
 
 set hive.groupby.skewindata = false;
 
-SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP;
+SELECT key, value, GROUPING__ID, count(*) from T1_n123 GROUP BY key, value WITH ROLLUP;
 
 SELECT GROUPING__ID, count(*)
 FROM
 (
-SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+SELECT key, value, GROUPING__ID, count(*) from T1_n123 GROUP BY key, value WITH ROLLUP
 ) t
 GROUP BY GROUPING__ID;
 
-SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY key,value WITH ROLLUP) t1
+SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n123 GROUP BY key,value WITH ROLLUP) t1
 JOIN
-(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2
+(SELECT GROUPING__ID FROM T1_n123 GROUP BY key, value WITH ROLLUP) t2
 ON t1.GROUPING__ID = t2.GROUPING__ID;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_id3.q b/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
index 29b2f15ca1..b1e765d0ae 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_id3.q
@@ -1,8 +1,8 @@
 SET hive.vectorized.execution.enabled=false;
 
-CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE;
+CREATE TABLE T1_n86(key INT, value INT) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n86;
 
 set hive.cbo.enable = false;
 
@@ -10,12 +10,12 @@ set hive.cbo.enable = false;
 EXPLAIN
 SELECT key, value, GROUPING__ID, count(*)
-FROM T1
+FROM T1_n86
 GROUP BY key, value
 GROUPING SETS ((), (key))
 HAVING GROUPING__ID = 1;
 
 SELECT key, value, GROUPING__ID, count(*)
-FROM T1
+FROM T1_n86
 GROUP BY key, value
 GROUPING SETS ((), (key))
 HAVING GROUPING__ID = 1;
 
@@ -24,12 +24,12 @@ set hive.cbo.enable = true;
 
 EXPLAIN
 SELECT key, value, GROUPING__ID, count(*)
-FROM T1
+FROM T1_n86
 GROUP BY key, value
 GROUPING SETS ((), (key))
 HAVING GROUPING__ID = 1;
 
 SELECT key, value, GROUPING__ID, count(*)
-FROM T1
+FROM T1_n86
 GROUP BY key, value
 GROUPING SETS ((), (key))
 HAVING GROUPING__ID = 1;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q
index 86c5246c72..57b61f8dc4 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q
@@ -5,36 +5,36 @@ set hive.cli.print.header=true;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
+CREATE TABLE T1_n41(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n41;
 
-SELECT * FROM T1;
+SELECT * FROM T1_n41;
 
 EXPLAIN
-SELECT a, b, count(*) from T1 group by a, b with cube;
-SELECT a, b, count(*) from T1 group by a, b with cube;
+SELECT a, b, count(*) from T1_n41 group by a, b with cube;
+SELECT a, b, count(*) from T1_n41 group by a, b with cube;
 
 EXPLAIN
-SELECT a, b, count(*) from T1 group by cube(a, b);
-SELECT a, b, count(*) from T1 group by cube(a, b);
+SELECT a, b, count(*) from T1_n41 group by cube(a, b);
+SELECT a, b, count(*) from T1_n41 group by cube(a, b);
 
 EXPLAIN
-SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ());
-SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ());
+SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b GROUPING SETS (a, (a, b), b, ());
+SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b GROUPING SETS (a, (a, b), b, ());
 
 EXPLAIN
-SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b));
-SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b));
+SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b GROUPING SETS (a, (a, b));
+SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b GROUPING SETS (a, (a, b));
 
 EXPLAIN
-SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c);
-SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c);
+SELECT a FROM T1_n41 GROUP BY a, b, c GROUPING SETS (a, b, c);
+SELECT a FROM T1_n41 GROUP BY a, b, c GROUPING SETS (a, b, c);
 
 EXPLAIN
-SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a));
-SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a));
+SELECT a FROM T1_n41 GROUP BY a GROUPING SETS ((a), (a));
+SELECT a FROM T1_n41 GROUP BY a GROUPING SETS ((a), (a));
 
 EXPLAIN
-SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b);
-SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b);
+SELECT a + b, count(*) FROM T1_n41 GROUP BY a + b GROUPING SETS (a+b);
+SELECT a + b, count(*) FROM T1_n41 GROUP BY a + b GROUPING SETS (a+b);
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q
index 1934321b9f..b24c1e077d 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q
@@ -6,27 +6,27 @@ set hive.new.job.grouping.set.cardinality=2;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
+CREATE TABLE T1_n81(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n81;
 
 -- Since 4 grouping sets would be generated for the query below, an additional MR job should be created
 EXPLAIN
-SELECT a, b, count(*) from T1 group by a, b with cube;
+SELECT a, b, count(*) from T1_n81 group by a, b with cube;
 
 EXPLAIN
-SELECT a, b, count(*) from T1 group by cube(a, b);
-SELECT a, b, count(*) from T1 group by a, b with cube;
+SELECT a, b, count(*) from T1_n81 group by cube(a, b);
+SELECT a, b, count(*) from T1_n81 group by a, b with cube;
 
 EXPLAIN
-SELECT a, b, sum(c) from T1 group by a, b with cube;
-SELECT a, b, sum(c) from T1 group by a, b with cube;
+SELECT a, b, sum(c) from T1_n81 group by a, b with cube;
+SELECT a, b, sum(c) from T1_n81 group by a, b with cube;
 
-CREATE TABLE T2(a STRING, b STRING, c int, d int);
+CREATE TABLE T2_n50(a STRING, b STRING, c int, d int);
 
-INSERT OVERWRITE TABLE T2
-SELECT a, b, c, c from T1;
+INSERT OVERWRITE TABLE T2_n50
+SELECT a, b, c, c from T1_n81;
 
 EXPLAIN
-SELECT a, b, sum(c+d) from T2 group by a, b with cube;
-SELECT a, b, sum(c+d) from T2 group by a, b with cube;
+SELECT a, b, sum(c+d) from T2_n50 group by a, b with cube;
+SELECT a, b, sum(c+d) from T2_n50 group by a, b with cube;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q
index 81267dc33d..5021a7a6ea 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q
@@ -5,14 +5,14 @@ set hive.cli.print.header=true;
 
 -- SORT_QUERY_RESULTS
 
--- In this test, 2 files are loaded into table T1. The data contains rows with the same value of a and b,
+-- In this test, 2 files are loaded into table T1_n118. The data contains rows with the same value of a and b,
 -- with different number of rows for a and b in each file. Since bucketizedHiveInputFormat is used,
 -- this tests that the aggregate function stores the partial aggregate state correctly even if an
 -- additional MR job is created for processing the grouping sets.
-CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
+CREATE TABLE T1_n118(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_n118;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_n118;
 
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.new.job.grouping.set.cardinality = 30;
 
@@ -21,17 +21,17 @@ set hive.new.job.grouping.set.cardinality = 30;
 -- (cube of a,b will lead to (a,b), (a, null), (null, b) and (null, null) and
 -- hive.new.job.grouping.set.cardinality is more than 4.
 EXPLAIN
-SELECT a, b, avg(c), count(*) from T1 group by a, b with cube;
+SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube;
 EXPLAIN
-SELECT a, b, avg(c), count(*) from T1 group by cube(a, b);
-SELECT a, b, avg(c), count(*) from T1 group by a, b with cube;
+SELECT a, b, avg(c), count(*) from T1_n118 group by cube(a, b);
+SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube;
 
 set hive.new.job.grouping.set.cardinality=2;
 
 -- The query below will execute in 2 MR jobs, since hive.new.job.grouping.set.cardinality is set to 2.
 -- The partial aggregation state should be maintained correctly across MR jobs.
 EXPLAIN
-SELECT a, b, avg(c), count(*) from T1 group by a, b with cube;
-SELECT a, b, avg(c), count(*) from T1 group by a, b with cube;
+SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube;
+SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q
index fa62992d2d..86e8773b52 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q
@@ -8,29 +8,29 @@ set hive.merge.mapredfiles = false;
 -- Set merging to false above to make the explain more readable
 
-CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
+CREATE TABLE T1_n143(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n143;
 
 -- This tests that cubes and rollups work fine inside sub-queries.
 EXPLAIN
 SELECT * FROM
-(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
 join
-(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
 on subq1.a = subq2.a;
 
 EXPLAIN
 SELECT * FROM
-(SELECT a, b, count(*) from T1 where a < 3 group by cube(a, b) ) subq1
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by cube(a, b) ) subq1
 join
-(SELECT a, b, count(*) from T1 where a < 3 group by cube(a, b) ) subq2
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by cube(a, b) ) subq2
 on subq1.a = subq2.a;
 
 SELECT * FROM
-(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
 join
-(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
 on subq1.a = subq2.a;
 
 set hive.new.job.grouping.set.cardinality=2;
 
@@ -39,14 +39,14 @@ set hive.new.job.grouping.set.cardinality=2;
 -- for each of them
 EXPLAIN
 SELECT * FROM
-(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
 join
-(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
 on subq1.a = subq2.a;
 
 SELECT * FROM
-(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
 join
-(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2
+(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
 on subq1.a = subq2.a;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q
index 829a0c24ca..25676f5ed0 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q
@@ -4,30 +4,30 @@ set hive.merge.mapfiles = false;
 set hive.merge.mapredfiles = false;
 -- Set merging to false above to make the explain more readable
 
-CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
+CREATE TABLE T1_n24(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n24;
 
 -- SORT_QUERY_RESULTS
 
 -- This tests that cubes and rollups work fine where the source is a sub-query
 EXPLAIN
 SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube;
+(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube;
 
 EXPLAIN
 SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1 group by a, b) subq1 group by cube(a, b);
+(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by cube(a, b);
 
 SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube;
+(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube;
 
 set hive.new.job.grouping.set.cardinality=2;
 
 -- Since 4 grouping sets would be generated for the cube, an additional MR job should be created
 EXPLAIN
 SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube;
+(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube;
 
 SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube;
+(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets6.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets6.q
index 515dce3c5c..5c0bb75f06 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets6.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets6.q
@@ -1,9 +1,9 @@
 set hive.mapred.mode=nonstrict;
 SET hive.vectorized.execution.enabled=false;
-CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
+CREATE TABLE T1_n75(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n75;
 
 -- SORT_QUERY_RESULTS
 
@@ -12,11 +12,11 @@ set hive.optimize.ppd = false;
 -- This filter is not pushed down
 EXPLAIN
 SELECT a, b FROM
-(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res
+(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
 WHERE res.a=5;
 
 SELECT a, b FROM
-(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res
+(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
 WHERE res.a=5;
 
 set hive.cbo.enable = true;
 
@@ -24,9 +24,9 @@ set hive.cbo.enable = true;
 -- This filter is pushed down through aggregate with grouping sets by Calcite
 EXPLAIN
 SELECT a, b FROM
-(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res
+(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
 WHERE res.a=5;
 
 SELECT a, b FROM
-(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res
+(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
 WHERE res.a=5;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets_grouping.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets_grouping.q
index 3f437a49d0..d400848b65 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets_grouping.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets_grouping.q
@@ -2,48 +2,48 @@ SET hive.vectorized.execution.enabled=false;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE;
+CREATE TABLE T1_n64(key INT, value INT) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n64;
 
 explain
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by rollup(key, value);
 
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by rollup(key, value);
 
 explain
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by cube(key, value);
 
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by cube(key, value);
 
 explain
 select key, value
-from T1
+from T1_n64
 group by cube(key, value)
 having grouping(key) = 1;
 
 select key, value
-from T1
+from T1_n64
 group by cube(key, value)
 having grouping(key) = 1;
 
 explain
 select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n64
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end;
 
 select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n64
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end;
 
@@ -52,107 +52,107 @@ set hive.cbo.enable=false;
 
 explain
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by rollup(key, value);
 
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by rollup(key, value);
 
 explain
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by cube(key, value);
 
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by cube(key, value);
 
 explain
 select key, value
-from T1
+from T1_n64
 group by cube(key, value)
 having grouping(key) = 1;
 
 select key, value
-from T1
+from T1_n64
 group by cube(key, value)
 having grouping(key) = 1;
 
 explain
 select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n64
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end;
 
 select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n64
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end;
 
 explain
 select key, value, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by key, value;
 
 select key, value, grouping(key), grouping(value)
-from T1
+from T1_n64
 group by key, value;
 
 explain
 select key, value, grouping(value)
-from T1
+from T1_n64
 group by key, value;
 
 select key, value, grouping(value)
-from T1
+from T1_n64
 group by key, value;
 
 explain
 select key, value
-from T1
+from T1_n64
 group by key, value
 having grouping(key) = 0;
 
 select key, value
-from T1
+from T1_n64
 group by key, value
 having grouping(key) = 0;
 
 explain
 select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n64
 group by cube(key, value);
 
 select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n64
 group by cube(key, value);
 
 explain
 select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n64
 group by cube(key, value);
 
 select key, value, `grouping__id`,
grouping(value, key) -from T1 +from T1_n64 group by cube(key, value); explain select key, value, `grouping__id`, grouping(key, value) -from T1 +from T1_n64 group by rollup(key, value); select key, value, `grouping__id`, grouping(key, value) -from T1 +from T1_n64 group by rollup(key, value); explain select key, value, `grouping__id`, grouping(value, key) -from T1 +from T1_n64 group by rollup(key, value); select key, value, `grouping__id`, grouping(value, key) -from T1 +from T1_n64 group by rollup(key, value); diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets_limit.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets_limit.q index b6c5143671..cda9c039dc 100644 --- a/ql/src/test/queries/clientpositive/groupby_grouping_sets_limit.q +++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets_limit.q @@ -1,36 +1,36 @@ -- SORT_QUERY_RESULTS -CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_n141(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n141; EXPLAIN -SELECT a, b, count(*) from T1 group by a, b with cube LIMIT 10; +SELECT a, b, count(*) from T1_n141 group by a, b with cube LIMIT 10; -SELECT a, b, count(*) from T1 group by a, b with cube LIMIT 10; +SELECT a, b, count(*) from T1_n141 group by a, b with cube LIMIT 10; EXPLAIN -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10; +SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10; -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10; +SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10; EXPLAIN -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10; +SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10; -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10; +SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10; EXPLAIN -SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10; +SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10; -SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10; +SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10; EXPLAIN -SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10; +SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10; -SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10; +SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10; EXPLAIN -SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10; +SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10; -SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10; +SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_window.q b/ql/src/test/queries/clientpositive/groupby_grouping_window.q index 8a5c2906b3..7ec8752fbd 100644 --- a/ql/src/test/queries/clientpositive/groupby_grouping_window.q +++ b/ql/src/test/queries/clientpositive/groupby_grouping_window.q @@ -1,29 +1,29 @@ --! 
qt:dataset:src -create table t(category int, live int, comments int); -insert into table t select key, 0, 2 from src tablesample(3 rows); +create table t_n33(category int, live int, comments int); +insert into table t_n33 select key, 0, 2 from src tablesample(3 rows); explain select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1 -FROM t +FROM t_n33 GROUP BY category GROUPING SETS ((), (category)) HAVING max(comments) > 0; select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1 -FROM t +FROM t_n33 GROUP BY category GROUPING SETS ((), (category)) HAVING max(comments) > 0; SELECT grouping(category), lead(live) over(partition by grouping(category)) -FROM t +FROM t_n33 GROUP BY category, live GROUPING SETS ((), (category)); SELECT grouping(category), lead(live) over(partition by grouping(category)) -FROM t +FROM t_n33 GROUP BY category, live; SELECT grouping(category), lag(live) over(partition by grouping(category)) -FROM t +FROM t_n33 GROUP BY category, live; diff --git a/ql/src/test/queries/clientpositive/groupby_map_ppr.q b/ql/src/test/queries/clientpositive/groupby_map_ppr.q index d42416e5b8..4f3a5a1cfe 100644 --- a/ql/src/test/queries/clientpositive/groupby_map_ppr.q +++ b/ql/src/test/queries/clientpositive/groupby_map_ppr.q @@ -7,19 +7,19 @@ set mapred.reduce.tasks=31; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n144(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; EXPLAIN EXTENDED FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n144 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1); FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n144 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1); -SELECT dest1.* FROM dest1; +SELECT dest1_n144.* FROM dest1_n144; diff --git a/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q b/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q index 5d969838d9..2c2fbc4bf7 100644 --- a/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q +++ b/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q @@ -7,19 +7,19 @@ set mapred.reduce.tasks=31; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE; +CREATE TABLE dest1_n174(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE; EXPLAIN EXTENDED FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n174 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1); FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n174 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1); -SELECT dest1.* FROM dest1; +SELECT dest1_n174.* FROM dest1_n174; diff --git 
a/ql/src/test/queries/clientpositive/groupby_multi_insert_common_distinct.q b/ql/src/test/queries/clientpositive/groupby_multi_insert_common_distinct.q index 1d6d8dcf3e..a76ce46abd 100644 --- a/ql/src/test/queries/clientpositive/groupby_multi_insert_common_distinct.q +++ b/ql/src/test/queries/clientpositive/groupby_multi_insert_common_distinct.q @@ -3,19 +3,19 @@ set hive.map.aggr=true; -- SORT_QUERY_RESULTS -create table dest1(key int, cnt int); -create table dest2(key int, cnt int); +create table dest1_n99(key int, cnt int); +create table dest2_n27(key int, cnt int); explain from src -insert overwrite table dest1 select key, count(distinct value) group by key -insert overwrite table dest2 select key+key, count(distinct value) group by key+key; +insert overwrite table dest1_n99 select key, count(distinct value) group by key +insert overwrite table dest2_n27 select key+key, count(distinct value) group by key+key; from src -insert overwrite table dest1 select key, count(distinct value) group by key -insert overwrite table dest2 select key+key, count(distinct value) group by key+key; +insert overwrite table dest1_n99 select key, count(distinct value) group by key +insert overwrite table dest2_n27 select key+key, count(distinct value) group by key+key; -select * from dest1 where key < 10; -select * from dest2 where key < 20 order by key limit 10; +select * from dest1_n99 where key < 10; +select * from dest2_n27 where key < 20 order by key limit 10; diff --git a/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q b/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q index 5a5d149899..1721172b9d 100644 --- a/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q +++ b/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q @@ -1,22 +1,22 @@ --! 
qt:dataset:src set hive.multigroupby.singlereducer=true; -CREATE TABLE dest_g2(key STRING, c1 INT) STORED AS TEXTFILE; -CREATE TABLE dest_g3(key STRING, c1 INT, c2 INT) STORED AS TEXTFILE; +CREATE TABLE dest_g2_n4(key STRING, c1 INT) STORED AS TEXTFILE; +CREATE TABLE dest_g3_n0(key STRING, c1 INT, c2 INT) STORED AS TEXTFILE; -- SORT_QUERY_RESULTS EXPLAIN FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) -INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1); +INSERT OVERWRITE TABLE dest_g2_n4 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g3_n0 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1); FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) -INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1); +INSERT OVERWRITE TABLE dest_g2_n4 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g3_n0 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1); -SELECT * FROM dest_g2; -SELECT * FROM dest_g3; +SELECT * FROM dest_g2_n4; +SELECT * FROM dest_g3_n0; -DROP TABLE dest_g2; -DROP TABLE dest_g3; +DROP TABLE dest_g2_n4; +DROP TABLE dest_g3_n0; diff --git a/ql/src/test/queries/clientpositive/groupby_multi_single_reducer3.q b/ql/src/test/queries/clientpositive/groupby_multi_single_reducer3.q index 224b67cd1e..94aea35c42 100644 --- a/ql/src/test/queries/clientpositive/groupby_multi_single_reducer3.q +++ b/ql/src/test/queries/clientpositive/groupby_multi_single_reducer3.q @@ -3,103 +3,103 @@ -- SORT_QUERY_RESULTS -create table e1 (key string, count int); -create table e2 (key string, count int); +create table e1_n1 (key string, count int); +create table e2_n2 (key string, count int); explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key; from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key; -select * from e1; -select * from e2; +select * from e1_n1; +select * from e2_n2; explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value; from src -insert overwrite table e1 +insert overwrite table e1_n1 
select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value; -select * from e1; -select * from e2; +select * from e1_n1; +select * from e2_n2; set hive.optimize.ppd=false; explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key; from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key; -select * from e1; -select * from e2; +select * from e1_n1; +select * from e2_n2; explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value; from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value; -select * from e1; -select * from e2; +select * from e1_n1; +select * from e2_n2; diff --git a/ql/src/test/queries/clientpositive/groupby_multialias.q b/ql/src/test/queries/clientpositive/groupby_multialias.q index b0a01715d2..80a2f600a0 100644 --- a/ql/src/test/queries/clientpositive/groupby_multialias.q +++ b/ql/src/test/queries/clientpositive/groupby_multialias.q @@ -1,7 +1,7 @@ -create table t1 (a int); +create table t1_n150 (a int); explain -select t1.a as a1, min(t1.a) as a -from t1 -group by t1.a; +select t1_n150.a as a1, min(t1_n150.a) as a +from t1_n150 +group by t1_n150.a; diff --git a/ql/src/test/queries/clientpositive/groupby_ppd.q b/ql/src/test/queries/clientpositive/groupby_ppd.q index b304dfb94f..1801b42851 100644 --- a/ql/src/test/queries/clientpositive/groupby_ppd.q +++ b/ql/src/test/queries/clientpositive/groupby_ppd.q @@ -1,5 +1,5 @@ set hive.mapred.mode=nonstrict; -- see HIVE-2382 -create table invites (id int, foo int, bar int); -explain select * from (select foo, bar from (select bar, foo from invites c union all select bar, foo from invites d) b) a group by bar, foo having bar=1; -drop table invites; \ No newline at end of file +create table invites_n0 (id int, foo int, bar int); +explain select * from (select foo, bar from (select bar, foo from invites_n0 c union all select bar, foo from invites_n0 d) b) a group by bar, foo having bar=1; +drop table invites_n0; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/groupby_ppr.q b/ql/src/test/queries/clientpositive/groupby_ppr.q index 8cad28f016..fb5f235b60 100644 --- 
a/ql/src/test/queries/clientpositive/groupby_ppr.q +++ b/ql/src/test/queries/clientpositive/groupby_ppr.q @@ -6,19 +6,19 @@ set hive.groupby.skewindata=false; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n79(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; EXPLAIN EXTENDED FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n79 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1); FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n79 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1); -SELECT dest1.* FROM dest1; +SELECT dest1_n79.* FROM dest1_n79; diff --git a/ql/src/test/queries/clientpositive/groupby_rollup1.q b/ql/src/test/queries/clientpositive/groupby_rollup1.q index 94f533cb20..4e34be3848 100644 --- a/ql/src/test/queries/clientpositive/groupby_rollup1.q +++ b/ql/src/test/queries/clientpositive/groupby_rollup1.q @@ -5,45 +5,45 @@ set hive.groupby.skewindata=false; -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n91(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n91; EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup; +SELECT key, val, count(1) FROM T1_n91 GROUP BY key, val with rollup; -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup; +SELECT key, val, count(1) FROM T1_n91 GROUP BY key, val with rollup; EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup; +SELECT key, count(distinct val) FROM T1_n91 GROUP BY key with rollup; -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup; +SELECT key, count(distinct val) FROM T1_n91 GROUP BY key with rollup; set hive.groupby.skewindata=true; EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup; +SELECT key, val, count(1) FROM T1_n91 GROUP BY key, val with rollup; -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup; +SELECT key, val, count(1) FROM T1_n91 GROUP BY key, val with rollup; EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup; +SELECT key, count(distinct val) FROM T1_n91 GROUP BY key with rollup; -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup; +SELECT key, count(distinct val) FROM T1_n91 GROUP BY key with rollup; set hive.multigroupby.singlereducer=true; -CREATE TABLE T2(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE; -CREATE TABLE T3(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE; +CREATE TABLE T2_n56(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE; +CREATE TABLE T3_n20(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE; EXPLAIN -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by rollup(key, val); +FROM T1_n91 +INSERT OVERWRITE TABLE T2_n56 SELECT key, val, count(1) group by key, val with rollup +INSERT OVERWRITE TABLE T3_n20 SELECT key, val, sum(1) group by rollup(key, val); -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val 
with rollup; +FROM T1_n91 +INSERT OVERWRITE TABLE T2_n56 SELECT key, val, count(1) group by key, val with rollup +INSERT OVERWRITE TABLE T3_n20 SELECT key, val, sum(1) group by key, val with rollup; diff --git a/ql/src/test/queries/clientpositive/groupby_rollup_empty.q b/ql/src/test/queries/clientpositive/groupby_rollup_empty.q index b64eef9b7e..8ba8d2a604 100644 --- a/ql/src/test/queries/clientpositive/groupby_rollup_empty.q +++ b/ql/src/test/queries/clientpositive/groupby_rollup_empty.q @@ -1,78 +1,78 @@ set hive.vectorized.execution.enabled=false; -drop table if exists tx1; -drop table if exists tx2; -create table tx1 (a integer,b integer,c integer); +drop table if exists tx1_n2; +drop table if exists tx2_n1; +create table tx1_n2 (a integer,b integer,c integer); select sum(c) -from tx1 +from tx1_n2 ; select sum(c), grouping(b), 'NULL,1' as expected -from tx1 +from tx1_n2 where a<0 group by a,b grouping sets ((), b, a); select sum(c), grouping(b), 'NULL,1' as expected -from tx1 +from tx1_n2 where a<0 group by rollup (b); -select '2 rows expected',sum(c) from tx1 group by rollup (a) +select '2 rows expected',sum(c) from tx1_n2 group by rollup (a) union all -select '2 rows expected',sum(c) from tx1 group by rollup (a); +select '2 rows expected',sum(c) from tx1_n2 group by rollup (a); -- non-empty table -insert into tx1 values (1,1,1); +insert into tx1_n2 values (1,1,1); select sum(c), grouping(b), 'NULL,1' as expected -from tx1 +from tx1_n2 where a<0 group by rollup (b); select sum(c), grouping(b), '1,1 and 1,0' as expected -from tx1 +from tx1_n2 group by rollup (b); set hive.vectorized.execution.enabled=true; -create table tx2 (a integer,b integer,c integer,d double,u string,bi binary) stored as orc; +create table tx2_n1 (a integer,b integer,c integer,d double,u string,bi binary) stored as orc; explain select sum(c), grouping(b), 'NULL,1' as expected -from tx2 +from tx2_n1 where a<0 group by a,b grouping sets ((), b, a); select sum(c),'NULL' as expected -from tx2; +from tx2_n1; select sum(c), max(u), 'asd', grouping(b), 'NULL,1' as expected -from tx2 +from tx2_n1 where a<0 group by a,b,d grouping sets ((), b, a, d); -select '2 rows expected',sum(c) from tx2 group by rollup (a) +select '2 rows expected',sum(c) from tx2_n1 group by rollup (a) union all -select '2 rows expected',sum(c) from tx2 group by rollup (a); +select '2 rows expected',sum(c) from tx2_n1 group by rollup (a); -insert into tx2 values +insert into tx2_n1 values (1,2,3,1.1,'x','b'), (3,2,3,1.1,'y','b'); @@ -82,6 +82,6 @@ select sum(a), 'asd', grouping(bi), 'NULL,1' as expected -from tx2 +from tx2_n1 where a=2 group by a,u,bi grouping sets ( u, (), bi); diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1.q b/ql/src/test/queries/clientpositive/groupby_sort_1.q index 2255edec0a..46ec0be469 100644 --- a/ql/src/test/queries/clientpositive/groupby_sort_1.q +++ b/ql/src/test/queries/clientpositive/groupby_sort_1.q @@ -6,58 +6,58 @@ set hive.map.groupby.sorted=true; -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n4(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n4; -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1; +INSERT OVERWRITE TABLE T1_n4 select key, val from T1_n4; -CREATE TABLE 
outputTbl1(key int, cnt int); +CREATE TABLE outputTbl1_n2(key int, cnt int); -- The plan should be converted to a map-side group by if the group by key -- matches the sorted key EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n2 +SELECT key, count(1) FROM T1_n4 GROUP BY key; -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n2 +SELECT key, count(1) FROM T1_n4 GROUP BY key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n2; -CREATE TABLE outputTbl2(key1 int, key2 string, cnt int); +CREATE TABLE outputTbl2_n0(key1 int, key2 string, cnt int); -- no map-side group by even if the group by key is a superset of sorted key EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val; +INSERT OVERWRITE TABLE outputTbl2_n0 +SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val; -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val; +INSERT OVERWRITE TABLE outputTbl2_n0 +SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val; -SELECT * FROM outputTbl2; +SELECT * FROM outputTbl2_n0; -- It should work for sub-queries EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n2 +SELECT key, count(1) FROM (SELECT key, val FROM T1_n4) subq1 GROUP BY key; -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n2 +SELECT key, count(1) FROM (SELECT key, val FROM T1_n4) subq1 GROUP BY key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n2; -- It should work for sub-queries with column aliases EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k; +INSERT OVERWRITE TABLE outputTbl1_n2 +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n4) subq1 GROUP BY k; -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k; +INSERT OVERWRITE TABLE outputTbl1_n2 +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n4) subq1 GROUP BY k; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n2; CREATE TABLE outputTbl3(key1 int, key2 int, cnt int); @@ -65,10 +65,10 @@ CREATE TABLE outputTbl3(key1 int, key2 int, cnt int); -- by a match to the sorted key EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key; +SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key; INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key; +SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key; SELECT * FROM outputTbl3; @@ -77,20 +77,20 @@ CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int); -- no map-side group by if the group by key contains a constant followed by another column EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val; +SELECT key, 1, val, count(1) FROM T1_n4 GROUP BY key, 1, val; INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val; +SELECT key, 1, val, count(1) FROM T1_n4 GROUP BY key, 1, val; SELECT * FROM outputTbl4; -- no map-side group by if the group by key contains a function EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1; 
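One way to read the constant and function cases in this file: a literal in the GROUP BY introduces no new group boundaries, so rows still reach the mapper ordered and grouped by the real sorted column, while an expression such as key + 1 is opaque to the bucketing/sorting metadata (the optimizer does not reason about monotonicity). A sketch of the contrast, reusing the names from these hunks (illustrative only):

-- Constant next to the sorted key: convertible to a map-side group by.
EXPLAIN EXTENDED
SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key;
-- Expression over the key: conversion is skipped and a shuffle remains.
EXPLAIN EXTENDED
SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1;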
+SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1; INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1; +SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1; SELECT * FROM outputTbl3; @@ -99,104 +99,104 @@ SELECT * FROM outputTbl3; -- group by followed by another group by EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n2 SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1 group by key + key; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n2 SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1 group by key + key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n2; -- group by followed by a union EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n2 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n4 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n4 GROUP BY key ) subq1; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n2 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n4 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n4 GROUP BY key ) subq1; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n2; -- group by followed by a union where one of the sub-queries is map-side group by EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n2 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n4 GROUP BY key UNION ALL -SELECT key + key as key, count(1) FROM T1 GROUP BY key + key +SELECT key + key as key, count(1) FROM T1_n4 GROUP BY key + key ) subq1; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n2 SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key +SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key UNION ALL -SELECT key + key as key, count(1) as cnt FROM T1 GROUP BY key + key +SELECT key + key as key, count(1) as cnt FROM T1_n4 GROUP BY key + key ) subq1; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n2; -- group by followed by a join EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n2 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq2 ON subq1.key = subq2.key; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n2 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq2 ON subq1.key = subq2.key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n2; -- group by followed by a join where one of the sub-queries can be performed in the mapper EXPLAIN EXTENDED SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) FROM T1_n4 GROUP BY key) subq1 JOIN -(SELECT key, val, count(1) FROM 
T1 GROUP BY key, val) subq2 +(SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val) subq2 ON subq1.key = subq2.key; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n3(key STRING, val STRING) CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE; -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1; +INSERT OVERWRITE TABLE T2_n3 select key, val from T1_n4; -- no mapside sort group by if the group by is a prefix of the sorted key EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n2 +SELECT key, count(1) FROM T2_n3 GROUP BY key; -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n2 +SELECT key, count(1) FROM T2_n3 GROUP BY key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n2; -- The plan should be converted to a map-side group by if the group by key contains a constant in between the -- sorted keys EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val; +SELECT key, 1, val, count(1) FROM T2_n3 GROUP BY key, 1, val; INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val; +SELECT key, 1, val, count(1) FROM T2_n3 GROUP BY key, 1, val; SELECT * FROM outputTbl4; @@ -206,10 +206,10 @@ CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int); -- sorted keys followed by anything EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2; +SELECT key, 1, val, 2, count(1) FROM T2_n3 GROUP BY key, 1, val, 2; INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2; +SELECT key, 1, val, 2, count(1) FROM T2_n3 GROUP BY key, 1, val, 2; SELECT * FROM outputTbl5; @@ -217,12 +217,12 @@ SELECT * FROM outputTbl5; EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n3)subq group by key, constant, val; INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n3)subq group by key, constant, val; SELECT * FROM outputTbl4; @@ -233,7 +233,7 @@ INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n3)subq )subq2 group by key, constant3, val; @@ -241,7 +241,7 @@ INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n3)subq )subq2 group by key, constant3, val; @@ -251,33 +251,33 @@ set hive.map.aggr=true; set hive.multigroupby.singlereducer=false; set mapred.reduce.tasks=31; -CREATE TABLE DEST1(key INT, cnt INT); -CREATE TABLE DEST2(key INT, val STRING, cnt INT); +CREATE TABLE DEST1_n7(key INT, cnt INT); +CREATE TABLE DEST2_n1(key INT, val STRING, cnt INT); SET hive.exec.compress.intermediate=true; SET hive.exec.compress.output=true; EXPLAIN -FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val; +FROM T2_n3 +INSERT OVERWRITE 
TABLE DEST1_n7 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val; -FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val; +FROM T2_n3 +INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val; -select * from DEST1; -select * from DEST2; +select * from DEST1_n7; +select * from DEST2_n1; -- multi-table insert with a sub-query EXPLAIN -FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val; +FROM (select key, val from T2_n3 where key = 8) x +INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val; -FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val; +FROM (select key, val from T2_n3 where key = 8) x +INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val; -select * from DEST1; -select * from DEST2; +select * from DEST1_n7; +select * from DEST2_n1; diff --git a/ql/src/test/queries/clientpositive/groupby_sort_10.q b/ql/src/test/queries/clientpositive/groupby_sort_10.q index daf2db9476..626af886e8 100644 --- a/ql/src/test/queries/clientpositive/groupby_sort_10.q +++ b/ql/src/test/queries/clientpositive/groupby_sort_10.q @@ -5,24 +5,24 @@ set hive.map.groupby.sorted=true; -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +CREATE TABLE T1_n149(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +INSERT OVERWRITE TABLE T1_n149 PARTITION (ds='1') SELECT * from src where key = 0 or key = 11; -- The plan is converted to a map-side plan -EXPLAIN select distinct key from T1; -select distinct key from T1; +EXPLAIN select distinct key from T1_n149; +select distinct key from T1_n149; -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='2') +INSERT OVERWRITE TABLE T1_n149 PARTITION (ds='2') SELECT * from src where key = 0 or key = 11; -- The plan is not converted to a map-side, since although the sorting columns and grouping -- columns match, the user is querying multiple input partitions -EXPLAIN select distinct key from T1; -select distinct key from T1; +EXPLAIN select distinct key from T1_n149; +select distinct key from T1_n149; -DROP TABLE T1; +DROP TABLE T1_n149; diff --git a/ql/src/test/queries/clientpositive/groupby_sort_11.q b/ql/src/test/queries/clientpositive/groupby_sort_11.q index 015d91b957..b8fd66a346 100644 --- a/ql/src/test/queries/clientpositive/groupby_sort_11.q +++ b/ql/src/test/queries/clientpositive/groupby_sort_11.q @@ -6,38 +6,38 @@ set hive.map.groupby.sorted=true; -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +CREATE TABLE T1_n18(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +INSERT OVERWRITE TABLE 
T1_n18 PARTITION (ds='1') SELECT * from src where key < 10; -- The plan is optimized to perform partial aggregation on the mapper -EXPLAIN select count(distinct key) from T1; -select count(distinct key) from T1; +EXPLAIN select count(distinct key) from T1_n18; +select count(distinct key) from T1_n18; -- The plan is optimized to perform partial aggregation on the mapper -EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1; -select count(distinct key), count(1), count(key), sum(distinct key) from T1; +EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1_n18; +select count(distinct key), count(1), count(key), sum(distinct key) from T1_n18; -- The plan is not changed in the presence of a grouping key -EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key; -select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key; +EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1_n18 group by key; +select count(distinct key), count(1), count(key), sum(distinct key) from T1_n18 group by key; -- The plan is not changed in the presence of a grouping key -EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key; -select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key; +EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1_n18 group by key; +select key, count(distinct key), count(1), count(key), sum(distinct key) from T1_n18 group by key; -- The plan is not changed in the presence of a grouping key expression -EXPLAIN select count(distinct key+key) from T1; -select count(distinct key+key) from T1; +EXPLAIN select count(distinct key+key) from T1_n18; +select count(distinct key+key) from T1_n18; -EXPLAIN select count(distinct 1) from T1; -select count(distinct 1) from T1; +EXPLAIN select count(distinct 1) from T1_n18; +select count(distinct 1) from T1_n18; set hive.map.aggr=false; -- no plan change if map aggr is turned off -EXPLAIN select count(distinct key) from T1; -select count(distinct key) from T1; +EXPLAIN select count(distinct key) from T1_n18; +select count(distinct key) from T1_n18; diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q index 91b693a634..b27aec422f 100644 --- a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q +++ b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q @@ -5,279 +5,279 @@ set hive.map.groupby.sorted=true; -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n80(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n80; -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1; +INSERT OVERWRITE TABLE T1_n80 select key, val from T1_n80; -CREATE TABLE outputTbl1(key int, cnt int); +CREATE TABLE outputTbl1_n18(key int, cnt int); -- The plan should be converted to a map-side group by if the group by key -- matches the sorted key EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM T1_n80 GROUP BY key; 
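The groupby_sort_11.q hunks above deserve a gloss: with the table bucketed and sorted on key, duplicates of a key are adjacent in each mapper's input, so count(distinct key) can be partially aggregated map-side; as the comments note, a grouping key or an expression like key+key defeats the rewrite. An illustrative recap against the renamed table:

-- Sorted input lets the mapper count each key once, at the point where its
-- run of duplicates ends:
EXPLAIN select count(distinct key) from T1_n18;
-- In the presence of a grouping key the plan is left unchanged:
EXPLAIN select count(distinct key) from T1_n18 group by key;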
-INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM T1_n80 GROUP BY key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n18; -CREATE TABLE outputTbl2(key1 int, key2 string, cnt int); +CREATE TABLE outputTbl2_n5(key1 int, key2 string, cnt int); -- no map-side group by even if the group by key is a superset of sorted key EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val; +INSERT OVERWRITE TABLE outputTbl2_n5 +SELECT key, val, count(1) FROM T1_n80 GROUP BY key, val; -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val; +INSERT OVERWRITE TABLE outputTbl2_n5 +SELECT key, val, count(1) FROM T1_n80 GROUP BY key, val; -SELECT * FROM outputTbl2; +SELECT * FROM outputTbl2_n5; -- It should work for sub-queries EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM (SELECT key, val FROM T1_n80) subq1 GROUP BY key; -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM (SELECT key, val FROM T1_n80) subq1 GROUP BY key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n18; -- It should work for sub-queries with column aliases EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k; +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n80) subq1 GROUP BY k; -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k; +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n80) subq1 GROUP BY k; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n18; -CREATE TABLE outputTbl3(key1 int, key2 int, cnt int); +CREATE TABLE outputTbl3_n2(key1 int, key2 int, cnt int); -- The plan should be converted to a map-side group by if the group by key contains a constant followed -- by a match to the sorted key EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key; +INSERT OVERWRITE TABLE outputTbl3_n2 +SELECT 1, key, count(1) FROM T1_n80 GROUP BY 1, key; -INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key; +INSERT OVERWRITE TABLE outputTbl3_n2 +SELECT 1, key, count(1) FROM T1_n80 GROUP BY 1, key; -SELECT * FROM outputTbl3; +SELECT * FROM outputTbl3_n2; -CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int); +CREATE TABLE outputTbl4_n2(key1 int, key2 int, key3 string, cnt int); -- no map-side group by if the group by key contains a constant followed by another column EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val; +INSERT OVERWRITE TABLE outputTbl4_n2 +SELECT key, 1, val, count(1) FROM T1_n80 GROUP BY key, 1, val; -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val; +INSERT OVERWRITE TABLE outputTbl4_n2 +SELECT key, 1, val, count(1) FROM T1_n80 GROUP BY key, 1, val; -SELECT * FROM outputTbl4; +SELECT * FROM outputTbl4_n2; -- no map-side group by if the group by key contains a function EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 
GROUP BY key, key + 1; +INSERT OVERWRITE TABLE outputTbl3_n2 +SELECT key, key + 1, count(1) FROM T1_n80 GROUP BY key, key + 1; -INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1; +INSERT OVERWRITE TABLE outputTbl3_n2 +SELECT key, key + 1, count(1) FROM T1_n80 GROUP BY key, key + 1; -SELECT * FROM outputTbl3; +SELECT * FROM outputTbl3_n2; -- it should not matter what follows the group by -- test various cases -- group by followed by another group by EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key) subq1 group by key + key; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key) subq1 group by key + key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n18; -- group by followed by a union EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n80 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n80 GROUP BY key ) subq1; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n80 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n80 GROUP BY key ) subq1; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n18; -- group by followed by a union where one of the sub-queries is map-side group by EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n80 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) FROM T1_n80 GROUP BY key + key ) subq1; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key +SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) as cnt FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) as cnt FROM T1_n80 GROUP BY key + key ) subq1; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n18; -- group by followed by a join EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key) subq2 ON subq1.key = subq2.key; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n80 GROUP BY key) subq2 ON subq1.key = subq2.key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n18; -- group by 
followed by a join where one of the sub-queries can be performed in the mapper EXPLAIN EXTENDED SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) FROM T1_n80 GROUP BY key) subq1 JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 +(SELECT key, val, count(1) FROM T1_n80 GROUP BY key, val) subq2 ON subq1.key = subq2.key; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n49(key STRING, val STRING) CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE; -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1; +INSERT OVERWRITE TABLE T2_n49 select key, val from T1_n80; -- no mapside sort group by if the group by is a prefix of the sorted key EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM T2_n49 GROUP BY key; -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key; +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM T2_n49 GROUP BY key; -SELECT * FROM outputTbl1; +SELECT * FROM outputTbl1_n18; -- The plan should be converted to a map-side group by if the group by key contains a constant in between the -- sorted keys EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val; +INSERT OVERWRITE TABLE outputTbl4_n2 +SELECT key, 1, val, count(1) FROM T2_n49 GROUP BY key, 1, val; -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val; +INSERT OVERWRITE TABLE outputTbl4_n2 +SELECT key, 1, val, count(1) FROM T2_n49 GROUP BY key, 1, val; -SELECT * FROM outputTbl4; +SELECT * FROM outputTbl4_n2; -CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int); +CREATE TABLE outputTbl5_n2(key1 int, key2 int, key3 string, key4 int, cnt int); -- The plan should be converted to a map-side group by if the group by key contains a constant in between the -- sorted keys followed by anything EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2; +INSERT OVERWRITE TABLE outputTbl5_n2 +SELECT key, 1, val, 2, count(1) FROM T2_n49 GROUP BY key, 1, val, 2; -INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2; +INSERT OVERWRITE TABLE outputTbl5_n2 +SELECT key, 1, val, 2, count(1) FROM T2_n49 GROUP BY key, 1, val, 2; -SELECT * FROM outputTbl5 +SELECT * FROM outputTbl5_n2 ORDER BY key1, key2, key3, key4; -- constants from sub-queries should work fine EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 +INSERT OVERWRITE TABLE outputTbl4_n2 SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n49)subq group by key, constant, val; -INSERT OVERWRITE TABLE outputTbl4 +INSERT OVERWRITE TABLE outputTbl4_n2 SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n49)subq group by key, constant, val; -SELECT * FROM outputTbl4; +SELECT * FROM outputTbl4_n2; -- multiple levels of constants from sub-queries should work fine EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 +INSERT OVERWRITE TABLE outputTbl4_n2 select key, constant3, val, count(1) from ( SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from
)subq2
group by key, constant3, val;
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n2
select key, constant3, val, count(1) from
(
SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n49)subq
)subq2
group by key, constant3, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n2;
set hive.map.aggr=true;
set hive.multigroupby.singlereducer=false;
set mapred.reduce.tasks=31;
-CREATE TABLE DEST1(key INT, cnt INT);
-CREATE TABLE DEST2(key INT, val STRING, cnt INT);
+CREATE TABLE DEST1_n80(key INT, cnt INT);
+CREATE TABLE DEST2_n18(key INT, val STRING, cnt INT);
SET hive.exec.compress.intermediate=true;
SET hive.exec.compress.output=true;
EXPLAIN
-FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM T2_n49
+INSERT OVERWRITE TABLE DEST1_n80 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n18 SELECT key, val, count(1) GROUP BY key, val;
-FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM T2_n49
+INSERT OVERWRITE TABLE DEST1_n80 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n18 SELECT key, val, count(1) GROUP BY key, val;
-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n80;
+select * from DEST2_n18;
-- multi-table insert with a sub-query
EXPLAIN
-FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM (select key, val from T2_n49 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n80 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n18 SELECT key, val, count(1) GROUP BY key, val;
-FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM (select key, val from T2_n49 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n80 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n18 SELECT key, val, count(1) GROUP BY key, val;
-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n80;
+select * from DEST2_n18;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_2.q b/ql/src/test/queries/clientpositive/groupby_sort_2.q
index 216a9b940b..66905e194b 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_2.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_2.q
@@ -4,23 +4,23 @@ set hive.map.groupby.sorted=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n51(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n51;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n51 select key, val from T1_n51;
-CREATE TABLE outputTbl1(val string, cnt int);
+CREATE TABLE outputTbl1_n10(val string, cnt int);
-- The plan should not be converted to a map-side group by even though the group by key
-- matches the sorted key.
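-- (illustrative aside, not part of the original test: the table is bucketed on
-- key but grouped on val, so two rows sharing a val can hash into different
-- buckets and no single mapper sees a whole group; the rough bucket assignment
-- can be eyeballed with the built-in hash() UDF)
SELECT val, hash(key) % 2 AS approx_bucket FROM T1_n51 ORDER BY val;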
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT val, count(1) FROM T1 GROUP BY val;
+INSERT OVERWRITE TABLE outputTbl1_n10
+SELECT val, count(1) FROM T1_n51 GROUP BY val;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT val, count(1) FROM T1 GROUP BY val;
+INSERT OVERWRITE TABLE outputTbl1_n10
+SELECT val, count(1) FROM T1_n51 GROUP BY val;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n10;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_3.q b/ql/src/test/queries/clientpositive/groupby_sort_3.q
index 996f44d415..e059e75f22 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_3.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_3.q
@@ -4,34 +4,34 @@ set hive.map.groupby.sorted=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n89(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n89;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n89 select key, val from T1_n89;
-CREATE TABLE outputTbl1(key string, val string, cnt int);
+CREATE TABLE outputTbl1_n20(key string, val string, cnt int);
-- The plan should be converted to a map-side group by
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl1_n20
+SELECT key, val, count(1) FROM T1_n89 GROUP BY key, val;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl1_n20
+SELECT key, val, count(1) FROM T1_n89 GROUP BY key, val;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n20;
-CREATE TABLE outputTbl2(key string, cnt int);
+CREATE TABLE outputTbl2_n7(key string, cnt int);
-- The plan should be converted to a map-side group by
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl2_n7
+SELECT key, count(1) FROM T1_n89 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl2_n7
+SELECT key, count(1) FROM T1_n89 GROUP BY key;
-SELECT * FROM outputTbl2;
+SELECT * FROM outputTbl2_n7;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_4.q b/ql/src/test/queries/clientpositive/groupby_sort_4.q
index df998d00a7..6c211383c9 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_4.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_4.q
@@ -4,36 +4,36 @@ set hive.map.groupby.sorted=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n133(key STRING, val STRING)
CLUSTERED BY (key, val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n133;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n133 select key, val from T1_n133;
-CREATE TABLE outputTbl1(key STRING, cnt INT);
+CREATE TABLE outputTbl1_n31(key STRING, cnt INT);
-- The plan should not be converted to a map-side group by.
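-- (here the bucketing hashes on the pair (key, val), so rows with a single key
-- can be spread across both buckets even though each file is sorted on key)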
-- However, there should be no hash-based aggregation on the map-side
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n31
+SELECT key, count(1) FROM T1_n133 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n31
+SELECT key, count(1) FROM T1_n133 GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n31;
-CREATE TABLE outputTbl2(key STRING, val STRING, cnt INT);
+CREATE TABLE outputTbl2_n8(key STRING, val STRING, cnt INT);
-- The plan should not be converted to a map-side group by.
-- Hash-based aggregations should be performed on the map-side
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl2_n8
+SELECT key, val, count(1) FROM T1_n133 GROUP BY key, val;
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl2_n8
+SELECT key, val, count(1) FROM T1_n133 GROUP BY key, val;
-SELECT * FROM outputTbl2;
+SELECT * FROM outputTbl2_n8;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_5.q b/ql/src/test/queries/clientpositive/groupby_sort_5.q
index 1fdd404067..3d85d14990 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_5.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_5.q
@@ -5,73 +5,73 @@ set hive.map.groupby.sorted=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n6(key STRING, val STRING)
CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n6;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n6 select key, val from T1_n6;
-CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT);
+CREATE TABLE outputTbl1_n5(key STRING, val STRING, cnt INT);
-- The plan should be converted to a map-side group by, since the
-- sorting columns and grouping columns match, and all the bucketing columns
-- are part of sorting columns
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl1_n5
+SELECT key, val, count(1) FROM T1_n6 GROUP BY key, val;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl1_n5
+SELECT key, val, count(1) FROM T1_n6 GROUP BY key, val;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n5;
-DROP TABLE T1;
+DROP TABLE T1_n6;
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n6(key STRING, val STRING)
CLUSTERED BY (val, key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n6;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n6 select key, val from T1_n6;
-- The plan should be converted to a map-side group by, since the
-- sorting columns and grouping columns match, and all the bucketing columns
-- are part of sorting columns
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl1_n5
+SELECT key, val, count(1) FROM T1_n6 GROUP BY key, val;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl1_n5
+SELECT key, val, count(1) FROM T1_n6 GROUP BY key, val;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n5;
-DROP TABLE T1;
+DROP TABLE T1_n6;
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n6(key STRING, val STRING)
CLUSTERED BY (val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n6;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n6 select key, val from T1_n6;
-CREATE TABLE outputTbl2(key STRING, cnt INT);
+CREATE TABLE outputTbl2_n1(key STRING, cnt INT);
-- The plan should not be converted to a map-side group by, since although the
-- sorting columns and grouping columns match, all the bucketing columns
-- are not part of sorting columns. However, no hash map aggregation is required
-- on the mapside.
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl2_n1
+SELECT key, count(1) FROM T1_n6 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl2_n1
+SELECT key, count(1) FROM T1_n6 GROUP BY key;
-SELECT * FROM outputTbl2;
+SELECT * FROM outputTbl2_n1;
-DROP TABLE T1;
+DROP TABLE T1_n6;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_6.q b/ql/src/test/queries/clientpositive/groupby_sort_6.q
index 02fec42a0b..3aa0636f91 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_6.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_6.q
@@ -4,39 +4,39 @@ set hive.map.groupby.sorted=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string);
+CREATE TABLE T1_n61(key STRING, val STRING) PARTITIONED BY (ds string);
-CREATE TABLE outputTbl1(key int, cnt int);
+CREATE TABLE outputTbl1_n15(key int, cnt int);
-- The plan should not be converted to a map-side group since no partition is being accessed
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n15
+SELECT key, count(1) FROM T1_n61 where ds = '1' GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n15
+SELECT key, count(1) FROM T1_n61 where ds = '1' GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n15;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='2');
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n61 PARTITION (ds='2');
-- The plan should not be converted to a map-side group since no partition is being accessed
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n15
+SELECT key, count(1) FROM T1_n61 where ds = '1' GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n15
+SELECT key, count(1) FROM T1_n61 where ds = '1' GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n15;
-- The plan should not be converted to a map-side group since the partition being accessed
-- is neither bucketed nor sorted
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n15
+SELECT key, count(1) FROM T1_n61 where ds = '2' GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n15
+SELECT key, count(1) FROM T1_n61 where ds = '2' GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n15;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_7.q b/ql/src/test/queries/clientpositive/groupby_sort_7.q
index f0aea3593d..4dc34eb057 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_7.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_7.q
@@ -5,26 +5,26 @@ set hive.map.groupby.sorted=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
+CREATE TABLE T1_n104(key STRING, val STRING) PARTITIONED BY (ds string)
CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1');
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n104 PARTITION (ds='1');
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1';
+INSERT OVERWRITE TABLE T1_n104 PARTITION (ds='1') select key, val from T1_n104 where ds = '1';
-CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT);
+CREATE TABLE outputTbl1_n26(key STRING, val STRING, cnt INT);
-- The plan should be converted to a map-side group by, since the
-- sorting columns and grouping columns match, and all the bucketing columns
-- are part of sorting columns
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl1_n26
+SELECT key, val, count(1) FROM T1_n104 where ds = '1' GROUP BY key, val;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl1_n26
+SELECT key, val, count(1) FROM T1_n104 where ds = '1' GROUP BY key, val;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n26;
-DROP TABLE T1;
+DROP TABLE T1_n104;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_8.q b/ql/src/test/queries/clientpositive/groupby_sort_8.q
index 2c20b292ac..11568e9e03 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_8.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_8.q
@@ -5,19 +5,19 @@ set hive.map.groupby.sorted=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
+CREATE TABLE T1_n45(key STRING, val STRING) PARTITIONED BY (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1');
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n45 PARTITION (ds='1');
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1';
+INSERT OVERWRITE TABLE T1_n45 PARTITION (ds='1') select key, val from T1_n45 where ds = '1';
-- The plan is not converted to a map-side group by, since although the sorting columns and grouping
-- columns match, the user is issuing a distinct.
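-- (a mapper can at best emit partial results per file; the final de-duplicated
-- count still has to be merged in a reducer, so the map-only rewrite is off)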
-- However, after HIVE-4310, partial aggregation is performed on the mapper
EXPLAIN
-select count(distinct key) from T1;
-select count(distinct key) from T1;
+select count(distinct key) from T1_n45;
+select count(distinct key) from T1_n45;
-DROP TABLE T1;
+DROP TABLE T1_n45;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_9.q b/ql/src/test/queries/clientpositive/groupby_sort_9.q
index abf1a8ae19..0f9be6a30d 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_9.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_9.q
@@ -2,19 +2,19 @@ set hive.mapred.mode=nonstrict;
set hive.exec.reducers.max = 10;
set hive.map.groupby.sorted=true;
-CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
+CREATE TABLE T1_n96(key STRING, val STRING) PARTITIONED BY (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1');
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n96 PARTITION (ds='1');
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1';
-INSERT OVERWRITE TABLE T1 PARTITION (ds='2') select key, val from T1 where ds = '1';
+INSERT OVERWRITE TABLE T1_n96 PARTITION (ds='1') select key, val from T1_n96 where ds = '1';
+INSERT OVERWRITE TABLE T1_n96 PARTITION (ds='2') select key, val from T1_n96 where ds = '1';
-- The plan is not converted to a map-side group by, since although the sorting columns and grouping
-- columns match, the user is querying multiple input partitions
EXPLAIN
-select key, count(1) from T1 group by key;
-select key, count(1) from T1 group by key;
+select key, count(1) from T1_n96 group by key;
+select key, count(1) from T1_n96 group by key;
-DROP TABLE T1;
+DROP TABLE T1_n96;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
index 27cce8d143..7836c4d39e 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
@@ -7,279 +7,279 @@ set hive.groupby.skewindata=true;
-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n35(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n35;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n35 select key, val from T1_n35;
-CREATE TABLE outputTbl1(key int, cnt int);
+CREATE TABLE outputTbl1_n8(key int, cnt int);
-- The plan should be converted to a map-side group by if the group by key
-- matches the sorted key
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n8
+SELECT key, count(1) FROM T1_n35 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n8
+SELECT key, count(1) FROM T1_n35 GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n8;
-CREATE TABLE outputTbl2(key1 int, key2 string, cnt int);
+CREATE TABLE outputTbl2_n2(key1 int, key2 string, cnt int);
-- no map-side group by even if the group by key is a superset of sorted key
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl2_n2
+SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val;
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl2_n2
+SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val;
-SELECT * FROM outputTbl2;
+SELECT * FROM outputTbl2_n2;
-- It should work for sub-queries
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n8
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n8
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n8;
-- It should work for sub-queries with column aliases
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+INSERT OVERWRITE TABLE outputTbl1_n8
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+INSERT OVERWRITE TABLE outputTbl1_n8
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n8;
-CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
+CREATE TABLE outputTbl3_n0(key1 int, key2 int, cnt int);
-- The plan should be converted to a map-side group by if the group by key contains a constant followed
-- by a match to the sorted key
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+INSERT OVERWRITE TABLE outputTbl3_n0
+SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key;
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+INSERT OVERWRITE TABLE outputTbl3_n0
+SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key;
-SELECT * FROM outputTbl3;
+SELECT * FROM outputTbl3_n0;
-CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int);
+CREATE TABLE outputTbl4_n0(key1 int, key2 int, key3 string, cnt int);
-- no map-side group by if the group by key contains a constant followed by another column
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+INSERT OVERWRITE TABLE outputTbl4_n0
+SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val;
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+INSERT OVERWRITE TABLE outputTbl4_n0
+SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n0;
-- no map-side group by if the group by key contains a function
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+INSERT OVERWRITE TABLE outputTbl3_n0
+SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1;
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+INSERT OVERWRITE TABLE outputTbl3_n0
+SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1;
-SELECT * FROM outputTbl3;
+SELECT * FROM outputTbl3_n0;
-- it should not matter what follows the group by
-- test various cases
-- group by followed by another group by
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n8
SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
group by key + key;
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n8
SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
group by key + key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n8;
-- group by followed by a union
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n8
SELECT * FROM (
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n35 GROUP BY key
UNION ALL
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n35 GROUP BY key
) subq1;
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n8
SELECT * FROM (
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n35 GROUP BY key
UNION ALL
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n35 GROUP BY key
) subq1;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n8;
-- group by followed by a union where one of the sub-queries is map-side group by
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n8
SELECT * FROM (
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n35 GROUP BY key
UNION ALL
-SELECT key + key as key, count(1) FROM T1 GROUP BY key + key
+SELECT key + key as key, count(1) FROM T1_n35 GROUP BY key + key
) subq1;
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n8
SELECT * FROM (
-SELECT key, count(1) as cnt FROM T1 GROUP BY key
+SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key
UNION ALL
-SELECT key + key as key, count(1) as cnt FROM T1 GROUP BY key + key
+SELECT key + key as key, count(1) as cnt FROM T1_n35 GROUP BY key + key
) subq1;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n8;
-- group by followed by a join
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n8
SELECT subq1.key, subq1.cnt+subq2.cnt FROM
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
JOIN
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
+(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2
ON subq1.key = subq2.key;
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n8
SELECT subq1.key, subq1.cnt+subq2.cnt FROM
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
JOIN
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
+(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2
ON subq1.key = subq2.key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n8;
-- group by followed by a join where one of the sub-queries can be performed in the mapper
EXPLAIN EXTENDED
SELECT * FROM
-(SELECT key, count(1) FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) FROM T1_n35 GROUP BY key) subq1
JOIN
-(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2
+(SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val) subq2
ON subq1.key = subq2.key;
-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n23(key STRING, val STRING)
CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T2 select key, val from T1;
+INSERT OVERWRITE TABLE T2_n23 select key, val from T1_n35;
-- no mapside sort group by if the group by is a prefix of the sorted key
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T2 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n8
+SELECT key, count(1) FROM T2_n23 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T2 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n8
+SELECT key, count(1) FROM T2_n23 GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n8;
-- The plan should be converted to a map-side group by if the group by key contains a constant in between the
-- sorted keys
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val;
+INSERT OVERWRITE TABLE outputTbl4_n0
+SELECT key, 1, val, count(1) FROM T2_n23 GROUP BY key, 1, val;
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val;
+INSERT OVERWRITE TABLE outputTbl4_n0
+SELECT key, 1, val, count(1) FROM T2_n23 GROUP BY key, 1, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n0;
-CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int);
+CREATE TABLE outputTbl5_n0(key1 int, key2 int, key3 string, key4 int, cnt int);
-- The plan should be converted to a map-side group by if the group by key contains a constant in between the
-- sorted keys followed by anything
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
+INSERT OVERWRITE TABLE outputTbl5_n0
+SELECT key, 1, val, 2, count(1) FROM T2_n23 GROUP BY key, 1, val, 2;
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
+INSERT OVERWRITE TABLE outputTbl5_n0
+SELECT key, 1, val, 2, count(1) FROM T2_n23 GROUP BY key, 1, val, 2;
-SELECT * FROM outputTbl5
+SELECT * FROM outputTbl5_n0
ORDER BY key1, key2, key3, key4;
-- constants from sub-queries should work fine
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n0
SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n23)subq
group by key, constant, val;
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n0
SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n23)subq
group by key, constant, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n0;
-- multiple levels of constants from sub-queries should work fine
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n0
select key, constant3, val, count(1) from
(
SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n23)subq
)subq2
group by key, constant3, val;
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n0
select key, constant3, val, count(1) from
(
SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n23)subq
)subq2
group by key, constant3, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n0;
set hive.map.aggr=true;
set hive.multigroupby.singlereducer=false;
set mapred.reduce.tasks=31;
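-- (context for the plans below: hive.groupby.skewindata=true, set at the top of
-- this file, compiles each group by into two jobs, one spreading rows across
-- reducers to absorb key skew and one merging the partial aggregates; the
-- query that follows is an illustrative hand-rolled equivalent, not part of
-- the original test, which salts the key for the partial aggregate)
SELECT key, sum(partial) AS cnt
FROM (SELECT key, count(1) AS partial FROM T2_n23 GROUP BY key, floor(rand() * 31)) tmp
GROUP BY key;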
-CREATE TABLE DEST1(key INT, cnt INT);
-CREATE TABLE DEST2(key INT, val STRING, cnt INT);
+CREATE TABLE DEST1_n30(key INT, cnt INT);
+CREATE TABLE DEST2_n6(key INT, val STRING, cnt INT);
SET hive.exec.compress.intermediate=true;
SET hive.exec.compress.output=true;
EXPLAIN
-FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM T2_n23
+INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM T2_n23
+INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n30;
+select * from DEST2_n6;
-- multi-table insert with a sub-query
EXPLAIN
-FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM (select key, val from T2_n23 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM (select key, val from T2_n23 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n30;
+select * from DEST2_n6;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
index 51e08a648c..8919f3bdec 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
@@ -6,279 +6,279 @@ set hive.groupby.skewindata=true;
-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-- SORT_QUERY_RESULTS
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n56(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n56;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n56 select key, val from T1_n56;
-CREATE TABLE outputTbl1(key int, cnt int);
+CREATE TABLE outputTbl1_n13(key int, cnt int);
-- The plan should be converted to a map-side group by if the group by key
-- matches the sorted key
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n13
+SELECT key, count(1) FROM T1_n56 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n13
+SELECT key, count(1) FROM T1_n56 GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n13;
-CREATE TABLE outputTbl2(key1 int, key2 string, cnt int);
+CREATE TABLE outputTbl2_n3(key1 int, key2 string, cnt int);
-- no map-side group by even if the group by key is a superset of sorted key
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl2_n3
+SELECT key, val, count(1) FROM T1_n56 GROUP BY key, val;
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl2_n3
+SELECT key, val, count(1) FROM T1_n56 GROUP BY key, val;
-SELECT * FROM outputTbl2;
+SELECT * FROM outputTbl2_n3;
-- It should work for sub-queries
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n13
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n56) subq1 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n13
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n56) subq1 GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n13;
-- It should work for sub-queries with column aliases
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+INSERT OVERWRITE TABLE outputTbl1_n13
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n56) subq1 GROUP BY k;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+INSERT OVERWRITE TABLE outputTbl1_n13
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n56) subq1 GROUP BY k;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n13;
-CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
+CREATE TABLE outputTbl3_n1(key1 int, key2 int, cnt int);
-- The plan should be converted to a map-side group by if the group by key contains a constant followed
-- by a match to the sorted key
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+INSERT OVERWRITE TABLE outputTbl3_n1
+SELECT 1, key, count(1) FROM T1_n56 GROUP BY 1, key;
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+INSERT OVERWRITE TABLE outputTbl3_n1
+SELECT 1, key, count(1) FROM T1_n56 GROUP BY 1, key;
-SELECT * FROM outputTbl3;
+SELECT * FROM outputTbl3_n1;
-CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int);
+CREATE TABLE outputTbl4_n1(key1 int, key2 int, key3 string, cnt int);
-- no map-side group by if the group by key contains a constant followed by another column
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+INSERT OVERWRITE TABLE outputTbl4_n1
+SELECT key, 1, val, count(1) FROM T1_n56 GROUP BY key, 1, val;
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+INSERT OVERWRITE TABLE outputTbl4_n1
+SELECT key, 1, val, count(1) FROM T1_n56 GROUP BY key, 1, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n1;
-- no map-side group by if the group by key contains a function
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+INSERT OVERWRITE TABLE outputTbl3_n1
+SELECT key, key + 1, count(1) FROM T1_n56 GROUP BY key, key + 1;
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+INSERT OVERWRITE TABLE outputTbl3_n1
+SELECT key, key + 1, count(1) FROM T1_n56 GROUP BY key, key + 1;
-SELECT * FROM outputTbl3;
+SELECT * FROM outputTbl3_n1;
-- it should not matter what follows the group by
-- test various cases
-- group by followed by another group by
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n13
SELECT cast(key + key as string), sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n56 GROUP BY key) subq1
group by key + key;
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n13
SELECT cast(key + key as string), sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n56 GROUP BY key) subq1
group by key + key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n13;
-- group by followed by a union
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n13
SELECT * FROM (
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n56 GROUP BY key
UNION ALL
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n56 GROUP BY key
) subq1;
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n13
SELECT * FROM (
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n56 GROUP BY key
UNION ALL
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n56 GROUP BY key
) subq1;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n13;
-- group by followed by a union where one of the sub-queries is map-side group by
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n13
SELECT * FROM (
-SELECT key, count(1) FROM T1 GROUP BY key
+SELECT key, count(1) FROM T1_n56 GROUP BY key
UNION ALL
-SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key
+SELECT cast(key + key as string) as key, count(1) FROM T1_n56 GROUP BY key + key
) subq1;
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n13
SELECT * FROM (
-SELECT key, count(1) as cnt FROM T1 GROUP BY key
+SELECT key, count(1) as cnt FROM T1_n56 GROUP BY key
UNION ALL
-SELECT cast(key + key as string) as key, count(1) as cnt FROM T1 GROUP BY key + key
+SELECT cast(key + key as string) as key, count(1) as cnt FROM T1_n56 GROUP BY key + key
) subq1;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n13;
-- group by followed by a join
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n13
SELECT subq1.key, subq1.cnt+subq2.cnt FROM
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n56 GROUP BY key) subq1
JOIN
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
+(SELECT key, count(1) as cnt FROM T1_n56 GROUP BY key) subq2
ON subq1.key = subq2.key;
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n13
SELECT subq1.key, subq1.cnt+subq2.cnt FROM
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n56 GROUP BY key) subq1
JOIN
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
+(SELECT key, count(1) as cnt FROM T1_n56 GROUP BY key) subq2
ON subq1.key = subq2.key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n13;
-- group by followed by a join where one of the sub-queries can be performed in the mapper
EXPLAIN EXTENDED
SELECT * FROM
-(SELECT key, count(1) FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) FROM T1_n56 GROUP BY key) subq1
JOIN
-(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2
+(SELECT key, val, count(1) FROM T1_n56 GROUP BY key, val) subq2
ON subq1.key = subq2.key;
-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n34(key STRING, val STRING)
CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T2 select key, val from T1;
+INSERT OVERWRITE TABLE T2_n34 select key, val from T1_n56;
-- no mapside sort group by if the group by is a prefix of the sorted key
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T2 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n13
+SELECT key, count(1) FROM T2_n34 GROUP BY key;
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T2 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n13
+SELECT key, count(1) FROM T2_n34 GROUP BY key;
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n13;
-- The plan should be converted to a map-side group by if the group by key contains a constant in between the
-- sorted keys
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val;
+INSERT OVERWRITE TABLE outputTbl4_n1
+SELECT key, 1, val, count(1) FROM T2_n34 GROUP BY key, 1, val;
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val;
+INSERT OVERWRITE TABLE outputTbl4_n1
+SELECT key, 1, val, count(1) FROM T2_n34 GROUP BY key, 1, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n1;
-CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int);
+CREATE TABLE outputTbl5_n1(key1 int, key2 int, key3 string, key4 int, cnt int);
-- The plan should be converted to a map-side group by if the group by key contains a constant in between the
-- sorted keys followed by anything
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
+INSERT OVERWRITE TABLE outputTbl5_n1
+SELECT key, 1, val, 2, count(1) FROM T2_n34 GROUP BY key, 1, val, 2;
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
+INSERT OVERWRITE TABLE outputTbl5_n1
+SELECT key, 1, val, 2, count(1) FROM T2_n34 GROUP BY key, 1, val, 2;
-SELECT * FROM outputTbl5
+SELECT * FROM outputTbl5_n1
ORDER BY key1, key2, key3, key4;
-- constants from sub-queries should work fine
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n1
SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n34)subq
group by key, constant, val;
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n1
SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n34)subq
group by key, constant, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n1;
-- multiple levels of constants from sub-queries should work fine
EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n1
select key, constant3, val, count(1) from
(
SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n34)subq
)subq2
group by key, constant3, val;
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n1
select key, constant3, val, count(1) from
(
SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n34)subq
)subq2
group by key, constant3, val;
-SELECT * FROM outputTbl4;
+SELECT * FROM outputTbl4_n1;
set hive.map.aggr=true;
set hive.multigroupby.singlereducer=false;
set mapred.reduce.tasks=31;
-CREATE TABLE DEST1(key INT, cnt INT);
-CREATE TABLE DEST2(key INT, val STRING, cnt INT);
+CREATE TABLE DEST1_n57(key INT, cnt INT);
+CREATE TABLE DEST2_n12(key INT, val STRING, cnt INT);
SET hive.exec.compress.intermediate=true;
SET hive.exec.compress.output=true;
EXPLAIN
-FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM T2_n34
+INSERT OVERWRITE TABLE DEST1_n57 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n12 SELECT key, val, count(1) GROUP BY key, val;
-FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM T2_n34
+INSERT OVERWRITE TABLE DEST1_n57 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n12 SELECT key, val, count(1) GROUP BY key, val;
-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n57;
+select * from DEST2_n12;
-- multi-table insert with a sub-query
EXPLAIN
-FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM (select key, val from T2_n34 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n57 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n12 SELECT key, val, count(1) GROUP BY key, val;
-FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+FROM (select key, val from T2_n34 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n57 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n12 SELECT key, val, count(1) GROUP BY key, val;
-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n57;
+select * from DEST2_n12;

diff --git a/ql/src/test/queries/clientpositive/groupby_sort_test_1.q b/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
index 7e847a2a09..b1f4f1d18b 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
@@ -2,18 +2,18 @@ set hive.mapred.mode=nonstrict;
set hive.exec.reducers.max = 10;
set hive.map.groupby.sorted=true;
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n164(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n164;
-- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n164 select key, val from T1_n164;
-CREATE TABLE outputTbl1(key int, cnt int);
+CREATE TABLE outputTbl1_n35(key int, cnt int);
-- The plan should be converted to a map-side group by if the group by key
-- matches the sorted key. However, in test mode, the group by won't be converted.
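-- (the guard is Hive's test-mode switch for this optimization, presumably the
-- hive.map.groupby.sorted.testmode flag of this era, which keeps the rewrite
-- from firing so the unconverted plan stays visible in the EXPLAIN)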
EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n35
+SELECT key, count(1) FROM T1_n164 GROUP BY key;

diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort.q b/ql/src/test/queries/clientpositive/infer_bucket_sort.q
index 39789250c6..0785bca9d5 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort.q
@@ -6,156 +6,156 @@ set hive.exec.infer.bucket.sort=true;
-- This tests inferring how data is bucketed/sorted from the operators in the reducer
-- and populating that information in partitions' metadata
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_n5 (key STRING, value STRING) PARTITIONED BY (part STRING);
-- Test group by, should be bucketed and sorted by group by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, count(*) FROM src GROUP BY key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by where a key isn't selected, should not be bucketed or sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, count(*) FROM src GROUP BY key, value;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test join, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test join with two keys, should be bucketed and sorted by join keys
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test join with two keys and only one selected, should not be bucketed or sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, '1' FROM src a JOIN src b ON a.key = b.key AND a.value = b.value;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test join on three tables on same key, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key);
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test join on three tables on different keys, should be bucketed and sorted by latter key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value);
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test distribute by, should only be bucketed by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src DISTRIBUTE BY key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test sort by, should be sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src SORT BY key ASC;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test sort by desc, should be sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src SORT BY key DESC;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test cluster by, should be bucketed and sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src CLUSTER BY key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test distribute by and sort by different keys, should be bucketed by one key sorted by the other
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src DISTRIBUTE BY key SORT BY value;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test join in simple subquery, should be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value from (SELECT a.key, b.value FROM src a JOIN src b ON (a.key = b.key)) subq;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test join in simple subquery renaming key column, should be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT k, value FROM (SELECT a.key as k, b.value FROM src a JOIN src b ON (a.key = b.key)) subq;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in simple subquery, should be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, cnt from (SELECT key, count(*) as cnt FROM src GROUP BY key) subq;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in simple subquery renaming key column, should be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT k, cnt FROM (SELECT key as k, count(*) as cnt FROM src GROUP BY key) subq;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in subquery with where outside, should still be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in subquery with expression on value, should still be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value + 1 FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in subquery with lateral view outside, should still be bucketed and sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key FROM src group by key) a lateral view explode(array(1, 2)) value as value;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in subquery with another group by outside, should be bucketed and sorted by the
-- key of the outer group by
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in subquery with select on outside reordering the columns, should be bucketed and
-- sorted by the column the group by key ends up in
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT value, key FROM (SELECT key, count(1) as value FROM src group by key) a;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in subquery followed by distribute by, should only be bucketed by the distribute key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a distribute by key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in subquery followed by sort by, should only be sorted by the sort key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a sort by key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by in subquery followed by transform script, should not be bucketed or sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT TRANSFORM (a.key, a.value) USING 'cat' AS (key, value) FROM (SELECT key, count(1) AS value FROM src GROUP BY KEY) a;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
-- Test group by on function, should be bucketed and sorted by key and value because the function is applied in the mapper
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT concat(key, "a") AS key, value, count(*) FROM src GROUP BY concat(key, "a"), value) a;
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q
index 10b5d4f85c..b33011aae4 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q
@@ -18,13 +18,13 @@ set hive.auto.convert.join=true;
 -- and populating that information in partitions' metadata. In particular, those cases
 -- where joins may be auto converted to map joins.
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_n11 (key STRING, value STRING) PARTITIONED BY (part STRING);
 -- Tests a join which is converted to a map join, the output should be neither bucketed nor sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n11 PARTITION (part = '1')
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n11 PARTITION (part = '1');
 set hive.mapjoin.check.memory.rows=1;
 set hive.mapjoin.localtask.max.memory.usage = 0.0001;
@@ -33,7 +33,7 @@ set hive.auto.convert.join.noconditionaltask = false;
 -- This test tests the scenario when the mapper dies. So, create a conditional task for the mapjoin.
 -- Tests a join which is not converted to a map join, the output should be bucketed and sorted.
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n11 PARTITION (part = '1')
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n11 PARTITION (part = '1');
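The two joins in infer_bucket_sort_convert_join.q differ only in whether the runtime converts them to map joins: a map join runs map-only, so there is no reduce-side shuffle from which bucketing or sorting could be inferred. A hedged sketch of the contrast, separate from the patch (demo_join_out is an illustrative name):

set hive.exec.infer.bucket.sort=true;
set hive.auto.convert.join=false;  -- keep the reduce-side join so the shuffle exists
CREATE TABLE demo_join_out (key STRING, value STRING) PARTITIONED BY (part STRING);  -- illustrative name
INSERT OVERWRITE TABLE demo_join_out PARTITION (part = '1')
SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
-- With the shuffle on the join key in place, the partition metadata is expected to
-- list key under Bucket Columns and Sort Columns; with auto conversion on, it would not.
DESCRIBE FORMATTED demo_join_out PARTITION (part = '1');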
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
index 155b78de85..29f807b892 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
@@ -12,54 +12,54 @@ set hive.merge.mapredfiles=false;
 -- and populating that information in partitions' metadata. In particular, those cases
 -- where dynamic partitioning is used.
-CREATE TABLE test_table LIKE srcpart;
-ALTER TABLE test_table SET FILEFORMAT RCFILE;
+CREATE TABLE test_table_n8 LIKE srcpart;
+ALTER TABLE test_table_n8 SET FILEFORMAT RCFILE;
 -- Simple case, this should not be bucketed or sorted
-INSERT OVERWRITE TABLE test_table PARTITION (ds, hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds, hr)
 SELECT key, value, ds, hr FROM srcpart WHERE ds = '2008-04-08';
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='11');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='11');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='12');
 -- This should not be bucketed or sorted since the partition keys are in the set of bucketed
 -- and sorted columns for the output
-INSERT OVERWRITE TABLE test_table PARTITION (ds, hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds, hr)
 SELECT key, COUNT(*), ds, hr FROM srcpart WHERE ds = '2008-04-08' GROUP BY key, ds, hr;
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='11');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='11');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='12');
 -- Both partitions should be bucketed and sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (ds, hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds, hr)
 SELECT key, value, '2008-04-08', IF (key % 2 == 0, '11', '12') FROM
 (SELECT key, COUNT(*) AS value FROM srcpart WHERE ds = '2008-04-08' GROUP BY key) a;
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='11');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='11');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='12');
-CREATE TABLE srcpart_merge_dp LIKE srcpart;
+CREATE TABLE srcpart_merge_dp_n3 LIKE srcpart;
-CREATE TABLE srcpart_merge_dp_rc LIKE srcpart;
-ALTER TABLE srcpart_merge_dp_rc SET FILEFORMAT RCFILE;
+CREATE TABLE srcpart_merge_dp_rc_n0 LIKE srcpart;
+ALTER TABLE srcpart_merge_dp_rc_n0 SET FILEFORMAT RCFILE;
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=12);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=12);
-INSERT OVERWRITE TABLE srcpart_merge_dp_rc PARTITION (ds = '2008-04-08', hr)
-SELECT key, value, hr FROM srcpart_merge_dp WHERE ds = '2008-04-08';
+INSERT OVERWRITE TABLE srcpart_merge_dp_rc_n0 PARTITION (ds = '2008-04-08', hr)
+SELECT key, value, hr FROM srcpart_merge_dp_n3 WHERE ds = '2008-04-08';
 set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.merge.mapfiles=true;
@@ -75,17 +75,17 @@ set mapred.reduce.tasks=2;
 -- definitely not be.
 EXPLAIN
-INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds = '2008-04-08', hr)
 SELECT key, value, IF (key % 100 == 0, '11', '12') FROM
 (SELECT key, COUNT(*) AS value FROM srcpart WHERE ds = '2008-04-08' GROUP BY key) a;
-INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds = '2008-04-08', hr)
 SELECT key, value, IF (key % 100 == 0, '11', '12') FROM
 (SELECT key, COUNT(*) AS value FROM srcpart WHERE ds = '2008-04-08' GROUP BY key) a;
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='11');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='11');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='12');
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
index 782145b82a..c881d79a1c 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
@@ -9,47 +9,47 @@ set hive.exec.infer.bucket.sort=true;
 -- and populating that information in partitions' metadata, in particular, this tests
 -- that operators in the mapper have no effect
-CREATE TABLE test_table1 (key STRING, value STRING)
+CREATE TABLE test_table1_n14 (key STRING, value STRING)
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS;
-CREATE TABLE test_table2 (key STRING, value STRING)
+CREATE TABLE test_table2_n13 (key STRING, value STRING)
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS;
-INSERT OVERWRITE TABLE test_table1 SELECT key, value FROM src;
+INSERT OVERWRITE TABLE test_table1_n14 SELECT key, value FROM src;
-INSERT OVERWRITE TABLE test_table2 SELECT key, value FROM src;
+INSERT OVERWRITE TABLE test_table2_n13 SELECT key, value FROM src;
-CREATE TABLE test_table_out (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_out_n0 (key STRING, value STRING) PARTITIONED BY (part STRING);
 set hive.map.groupby.sorted=true;
 -- Test map group by doesn't affect inference, should not be bucketed or sorted
-EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT key, count(*) FROM test_table1 GROUP BY key;
+EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT key, count(*) FROM test_table1_n14 GROUP BY key;
-INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT key, count(*) FROM test_table1 GROUP BY key;
+INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT key, count(*) FROM test_table1_n14 GROUP BY key;
-DESCRIBE FORMATTED test_table_out PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1');
 -- Test map group by doesn't affect inference, should be bucketed and sorted by value
-EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
+EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
 SELECT a.key, a.value FROM (
- SELECT key, count(*) AS value FROM test_table1 GROUP BY key
+ SELECT key, count(*) AS value FROM test_table1_n14 GROUP BY key
 ) a JOIN (
 SELECT key, value FROM src
 ) b
 ON (a.value = b.value);
-INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
 SELECT a.key, a.value FROM (
- SELECT key, cast(count(*) AS STRING) AS value FROM test_table1 GROUP BY key
+ SELECT key, cast(count(*) AS STRING) AS value FROM test_table1_n14 GROUP BY key
 ) a JOIN (
 SELECT key, value FROM src
 ) b
 ON (a.value = b.value);
-DESCRIBE FORMATTED test_table_out PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1');
 set hive.map.groupby.sorted=false;
 set hive.optimize.bucketmapjoin = true;
@@ -57,22 +57,22 @@ set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.cbo.enable=false;
 -- Test SMB join doesn't affect inference, should not be bucketed or sorted
-EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key;
+EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key;
-INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key;
+INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key;
-DESCRIBE FORMATTED test_table_out PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1');
 -- Test SMB join doesn't affect inference, should be bucketed and sorted by key
-EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
 GROUP BY b.value;
-INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
 GROUP BY b.value;
-DESCRIBE FORMATTED test_table_out PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1');
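The map_operators cases rest on inference looking only at the job's final reduce stage: map-side work such as a cast neither adds nor removes inferred properties. A small sketch under that assumption (demo_map_ops is an illustrative name, not part of the patch):

set hive.exec.infer.bucket.sort=true;
CREATE TABLE demo_map_ops (key STRING, cnt STRING) PARTITIONED BY (part STRING);  -- illustrative name
-- The cast is applied in the projection, not in the shuffle, so the inferred
-- bucket/sort column is still expected to be key from the GROUP BY.
INSERT OVERWRITE TABLE demo_map_ops PARTITION (part = '1')
SELECT key, cast(count(*) AS STRING) FROM src GROUP BY key;
DESCRIBE FORMATTED demo_map_ops PARTITION (part = '1');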
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
index cf44041eff..e9a5324234 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
@@ -10,19 +10,19 @@ set mapred.reduce.tasks=2;
 -- and populating that information in partitions' metadata. In particular, those cases
 -- where merging may or may not be used.
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_n16 (key STRING, value STRING) PARTITIONED BY (part STRING);
 -- Tests a reduce task followed by a merge. The output should be neither bucketed nor sorted.
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n16 PARTITION (part = '1')
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n16 PARTITION (part = '1');
 set hive.merge.smallfiles.avgsize=2;
 set hive.exec.compress.output=false;
 -- Tests a reduce task followed by a move. The output should be bucketed and sorted.
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n16 PARTITION (part = '1')
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n16 PARTITION (part = '1');
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
index 23839782c6..a8f5e17bc7 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
@@ -4,7 +4,7 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.reduce.tasks=2;
-CREATE TABLE test_table (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING);
+CREATE TABLE test_table_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING);
 -- Tests dynamic partitions where bucketing/sorting can be inferred, but not all reducers write
 -- all partitions. The subquery produces rows as follows
@@ -20,19 +20,19 @@ CREATE TABLE test_table (key INT, value STRING) PARTITIONED BY (ds STRING, hr ST
 -- and hr=1 should not.
 EXPLAIN
-INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
+INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr)
 SELECT key2, value, cast(hr as int) FROM
 (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr FROM srcpart WHERE ds = '2008-04-08') a
 DISTRIBUTE BY key2;
-INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
+INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr)
 SELECT key2, value, cast(hr as int) FROM
 (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 3 % 2) as hr FROM srcpart WHERE ds = '2008-04-08') a
 DISTRIBUTE BY key2;
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='0');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='1');
+DESCRIBE FORMATTED test_table_n0 PARTITION (ds='2008-04-08', hr='0');
+DESCRIBE FORMATTED test_table_n0 PARTITION (ds='2008-04-08', hr='1');
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q
index c017502df5..08d3a16f85 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q
@@ -10,41 +10,41 @@ set hive.exec.reducers.bytes.per.reducer=2500;
 -- and populating that information in partitions' metadata, it also verifies that the
 -- number of reducers chosen will be a power of two
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_n14 (key STRING, value STRING) PARTITIONED BY (part STRING);
 -- Test group by, should be bucketed and sorted by group by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1')
 SELECT key, count(*) FROM src GROUP BY key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 -- Test join, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1')
 SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 -- Test join with two keys, should be bucketed and sorted by join keys
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1')
 SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 -- Test join on three tables on same key, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1')
 SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key);
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 -- Test join on three tables on different keys, should be bucketed and sorted by latter key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1')
 SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value);
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 -- Test group by in subquery with another group by outside, should be bucketed and sorted by the
 -- key of the outer group by
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1')
 SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value;
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
diff --git a/ql/src/test/queries/clientpositive/infer_join_preds.q b/ql/src/test/queries/clientpositive/infer_join_preds.q
index 4787de1b43..4a9ce6e561 100644
--- a/ql/src/test/queries/clientpositive/infer_join_preds.q
+++ b/ql/src/test/queries/clientpositive/infer_join_preds.q
@@ -65,7 +65,7 @@ right outer join
 explain select * from src join src1 on src.key = src1.key and src.value = src1.value where 4 between src.key and src.value;
- CREATE TABLE `table1`(
+ CREATE TABLE `table1_n8`(
 `idp_warehouse_id` bigint,
 `idp_audit_id` bigint,
 `idp_effective_date` date,
@@ -91,7 +91,7 @@ explain select * from src join src1 on src.key = src1.key and src.value = src1.v
 `odf_ss_actuals` bigint,
 `practsum` decimal(38,20));
- CREATE TABLE `table2`(
+ CREATE TABLE `table2_n4`(
 `idp_warehouse_id` bigint,
 `idp_audit_id` bigint,
 `idp_effective_date` date,
@@ -118,14 +118,14 @@ explain select * from src join src1 on src.key = src1.key and src.value = src1.v
 `practsum` decimal(38,20));
 explain SELECT s.idp_warehouse_id AS source_warehouse_id
- FROM table1 s
+ FROM table1_n8 s
 JOIN
- table2 d
+ table2_n4 d
 ON
 (
 s.prid = d.prid
 )
 JOIN
- table2 e
+ table2_n4 e
 ON
 s.prid = e.prid
 WHERE
@@ -279,7 +279,7 @@ explain select * from src join src1 on src.key = src1.key and src.value = src1.v
 ELSE d.odf_ss_actuals
 END );
-drop table table2;
-drop table table1;
+drop table table2_n4;
+drop table table1_n8;
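For context, the first explain in infer_join_preds.q exercises transitive predicate inference: a residual filter written against one input of an equi-join can be mirrored onto the other input through the join keys. A hedged restatement of what the plan is expected to show (the rename patch itself asserts nothing new here):

-- Given src.key = src1.key and src.value = src1.value, the filter below on src
-- should also be derivable as 4 between src1.key and src1.value on the src1 scan.
explain
select * from src join src1 on src.key = src1.key and src.value = src1.value
where 4 between src.key and src.value;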
a/ql/src/test/queries/clientpositive/innerjoin.q b/ql/src/test/queries/clientpositive/innerjoin.q index 9d38879159..dacc7de4ea 100644 --- a/ql/src/test/queries/clientpositive/innerjoin.q +++ b/ql/src/test/queries/clientpositive/innerjoin.q @@ -5,26 +5,26 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n20(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n20 SELECT src1.key, src2.value; FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n20 SELECT src1.key, src2.value; -SELECT dest_j1.* FROM dest_j1; +SELECT dest_j1_n20.* FROM dest_j1_n20; -- verify that INNER is a non-reserved word for backwards compatibility -- change from HIVE-6617, inner is a SQL2011 reserved keyword. -create table `inner`(i int); +create table `inner`(i_n2 int); -select i from `inner`; +select i_n2 from `inner`; -create table i(`inner` int); +create table i_n2(`inner` int); -select `inner` from i; +select `inner` from i_n2; explain select * from (select * from src) `inner` left outer join src on `inner`.key=src.key; diff --git a/ql/src/test/queries/clientpositive/inoutdriver.q b/ql/src/test/queries/clientpositive/inoutdriver.q index 34b0e0b28e..b2fca93c97 100644 --- a/ql/src/test/queries/clientpositive/inoutdriver.q +++ b/ql/src/test/queries/clientpositive/inoutdriver.q @@ -1,2 +1,2 @@ -create table test (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver'; -desc extended test; +create table test_n3 (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver'; +desc extended test_n3; diff --git a/ql/src/test/queries/clientpositive/input1.q b/ql/src/test/queries/clientpositive/input1.q index 3f2cd96de9..148f0d2712 100644 --- a/ql/src/test/queries/clientpositive/input1.q +++ b/ql/src/test/queries/clientpositive/input1.q @@ -1,9 +1,9 @@ -CREATE TABLE TEST1(A INT, B DOUBLE) STORED AS TEXTFILE; +CREATE TABLE TEST1_n6(A INT, B DOUBLE) STORED AS TEXTFILE; EXPLAIN -DESCRIBE TEST1; +DESCRIBE TEST1_n6; -DESCRIBE TEST1; +DESCRIBE TEST1_n6; diff --git a/ql/src/test/queries/clientpositive/input11.q b/ql/src/test/queries/clientpositive/input11.q index 788e8e6a5f..17b9dd79c8 100644 --- a/ql/src/test/queries/clientpositive/input11.q +++ b/ql/src/test/queries/clientpositive/input11.q @@ -1,12 +1,12 @@ --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n152(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100; +INSERT OVERWRITE TABLE dest1_n152 SELECT src.key, src.value WHERE src.key < 100; FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100; +INSERT OVERWRITE TABLE dest1_n152 SELECT src.key, src.value WHERE src.key < 100; -SELECT dest1.* FROM dest1; +SELECT dest1_n152.* FROM dest1_n152; diff --git a/ql/src/test/queries/clientpositive/input11_limit.q b/ql/src/test/queries/clientpositive/input11_limit.q index c5e500778b..3bc08f4782 100644 --- a/ql/src/test/queries/clientpositive/input11_limit.q +++ b/ql/src/test/queries/clientpositive/input11_limit.q @@ -3,13 +3,13 @@ set hive.stats.column.autogather=false; set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n153(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10; +INSERT OVERWRITE TABLE dest1_n153 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10; FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10; +INSERT OVERWRITE TABLE dest1_n153 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10; -SELECT dest1.* FROM dest1; +SELECT dest1_n153.* FROM dest1_n153; diff --git a/ql/src/test/queries/clientpositive/input12.q b/ql/src/test/queries/clientpositive/input12.q index 5b812ff036..b75ce20f53 100644 --- a/ql/src/test/queries/clientpositive/input12.q +++ b/ql/src/test/queries/clientpositive/input12.q @@ -6,21 +6,21 @@ set hive.exec.mode.local.auto=true; -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; -CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE; -CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n122(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest2_n32(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest3_n5(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200; +INSERT OVERWRITE TABLE dest1_n122 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n32 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n5 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200; FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200; +INSERT OVERWRITE TABLE dest1_n122 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n32 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n5 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200; -SELECT dest1.* FROM dest1; -SELECT dest2.* FROM dest2; -SELECT dest3.* FROM dest3; +SELECT dest1_n122.* FROM dest1_n122; +SELECT dest2_n32.* 
FROM dest2_n32; +SELECT dest3_n5.* FROM dest3_n5; diff --git a/ql/src/test/queries/clientpositive/input12_hadoop20.q b/ql/src/test/queries/clientpositive/input12_hadoop20.q index 3c00101a51..e9f2baf22d 100644 --- a/ql/src/test/queries/clientpositive/input12_hadoop20.q +++ b/ql/src/test/queries/clientpositive/input12_hadoop20.q @@ -4,21 +4,21 @@ set hive.exec.mode.local.auto=true; -- INCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; -CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE; -CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n88(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest2_n23(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest3_n2(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200; +INSERT OVERWRITE TABLE dest1_n88 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n23 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n2 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200; FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200; +INSERT OVERWRITE TABLE dest1_n88 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n23 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n2 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200; -SELECT dest1.* FROM dest1; -SELECT dest2.* FROM dest2; -SELECT dest3.* FROM dest3; +SELECT dest1_n88.* FROM dest1_n88; +SELECT dest2_n23.* FROM dest2_n23; +SELECT dest3_n2.* FROM dest3_n2; diff --git a/ql/src/test/queries/clientpositive/input13.q b/ql/src/test/queries/clientpositive/input13.q index 5f274f650b..00acc29ea8 100644 --- a/ql/src/test/queries/clientpositive/input13.q +++ b/ql/src/test/queries/clientpositive/input13.q @@ -1,23 +1,23 @@ --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; -CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE; -CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n77(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest2_n16(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest3_n1(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 +INSERT OVERWRITE TABLE dest1_n77 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n16 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n1 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300; FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 +INSERT OVERWRITE TABLE dest1_n77 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n16 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n1 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300; -SELECT dest1.* FROM dest1; -SELECT dest2.* FROM dest2; -SELECT dest3.* FROM dest3; +SELECT dest1_n77.* FROM dest1_n77; +SELECT dest2_n16.* FROM dest2_n16; +SELECT dest3_n1.* FROM dest3_n1; dfs -cat ${system:test.warehouse.dir}/dest4.out/*; diff --git a/ql/src/test/queries/clientpositive/input14.q b/ql/src/test/queries/clientpositive/input14.q index 5ddee8a27a..ffa9a5a480 100644 --- a/ql/src/test/queries/clientpositive/input14.q +++ b/ql/src/test/queries/clientpositive/input14.q @@ -1,5 +1,5 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n42(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -8,7 +8,7 @@ FROM ( USING 'cat' AS (tkey, tvalue) CLUSTER BY tkey ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100; +INSERT OVERWRITE TABLE dest1_n42 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100; FROM ( FROM src @@ -16,8 +16,8 @@ FROM ( USING 'cat' AS (tkey, tvalue) CLUSTER BY tkey ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100; +INSERT OVERWRITE TABLE dest1_n42 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100; -- SORT_QUERY_RESULTS -SELECT dest1.* FROM dest1; +SELECT dest1_n42.* FROM dest1_n42; diff --git a/ql/src/test/queries/clientpositive/input14_limit.q b/ql/src/test/queries/clientpositive/input14_limit.q index fee94a34b5..21557ae93f 100644 --- a/ql/src/test/queries/clientpositive/input14_limit.q +++ b/ql/src/test/queries/clientpositive/input14_limit.q @@ -1,7 +1,7 @@ --! 
qt:dataset:src set hive.stats.column.autogather=false; -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n13(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -10,7 +10,7 @@ FROM ( USING 'cat' AS (tkey, tvalue) CLUSTER BY tkey LIMIT 20 ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100; +INSERT OVERWRITE TABLE dest1_n13 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100; FROM ( FROM src @@ -18,6 +18,6 @@ FROM ( USING 'cat' AS (tkey, tvalue) CLUSTER BY tkey LIMIT 20 ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100; +INSERT OVERWRITE TABLE dest1_n13 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100; -SELECT dest1.* FROM dest1; +SELECT dest1_n13.* FROM dest1_n13; diff --git a/ql/src/test/queries/clientpositive/input17.q b/ql/src/test/queries/clientpositive/input17.q index bd7cd02362..df8c3e00d7 100644 --- a/ql/src/test/queries/clientpositive/input17.q +++ b/ql/src/test/queries/clientpositive/input17.q @@ -1,5 +1,5 @@ --! qt:dataset:src_thrift -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n81(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -8,7 +8,7 @@ FROM ( USING 'cat' AS (tkey, tvalue) CLUSTER BY tkey ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue; +INSERT OVERWRITE TABLE dest1_n81 SELECT tmap.tkey, tmap.tvalue; FROM ( FROM src_thrift @@ -16,8 +16,8 @@ FROM ( USING 'cat' AS (tkey, tvalue) CLUSTER BY tkey ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue; +INSERT OVERWRITE TABLE dest1_n81 SELECT tmap.tkey, tmap.tvalue; -- SORT_QUERY_RESULTS -SELECT dest1.* FROM dest1; +SELECT dest1_n81.* FROM dest1_n81; diff --git a/ql/src/test/queries/clientpositive/input18.q b/ql/src/test/queries/clientpositive/input18.q index 3481f01e53..4f361d1b9d 100644 --- a/ql/src/test/queries/clientpositive/input18.q +++ b/ql/src/test/queries/clientpositive/input18.q @@ -1,5 +1,5 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n124(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -8,7 +8,7 @@ FROM ( USING 'cat' CLUSTER BY key ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100; +INSERT OVERWRITE TABLE dest1_n124 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100; FROM ( FROM src @@ -16,8 +16,8 @@ FROM ( USING 'cat' CLUSTER BY key ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100; +INSERT OVERWRITE TABLE dest1_n124 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100; -- SORT_QUERY_RESULTS -SELECT dest1.* FROM dest1; +SELECT dest1_n124.* FROM dest1_n124; diff --git a/ql/src/test/queries/clientpositive/input1_limit.q b/ql/src/test/queries/clientpositive/input1_limit.q index 384976aa83..bfa5b32640 100644 --- a/ql/src/test/queries/clientpositive/input1_limit.q +++ b/ql/src/test/queries/clientpositive/input1_limit.q @@ -1,20 +1,20 @@ --! 
qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; -CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n12(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest2_n2(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5; +INSERT OVERWRITE TABLE dest1_n12 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10 +INSERT OVERWRITE TABLE dest2_n2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5; FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5; +INSERT OVERWRITE TABLE dest1_n12 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10 +INSERT OVERWRITE TABLE dest2_n2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5; -SELECT dest1.* FROM dest1; -SELECT dest2.* FROM dest2; +SELECT dest1_n12.* FROM dest1_n12; +SELECT dest2_n2.* FROM dest2_n2; diff --git a/ql/src/test/queries/clientpositive/input20.q b/ql/src/test/queries/clientpositive/input20.q index c4435382da..fb425b7044 100644 --- a/ql/src/test/queries/clientpositive/input20.q +++ b/ql/src/test/queries/clientpositive/input20.q @@ -1,5 +1,5 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n138(key INT, value STRING) STORED AS TEXTFILE; ADD FILE ../../data/scripts/input20_script.py; @@ -10,7 +10,7 @@ FROM ( USING 'cat' DISTRIBUTE BY key, value ) tmap -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n138 REDUCE tmap.key, tmap.value USING 'python input20_script.py' AS key, value; @@ -21,9 +21,9 @@ FROM ( USING 'cat' DISTRIBUTE BY key, value ) tmap -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n138 REDUCE tmap.key, tmap.value USING 'python input20_script.py' AS key, value; -SELECT * FROM dest1 ORDER BY key, value; +SELECT * FROM dest1_n138 ORDER BY key, value; diff --git a/ql/src/test/queries/clientpositive/input24.q b/ql/src/test/queries/clientpositive/input24.q index 95b2377f51..b4b9bfcfd8 100644 --- a/ql/src/test/queries/clientpositive/input24.q +++ b/ql/src/test/queries/clientpositive/input24.q @@ -1,9 +1,9 @@ -create table tst(a int, b int) partitioned by (d string); -alter table tst add partition (d='2009-01-01'); +create table tst_n1(a int, b int) partitioned by (d string); +alter table tst_n1 add partition (d='2009-01-01'); explain -select count(1) from tst x where x.d='2009-01-01'; +select count(1) from tst_n1 x where x.d='2009-01-01'; -select count(1) from tst x where x.d='2009-01-01'; +select count(1) from tst_n1 x where x.d='2009-01-01'; diff --git a/ql/src/test/queries/clientpositive/input28.q b/ql/src/test/queries/clientpositive/input28.q index edab7dd9af..47e36ebdb6 100644 --- a/ql/src/test/queries/clientpositive/input28.q +++ b/ql/src/test/queries/clientpositive/input28.q @@ -1,12 +1,12 @@ --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -create table tst(a string, b string) partitioned by (d string); -alter table tst add partition (d='2009-01-01'); +create table tst_n0(a string, b string) partitioned by (d string); +alter table tst_n0 add partition (d='2009-01-01'); -insert overwrite table tst partition(d='2009-01-01') -select tst.a, src.value from tst join src ON (tst.a = src.key); +insert overwrite table tst_n0 partition(d='2009-01-01') +select tst_n0.a, src.value from tst_n0 join src ON (tst_n0.a = src.key); -select * from tst where tst.d='2009-01-01'; +select * from tst_n0 where tst_n0.d='2009-01-01'; diff --git a/ql/src/test/queries/clientpositive/input33.q b/ql/src/test/queries/clientpositive/input33.q index 4398ebfee2..26cb725975 100644 --- a/ql/src/test/queries/clientpositive/input33.q +++ b/ql/src/test/queries/clientpositive/input33.q @@ -1,5 +1,5 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n135(key INT, value STRING) STORED AS TEXTFILE; ADD FILE ../../data/scripts/input20_script.py; @@ -10,7 +10,7 @@ FROM ( USING 'cat' DISTRIBUTE BY key, value ) tmap -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n135 REDUCE tmap.key, tmap.value USING 'python input20_script.py' AS (key STRING, value STRING); @@ -21,9 +21,9 @@ FROM ( USING 'cat' DISTRIBUTE BY key, value ) tmap -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n135 REDUCE tmap.key, tmap.value USING 'python input20_script.py' AS (key STRING, value STRING); -SELECT * FROM dest1 ORDER BY key, value; +SELECT * FROM dest1_n135 ORDER BY key, value; diff --git a/ql/src/test/queries/clientpositive/input34.q b/ql/src/test/queries/clientpositive/input34.q index 06fe223687..293a9c44de 100644 --- a/ql/src/test/queries/clientpositive/input34.q +++ b/ql/src/test/queries/clientpositive/input34.q @@ -1,5 +1,5 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n161(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -8,7 +8,7 @@ FROM ( USING 'cat' AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue; +INSERT OVERWRITE TABLE dest1_n161 SELECT tkey, tvalue; FROM ( FROM src @@ -16,6 +16,6 @@ FROM ( USING 'cat' AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue; +INSERT OVERWRITE TABLE dest1_n161 SELECT tkey, tvalue; -SELECT dest1.* FROM dest1; +SELECT dest1_n161.* FROM dest1_n161; diff --git a/ql/src/test/queries/clientpositive/input35.q b/ql/src/test/queries/clientpositive/input35.q index e4435c64e0..0c6fa62aed 100644 --- a/ql/src/test/queries/clientpositive/input35.q +++ b/ql/src/test/queries/clientpositive/input35.q @@ -1,5 +1,5 @@ --! 
qt:dataset:src -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n25(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -8,7 +8,7 @@ FROM ( USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue; +INSERT OVERWRITE TABLE dest1_n25 SELECT tkey, tvalue; FROM ( FROM src @@ -16,6 +16,6 @@ FROM ( USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue; +INSERT OVERWRITE TABLE dest1_n25 SELECT tkey, tvalue; -SELECT dest1.* FROM dest1; +SELECT dest1_n25.* FROM dest1_n25; diff --git a/ql/src/test/queries/clientpositive/input36.q b/ql/src/test/queries/clientpositive/input36.q index 6e97cb8c8a..2f8bee5dbb 100644 --- a/ql/src/test/queries/clientpositive/input36.q +++ b/ql/src/test/queries/clientpositive/input36.q @@ -1,5 +1,5 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n70(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -8,7 +8,7 @@ FROM ( USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003' ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue; +INSERT OVERWRITE TABLE dest1_n70 SELECT tkey, tvalue; FROM ( FROM src @@ -16,6 +16,6 @@ FROM ( USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003' ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue; +INSERT OVERWRITE TABLE dest1_n70 SELECT tkey, tvalue; -SELECT dest1.* FROM dest1; +SELECT dest1_n70.* FROM dest1_n70; diff --git a/ql/src/test/queries/clientpositive/input38.q b/ql/src/test/queries/clientpositive/input38.q index 1639764e5f..97049aca61 100644 --- a/ql/src/test/queries/clientpositive/input38.q +++ b/ql/src/test/queries/clientpositive/input38.q @@ -1,6 +1,6 @@ --! 
qt:dataset:src -CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n91(key STRING, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -8,16 +8,16 @@ FROM ( SELECT TRANSFORM(src.key, src.value, 1+2, 3+4) USING 'cat' ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value; +INSERT OVERWRITE TABLE dest1_n91 SELECT tmap.key, tmap.value; FROM ( FROM src SELECT TRANSFORM(src.key, src.value, 1+2, 3+4) USING 'cat' ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value; +INSERT OVERWRITE TABLE dest1_n91 SELECT tmap.key, tmap.value; -SELECT dest1.* FROM dest1; +SELECT dest1_n91.* FROM dest1_n91; diff --git a/ql/src/test/queries/clientpositive/input39.q b/ql/src/test/queries/clientpositive/input39.q index 5dd837298b..b757f8ef45 100644 --- a/ql/src/test/queries/clientpositive/input39.q +++ b/ql/src/test/queries/clientpositive/input39.q @@ -2,16 +2,16 @@ -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) -create table t1(key string, value string) partitioned by (ds string); -create table t2(key string, value string) partitioned by (ds string); +create table t1_n121(key string, value string) partitioned by (ds string); +create table t2_n71(key string, value string) partitioned by (ds string); -insert overwrite table t1 partition (ds='1') +insert overwrite table t1_n121 partition (ds='1') select key, value from src; -insert overwrite table t1 partition (ds='2') +insert overwrite table t1_n121 partition (ds='2') select key, value from src; -insert overwrite table t2 partition (ds='1') +insert overwrite table t2_n71 partition (ds='1') select key, value from src; set hive.test.mode=true; @@ -21,9 +21,9 @@ set mapreduce.jobtracker.address=localhost:58; set hive.exec.mode.local.auto=true; explain -select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1'; +select count(1) from t1_n121 join t2_n71 on t1_n121.key=t2_n71.key where t1_n121.ds='1' and t2_n71.ds='1'; -select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1'; +select count(1) from t1_n121 join t2_n71 on t1_n121.key=t2_n71.key where t1_n121.ds='1' and t2_n71.ds='1'; set hive.test.mode=false; set mapreduce.framework.name; diff --git a/ql/src/test/queries/clientpositive/input39_hadoop20.q b/ql/src/test/queries/clientpositive/input39_hadoop20.q index ad5207d8f2..26f2a6e1c3 100644 --- a/ql/src/test/queries/clientpositive/input39_hadoop20.q +++ b/ql/src/test/queries/clientpositive/input39_hadoop20.q @@ -2,16 +2,16 @@ -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) -create table t1(key string, value string) partitioned by (ds string); -create table t2(key string, value string) partitioned by (ds string); +create table t1_n77(key string, value string) partitioned by (ds string); +create table t2_n46(key string, value string) partitioned by (ds string); -insert overwrite table t1 partition (ds='1') +insert overwrite table t1_n77 partition (ds='1') select key, value from src; -insert overwrite table t1 partition (ds='2') +insert overwrite table t1_n77 partition (ds='2') select key, value from src; -insert overwrite table t2 partition (ds='1') +insert overwrite table t2_n46 partition (ds='1') select key, value from src; set hive.test.mode=true; @@ -20,9 +20,9 @@ set mapred.job.tracker=localhost:58; set hive.exec.mode.local.auto=true; explain -select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1'; +select count(1) from t1_n77 join t2_n46 on t1_n77.key=t2_n46.key where t1_n77.ds='1' and t2_n46.ds='1'; -select count(1) from t1 join t2 on t1.key=t2.key 
where t1.ds='1' and t2.ds='1'; +select count(1) from t1_n77 join t2_n46 on t1_n77.key=t2_n46.key where t1_n77.ds='1' and t2_n46.ds='1'; set hive.test.mode=false; set mapred.job.tracker; diff --git a/ql/src/test/queries/clientpositive/input3_limit.q b/ql/src/test/queries/clientpositive/input3_limit.q index 3e9af60226..822f8dfd12 100644 --- a/ql/src/test/queries/clientpositive/input3_limit.q +++ b/ql/src/test/queries/clientpositive/input3_limit.q @@ -1,17 +1,17 @@ -CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1; -LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1; +CREATE TABLE T1_n78(key STRING, value STRING) STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n78; +LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1_n78; -CREATE TABLE T2(key STRING, value STRING); +CREATE TABLE T2_n47(key STRING, value STRING); EXPLAIN -INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20; +INSERT OVERWRITE TABLE T2_n47 SELECT * FROM (SELECT * FROM T1_n78 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20; -INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20; +INSERT OVERWRITE TABLE T2_n47 SELECT * FROM (SELECT * FROM T1_n78 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20; -SELECT * FROM T2 ORDER BY key, value; +SELECT * FROM T2_n47 ORDER BY key, value; diff --git a/ql/src/test/queries/clientpositive/input4.q b/ql/src/test/queries/clientpositive/input4.q index 90fcbdd695..adee91aee3 100644 --- a/ql/src/test/queries/clientpositive/input4.q +++ b/ql/src/test/queries/clientpositive/input4.q @@ -1,9 +1,9 @@ -CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE; +CREATE TABLE INPUT4_n0(KEY STRING, VALUE STRING) STORED AS TEXTFILE; EXPLAIN -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4; -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4; +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4_n0; +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4_n0; EXPLAIN FORMATTED -SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias; -SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias +SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4_n0 AS Input4Alias; +SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4_n0 AS Input4Alias diff --git a/ql/src/test/queries/clientpositive/input44.q b/ql/src/test/queries/clientpositive/input44.q index cd99ad0f93..c4ed03259a 100644 --- a/ql/src/test/queries/clientpositive/input44.q +++ b/ql/src/test/queries/clientpositive/input44.q @@ -1,7 +1,7 @@ --! qt:dataset:src -CREATE TABLE dest(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_n0(key INT, value STRING) STORED AS TEXTFILE; SET hive.output.file.extension=.txt; -INSERT OVERWRITE TABLE dest SELECT src.* FROM src; +INSERT OVERWRITE TABLE dest_n0 SELECT src.* FROM src; -dfs -cat ${system:test.warehouse.dir}/dest/*.txt +dfs -cat ${system:test.warehouse.dir}/dest_n0/*.txt diff --git a/ql/src/test/queries/clientpositive/input5.q b/ql/src/test/queries/clientpositive/input5.q index 8a2a34b0d7..bf598de0c8 100644 --- a/ql/src/test/queries/clientpositive/input5.q +++ b/ql/src/test/queries/clientpositive/input5.q @@ -1,5 +1,5 @@ --! 
qt:dataset:src_thrift -CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n94(key STRING, value STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -8,7 +8,7 @@ FROM ( USING 'cat' AS (tkey, tvalue) CLUSTER BY tkey ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue; +INSERT OVERWRITE TABLE dest1_n94 SELECT tmap.tkey, tmap.tvalue; FROM ( FROM src_thrift @@ -16,6 +16,6 @@ FROM ( USING 'cat' AS (tkey, tvalue) CLUSTER BY tkey ) tmap -INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue; +INSERT OVERWRITE TABLE dest1_n94 SELECT tmap.tkey, tmap.tvalue; -SELECT dest1.* FROM dest1; +SELECT dest1_n94.* FROM dest1_n94; diff --git a/ql/src/test/queries/clientpositive/input6.q b/ql/src/test/queries/clientpositive/input6.q index 9d6ffc8491..c243a6b5e0 100644 --- a/ql/src/test/queries/clientpositive/input6.q +++ b/ql/src/test/queries/clientpositive/input6.q @@ -1,11 +1,11 @@ --! qt:dataset:src1 -CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n35(key STRING, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src1 -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null; +INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src1.value WHERE src1.key is null; FROM src1 -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null; +INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src1.value WHERE src1.key is null; -SELECT dest1.* FROM dest1; +SELECT dest1_n35.* FROM dest1_n35; diff --git a/ql/src/test/queries/clientpositive/input7.q b/ql/src/test/queries/clientpositive/input7.q index 11ce7ea74f..eba4c8872b 100644 --- a/ql/src/test/queries/clientpositive/input7.q +++ b/ql/src/test/queries/clientpositive/input7.q @@ -1,11 +1,11 @@ --! qt:dataset:src1 -CREATE TABLE dest1(c1 DOUBLE, c2 INT) STORED AS TEXTFILE; +CREATE TABLE dest1_n167(c1 DOUBLE, c2 INT) STORED AS TEXTFILE; EXPLAIN FROM src1 -INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key; +INSERT OVERWRITE TABLE dest1_n167 SELECT NULL, src1.key; FROM src1 -INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key; +INSERT OVERWRITE TABLE dest1_n167 SELECT NULL, src1.key; -SELECT dest1.* FROM dest1; +SELECT dest1_n167.* FROM dest1_n167; diff --git a/ql/src/test/queries/clientpositive/input8.q b/ql/src/test/queries/clientpositive/input8.q index 14b0e99022..cf605b4180 100644 --- a/ql/src/test/queries/clientpositive/input8.q +++ b/ql/src/test/queries/clientpositive/input8.q @@ -1,12 +1,12 @@ --! qt:dataset:src1 set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE; +CREATE TABLE dest1_n28(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE; EXPLAIN FROM src1 -INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL; +INSERT OVERWRITE TABLE dest1_n28 SELECT 4 + NULL, src1.key - NULL, NULL + NULL; FROM src1 -INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL; +INSERT OVERWRITE TABLE dest1_n28 SELECT 4 + NULL, src1.key - NULL, NULL + NULL; -SELECT dest1.* FROM dest1; +SELECT dest1_n28.* FROM dest1_n28; diff --git a/ql/src/test/queries/clientpositive/input9.q b/ql/src/test/queries/clientpositive/input9.q index 5235098170..d44114ce04 100644 --- a/ql/src/test/queries/clientpositive/input9.q +++ b/ql/src/test/queries/clientpositive/input9.q @@ -1,12 +1,12 @@ --! 
qt:dataset:src1 -CREATE TABLE dest1(value STRING, key INT) STORED AS TEXTFILE; +CREATE TABLE dest1_n159(value STRING, key INT) STORED AS TEXTFILE; EXPLAIN FROM src1 -INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key where NULL = NULL; +INSERT OVERWRITE TABLE dest1_n159 SELECT NULL, src1.key where NULL = NULL; FROM src1 -INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key where NULL = NULL; +INSERT OVERWRITE TABLE dest1_n159 SELECT NULL, src1.key where NULL = NULL; -SELECT dest1.* FROM dest1; +SELECT dest1_n159.* FROM dest1_n159; diff --git a/ql/src/test/queries/clientpositive/input_dynamicserde.q b/ql/src/test/queries/clientpositive/input_dynamicserde.q index 4c63a3545a..544977afb8 100644 --- a/ql/src/test/queries/clientpositive/input_dynamicserde.q +++ b/ql/src/test/queries/clientpositive/input_dynamicserde.q @@ -1,5 +1,5 @@ --! qt:dataset:src_thrift -CREATE TABLE dest1(a array, b array, c map, d int, e string) +CREATE TABLE dest1_n114(a array, b array, c map, d int, e string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' COLLECTION ITEMS TERMINATED BY '2' @@ -9,11 +9,11 @@ STORED AS TEXTFILE; EXPLAIN FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring; +INSERT OVERWRITE TABLE dest1_n114 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring; FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring; +INSERT OVERWRITE TABLE dest1_n114 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring; -SELECT dest1.* FROM dest1; +SELECT dest1_n114.* FROM dest1_n114; -SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1; +SELECT dest1_n114.a[0], dest1_n114.b[0], dest1_n114.c['key2'], dest1_n114.d, dest1_n114.e FROM dest1_n114; diff --git a/ql/src/test/queries/clientpositive/input_lazyserde.q b/ql/src/test/queries/clientpositive/input_lazyserde.q index f053fbe136..f028b7ea7f 100644 --- a/ql/src/test/queries/clientpositive/input_lazyserde.q +++ b/ql/src/test/queries/clientpositive/input_lazyserde.q @@ -6,8 +6,8 @@ set hive.test.vectorized.execution.enabled.override=none; -- SORT_QUERY_RESULTS -DROP TABLE dest1; -CREATE TABLE dest1(a array, b array, c map, d int, e string) +DROP TABLE dest1_n43; +CREATE TABLE dest1_n43(a array, b array, c map, d int, e string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' COLLECTION ITEMS TERMINATED BY '2' @@ -17,38 +17,38 @@ STORED AS TEXTFILE; EXPLAIN FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1; +INSERT OVERWRITE TABLE dest1_n43 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1; FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1; +INSERT OVERWRITE TABLE dest1_n43 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1; -SELECT dest1.* FROM dest1 CLUSTER BY 1; +SELECT dest1_n43.* FROM dest1_n43 CLUSTER BY 1; -SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 CLUSTER BY 1; +SELECT dest1_n43.a[0], dest1_n43.b[0], dest1_n43.c['key2'], dest1_n43.d, dest1_n43.e 
+SELECT dest1_n43.a[0], dest1_n43.b[0], dest1_n43.c['key2'], dest1_n43.d, dest1_n43.e FROM dest1_n43 CLUSTER BY 1;

-DROP TABLE dest1;
+DROP TABLE dest1_n43;

-CREATE TABLE dest1(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1;
-SELECT * from dest1;
-DROP TABLE dest1;
+CREATE TABLE dest1_n43(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
+INSERT OVERWRITE TABLE dest1_n43 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1;
+SELECT * from dest1_n43;
+DROP TABLE dest1_n43;

-CREATE TABLE dest1(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1;
-SELECT * from dest1;
+CREATE TABLE dest1_n43(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
+INSERT OVERWRITE TABLE dest1_n43 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1;
+SELECT * from dest1_n43;

 CREATE TABLE destBin(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE;
 INSERT OVERWRITE TABLE destBin SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2;
 SELECT * from destBin;
 DROP TABLE destBin;

-DROP TABLE dest2;
+DROP TABLE dest2_n7;
 DROP TABLE dest3;

-CREATE TABLE dest2 (a map, map>>>>)
+CREATE TABLE dest2_n7 (a map, map>>>>)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift;
-SELECT a from dest2 limit 10;
+INSERT OVERWRITE TABLE dest2_n7 SELECT src_thrift.attributes FROM src_thrift;
+SELECT a from dest2_n7 limit 10;

 CREATE TABLE dest3 (
 unionfield1 uniontype, map>,
diff --git a/ql/src/test/queries/clientpositive/input_lazyserde2.q b/ql/src/test/queries/clientpositive/input_lazyserde2.q
index a1101065ae..e19b0b6a06 100644
--- a/ql/src/test/queries/clientpositive/input_lazyserde2.q
+++ b/ql/src/test/queries/clientpositive/input_lazyserde2.q
@@ -6,8 +6,8 @@ set hive.test.vectorized.execution.enabled.override=none;

 -- SORT_QUERY_RESULTS

-DROP TABLE dest1;
-CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string)
+DROP TABLE dest1_n148;
+CREATE TABLE dest1_n148(a array<int>, b array<string>, c map<string,string>, d int, e string)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '1'
 COLLECTION ITEMS TERMINATED BY '2'
@@ -17,43 +17,43 @@ STORED AS TEXTFILE;

 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
+INSERT OVERWRITE TABLE dest1_n148 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;

 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
+INSERT OVERWRITE TABLE dest1_n148 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;

-SELECT dest1.* FROM dest1 CLUSTER BY 1;
+SELECT dest1_n148.* FROM dest1_n148 CLUSTER BY 1;

-SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 CLUSTER BY 1;
+SELECT dest1_n148.a[0], dest1_n148.b[0], dest1_n148.c['key2'], dest1_n148.d, dest1_n148.e FROM dest1_n148 CLUSTER BY 1;

-DROP TABLE dest1;
+DROP TABLE dest1_n148;
-CREATE TABLE dest1(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1;
-SELECT * from dest1;
-DROP TABLE dest1;
+CREATE TABLE dest1_n148(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
+INSERT OVERWRITE TABLE dest1_n148 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1;
+SELECT * from dest1_n148;
+DROP TABLE dest1_n148;

-CREATE TABLE dest1(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1;
-SELECT * from dest1;
+CREATE TABLE dest1_n148(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
+INSERT OVERWRITE TABLE dest1_n148 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1;
+SELECT * from dest1_n148;

-CREATE TABLE destBin(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE destBin SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2;
-SELECT * from destBin;
-DROP TABLE destBin;
+CREATE TABLE destBin_n0(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE;
+INSERT OVERWRITE TABLE destBin_n0 SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2;
+SELECT * from destBin_n0;
+DROP TABLE destBin_n0;

-DROP TABLE dest2;
-DROP TABLE dest3;
+DROP TABLE dest2_n38;
+DROP TABLE dest3_n6;

-CREATE TABLE dest2 (a map, map>>>>)
+CREATE TABLE dest2_n38 (a map, map>>>>)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift;
-SELECT a from dest2 limit 10;
+INSERT OVERWRITE TABLE dest2_n38 SELECT src_thrift.attributes FROM src_thrift;
+SELECT a from dest2_n38 limit 10;

-CREATE TABLE dest3 (
+CREATE TABLE dest3_n6 (
 unionfield1 uniontype, map>,
 unionfield2 uniontype, map>,
 unionfield3 uniontype, map>
 ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE dest3 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift;
-SELECT unionfield1, unionField2, unionfield3 from dest3 limit 10;
+INSERT OVERWRITE TABLE dest3_n6 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift;
+SELECT unionfield1, unionField2, unionfield3 from dest3_n6 limit 10;
diff --git a/ql/src/test/queries/clientpositive/input_part1.q b/ql/src/test/queries/clientpositive/input_part1.q
index 4bfc85da09..9648de10e7 100644
--- a/ql/src/test/queries/clientpositive/input_part1.q
+++ b/ql/src/test/queries/clientpositive/input_part1.q
@@ -1,13 +1,13 @@
 --! qt:dataset:srcpart
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n45(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;

 EXPLAIN EXTENDED
 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';
+INSERT OVERWRITE TABLE dest1_n45 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';

 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';
+INSERT OVERWRITE TABLE dest1_n45 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';

-SELECT dest1.* FROM dest1;
+SELECT dest1_n45.* FROM dest1_n45;
diff --git a/ql/src/test/queries/clientpositive/input_part2.q b/ql/src/test/queries/clientpositive/input_part2.q
index 9df97a7156..67f064f68e 100644
--- a/ql/src/test/queries/clientpositive/input_part2.q
+++ b/ql/src/test/queries/clientpositive/input_part2.q
@@ -1,19 +1,19 @@
 --! qt:dataset:srcpart
-CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
-CREATE TABLE dest2(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n84(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
+CREATE TABLE dest2_n20(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;

 -- SORT_QUERY_RESULTS

 EXPLAIN EXTENDED
 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
+INSERT OVERWRITE TABLE dest1_n84 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest2_n20 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';

 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
+INSERT OVERWRITE TABLE dest1_n84 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest2_n20 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';

-SELECT dest1.* FROM dest1 sort by key,value,ds,hr;
-SELECT dest2.* FROM dest2 sort by key,value,ds,hr;
+SELECT dest1_n84.* FROM dest1_n84 sort by key,value,ds,hr;
+SELECT dest2_n20.* FROM dest2_n20 sort by key,value,ds,hr;
diff --git a/ql/src/test/queries/clientpositive/input_part5.q b/ql/src/test/queries/clientpositive/input_part5.q
index 62d76cbfe8..6ded831c60 100644
--- a/ql/src/test/queries/clientpositive/input_part5.q
+++ b/ql/src/test/queries/clientpositive/input_part5.q
@@ -1,14 +1,14 @@
 --! qt:dataset:srcpart
 set hive.mapred.mode=nonstrict;
-create table tmptable(key string, value string, hr string, ds string);
+create table tmptable_n2(key string, value string, hr string, ds string);

 EXPLAIN
-insert overwrite table tmptable
+insert overwrite table tmptable_n2
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100;

-insert overwrite table tmptable
+insert overwrite table tmptable_n2
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100;

-select * from tmptable x sort by x.key,x.value,x.ds,x.hr;
+select * from tmptable_n2 x sort by x.key,x.value,x.ds,x.hr;
diff --git a/ql/src/test/queries/clientpositive/input_testxpath.q b/ql/src/test/queries/clientpositive/input_testxpath.q
old mode 100755
new mode 100644
index 368feb6f97..ddbbed49fa
--- a/ql/src/test/queries/clientpositive/input_testxpath.q
+++ b/ql/src/test/queries/clientpositive/input_testxpath.q
@@ -1,11 +1,11 @@
 --! qt:dataset:src_thrift
-CREATE TABLE dest1(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n6(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE;

 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];

 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];

-SELECT dest1.* FROM dest1;
+SELECT dest1_n6.* FROM dest1_n6;
diff --git a/ql/src/test/queries/clientpositive/input_testxpath2.q b/ql/src/test/queries/clientpositive/input_testxpath2.q
index 7c7b1fe579..bdb1a8c379 100644
--- a/ql/src/test/queries/clientpositive/input_testxpath2.q
+++ b/ql/src/test/queries/clientpositive/input_testxpath2.q
@@ -1,11 +1,11 @@
 --! qt:dataset:src_thrift
-CREATE TABLE dest1(lint_size INT, lintstring_size INT, mstringstring_size INT) STORED AS TEXTFILE;
+CREATE TABLE dest1_n32(lint_size INT, lintstring_size INT, mstringstring_size INT) STORED AS TEXTFILE;

 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
+INSERT OVERWRITE TABLE dest1_n32 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);

 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
+INSERT OVERWRITE TABLE dest1_n32 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);

-SELECT dest1.* FROM dest1;
+SELECT dest1_n32.* FROM dest1_n32;
diff --git a/ql/src/test/queries/clientpositive/inputddl7.q b/ql/src/test/queries/clientpositive/inputddl7.q
index 27e587a283..6f775c1325 100644
--- a/ql/src/test/queries/clientpositive/inputddl7.q
+++ b/ql/src/test/queries/clientpositive/inputddl7.q
@@ -2,29 +2,29 @@
 -- test for loading into partitions with the correct file format

-CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
-SELECT COUNT(1) FROM T1;
+CREATE TABLE T1_n117(name STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n117;
+SELECT COUNT(1) FROM T1_n117;

-CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2;
-SELECT COUNT(1) FROM T2;
+CREATE TABLE T2_n69(name STRING) STORED AS SEQUENCEFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2_n69;
+SELECT COUNT(1) FROM T2_n69;

-CREATE TABLE T3(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3 PARTITION (ds='2008-04-09');
-SELECT COUNT(1) FROM T3 where T3.ds='2008-04-09';
+CREATE TABLE T3_n25(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3_n25 PARTITION (ds='2008-04-09');
+SELECT COUNT(1) FROM T3_n25 where T3_n25.ds='2008-04-09';

-CREATE TABLE T4(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4 PARTITION (ds='2008-04-09');
-SELECT COUNT(1) FROM T4 where T4.ds='2008-04-09';
+CREATE TABLE T4_n14(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4_n14 PARTITION (ds='2008-04-09');
+SELECT COUNT(1) FROM T4_n14 where T4_n14.ds='2008-04-09';

-DESCRIBE EXTENDED T1;
-DESCRIBE EXTENDED T2;
-DESCRIBE EXTENDED T3 PARTITION (ds='2008-04-09');
-DESCRIBE EXTENDED T4 PARTITION (ds='2008-04-09');
+DESCRIBE EXTENDED T1_n117;
+DESCRIBE EXTENDED T2_n69;
+DESCRIBE EXTENDED T3_n25 PARTITION (ds='2008-04-09');
+DESCRIBE EXTENDED T4_n14 PARTITION (ds='2008-04-09');
diff --git a/ql/src/test/queries/clientpositive/insert0.q b/ql/src/test/queries/clientpositive/insert0.q
index 85f73a0629..fcb33adb68 100644
--- a/ql/src/test/queries/clientpositive/insert0.q
+++ b/ql/src/test/queries/clientpositive/insert0.q
@@ -2,19 +2,19 @@ set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;

-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n1;
 DROP TABLE ctas_table;
 DROP TABLE ctas_part;

-CREATE TABLE insert_into1 (key int, value string);
+CREATE TABLE insert_into1_n1 (key int, value string);

-INSERT OVERWRITE TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 10;
+INSERT OVERWRITE TABLE insert_into1_n1 SELECT * from src ORDER BY key LIMIT 10;

-select * from insert_into1 order by key;
+select * from insert_into1_n1 order by key;

-INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key DESC LIMIT 10;
+INSERT INTO TABLE insert_into1_n1 SELECT * from src ORDER BY key DESC LIMIT 10;

-select * from insert_into1 order by key;
+select * from insert_into1_n1 order by key;

 create table ctas_table as
 SELECT key, count(value) as foo from src GROUP BY key LIMIT 10;
@@ -35,6 +35,6 @@ select * from ctas_part order by key;

-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n1;
 DROP TABLE ctas_table;
 DROP TABLE ctas_part;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/insert1.q b/ql/src/test/queries/clientpositive/insert1.q
index 91daf7efb3..f9accc05bb 100644
--- a/ql/src/test/queries/clientpositive/insert1.q
+++ b/ql/src/test/queries/clientpositive/insert1.q
@@ -33,7 +33,7 @@ SELECT * FROM result;

 USE default;
 CREATE DATABASE db1;
-CREATE TABLE db1.result(col1 STRING);
-INSERT OVERWRITE TABLE db1.result SELECT 'db1_insert1' FROM src LIMIT 1;
-INSERT INTO TABLE db1.result SELECT 'db1_insert2' FROM src LIMIT 1;
-SELECT * FROM db1.result;
+CREATE TABLE db1.result_n0(col1 STRING);
+INSERT OVERWRITE TABLE db1.result_n0 SELECT 'db1_insert1' FROM src LIMIT 1;
+INSERT INTO TABLE db1.result_n0 SELECT 'db1_insert2' FROM src LIMIT 1;
+SELECT * FROM db1.result_n0;
diff --git a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
index b4058d79c5..612b227704 100644
--- a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
+++ b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
@@ -3,8 +3,8 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

-create table acid_notbucketed(a int, b varchar(128)) stored as orc;
+create table acid_notbucketed_n0(a int, b varchar(128)) stored as orc;

-insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
+insert into table acid_notbucketed_n0 select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;

-select * from acid_notbucketed;
+select * from acid_notbucketed_n0;
diff --git a/ql/src/test/queries/clientpositive/insert_into_default_keyword.q b/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
index 2e92e91cd9..ebef1a4ff2 100644
--- a/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
+++ b/ql/src/test/queries/clientpositive/insert_into_default_keyword.q
@@ -2,102 +2,102 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

 -- SORT_QUERY_RESULTS

-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n0;

 -- No default constraint
-CREATE TABLE insert_into1 (key int, value string)
+CREATE TABLE insert_into1_n0 (key int, value string)
 clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');

-EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT);
-INSERT INTO TABLE insert_into1 values(default, DEFAULT);
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0 values(default, DEFAULT);
+INSERT INTO TABLE insert_into1_n0 values(default, DEFAULT);
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

 -- should be able to use any case for DEFAULT
-EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt);
-INSERT INTO TABLE insert_into1 values(234, dEfAULt);
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt);
+INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt);
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

 -- multi values
-explain insert into insert_into1 values(default, 3),(2,default);
-insert into insert_into1 values(default, 3),(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+explain insert into insert_into1_n0 values(default, 3),(2,default);
+insert into insert_into1_n0 values(default, 3),(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

 --with column schema
-EXPLAIN INSERT INTO TABLE insert_into1(key) values(default);
-INSERT INTO TABLE insert_into1(key) values(default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key) values(default);
+INSERT INTO TABLE insert_into1_n0(key) values(default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

-EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default);
-INSERT INTO TABLE insert_into1(key, value) values(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key, value) values(2,default);
+INSERT INTO TABLE insert_into1_n0(key, value) values(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n0;

 -- with default constraint
-CREATE TABLE insert_into1 (key int DEFAULT 1, value string)
+CREATE TABLE insert_into1_n0 (key int DEFAULT 1, value string)
 clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');

-EXPLAIN INSERT INTO TABLE insert_into1 values(default, DEFAULT);
-INSERT INTO TABLE insert_into1 values(default, DEFAULT);
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0 values(default, DEFAULT);
+INSERT INTO TABLE insert_into1_n0 values(default, DEFAULT);
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

 -- should be able to use any case for DEFAULT
-EXPLAIN INSERT INTO TABLE insert_into1 values(234, dEfAULt);
-INSERT INTO TABLE insert_into1 values(234, dEfAULt);
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt);
+INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt);
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

 -- multi values
-explain insert into insert_into1 values(default, 3),(2,default);
-insert into insert_into1 values(default, 3),(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+explain insert into insert_into1_n0 values(default, 3),(2,default);
+insert into insert_into1_n0 values(default, 3),(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

 --with column schema
-EXPLAIN INSERT INTO TABLE insert_into1(key) values(default);
-INSERT INTO TABLE insert_into1(key) values(default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key) values(default);
+INSERT INTO TABLE insert_into1_n0(key) values(default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

-EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default);
-INSERT INTO TABLE insert_into1(key, value) values(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key, value) values(2,default);
+INSERT INTO TABLE insert_into1_n0(key, value) values(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

-EXPLAIN INSERT INTO TABLE insert_into1(value, key) values(2,default);
-INSERT INTO TABLE insert_into1(value, key) values(2,default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(value, key) values(2,default);
+INSERT INTO TABLE insert_into1_n0(value, key) values(2,default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

-EXPLAIN INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default);
-INSERT INTO TABLE insert_into1(key, value) values(2,default),(DEFAULT, default);
-select * from insert_into1;
-TRUNCATE table insert_into1;
-DROP TABLE insert_into1;
+EXPLAIN INSERT INTO TABLE insert_into1_n0(key, value) values(2,default),(DEFAULT, default);
+INSERT INTO TABLE insert_into1_n0(key, value) values(2,default),(DEFAULT, default);
+select * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;
+DROP TABLE insert_into1_n0;

 -- UPDATE
-CREATE TABLE insert_into1 (key int DEFAULT 1, value string, i int)
+CREATE TABLE insert_into1_n0 (key int DEFAULT 1, value string, i int)
 clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-INSERT INTO insert_into1 values(2,1, 45);
-EXPLAIN UPDATE insert_into1 set key = DEFAULT where value=1;
-UPDATE insert_into1 set key = DEFAULT where value=1;
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+INSERT INTO insert_into1_n0 values(2,1, 45);
+EXPLAIN UPDATE insert_into1_n0 set key = DEFAULT where value=1;
+UPDATE insert_into1_n0 set key = DEFAULT where value=1;
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

-INSERT INTO insert_into1 values(2,1, 45);
-EXPLAIN UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1;
-UPDATE insert_into1 set key = DEFAULT, value=DEFAULT where value=1;
-SELECT * from insert_into1;
-TRUNCATE table insert_into1;
+INSERT INTO insert_into1_n0 values(2,1, 45);
+EXPLAIN UPDATE insert_into1_n0 set key = DEFAULT, value=DEFAULT where value=1;
+UPDATE insert_into1_n0 set key = DEFAULT, value=DEFAULT where value=1;
+SELECT * from insert_into1_n0;
+TRUNCATE table insert_into1_n0;

-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n0;

 -- partitioned table
 CREATE TABLE tpart(i int, j int DEFAULT 1001) partitioned by (ds string);
@@ -120,9 +120,9 @@ set hive.mapred.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

-create table nonacid (key int, a1 string, value string) stored as orc;
-insert into nonacid values(1, 'a11', 'val1');
-insert into nonacid values(2, 'a12', 'val2');
+create table nonacid_n1 (key int, a1 string, value string) stored as orc;
+insert into nonacid_n1 values(1, 'a11', 'val1');
+insert into nonacid_n1 values(2, 'a12', 'val2');

 create table acidTable(key int NOT NULL enable, a1 string DEFAULT 'a1', value string)
 clustered by (value) into 2 buckets stored as orc
@@ -130,21 +130,21 @@ tblproperties ("transactional"="true");
 insert into acidTable values(1, 'a10','val100');

 -- only insert
-explain MERGE INTO acidTable as t using nonacid as s ON t.key = s.key
+explain MERGE INTO acidTable as t using nonacid_n1 as s ON t.key = s.key
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, DEFAULT, DEFAULT);
-MERGE INTO acidTable as t using nonacid as s ON t.key = s.key
+MERGE INTO acidTable as t using nonacid_n1 as s ON t.key = s.key
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, DEFAULT, DEFAULT);
 select * from acidTable;
 truncate table acidTable;

 insert into acidTable values(1, 'a10','val100');
 -- insert + update + delete
-explain MERGE INTO acidTable as t using nonacid as s ON t.key = s.key
+explain MERGE INTO acidTable as t using nonacid_n1 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 3 THEN DELETE
 WHEN MATCHED AND s.key > 3 THEN UPDATE set a1 = DEFAULT
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, DEFAULT);
-MERGE INTO acidTable as t using nonacid as s ON t.key = s.key
+MERGE INTO acidTable as t using nonacid_n1 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 3 THEN DELETE
 WHEN MATCHED AND s.key > 3 THEN UPDATE set a1 = DEFAULT
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.a1, DEFAULT);
@@ -154,12 +154,12 @@ truncate table acidTable;

 create table acidTable2(key int DEFAULT 404) clustered by (key) into 2 buckets stored as orc
 tblproperties ("transactional"="true");
-explain MERGE INTO acidTable2 as t using nonacid as s ON t.key = s.key
+explain MERGE INTO acidTable2 as t using nonacid_n1 as s ON t.key = s.key
 WHEN NOT MATCHED THEN INSERT VALUES (DEFAULT);
-MERGE INTO acidTable2 as t using nonacid as s ON t.key = s.key
+MERGE INTO acidTable2 as t using nonacid_n1 as s ON t.key = s.key
 WHEN NOT MATCHED THEN INSERT VALUES (DEFAULT);
 select * from acidTable2;

 DROP TABLE acidTable;
 DROP TABLE acidTable2;
-DROP TABLE nonacid;
\ No newline at end of file
+DROP TABLE nonacid_n1;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/insert_into_with_schema.q b/ql/src/test/queries/clientpositive/insert_into_with_schema.q
index b2d9d06eba..21f16504f8 100644
--- a/ql/src/test/queries/clientpositive/insert_into_with_schema.q
+++ b/ql/src/test/queries/clientpositive/insert_into_with_schema.q
@@ -4,40 +4,40 @@ set hive.mapred.mode=nonstrict;

 drop database if exists x314 cascade;
 create database x314;
 use x314;
-create table source(s1 int, s2 int);
+create table source_n0(s1 int, s2 int);
 create table target1(x int, y int, z int);
 create table target2(x int, y int, z int);
 create table target3(x int, y int, z int);

-insert into source(s2,s1) values(2,1);
--- expect source to contain 1 row (1,2)
-select * from source;
-insert into target1(z,x) select * from source;
+insert into source_n0(s2,s1) values(2,1);
+-- expect source_n0 to contain 1 row (1,2)
+select * from source_n0;
+insert into target1(z,x) select * from source_n0;
 -- expect target1 to contain 1 row (2,NULL,1)
 select * from target1;

 -- note that schema spec for target1 and target2 are different
-from source insert into target1(x,y) select * insert into target2(x,z) select s2,s1;
+from source_n0 insert into target1(x,y) select * insert into target2(x,z) select s2,s1;
 --expect target1 to have 2rows (2,NULL,1), (1,2,NULL)
 select * from target1 order by x,y,z;
 -- expect target2 to have 1 row: (2,NULL,1)
 select * from target2;

-from source insert into target1(x,y,z) select null as x, * insert into target2(x,y,z) select null as x, source.*;
+from source_n0 insert into target1(x,y,z) select null as x, * insert into target2(x,y,z) select null as x, source_n0.*;
 -- expect target1 to have 3 rows: (2,NULL,1), (1,2,NULL), (NULL, 1,2)
 select * from target1 order by x,y,z;
 -- expect target2 to have 2 rows: (2,NULL,1), (NULL, 1,2)
 select * from target2 order by x,y,z;

 create table source2(s1 int, s2 int);
-insert into target3 (x,z) select source.s1,source2.s2 from source left outer join source2 on source.s1=source2.s2;
+insert into target3 (x,z) select source_n0.s1,source2.s2 from source_n0 left outer join source2 on source_n0.s1=source2.s2;
 --expect target3 to have 1 row (1,NULL,NULL)
 select * from target3;

 -- partitioned tables
-CREATE TABLE pageviews (userid VARCHAR(64), link STRING, source STRING) PARTITIONED BY (datestamp STRING, i int) CLUSTERED BY (userid) INTO 4 BUCKETS STORED AS ORC;
+CREATE TABLE pageviews (userid VARCHAR(64), link STRING, source_n0 STRING) PARTITIONED BY (datestamp STRING, i int) CLUSTERED BY (userid) INTO 4 BUCKETS STORED AS ORC;
 INSERT INTO TABLE pageviews PARTITION (datestamp = '2014-09-23', i = 1)(userid,link) VALUES ('jsmith', 'mail.com');
 -- expect 1 row: ('jsmith', 'mail.com', NULL) in partition '2014-09-23'/'1'
 select * from pageviews;
diff --git a/ql/src/test/queries/clientpositive/insert_into_with_schema2.q b/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
index 032e6ae345..9d6fb29fc0 100644
--- a/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
+++ b/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
@@ -3,17 +3,17 @@ set hive.mapred.mode=nonstrict;

-create table studenttab10k (age2 int);
-insert into studenttab10k values(1);
+create table studenttab10k_n0 (age2 int);
+insert into studenttab10k_n0 values(1);

 create table student_acid (age int, grade int)
 clustered by (age) into 1 buckets;

-insert into student_acid(age) select * from studenttab10k;
+insert into student_acid(age) select * from studenttab10k_n0;

 select * from student_acid;

-insert into student_acid(grade, age) select 3 g, * from studenttab10k;
+insert into student_acid(grade, age) select 3 g, * from studenttab10k_n0;

 select * from student_acid;
diff --git a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
index 10a1d68982..90612a65f1 100644
--- a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
+++ b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
@@ -9,7 +9,7 @@ create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered b
 insert into table sample_06 values ('aaa', 35, 3.00), ('bbb', 32, 3.00), ('ccc', 32, 3.00), ('ddd', 35, 3.00), ('eee', 32, 3.00);
 select * from sample_06 where gpa = 3.00;

-create table tab1 (name varchar(50), age int, gpa decimal(3, 2));
-insert into table tab1 select * from sample_06 where gpa = 3.00;
-select * from tab1;
+create table tab1_n2 (name varchar(50), age int, gpa decimal(3, 2));
+insert into table tab1_n2 select * from sample_06 where gpa = 3.00;
+select * from tab1_n2;
diff --git a/ql/src/test/queries/clientpositive/insert_overwrite_directory.q b/ql/src/test/queries/clientpositive/insert_overwrite_directory.q
index 1180589001..15a00f3708 100644
--- a/ql/src/test/queries/clientpositive/insert_overwrite_directory.q
+++ b/ql/src/test/queries/clientpositive/insert_overwrite_directory.q
@@ -10,22 +10,22 @@ select * from src ;

 dfs -cat ../../data/files/src_table_2/000000_0;

-create table array_table (a array<string>, b array<int>)
+create table array_table_n1 (a array<string>, b array<int>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ',';
inpath "../../data/files/array_table.txt" overwrite into table array_table; +load data local inpath "../../data/files/array_table.txt" overwrite into table array_table_n1; insert overwrite directory '../../data/files/array_table_1' -select * from array_table; +select * from array_table_n1; dfs -cat ../../data/files/array_table_1/000000_0; insert overwrite directory '../../data/files/array_table_2' ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' -select * from array_table; +select * from array_table_n1; dfs -cat ../../data/files/array_table_2/000000_0; @@ -33,22 +33,22 @@ insert overwrite directory '../../data/files/array_table_2_withfields' ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' -select b,a from array_table; +select b,a from array_table_n1; dfs -cat ../../data/files/array_table_2_withfields/000000_0; -create table map_table (foo STRING , bar MAP) +create table map_table_n2 (foo STRING , bar MAP) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' STORED AS TEXTFILE; -load data local inpath "../../data/files/map_table.txt" overwrite into table map_table; +load data local inpath "../../data/files/map_table.txt" overwrite into table map_table_n2; insert overwrite directory '../../data/files/map_table_1' -select * from map_table; +select * from map_table_n2; dfs -cat ../../data/files/map_table_1/000000_0; insert overwrite directory '../../data/files/map_table_2' @@ -56,7 +56,7 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' MAP KEYS TERMINATED BY '=' -select * from map_table; +select * from map_table_n2; dfs -cat ../../data/files/map_table_2/000000_0; @@ -65,14 +65,14 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' MAP KEYS TERMINATED BY '=' -select bar,foo from map_table; +select bar,foo from map_table_n2; dfs -cat ../../data/files/map_table_2_withfields/000000_0; insert overwrite directory '../../data/files/array_table_3' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe' STORED AS TEXTFILE -select * from array_table; +select * from array_table_n1; dfs -cat ../../data/files/array_table_3/000000_0; @@ -83,14 +83,14 @@ WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '(\"|\\[|\\])', 'field.delim'=',', 'serialization.null.format'='-NA-', 'collection.delim'='#') STORED AS TEXTFILE -select a, null, b from array_table; +select a, null, b from array_table_n1; dfs -cat ../../data/files/array_table_4/000000_0; insert overwrite directory '../../data/files/map_table_3' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe' STORED AS TEXTFILE -select * from map_table; +select * from map_table_n2; dfs -cat ../../data/files/map_table_3/000000_0; @@ -100,7 +100,7 @@ WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '(\"|\\[|\\])', 'field.delim'=':', 'serialization.null.format'='-NA-', 'collection.delim'='#', 'mapkey.delim'='%') STORED AS TEXTFILE -select foo, null, bar from map_table; +select foo, null, bar from map_table_n2; dfs -cat ../../data/files/map_table_4/000000_0; @@ -125,8 +125,8 @@ select key,value from rctable; dfs -cat ../../data/files/rctable_out/000000_0; drop table rctable; -drop table array_table; -drop table map_table; +drop table array_table_n1; +drop table map_table_n2; dfs -rmr ${system:test.tmp.dir}/rctable; 
 dfs -rmr ../../data/files/array_table_1;
 dfs -rmr ../../data/files/array_table_2;
diff --git a/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q b/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q
index b58fb44c94..5c535f731d 100644
--- a/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q
+++ b/ql/src/test/queries/clientpositive/insert_overwrite_directory2.q
@@ -1,18 +1,18 @@
 --! qt:dataset:src
-create external table result(key string) location "${system:test.tmp.dir}/result";
+create external table result_n0(key string) location "${system:test.tmp.dir}/result";

 set mapreduce.job.reduces=2;

 insert overwrite directory "${system:test.tmp.dir}/result"
 select key from src group by key;

-select count(*) from result;
+select count(*) from result_n0;

 set mapreduce.job.reduces=1;

 insert overwrite directory "${system:test.tmp.dir}/result"
 select key from src group by key;

-select count(*) from result;
+select count(*) from result_n0;

-drop table result;
\ No newline at end of file
+drop table result_n0;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q b/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
index 9fd8d151d0..ce7912c187 100644
--- a/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
+++ b/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
@@ -10,22 +10,22 @@ select * from src ;

 dfs -cat ../../data/files/local_src_table_2/000000_0;

-create table array_table (a array<string>, b array<int>)
+create table array_table_n0 (a array<string>, b array<int>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ',';

-load data local inpath "../../data/files/array_table.txt" overwrite into table array_table;
+load data local inpath "../../data/files/array_table.txt" overwrite into table array_table_n0;

 insert overwrite local directory '../../data/files/local_array_table_1'
-select * from array_table;
+select * from array_table_n0;

 dfs -cat ../../data/files/local_array_table_1/000000_0;

 insert overwrite local directory '../../data/files/local_array_table_2'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select * from array_table;
+select * from array_table_n0;

 dfs -cat ../../data/files/local_array_table_2/000000_0;
@@ -33,22 +33,22 @@ insert overwrite local directory '../../data/files/local_array_table_2_withfield
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select b,a from array_table;
+select b,a from array_table_n0;

 dfs -cat ../../data/files/local_array_table_2_withfields/000000_0;

-create table map_table (foo STRING , bar MAP<STRING, INT>)
+create table map_table_n1 (foo STRING , bar MAP<STRING, INT>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ','
 MAP KEYS TERMINATED BY ':'
 STORED AS TEXTFILE;

-load data local inpath "../../data/files/map_table.txt" overwrite into table map_table;
+load data local inpath "../../data/files/map_table.txt" overwrite into table map_table_n1;

 insert overwrite local directory '../../data/files/local_map_table_1'
-select * from map_table;
+select * from map_table_n1;

 dfs -cat ../../data/files/local_map_table_1/000000_0;

 insert overwrite local directory '../../data/files/local_map_table_2'
@@ -56,7 +56,7 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select * from map_table;
+select * from map_table_n1;

 dfs -cat ../../data/files/local_map_table_2/000000_0;
@@ -65,21 +65,21 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select bar,foo from map_table;
+select bar,foo from map_table_n1;

 dfs -cat ../../data/files/local_map_table_2_withfields/000000_0;

 insert overwrite local directory '../../data/files/local_array_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from array_table;
+select * from array_table_n0;

 dfs -cat ../../data/files/local_array_table_3/000000_0;

 insert overwrite local directory '../../data/files/local_map_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from map_table;
+select * from map_table_n1;

 dfs -cat ../../data/files/local_map_table_3/000000_0;
@@ -104,7 +104,7 @@ select key,value from local_rctable;

 dfs -cat ../../data/files/local_rctable_out/000000_0;

 drop table local_rctable;
-drop table array_table;
-drop table map_table;
+drop table array_table_n0;
+drop table map_table_n1;

 dfs -rmr ${system:test.tmp.dir}/local_rctable;
diff --git a/ql/src/test/queries/clientpositive/insert_values_orig_table.q b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
index a0fce90fc2..92a2df4db4 100644
--- a/ql/src/test/queries/clientpositive/insert_values_orig_table.q
+++ b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
@@ -3,8 +3,8 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;

-drop table if exists acid_ivot_stage;
-create table acid_ivot_stage(
+drop table if exists acid_ivot_stage_n0;
+create table acid_ivot_stage_n0(
 ctinyint TINYINT,
 csmallint SMALLINT,
 cint INT,
@@ -17,9 +17,9 @@ create table acid_ivot_stage(
 ctimestamp2 TIMESTAMP,
 cboolean1 BOOLEAN,
 cboolean2 BOOLEAN) stored as orc;
-LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot_stage;
+LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot_stage_n0;

-create table acid_ivot(
+create table acid_ivot_n0(
 ctinyint TINYINT,
 csmallint SMALLINT,
 cint INT,
@@ -33,13 +33,13 @@ create table acid_ivot(
 cboolean1 BOOLEAN,
 cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true');

-insert into acid_ivot select * from acid_ivot_stage;
+insert into acid_ivot_n0 select * from acid_ivot_stage_n0;

-select count(*) from acid_ivot;
+select count(*) from acid_ivot_n0;

-insert into table acid_ivot values
+insert into table acid_ivot_n0 values
 (1, 2, 3, 4, 3.14, 2.34, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true),
 (111, 222, 3333, 444, 13.14, 10239302.34239320, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true);

-select count(*) from acid_ivot;
+select count(*) from acid_ivot_n0;
diff --git a/ql/src/test/queries/clientpositive/insertexternal1.q b/ql/src/test/queries/clientpositive/insertexternal1.q
index 2e938b085c..6e3a235bc7 100644
--- a/ql/src/test/queries/clientpositive/insertexternal1.q
+++ b/ql/src/test/queries/clientpositive/insertexternal1.q
@@ -1,15 +1,15 @@
 --! qt:dataset:src
-create table texternal(key string, val string) partitioned by (insertdate string);
+create table texternal_n0(key string, val string) partitioned by (insertdate string);

 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/texternal/temp;
 dfs -rmr ${system:test.tmp.dir}/texternal;
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/texternal/2008-01-01;

-alter table texternal add partition (insertdate='2008-01-01') location 'pfile://${system:test.tmp.dir}/texternal/2008-01-01';
-from src insert overwrite table texternal partition (insertdate='2008-01-01') select *;
+alter table texternal_n0 add partition (insertdate='2008-01-01') location 'pfile://${system:test.tmp.dir}/texternal/2008-01-01';
+from src insert overwrite table texternal_n0 partition (insertdate='2008-01-01') select *;

-select * from texternal where insertdate='2008-01-01';
+select * from texternal_n0 where insertdate='2008-01-01';

 dfs -rmr ${system:test.tmp.dir}/texternal;
diff --git a/ql/src/test/queries/clientpositive/intersect_all.q b/ql/src/test/queries/clientpositive/intersect_all.q
index 35033bef9c..f97a4bcc34 100644
--- a/ql/src/test/queries/clientpositive/intersect_all.q
+++ b/ql/src/test/queries/clientpositive/intersect_all.q
@@ -2,33 +2,33 @@ set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;

-create table a(key int, value int);
+create table a_n10(key int, value int);

-insert into table a values (1,2),(1,2),(1,3),(2,3);
+insert into table a_n10 values (1,2),(1,2),(1,3),(2,3);

-create table b(key int, value int);
+create table b_n8(key int, value int);

-insert into table b values (1,2),(2,3);
+insert into table b_n8 values (1,2),(2,3);

-select key, value, count(1) as c from a group by key, value;
+select key, value, count(1) as c from a_n10 group by key, value;

-select * from a intersect all select * from b;
+select * from a_n10 intersect all select * from b_n8;

-select * from b intersect all select * from a intersect all select * from b;
+select * from b_n8 intersect all select * from a_n10 intersect all select * from b_n8;

-select * from a intersect all select * from b union all select * from a intersect all select * from b;
+select * from a_n10 intersect all select * from b_n8 union all select * from a_n10 intersect all select * from b_n8;

-select * from a intersect all select * from b union select * from a intersect all select * from b;
+select * from a_n10 intersect all select * from b_n8 union select * from a_n10 intersect all select * from b_n8;

-select * from a intersect all select * from b intersect all select * from a intersect all select * from b;
+select * from a_n10 intersect all select * from b_n8 intersect all select * from a_n10 intersect all select * from b_n8;

-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n10.key, b_n8.value from a_n10 join b_n8 on a_n10.key=b_n8.key)sub1
 intersect all
-select * from (select a.key, b.value from a join b on a.key=b.key)sub2;
+select * from (select a_n10.key, b_n8.value from a_n10 join b_n8 on a_n10.key=b_n8.key)sub2;

-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n10.key, b_n8.value from a_n10 join b_n8 on a_n10.key=b_n8.key)sub1
 intersect all
-select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2;
+select * from (select b_n8.value as key, a_n10.key as value from a_n10 join b_n8 on a_n10.key=b_n8.key)sub2;

 explain select * from src intersect all select * from src;
@@ -38,6 +38,6 @@ explain select * from src intersect all select * from src intersect all select *
 select * from src intersect all select * from src intersect all select * from src intersect all select * from src;

-explain select value from a group by value intersect all select key from b group by key;
+explain select value from a_n10 group by value intersect all select key from b_n8 group by key;

-select value from a group by value intersect all select key from b group by key;
+select value from a_n10 group by value intersect all select key from b_n8 group by key;
diff --git a/ql/src/test/queries/clientpositive/intersect_distinct.q b/ql/src/test/queries/clientpositive/intersect_distinct.q
index 78b515d011..221c2f7983 100644
--- a/ql/src/test/queries/clientpositive/intersect_distinct.q
+++ b/ql/src/test/queries/clientpositive/intersect_distinct.q
@@ -2,33 +2,33 @@ set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;

-create table a(key int, value int);
+create table a_n17(key int, value int);

-insert into table a values (1,2),(1,2),(1,3),(2,3);
+insert into table a_n17 values (1,2),(1,2),(1,3),(2,3);

-create table b(key int, value int);
+create table b_n13(key int, value int);

-insert into table b values (1,2),(2,3);
+insert into table b_n13 values (1,2),(2,3);

-select key, count(1) as c from a group by key intersect all select value, max(key) as c from b group by value;
+select key, count(1) as c from a_n17 group by key intersect all select value, max(key) as c from b_n13 group by value;

-select * from a intersect distinct select * from b;
+select * from a_n17 intersect distinct select * from b_n13;

-select * from b intersect distinct select * from a intersect distinct select * from b;
+select * from b_n13 intersect distinct select * from a_n17 intersect distinct select * from b_n13;

-select * from a intersect distinct select * from b union all select * from a intersect distinct select * from b;
+select * from a_n17 intersect distinct select * from b_n13 union all select * from a_n17 intersect distinct select * from b_n13;

-select * from a intersect distinct select * from b union select * from a intersect distinct select * from b;
+select * from a_n17 intersect distinct select * from b_n13 union select * from a_n17 intersect distinct select * from b_n13;

-select * from a intersect distinct select * from b intersect distinct select * from a intersect distinct select * from b;
+select * from a_n17 intersect distinct select * from b_n13 intersect distinct select * from a_n17 intersect distinct select * from b_n13;

-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n17.key, b_n13.value from a_n17 join b_n13 on a_n17.key=b_n13.key)sub1
 intersect distinct
-select * from (select a.key, b.value from a join b on a.key=b.key)sub2;
+select * from (select a_n17.key, b_n13.value from a_n17 join b_n13 on a_n17.key=b_n13.key)sub2;

-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n17.key, b_n13.value from a_n17 join b_n13 on a_n17.key=b_n13.key)sub1
 intersect distinct
-select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2;
+select * from (select b_n13.value as key, a_n17.key as value from a_n17 join b_n13 on a_n17.key=b_n13.key)sub2;

 explain select * from src intersect distinct select * from src;
@@ -38,6 +38,6 @@ explain select * from src intersect distinct select * from src intersect distinc
 select * from src intersect distinct select * from src intersect distinct select * from src intersect distinct select * from src;

-explain select value from a group by value intersect distinct select key from b group by key;
+explain select value from a_n17 group by value intersect distinct select key from b_n13 group by key;

-select value from a group by value intersect distinct select key from b group by key;
+select value from a_n17 group by value intersect distinct select key from b_n13 group by key;
diff --git a/ql/src/test/queries/clientpositive/intersect_merge.q b/ql/src/test/queries/clientpositive/intersect_merge.q
index 0d8789e7c9..7fd12671d9 100644
--- a/ql/src/test/queries/clientpositive/intersect_merge.q
+++ b/ql/src/test/queries/clientpositive/intersect_merge.q
@@ -1,27 +1,27 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;

-create table a(key int, value int);
+create table a_n7(key int, value int);

-insert into table a values (1,2),(1,2),(1,3),(2,3);
+insert into table a_n7 values (1,2),(1,2),(1,3),(2,3);

-create table b(key int, value int);
+create table b_n5(key int, value int);

-insert into table b values (1,2),(2,3);
+insert into table b_n5 values (1,2),(2,3);

-explain select * from b intersect distinct select * from a intersect distinct select * from b intersect distinct select * from a intersect distinct select * from b;
+explain select * from b_n5 intersect distinct select * from a_n7 intersect distinct select * from b_n5 intersect distinct select * from a_n7 intersect distinct select * from b_n5;

-explain (select * from b intersect distinct select * from a) intersect distinct (select * from b intersect distinct select * from a);
+explain (select * from b_n5 intersect distinct select * from a_n7) intersect distinct (select * from b_n5 intersect distinct select * from a_n7);

-explain select * from b intersect distinct (select * from a intersect distinct (select * from b intersect distinct (select * from a intersect distinct select * from b)));
+explain select * from b_n5 intersect distinct (select * from a_n7 intersect distinct (select * from b_n5 intersect distinct (select * from a_n7 intersect distinct select * from b_n5)));

-explain (((select * from b intersect distinct select * from a) intersect distinct select * from b) intersect distinct select * from a) intersect distinct select * from b;
+explain (((select * from b_n5 intersect distinct select * from a_n7) intersect distinct select * from b_n5) intersect distinct select * from a_n7) intersect distinct select * from b_n5;

-explain select * from b intersect distinct (select * from a intersect distinct select * from b) intersect distinct select * from a intersect distinct select * from b;
+explain select * from b_n5 intersect distinct (select * from a_n7 intersect distinct select * from b_n5) intersect distinct select * from a_n7 intersect distinct select * from b_n5;

-explain select * from b intersect distinct (select * from a intersect all select * from b);
+explain select * from b_n5 intersect distinct (select * from a_n7 intersect all select * from b_n5);

-explain select * from b intersect all (select * from a intersect all select * from b);
+explain select * from b_n5 intersect all (select * from a_n7 intersect all select * from b_n5);

-explain select * from b intersect all (select * from a intersect distinct select * from b);
+explain select * from b_n5 intersect all (select * from a_n7 intersect distinct select * from b_n5);
diff --git a/ql/src/test/queries/clientpositive/interval_alt.q b/ql/src/test/queries/clientpositive/interval_alt.q
index 824e5d3ff7..67117804f5 100644
--- a/ql/src/test/queries/clientpositive/interval_alt.q
+++ b/ql/src/test/queries/clientpositive/interval_alt.q
@@ -21,8 +21,8 @@ select
 select date '2012-01-01' + 30 days;
 select date '2012-01-01' - 30 days;

-create table t (dt int);
-insert into t values (1),(2);
+create table t_n18 (dt int);
+insert into t_n18 values (1),(2);

 -- expressions/columnref
 explain
@@ -31,11 +31,11 @@ select
 date '2012-01-01' - interval (-dt*dt) day,
 date '2012-01-01' + 1 day + '2' days,
 date '2012-01-01' + interval (dt || '-1') year to month
- from t;
+ from t_n18;

 select
 date '2012-01-01' + interval (-dt*dt) day,
 date '2012-01-01' - interval (-dt*dt) day,
 date '2012-01-01' + 1 day + '2' days,
 date '2012-01-01' + interval (dt || '-1') year to month
- from t;
+ from t_n18;
diff --git a/ql/src/test/queries/clientpositive/interval_arithmetic.q b/ql/src/test/queries/clientpositive/interval_arithmetic.q
index 7261311f2c..09b3723390 100644
--- a/ql/src/test/queries/clientpositive/interval_arithmetic.q
+++ b/ql/src/test/queries/clientpositive/interval_arithmetic.q
@@ -1,6 +1,6 @@
 --! qt:dataset:alltypesorc
-create table interval_arithmetic_1 (dateval date, tsval timestamp);
-insert overwrite table interval_arithmetic_1
+create table interval_arithmetic_1_n0 (dateval date, tsval timestamp);
+insert overwrite table interval_arithmetic_1_n0
 select cast(ctimestamp1 as date), ctimestamp1 from alltypesorc;

 -- interval year-month arithmetic
@@ -13,7 +13,7 @@ select
 dateval + interval '-2-2' year to month,
 - interval '2-2' year to month + dateval,
 interval '2-2' year to month + dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;

 select
@@ -24,7 +24,7 @@ select
 dateval + interval '-2-2' year to month,
 - interval '2-2' year to month + dateval,
 interval '2-2' year to month + dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;

 explain
@@ -33,7 +33,7 @@ select
 dateval - date '1999-06-07',
 date '1999-06-07' - dateval,
 dateval - dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;

 select
@@ -41,7 +41,7 @@ select
 dateval - date '1999-06-07',
 date '1999-06-07' - dateval,
 dateval - dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;

 explain
@@ -53,7 +53,7 @@ select
 tsval + interval '-2-2' year to month,
 - interval '2-2' year to month + tsval,
 interval '2-2' year to month + tsval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;

 select
@@ -64,20 +64,20 @@ select
 tsval + interval '-2-2' year to month,
 - interval '2-2' year to month + tsval,
 interval '2-2' year to month + tsval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;

 explain
 select
 interval '2-2' year to month + interval '3-3' year to month,
 interval '2-2' year to month - interval '3-3' year to month
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;

 select
 interval '2-2' year to month + interval '3-3' year to month,
 interval '2-2' year to month - interval '3-3' year to month
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;
@@ -91,7 +91,7 @@ select
 dateval + interval '-99 11:22:33.123456789' day to second,
 -interval '99 11:22:33.123456789' day to second + dateval,
 interval '99 11:22:33.123456789' day to second + dateval
-from interval_arithmetic_1
+from interval_arithmetic_1_n0
 limit 2;

 select
@@ -102,7 +102,7 @@ select
 dateval + interval '-99 11:22:33.123456789' day to second,
 -interval '99 11:22:33.123456789' day to second + dateval,
 interval '99 11:22:33.123456789' day to second + dateval
interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2; explain @@ -112,7 +112,7 @@ select dateval - tsval, tsval - dateval, tsval - tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2; select @@ -121,7 +121,7 @@ select dateval - tsval, tsval - dateval, tsval - tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2; explain @@ -133,7 +133,7 @@ select tsval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + tsval, interval '99 11:22:33.123456789' day to second + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2; select @@ -144,23 +144,23 @@ select tsval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + tsval, interval '99 11:22:33.123456789' day to second + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2; explain select interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second, interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2; select interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second, interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2; explain -select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1 limit 1; -select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1 limit 1; -drop table interval_arithmetic_1; +select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1_n0 limit 1; +select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1_n0 limit 1; +drop table interval_arithmetic_1_n0; diff --git a/ql/src/test/queries/clientpositive/is_distinct_from.q b/ql/src/test/queries/clientpositive/is_distinct_from.q index f135f4eb8a..cf417a7c24 100644 --- a/ql/src/test/queries/clientpositive/is_distinct_from.q +++ b/ql/src/test/queries/clientpositive/is_distinct_from.q @@ -23,19 +23,19 @@ select 1 is not distinct from 1, null is not distinct from null from part; -create table test(x string, y string); -insert into test values ('q', 'q'), ('q', 'w'), (NULL, 'q'), ('q', NULL), (NULL, NULL); -select *, x is not distinct from y, not (x is not distinct from y), (x is distinct from y) = true from test; +create table test_n5(x string, y string); +insert into test_n5 values ('q', 'q'), ('q', 'w'), (NULL, 'q'), ('q', NULL), (NULL, NULL); +select *, x is not distinct from y, not (x is not distinct from y), (x is distinct from y) = true from test_n5; -select *, x||y is not distinct from y||x, not (x||y||x is not distinct from y||x||x) from test; +select *, x||y is not distinct from y||x, not (x||y||x is not distinct from y||x||x) from test_n5; -- where -explain select * from test where y is distinct from null; -select * from test where y is distinct from null; +explain select * from test_n5 where y is distinct from null; +select * from test_n5 where y is distinct from null; 
-explain select * from test where y is not distinct from null; -select * from test where y is not distinct from null; -drop table test; +explain select * from test_n5 where y is not distinct from null; +select * from test_n5 where y is not distinct from null; +drop table test_n5; -- where explain select * from part where p_size is distinct from 2; diff --git a/ql/src/test/queries/clientpositive/join1.q b/ql/src/test/queries/clientpositive/join1.q index 25759a8ec3..a5e01238b7 100644 --- a/ql/src/test/queries/clientpositive/join1.q +++ b/ql/src/test/queries/clientpositive/join1.q @@ -6,13 +6,13 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n15(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n15 SELECT src1.key, src2.value; FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n15 SELECT src1.key, src2.value; -SELECT dest_j1.* FROM dest_j1; +SELECT dest_j1_n15.* FROM dest_j1_n15; diff --git a/ql/src/test/queries/clientpositive/join14.q b/ql/src/test/queries/clientpositive/join14.q index d233b4257d..e0f725c674 100644 --- a/ql/src/test/queries/clientpositive/join14.q +++ b/ql/src/test/queries/clientpositive/join14.q @@ -4,7 +4,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n164(c1 INT, c2 STRING) STORED AS TEXTFILE; set mapreduce.framework.name=yarn; set mapreduce.jobtracker.address=localhost:58; @@ -13,9 +13,9 @@ set hive.exec.mode.local.auto.input.files.max=6; EXPLAIN FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; +INSERT OVERWRITE TABLE dest1_n164 SELECT src.key, srcpart.value; FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; +INSERT OVERWRITE TABLE dest1_n164 SELECT src.key, srcpart.value; -select dest1.* from dest1; +select dest1_n164.* from dest1_n164; diff --git a/ql/src/test/queries/clientpositive/join14_hadoop20.q b/ql/src/test/queries/clientpositive/join14_hadoop20.q index e9107b5d0e..489ad0cba5 100644 --- a/ql/src/test/queries/clientpositive/join14_hadoop20.q +++ b/ql/src/test/queries/clientpositive/join14_hadoop20.q @@ -2,16 +2,16 @@ --! 
qt:dataset:src -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n49(c1 INT, c2 STRING) STORED AS TEXTFILE; set mapred.job.tracker=localhost:58; set hive.exec.mode.local.auto=true; EXPLAIN FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; +INSERT OVERWRITE TABLE dest1_n49 SELECT src.key, srcpart.value; FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value; +INSERT OVERWRITE TABLE dest1_n49 SELECT src.key, srcpart.value; -select dest1.* from dest1; +select dest1_n49.* from dest1_n49; diff --git a/ql/src/test/queries/clientpositive/join17.q b/ql/src/test/queries/clientpositive/join17.q index a62b004e6d..f62fa54599 100644 --- a/ql/src/test/queries/clientpositive/join17.q +++ b/ql/src/test/queries/clientpositive/join17.q @@ -3,13 +3,13 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n121(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE; EXPLAIN EXTENDED FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*; +INSERT OVERWRITE TABLE dest1_n121 SELECT src1.*, src2.*; FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*; +INSERT OVERWRITE TABLE dest1_n121 SELECT src1.*, src2.*; -SELECT dest1.* FROM dest1; +SELECT dest1_n121.* FROM dest1_n121; diff --git a/ql/src/test/queries/clientpositive/join2.q b/ql/src/test/queries/clientpositive/join2.q index 074255bb63..f1416decac 100644 --- a/ql/src/test/queries/clientpositive/join2.q +++ b/ql/src/test/queries/clientpositive/join2.q @@ -5,13 +5,13 @@ set hive.stats.column.autogather=false; set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j2_n2(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest_j2_n2 SELECT src1.key, src3.value; FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest_j2_n2 SELECT src1.key, src3.value; -SELECT dest_j2.* FROM dest_j2; +SELECT dest_j2_n2.* FROM dest_j2_n2; diff --git a/ql/src/test/queries/clientpositive/join25.q b/ql/src/test/queries/clientpositive/join25.q index 18eecf552c..3b888ad0aa 100644 --- a/ql/src/test/queries/clientpositive/join25.q +++ b/ql/src/test/queries/clientpositive/join25.q @@ -2,18 +2,18 @@ --! 
qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n18(key INT, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n18 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.key = y.key); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n18 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.key = y.key); -select * from dest_j1 x; +select * from dest_j1_n18 x; diff --git a/ql/src/test/queries/clientpositive/join26.q b/ql/src/test/queries/clientpositive/join26.q index bc1e7afec6..d2bb97bc03 100644 --- a/ql/src/test/queries/clientpositive/join26.q +++ b/ql/src/test/queries/clientpositive/join26.q @@ -3,21 +3,21 @@ --! qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n10(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n10 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n10 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11); -select * from dest_j1 x; +select * from dest_j1_n10 x; diff --git a/ql/src/test/queries/clientpositive/join27.q b/ql/src/test/queries/clientpositive/join27.q index fb2d89ce77..e1c256046e 100644 --- a/ql/src/test/queries/clientpositive/join27.q +++ b/ql/src/test/queries/clientpositive/join27.q @@ -2,15 +2,15 @@ --! 
qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n2(key INT, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n2 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.value = y.value); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n2 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.value = y.value); -select * from dest_j1; +select * from dest_j1_n2; diff --git a/ql/src/test/queries/clientpositive/join28.q b/ql/src/test/queries/clientpositive/join28.q index 456e21f3ab..868ce9debd 100644 --- a/ql/src/test/queries/clientpositive/join28.q +++ b/ql/src/test/queries/clientpositive/join28.q @@ -4,7 +4,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n11(key STRING, value STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -13,21 +13,21 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- Since the inputs are small, it should be automatically converted to mapjoin EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n11 SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 FROM src1 x JOIN src y ON (x.key = y.key)) subq JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n11 SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 FROM src1 x JOIN src y ON (x.key = y.key)) subq JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11); -select * from dest_j1; +select * from dest_j1_n11; diff --git a/ql/src/test/queries/clientpositive/join29.q b/ql/src/test/queries/clientpositive/join29.q index d86eb03f29..20aadbbc26 100644 --- a/ql/src/test/queries/clientpositive/join29.q +++ b/ql/src/test/queries/clientpositive/join29.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT); +CREATE TABLE dest_j1_n6(key STRING, cnt1 INT, cnt2 INT); set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -12,14 +12,14 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- Since the inputs are small, it should be automatically converted to mapjoin EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n6 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n6 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key); -select * from dest_j1; +select * from dest_j1_n6; diff --git a/ql/src/test/queries/clientpositive/join3.q b/ql/src/test/queries/clientpositive/join3.q index da7cfa1187..ba4b21cbb4 100644 --- a/ql/src/test/queries/clientpositive/join3.q +++ b/ql/src/test/queries/clientpositive/join3.q @@ -3,13 +3,13 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key 
INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n46(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest1_n46 SELECT src1.key, src3.value; FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest1_n46 SELECT src1.key, src3.value; -SELECT dest1.* FROM dest1; +SELECT dest1_n46.* FROM dest1_n46; diff --git a/ql/src/test/queries/clientpositive/join30.q b/ql/src/test/queries/clientpositive/join30.q index ec2bae6bbc..9c0ecafc11 100644 --- a/ql/src/test/queries/clientpositive/join30.q +++ b/ql/src/test/queries/clientpositive/join30.q @@ -2,13 +2,13 @@ --! qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key INT, cnt INT); +CREATE TABLE dest_j1_n0(key INT, cnt INT); set hive.auto.convert.join=true; EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n0 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key; -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n0 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key; -select * from dest_j1; +select * from dest_j1_n0; diff --git a/ql/src/test/queries/clientpositive/join31.q b/ql/src/test/queries/clientpositive/join31.q index 4fbf204c69..208340713c 100644 --- a/ql/src/test/queries/clientpositive/join31.q +++ b/ql/src/test/queries/clientpositive/join31.q @@ -4,7 +4,7 @@ set hive.mapred.mode=nonstrict; set hive.optimize.semijoin.conversion=true; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, cnt INT); +CREATE TABLE dest_j1_n22(key STRING, cnt INT); set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -13,16 +13,16 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- Since the inputs are small, it should be automatically converted to mapjoin EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n22 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) group by subq1.key; -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n22 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) group by subq1.key; -select * from dest_j1; +select * from dest_j1_n22; diff --git a/ql/src/test/queries/clientpositive/join32.q b/ql/src/test/queries/clientpositive/join32.q index 9d3d64545f..c55e730ac1 100644 --- a/ql/src/test/queries/clientpositive/join32.q +++ b/ql/src/test/queries/clientpositive/join32.q @@ -4,7 +4,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n12(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -13,17 +13,17 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- Since the inputs are small, it should be automatically converted to mapjoin EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n12 SELECT x.key, 
z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n12 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); -select * from dest_j1; +select * from dest_j1_n12; diff --git a/ql/src/test/queries/clientpositive/join32_lessSize.q b/ql/src/test/queries/clientpositive/join32_lessSize.q index 6114300dbf..229ba56f4f 100644 --- a/ql/src/test/queries/clientpositive/join32_lessSize.q +++ b/ql/src/test/queries/clientpositive/join32_lessSize.q @@ -4,8 +4,8 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; -CREATE TABLE dest_j2(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n21(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j2_n1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -14,81 +14,81 @@ set hive.auto.convert.join.noconditionaltask.size=6000; -- Since the inputs are small, it should be automatically converted to mapjoin EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n21 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n21 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); -select * from dest_j1; +select * from dest_j1_n21; EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n21 SELECT x.key, z.value, y.value FROM src w JOIN src1 x ON (x.value = w.value) JOIN src y ON (x.key = y.key) JOIN src1 z ON (x.key = z.key); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n21 SELECT x.key, z.value, y.value FROM src w JOIN src1 x ON (x.value = w.value) JOIN src y ON (x.key = y.key) JOIN src1 z ON (x.key = z.key); -select * from dest_j1; +select * from dest_j1_n21; EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j2 +INSERT OVERWRITE TABLE dest_j2_n1 SELECT res.key, z.value, res.value FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11); -INSERT OVERWRITE TABLE dest_j2 +INSERT OVERWRITE TABLE dest_j2_n1 SELECT res.key, z.value, res.value FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11); -select * from dest_j2; +select * from dest_j2_n1; EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j2 +INSERT OVERWRITE TABLE dest_j2_n1 SELECT res.key, z.value, res.value FROM (select x.key, x.value from src1 x LEFT OUTER JOIN src y ON (x.key = y.key)) res JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11); -INSERT OVERWRITE TABLE dest_j2 +INSERT OVERWRITE TABLE dest_j2_n1 SELECT res.key, z.value, res.value FROM (select x.key, x.value from src1 x LEFT OUTER JOIN src y ON (x.key = y.key)) res JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11); -select * from dest_j2; +select * from dest_j2_n1; EXPLAIN -INSERT OVERWRITE TABLE dest_j2 +INSERT OVERWRITE TABLE dest_j2_n1 SELECT res.key, 
x.value, res.value FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res JOIN srcpart x ON (res.value = x.value and x.ds='2008-04-08' and x.hr=11); -INSERT OVERWRITE TABLE dest_j2 +INSERT OVERWRITE TABLE dest_j2_n1 SELECT res.key, x.value, res.value FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res JOIN srcpart x ON (res.value = x.value and x.ds='2008-04-08' and x.hr=11); -select * from dest_j2; +select * from dest_j2_n1; EXPLAIN -INSERT OVERWRITE TABLE dest_j2 +INSERT OVERWRITE TABLE dest_j2_n1 SELECT res.key, y.value, res.value FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res JOIN srcpart y ON (res.value = y.value and y.ds='2008-04-08' and y.hr=11); -INSERT OVERWRITE TABLE dest_j2 +INSERT OVERWRITE TABLE dest_j2_n1 SELECT res.key, y.value, res.value FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res JOIN srcpart y ON (res.value = y.value and y.ds='2008-04-08' and y.hr=11); -select * from dest_j2; +select * from dest_j2_n1; diff --git a/ql/src/test/queries/clientpositive/join33.q b/ql/src/test/queries/clientpositive/join33.q index 9d3d64545f..15275755ba 100644 --- a/ql/src/test/queries/clientpositive/join33.q +++ b/ql/src/test/queries/clientpositive/join33.q @@ -4,7 +4,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n7(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -13,17 +13,17 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- Since the inputs are small, it should be automatically converted to mapjoin EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n7 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n7 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11); -select * from dest_j1; +select * from dest_j1_n7; diff --git a/ql/src/test/queries/clientpositive/join34.q b/ql/src/test/queries/clientpositive/join34.q index 5077c19e3c..e0234c61d5 100644 --- a/ql/src/test/queries/clientpositive/join34.q +++ b/ql/src/test/queries/clientpositive/join34.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -12,7 +12,7 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- Since the inputs are small, it should be automatically converted to mapjoin EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n1 SELECT x.key, x.value, subq1.value FROM ( SELECT x.key as key, x.value as value from src x where x.key < 20 @@ -21,7 +21,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n1 SELECT x.key, x.value, subq1.value FROM ( SELECT x.key as key, x.value as value from src x where x.key < 20 @@ -30,7 +30,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key); -select * from dest_j1; +select * from dest_j1_n1; diff --git 
a/ql/src/test/queries/clientpositive/join35.q b/ql/src/test/queries/clientpositive/join35.q index be3703e27b..29b6b000c6 100644 --- a/ql/src/test/queries/clientpositive/join35.q +++ b/ql/src/test/queries/clientpositive/join35.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n24(key STRING, value STRING, val2 INT) STORED AS TEXTFILE; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; @@ -12,7 +12,7 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- Since the inputs are small, it should be automatically converted to mapjoin EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n24 SELECT x.key, x.value, subq1.cnt FROM ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key @@ -21,7 +21,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n24 SELECT x.key, x.value, subq1.cnt FROM ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key @@ -30,7 +30,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key); -select * from dest_j1; +select * from dest_j1_n24; diff --git a/ql/src/test/queries/clientpositive/join36.q b/ql/src/test/queries/clientpositive/join36.q index 20005dd87f..a2aaa50f5e 100644 --- a/ql/src/test/queries/clientpositive/join36.q +++ b/ql/src/test/queries/clientpositive/join36.q @@ -1,26 +1,26 @@ --! qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE tmp1(key INT, cnt INT); -CREATE TABLE tmp2(key INT, cnt INT); -CREATE TABLE dest_j1(key INT, value INT, val2 INT); +CREATE TABLE tmp1_n0(key INT, cnt INT); +CREATE TABLE tmp2_n0(key INT, cnt INT); +CREATE TABLE dest_j1_n13(key INT, value INT, val2 INT); -INSERT OVERWRITE TABLE tmp1 +INSERT OVERWRITE TABLE tmp1_n0 SELECT key, count(1) from src group by key; -INSERT OVERWRITE TABLE tmp2 +INSERT OVERWRITE TABLE tmp2_n0 SELECT key, count(1) from src group by key; set hive.auto.convert.join=true; EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n13 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt -FROM tmp1 x JOIN tmp2 y ON (x.key = y.key); +FROM tmp1_n0 x JOIN tmp2_n0 y ON (x.key = y.key); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n13 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt -FROM tmp1 x JOIN tmp2 y ON (x.key = y.key); +FROM tmp1_n0 x JOIN tmp2_n0 y ON (x.key = y.key); -select * from dest_j1; +select * from dest_j1_n13; diff --git a/ql/src/test/queries/clientpositive/join37.q b/ql/src/test/queries/clientpositive/join37.q index 3a19dd2e60..ad17c25cf0 100644 --- a/ql/src/test/queries/clientpositive/join37.q +++ b/ql/src/test/queries/clientpositive/join37.q @@ -2,19 +2,19 @@ --! 
qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n9(key INT, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n9 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.key = y.key); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n9 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.key = y.key); -select * from dest_j1; +select * from dest_j1_n9; diff --git a/ql/src/test/queries/clientpositive/join38.q b/ql/src/test/queries/clientpositive/join38.q index b9f723c8e1..a1dbaa78d8 100644 --- a/ql/src/test/queries/clientpositive/join38.q +++ b/ql/src/test/queries/clientpositive/join38.q @@ -1,20 +1,20 @@ --! qt:dataset:src -create table tmp(col0 string, col1 string,col2 string,col3 string,col4 string,col5 string,col6 string,col7 string,col8 string,col9 string,col10 string,col11 string); +create table tmp_n1(col0 string, col1 string,col2 string,col3 string,col4 string,col5 string,col6 string,col7 string,col8 string,col9 string,col10 string,col11 string); -insert overwrite table tmp select key, cast(key + 1 as int), key +2, key+3, key+4, cast(key+5 as int), key+6, key+7, key+8, key+9, key+10, cast(key+11 as int) from src where key = 100; +insert overwrite table tmp_n1 select key, cast(key + 1 as int), key +2, key+3, key+4, cast(key+5 as int), key+6, key+7, key+8, key+9, key+10, cast(key+11 as int) from src where key = 100; -select * from tmp; +select * from tmp_n1; set hive.auto.convert.join=true; explain -FROM src a JOIN tmp b ON (a.key = b.col11) +FROM src a JOIN tmp_n1 b ON (a.key = b.col11) SELECT /*+ MAPJOIN(a) */ a.value, b.col5, count(1) as count where b.col11 = 111 group by a.value, b.col5; -FROM src a JOIN tmp b ON (a.key = b.col11) +FROM src a JOIN tmp_n1 b ON (a.key = b.col11) SELECT /*+ MAPJOIN(a) */ a.value, b.col5, count(1) as count where b.col11 = 111 group by a.value, b.col5; diff --git a/ql/src/test/queries/clientpositive/join39.q b/ql/src/test/queries/clientpositive/join39.q index b0358e9ed2..77832ab5f8 100644 --- a/ql/src/test/queries/clientpositive/join39.q +++ b/ql/src/test/queries/clientpositive/join39.q @@ -1,20 +1,20 @@ --! 
qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n8(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE; set hive.auto.convert.join=true; explain -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n8 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = y.key); -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n8 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = y.key); -select * from dest_j1; +select * from dest_j1_n8; diff --git a/ql/src/test/queries/clientpositive/join4.q b/ql/src/test/queries/clientpositive/join4.q index 055ac3dd3d..a501544630 100644 --- a/ql/src/test/queries/clientpositive/join4.q +++ b/ql/src/test/queries/clientpositive/join4.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n72(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -18,7 +18,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n72 SELECT c.c1, c.c2, c.c3, c.c4; FROM ( FROM @@ -32,6 +32,6 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n72 SELECT c.c1, c.c2, c.c3, c.c4; -SELECT dest1.* FROM dest1; +SELECT dest1_n72.* FROM dest1_n72; diff --git a/ql/src/test/queries/clientpositive/join42.q b/ql/src/test/queries/clientpositive/join42.q index a48c127f42..dc0f927359 100644 --- a/ql/src/test/queries/clientpositive/join42.q +++ b/ql/src/test/queries/clientpositive/join42.q @@ -1,8 +1,8 @@ set hive.mapred.mode=nonstrict; create table L as select 4436 id; -create table LA as select 4436 loan_id, 4748 aid, 4415 pi_id; +create table LA_n11 as select 4436 loan_id, 4748 aid, 4415 pi_id; create table FR as select 4436 loan_id; -create table A as select 4748 id; +create table A_n11 as select 4748 id; create table PI as select 4415 id; create table acct as select 4748 aid, 10 acc_n, 122 brn; @@ -14,11 +14,11 @@ explain select acct.ACC_N, acct.brn FROM L -JOIN LA ON L.id = LA.loan_id +JOIN LA_n11 ON L.id = LA_n11.loan_id JOIN FR ON L.id = FR.loan_id -JOIN A ON LA.aid = A.id -JOIN PI ON PI.id = LA.pi_id -JOIN acct ON A.id = acct.aid +JOIN A_n11 ON LA_n11.aid = A_n11.id +JOIN PI ON PI.id = LA_n11.pi_id +JOIN acct ON A_n11.id = acct.aid WHERE L.id = 4436 and acct.brn is not null; @@ -27,11 +27,11 @@ select acct.ACC_N, acct.brn FROM L -JOIN LA ON L.id = LA.loan_id +JOIN LA_n11 ON L.id = LA_n11.loan_id JOIN FR ON L.id = FR.loan_id -JOIN A ON LA.aid = A.id -JOIN PI ON PI.id = LA.pi_id -JOIN acct ON A.id = acct.aid +JOIN A_n11 ON LA_n11.aid = A_n11.id +JOIN PI ON PI.id = LA_n11.pi_id +JOIN acct ON A_n11.id = acct.aid WHERE L.id = 4436 and acct.brn is not null; diff --git a/ql/src/test/queries/clientpositive/join44.q b/ql/src/test/queries/clientpositive/join44.q index 69aa088264..dc16df56ab 100644 --- a/ql/src/test/queries/clientpositive/join44.q +++ b/ql/src/test/queries/clientpositive/join44.q @@ -3,11 +3,11 @@ set hive.cbo.enable=false; -- SORT_QUERY_RESULTS -CREATE TABLE mytable(val1 INT, val2 INT, val3 INT); +CREATE TABLE 
mytable_n1(val1 INT, val2 INT, val3 INT); EXPLAIN SELECT * -FROM mytable src1, mytable src2 +FROM mytable_n1 src1, mytable_n1 src2 WHERE src1.val1=src2.val1 AND src1.val2 between 2450816 and 2451500 AND src2.val2 between 2450816 and 2451500; diff --git a/ql/src/test/queries/clientpositive/join46.q b/ql/src/test/queries/clientpositive/join46.q index a661c0fb28..f40acd489a 100644 --- a/ql/src/test/queries/clientpositive/join46.q +++ b/ql/src/test/queries/clientpositive/join46.q @@ -1,275 +1,275 @@ set hive.strict.checks.cartesian.product=false; set hive.join.emit.interval=2; -CREATE TABLE test1 (key INT, value INT, col_1 STRING); -INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +CREATE TABLE test1_n2 (key INT, value INT, col_1 STRING); +INSERT INTO test1_n2 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car'); -CREATE TABLE test2 (key INT, value INT, col_2 STRING); -INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), +CREATE TABLE test2_n0 (key INT, value INT, col_2 STRING); +INSERT INTO test2_n0 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None'); -- Basic outer join EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value); -- Conjunction with pred on multiple inputs and single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND test1_n2.key between 100 and 102 + AND test2_n0.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND test1_n2.key between 100 and 102 + AND test2_n0.key between 100 and 102); -- Conjunction with pred on single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.key between 100 and 102 + AND test2_n0.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.key between 100 and 102 + AND test2_n0.key between 100 and 102); -- Conjunction with pred on multiple inputs and none (left outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value AND true); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value AND true); -- Condition on one input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.key between 100 and 102); -- Disjunction with pred 
on multiple inputs and single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102); -- Keys plus residual (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)); -- Complex condition, projecting columns EXPLAIN SELECT col_1, col_2 -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key=test2.key); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key=test2_n0.key); SELECT col_1, col_2 -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key=test2.key); +FROM test1_n2 LEFT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key=test2_n0.key); -- Disjunction with pred on multiple inputs and single inputs (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102); -- 
Disjunction with pred on multiple inputs and left input (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102); -- Keys plus residual (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)); -- Disjunction with pred on multiple inputs and single inputs (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102); -- Keys plus residual (full outer join) EXPLAIN SELECT * -FROM test1 
FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)); -- Mixed ( FOJ (ROJ, LOJ) ) EXPLAIN SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key1, test1_n2.value AS value1, test1_n2.col_1 AS col_1, + test2_n0.key AS key2, test2_n0.value AS value2, test2_n0.col_2 AS col_2 + FROM test1_n2 RIGHT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key3, test1_n2.value AS value3, test1_n2.col_1 AS col_3, + test2_n0.key AS key4, test2_n0.value AS value4, test2_n0.col_2 AS col_4 + FROM test1_n2 LEFT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2); SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key1, test1_n2.value AS value1, test1_n2.col_1 AS col_1, + test2_n0.key AS key2, test2_n0.value AS value2, test2_n0.col_2 AS col_2 + FROM test1_n2 RIGHT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key3, test1_n2.value AS value3, test1_n2.col_1 AS col_3, + test2_n0.key AS key4, test2_n0.value AS value4, test2_n0.col_2 AS col_4 + FROM test1_n2 LEFT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2); diff --git a/ql/src/test/queries/clientpositive/join5.q b/ql/src/test/queries/clientpositive/join5.q index 0cb12d5d1a..f7cf954ad4 100644 --- a/ql/src/test/queries/clientpositive/join5.q +++ b/ql/src/test/queries/clientpositive/join5.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE 
dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n126(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -18,7 +18,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n126 SELECT c.c1, c.c2, c.c3, c.c4; FROM ( FROM @@ -32,6 +32,6 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n126 SELECT c.c1, c.c2, c.c3, c.c4; -SELECT dest1.* FROM dest1; +SELECT dest1_n126.* FROM dest1_n126; diff --git a/ql/src/test/queries/clientpositive/join6.q b/ql/src/test/queries/clientpositive/join6.q index 4fe1885890..6b3095f6eb 100644 --- a/ql/src/test/queries/clientpositive/join6.q +++ b/ql/src/test/queries/clientpositive/join6.q @@ -1,7 +1,7 @@ --! qt:dataset:src1 --! qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n156(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; -- SORT_QUERY_RESULTS @@ -18,7 +18,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n156 SELECT c.c1, c.c2, c.c3, c.c4; FROM ( FROM @@ -32,7 +32,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4; +INSERT OVERWRITE TABLE dest1_n156 SELECT c.c1, c.c2, c.c3, c.c4; -SELECT dest1.* FROM dest1; +SELECT dest1_n156.* FROM dest1_n156; diff --git a/ql/src/test/queries/clientpositive/join7.q b/ql/src/test/queries/clientpositive/join7.q index 104770709d..d3a22a672f 100644 --- a/ql/src/test/queries/clientpositive/join7.q +++ b/ql/src/test/queries/clientpositive/join7.q @@ -1,7 +1,7 @@ --! qt:dataset:src1 --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n17(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE; -- SORT_QUERY_RESULTS @@ -23,7 +23,7 @@ FROM ( ON (a.c1 = c.c5) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; +INSERT OVERWRITE TABLE dest1_n17 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; FROM ( FROM @@ -42,6 +42,6 @@ FROM ( ON (a.c1 = c.c5) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; +INSERT OVERWRITE TABLE dest1_n17 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6; -SELECT dest1.* FROM dest1; +SELECT dest1_n17.* FROM dest1_n17; diff --git a/ql/src/test/queries/clientpositive/join8.q b/ql/src/test/queries/clientpositive/join8.q index 292c07c36a..3561bb6022 100644 --- a/ql/src/test/queries/clientpositive/join8.q +++ b/ql/src/test/queries/clientpositive/join8.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n173(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE; EXPLAIN FROM ( @@ -18,7 +18,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL; +INSERT OVERWRITE TABLE dest1_n173 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL; FROM ( FROM @@ -32,6 +32,6 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL; +INSERT OVERWRITE TABLE dest1_n173 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL; -SELECT dest1.* FROM dest1; +SELECT dest1_n173.* FROM dest1_n173; diff --git a/ql/src/test/queries/clientpositive/join9.q b/ql/src/test/queries/clientpositive/join9.q index 0c17b04bc1..670f2fd375 100644 --- a/ql/src/test/queries/clientpositive/join9.q +++ b/ql/src/test/queries/clientpositive/join9.q @@ -4,13 +4,13 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n39(key INT, value STRING) STORED AS TEXTFILE; EXPLAIN EXTENDED FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; +INSERT OVERWRITE TABLE dest1_n39 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; +INSERT OVERWRITE TABLE dest1_n39 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'; -SELECT dest1.* FROM dest1; +SELECT dest1_n39.* FROM dest1_n39; diff --git a/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q b/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q index 2be8dcc846..174dc3656b 100644 --- a/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q +++ b/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q @@ -1,6 +1,6 @@ --! 
qt:dataset:part set hive.mapred.mode=nonstrict; -create table part2( +create table part2_n0( p2_partkey INT, p2_name STRING, p2_mfgr STRING, @@ -12,7 +12,7 @@ create table part2( p2_comment STRING ); -create table part3( +create table part3_n0( p3_partkey INT, p3_name STRING, p3_mfgr STRING, @@ -25,13 +25,13 @@ create table part3( ); explain select * -from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name and p2_name = p3_name; +from part p1 join part2_n0 p2 join part3_n0 p3 on p1.p_name = p2_name and p2_name = p3_name; explain select * -from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name and p3_name = p2_name; +from part p1 join part2_n0 p2 join part3_n0 p3 on p2_name = p1.p_name and p3_name = p2_name; explain select * -from part p1 join part2 p2 join part3 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name; +from part p1 join part2_n0 p2 join part3_n0 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name; explain select * -from part p1 join part2 p2 join part3 p3 on p2_partkey = 1 and p3_name = p2_name; +from part p1 join part2_n0 p2 join part3_n0 p3 on p2_partkey = 1 and p3_name = p2_name; diff --git a/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q b/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q index 5f0c985651..b18745e36b 100644 --- a/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q +++ b/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q @@ -1,6 +1,6 @@ --! qt:dataset:part set hive.mapred.mode=nonstrict; -create table part2( +create table part2_n5( p2_partkey INT, p2_name STRING, p2_mfgr STRING, @@ -12,7 +12,7 @@ create table part2( p2_comment STRING ); -create table part3( +create table part3_n2( p3_partkey INT, p3_name STRING, p3_mfgr STRING, @@ -25,17 +25,17 @@ create table part3( ); explain select * -from part p1 join part2 p2 join part3 p3 +from part p1 join part2_n5 p2 join part3_n2 p3 where p1.p_name = p2_name and p2_name = p3_name; explain select * -from part p1 join part2 p2 join part3 p3 +from part p1 join part2_n5 p2 join part3_n2 p3 where p2_name = p1.p_name and p3_name = p2_name; explain select * -from part p1 join part2 p2 join part3 p3 +from part p1 join part2_n5 p2 join part3_n2 p3 where p2_partkey + p1.p_partkey = p1.p_partkey and p3_name = p2_name; explain select * -from part p1 join part2 p2 join part3 p3 +from part p1 join part2_n5 p2 join part3_n2 p3 where p2_partkey = 1 and p3_name = p2_name; diff --git a/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q b/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q index 9bbecca4b5..320ebfb7e8 100644 --- a/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q +++ b/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q @@ -1,6 +1,6 @@ --! 
qt:dataset:part set hive.mapred.mode=nonstrict; -create table part2( +create table part2_n4( p2_partkey INT, p2_name STRING, p2_mfgr STRING, @@ -12,7 +12,7 @@ create table part2( p2_comment STRING ); -create table part3( +create table part3_n1( p3_partkey INT, p3_name STRING, p3_mfgr STRING, @@ -25,10 +25,10 @@ create table part3( ); explain select * -from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name join part p4 +from part p1 join part2_n4 p2 join part3_n1 p3 on p1.p_name = p2_name join part p4 where p2_name = p3_name and p1.p_name = p4.p_name; explain select * -from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name join part p4 +from part p1 join part2_n4 p2 join part3_n1 p3 on p2_name = p1.p_name join part p4 where p2_name = p3_name and p1.p_partkey = p4.p_partkey and p1.p_partkey = p2_partkey; diff --git a/ql/src/test/queries/clientpositive/join_emit_interval.q b/ql/src/test/queries/clientpositive/join_emit_interval.q index c59d97dfc0..1ebbff1607 100644 --- a/ql/src/test/queries/clientpositive/join_emit_interval.q +++ b/ql/src/test/queries/clientpositive/join_emit_interval.q @@ -1,31 +1,31 @@ set hive.strict.checks.cartesian.product=false; set hive.join.emit.interval=1; -CREATE TABLE test1 (key INT, value INT, col_1 STRING); -INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +CREATE TABLE test1_n7 (key INT, value INT, col_1 STRING); +INSERT INTO test1_n7 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car'); -CREATE TABLE test2 (key INT, value INT, col_2 STRING); -INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), +CREATE TABLE test2_n4 (key INT, value INT, col_2 STRING); +INSERT INTO test2_n4 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None'); -- Equi-condition and condition on one input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value AND test1.key between 100 and 102); +FROM test1_n7 LEFT OUTER JOIN test2_n4 +ON (test1_n7.value=test2_n4.value AND test1_n7.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value AND test1.key between 100 and 102); +FROM test1_n7 LEFT OUTER JOIN test2_n4 +ON (test1_n7.value=test2_n4.value AND test1_n7.key between 100 and 102); -- Condition on one input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n7 LEFT OUTER JOIN test2_n4 +ON (test1_n7.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n7 LEFT OUTER JOIN test2_n4 +ON (test1_n7.key between 100 and 102); diff --git a/ql/src/test/queries/clientpositive/join_filters.q b/ql/src/test/queries/clientpositive/join_filters.q index 0113c40130..3469dfca73 100644 --- a/ql/src/test/queries/clientpositive/join_filters.q +++ b/ql/src/test/queries/clientpositive/join_filters.q @@ -1,156 +1,156 @@ set hive.mapred.mode=nonstrict; -- SORT_AND_HASH_QUERY_RESULTS -CREATE TABLE myinput1(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1; - -SELECT * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 
AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT * from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = 
a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; -SELECT * from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT * from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; - -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = 
b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -LOAD DATA LOCAL INPATH '../../data/files/in/000000_0' into table smb_input1; -LOAD DATA LOCAL INPATH '../../data/files/in/000001_0' into table smb_input1; -LOAD DATA LOCAL INPATH '../../data/files/in/000000_0' into table smb_input2; -LOAD DATA LOCAL INPATH '../../data/files/in/000001_0' into table smb_input2; +CREATE TABLE myinput1_n8(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1_n8; + +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND 
a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * from myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n8 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT * from myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n8 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b RIGHT OUTER JOIN myinput1_n8 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; +SELECT * from myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n8 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT * from myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n8 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b RIGHT OUTER JOIN myinput1_n8 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; + +SELECT /*+ MAPJOIN(a) 
*/ * FROM myinput1_n8 a JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value and a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value and a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +CREATE TABLE smb_input1_n3(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE smb_input2_n3(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +LOAD DATA LOCAL INPATH '../../data/files/in/000000_0' into table smb_input1_n3; +LOAD DATA LOCAL INPATH '../../data/files/in/000001_0' into table smb_input1_n3; +LOAD DATA LOCAL INPATH '../../data/files/in/000000_0' into table smb_input2_n3; +LOAD DATA 
LOCAL INPATH '../../data/files/in/000001_0' into table smb_input2_n3; SET hive.optimize.bucketmapjoin = true; SET hive.optimize.bucketmapjoin.sortedmerge = true; SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key = b.key AND a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.key = b.key AND a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = 
a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT * from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT * from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key 
> 40 AND b.value > 50 AND b.key = b.value); -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; -SELECT * from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT * from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; - -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN 
myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key = b.key AND a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.key = b.key AND a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; - -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a JOIN smb_input1_n3 b ON a.key 
= b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a JOIN smb_input2_n3 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input2_n3 a JOIN smb_input2_n3 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a JOIN smb_input1_n3 b ON a.key = b.key AND a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n3 a JOIN smb_input1_n3 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n3 a JOIN smb_input2_n3 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n3 a JOIN smb_input2_n3 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n3 a JOIN smb_input2_n3 b ON a.key = b.key AND a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n3 a LEFT OUTER JOIN smb_input1_n3 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n3 a LEFT OUTER JOIN smb_input2_n3 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n3 a LEFT OUTER JOIN smb_input2_n3 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a RIGHT OUTER JOIN smb_input1_n3 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a RIGHT OUTER JOIN smb_input2_n3 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input2_n3 a RIGHT OUTER JOIN smb_input2_n3 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND 
b.key = b.value; +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT * FROM myinput1_n8 a FULL OUTER JOIN myinput1_n8 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT * from myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n8 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT * from myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n8 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b 
RIGHT OUTER JOIN myinput1_n8 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; +SELECT * from myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n8 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT * from myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n8 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b RIGHT OUTER JOIN myinput1_n8 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; + +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value and a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a JOIN myinput1_n8 b ON a.value = b.value and a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a LEFT OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n8 a LEFT OUTER 
JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n8 a RIGHT OUTER JOIN myinput1_n8 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a JOIN smb_input1_n3 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a JOIN smb_input2_n3 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input2_n3 a JOIN smb_input2_n3 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a JOIN smb_input1_n3 b ON a.key = b.key AND a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n3 a JOIN smb_input1_n3 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n3 a JOIN smb_input2_n3 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n3 a JOIN smb_input2_n3 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n3 a JOIN smb_input2_n3 b ON a.key = b.key AND a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n3 a LEFT OUTER JOIN smb_input1_n3 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n3 a LEFT OUTER JOIN smb_input2_n3 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n3 a LEFT OUTER JOIN smb_input2_n3 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; + +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a RIGHT OUTER JOIN smb_input1_n3 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n3 a RIGHT OUTER JOIN smb_input2_n3 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input2_n3 a RIGHT OUTER JOIN smb_input2_n3 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND 
b.value > 50 AND b.key = b.value;
diff --git a/ql/src/test/queries/clientpositive/join_filters_overlap.q b/ql/src/test/queries/clientpositive/join_filters_overlap.q
index a361024ce4..ffe7db09c6 100644
--- a/ql/src/test/queries/clientpositive/join_filters_overlap.q
+++ b/ql/src/test/queries/clientpositive/join_filters_overlap.q
@@ -3,28 +3,28 @@ set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 -- HIVE-3411 Filter predicates on outer join overlapped on single alias is not handled properly
-create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3;
+create table a_n4 as SELECT 100 as key, a_n4.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a_n4 as value limit 3;
--- overlap on a
-explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60);
-select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60);
-select /*+ MAPJOIN(b,c)*/ * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60);
+-- overlap on a_n4
+explain extended select * from a_n4 left outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (a_n4.key=c.key AND a_n4.value=60 AND c.value=60);
+select * from a_n4 left outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (a_n4.key=c.key AND a_n4.value=60 AND c.value=60);
+select /*+ MAPJOIN(b,c)*/ * from a_n4 left outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (a_n4.key=c.key AND a_n4.value=60 AND c.value=60);
 -- overlap on b
-explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60);
-select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60);
-select /*+ MAPJOIN(a,c)*/ * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60);
+explain extended select * from a_n4 right outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (b.key=c.key AND b.value=60 AND c.value=60);
+select * from a_n4 right outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (b.key=c.key AND b.value=60 AND c.value=60);
+select /*+ MAPJOIN(a_n4,c)*/ * from a_n4 right outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (b.key=c.key AND b.value=60 AND c.value=60);
 -- overlap on b with two filters for each
-explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60);
-select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60);
-select /*+ MAPJOIN(a,c)*/ * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60);
+explain extended select * from a_n4 right outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50 AND b.value>10) left outer join a_n4 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60);
+select * from a_n4 right outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50 AND b.value>10) left outer join a_n4 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60);
+select /*+ MAPJOIN(a_n4,c)*/ * from a_n4 right outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50 AND b.value>10) left outer join a_n4 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60);
--- overlap on a, b
-explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40);
-select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40);
+-- overlap on a_n4, b
+explain extended select * from a_n4 full outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a_n4 d on (a_n4.key=d.key AND a_n4.value=40 AND d.value=40);
+select * from a_n4 full outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a_n4 d on (a_n4.key=d.key AND a_n4.value=40 AND d.value=40);
--- triple overlap on a
-explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40);
-select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40);
-select /*+ MAPJOIN(b,c, d)*/ * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40);
+-- triple overlap on a_n4
+explain extended select * from a_n4 left outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (a_n4.key=c.key AND a_n4.value=60 AND c.value=60) left outer join a_n4 d on (a_n4.key=d.key AND a_n4.value=40 AND d.value=40);
+select * from a_n4 left outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (a_n4.key=c.key AND a_n4.value=60 AND c.value=60) left outer join a_n4 d on (a_n4.key=d.key AND a_n4.value=40 AND d.value=40);
+select /*+ MAPJOIN(b,c, d)*/ * from a_n4 left outer join a_n4 b on (a_n4.key=b.key AND a_n4.value=50 AND b.value=50) left outer join a_n4 c on (a_n4.key=c.key AND a_n4.value=60 AND c.value=60) left outer join a_n4 d on (a_n4.key=d.key AND a_n4.value=40 AND d.value=40);
diff --git a/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q b/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q
index ebe832d2f3..aec7deac54 100644
--- a/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q
+++ b/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q
@@ -1,71 +1,71 @@
 set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
-CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1;
+CREATE TABLE myinput1_n10(key int, value int);
+LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1_n10;
 -- merging
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from b.value;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value;
 -- SORT_QUERY_RESULTS
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value;
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from b.value join myinput1 c on a.key=c.key;
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value join myinput1 c on a.key=c.key;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value join myinput1_n10 c on a.key=c.key;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value join myinput1_n10 c on a.key=c.key;
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from b.value join myinput1 c on a.key is not distinct from c.key;
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value join myinput1 c on a.key is not distinct from c.key;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value join myinput1_n10 c on a.key is not distinct from c.key;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value join myinput1_n10 c on a.key is not distinct from c.key;
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from b.value AND a.value=b.key join myinput1 c on a.key is not distinct from c.key AND a.value=c.value;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value AND a.value=b.key join myinput1_n10 c on a.key is not distinct from c.key AND a.value=c.value;
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value AND a.value=b.key join myinput1 c on a.key is not distinct from c.key AND a.value=c.value;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value AND a.value=b.key join myinput1_n10 c on a.key is not distinct from c.key AND a.value=c.value;
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from b.value AND a.value is not distinct from b.key join myinput1 c on a.key is not distinct from c.key AND a.value is not distinct from c.value;
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value AND a.value is not distinct from b.key join myinput1 c on a.key is not distinct from c.key AND a.value is not distinct from c.value;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value AND a.value is not distinct from b.key join myinput1_n10 c on a.key is not distinct from c.key AND a.value is not distinct from c.value;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value AND a.value is not distinct from b.key join myinput1_n10 c on a.key is not distinct from c.key AND a.value is not distinct from c.value;
 -- outer joins
-SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key is not distinct from b.value;
-SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key is not distinct from b.value;
-SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key is not distinct from b.value;
+SELECT * FROM myinput1_n10 a LEFT OUTER JOIN myinput1_n10 b ON a.key is not distinct from b.value;
+SELECT * FROM myinput1_n10 a RIGHT OUTER JOIN myinput1_n10 b ON a.key is not distinct from b.value;
+SELECT * FROM myinput1_n10 a FULL OUTER JOIN myinput1_n10 b ON a.key is not distinct from b.value;
 -- map joins
-SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key is not distinct from b.value;
-SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key is not distinct from b.value;
+SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n10 a JOIN myinput1_n10 b ON a.key is not distinct from b.value;
+SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n10 a JOIN myinput1_n10 b ON a.key is not distinct from b.value;
-CREATE TABLE smb_input(key int, value int);
-LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input;
-LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input;
+CREATE TABLE smb_input_n2(key int, value int);
+LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input_n2;
+LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input_n2;
 ;
 -- smbs
-CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
+CREATE TABLE smb_input1_n5(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE smb_input2_n5(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-from smb_input
-insert overwrite table smb_input1 select *
-insert overwrite table smb_input2 select *;
+from smb_input_n2
+insert overwrite table smb_input1_n5 select *
+insert overwrite table smb_input2_n5 select *;
 SET hive.optimize.bucketmapjoin = true;
 SET hive.optimize.bucketmapjoin.sortedmerge = true;
 SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key is not distinct from b.key;
-SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key is not distinct from b.key AND a.value is not distinct from b.value;
-SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key is not distinct from b.key;
-SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key is not distinct from b.key;
-SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key is not distinct from b.key;
+SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n5 a JOIN smb_input1_n5 b ON a.key is not distinct from b.key;
+SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n5 a JOIN smb_input1_n5 b ON a.key is not distinct from b.key AND a.value is not distinct from b.value;
+SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n5 a RIGHT OUTER JOIN smb_input1_n5 b ON a.key is not distinct from b.key;
+SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n5 a JOIN smb_input1_n5 b ON a.key is not distinct from b.key;
+SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n5 a LEFT OUTER JOIN smb_input1_n5 b ON a.key is not distinct from b.key;
-SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key is not distinct from b.value;
-SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key is not distinct from b.value;
-SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key is not distinct from b.value;
-SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key is not distinct from b.value;
+SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n5 a JOIN smb_input2_n5 b ON a.key is not distinct from b.value;
MAPJOIN(a) */ * FROM smb_input1_n5 a JOIN smb_input2_n5 b ON a.key is not distinct from b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n5 a JOIN smb_input2_n5 b ON a.key is not distinct from b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n5 a LEFT OUTER JOIN smb_input2_n5 b ON a.key is not distinct from b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n5 a RIGHT OUTER JOIN smb_input2_n5 b ON a.key is not distinct from b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value is not distinct from b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value is not distinct from b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value is not distinct from b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value is not distinct from b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input2_n5 a JOIN smb_input2_n5 b ON a.value is not distinct from b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input2_n5 a RIGHT OUTER JOIN smb_input2_n5 b ON a.value is not distinct from b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n5 a JOIN smb_input2_n5 b ON a.value is not distinct from b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n5 a LEFT OUTER JOIN smb_input2_n5 b ON a.value is not distinct from b.value; --HIVE-3315 join predicate transitive -explain select * from myinput1 a join myinput1 b on a.key is not distinct from b.value AND a.key is NULL; -select * from myinput1 a join myinput1 b on a.key is not distinct from b.value AND a.key is NULL; +explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value AND a.key is NULL; +select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from b.value AND a.key is NULL; diff --git a/ql/src/test/queries/clientpositive/join_map_ppr.q b/ql/src/test/queries/clientpositive/join_map_ppr.q index 4d0e559cb2..8634d7d4f9 100644 --- a/ql/src/test/queries/clientpositive/join_map_ppr.q +++ b/ql/src/test/queries/clientpositive/join_map_ppr.q @@ -3,23 +3,23 @@ --! 
qt:dataset:src -- SORT_QUERY_RESULTS -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n4(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE; set hive.cbo.enable=false; EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) WHERE z.ds='2008-04-08' and z.hr=11; -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) WHERE z.ds='2008-04-08' and z.hr=11; -select * from dest_j1; +select * from dest_j1_n4; CREATE TABLE src_copy(key int, value string); CREATE TABLE src1_copy(key string, value string); @@ -27,19 +27,19 @@ INSERT OVERWRITE TABLE src_copy select key, value from src; INSERT OVERWRITE TABLE src1_copy select key, value from src1; EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1_copy x JOIN src_copy y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) WHERE z.ds='2008-04-08' and z.hr=11; -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1_copy x JOIN src_copy y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) WHERE z.ds='2008-04-08' and z.hr=11; -select * from dest_j1; +select * from dest_j1_n4; diff --git a/ql/src/test/queries/clientpositive/join_nulls.q b/ql/src/test/queries/clientpositive/join_nulls.q index 3b7ecd94ef..4c85512352 100644 --- a/ql/src/test/queries/clientpositive/join_nulls.q +++ b/ql/src/test/queries/clientpositive/join_nulls.q @@ -1,47 +1,47 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -CREATE TABLE myinput1(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE myinput1; +CREATE TABLE myinput1_n3(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE myinput1_n3; -SELECT * FROM myinput1 a JOIN myinput1 b; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.value; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.key = b.key; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value; -SELECT * FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key; -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key; +SELECT * FROM myinput1_n3 a JOIN myinput1_n3 b; +SELECT * FROM myinput1_n3 a LEFT OUTER 
JOIN myinput1_n3 b; +SELECT * FROM myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b; +SELECT * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.key = b.value; +SELECT * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.key = b.key; +SELECT * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.value = b.value; +SELECT * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.value = b.value and a.key=b.key; +SELECT * FROM myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b ON a.key = b.value; +SELECT * FROM myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b ON a.value = b.value; +SELECT * FROM myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b ON a.key = b.key; +SELECT * FROM myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b ON a.key = b.key and a.value=b.value; +SELECT * FROM myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b ON a.key = b.value; +SELECT * FROM myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b ON a.key = b.key; +SELECT * FROM myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b ON a.value = b.value; +SELECT * FROM myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b ON a.key=b.key and a.value = b.value; +SELECT * FROM myinput1_n3 a FULL OUTER JOIN myinput1_n3 b ON a.key = b.value; +SELECT * FROM myinput1_n3 a FULL OUTER JOIN myinput1_n3 b ON a.key = b.key; +SELECT * FROM myinput1_n3 a FULL OUTER JOIN myinput1_n3 b ON a.value = b.value; +SELECT * FROM myinput1_n3 a FULL OUTER JOIN myinput1_n3 b ON a.value = b.value and a.key=b.key; -SELECT * from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value); -SELECT * from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value); -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value; +SELECT * from myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1_n3 c ON (b.value=c.value); +SELECT * from myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b ON (a.value=b.value) LEFT OUTER JOIN myinput1_n3 c ON (b.value=c.value); +SELECT * FROM myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b RIGHT OUTER JOIN myinput1_n3 c ON a.value = b.value and b.value = c.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.key; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key = b.key; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key = b.key; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key; -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n3 a JOIN myinput1_n3 b; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n3 a JOIN 
myinput1_n3 b ON a.key = b.key; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.value = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.key = b.key; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.value = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n3 a JOIN myinput1_n3 b ON a.value = b.value and a.key = b.key; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b ON a.key = b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b ON a.key = b.key; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n3 a LEFT OUTER JOIN myinput1_n3 b ON a.value = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b ON a.key = b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b ON a.key = b.key; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n3 a RIGHT OUTER JOIN myinput1_n3 b ON a.value = b.value; CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; diff --git a/ql/src/test/queries/clientpositive/join_nullsafe.q b/ql/src/test/queries/clientpositive/join_nullsafe.q index e96cc71671..0cf0056bf4 100644 --- a/ql/src/test/queries/clientpositive/join_nullsafe.q +++ b/ql/src/test/queries/clientpositive/join_nullsafe.q @@ -1,71 +1,71 @@ set hive.explain.user=false; -- SORT_QUERY_RESULTS -CREATE TABLE myinput1(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1; +CREATE TABLE myinput1_n9(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1_n9; -- merging -explain select * from myinput1 a join myinput1 b on a.key<=>b.value; +explain select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value; -- SORT_QUERY_RESULTS -select * from myinput1 a join myinput1 b on a.key<=>b.value; +select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key; -select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key; +explain select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value join myinput1_n9 c on a.key=c.key; +select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value join myinput1_n9 c on a.key=c.key; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key; -select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key; +explain select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value join myinput1_n9 c on a.key<=>c.key; +select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value join myinput1_n9 c on a.key<=>c.key; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value; +explain select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value AND a.value=b.key join myinput1_n9 c on a.key<=>c.key AND a.value=c.value; -select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value; +select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value AND a.value=b.key join myinput1_n9 c on a.key<=>c.key AND a.value=c.value; -explain select * from 
myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value; -select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value; +explain select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value AND a.value<=>b.key join myinput1_n9 c on a.key<=>c.key AND a.value<=>c.value; +select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value AND a.value<=>b.key join myinput1_n9 c on a.key<=>c.key AND a.value<=>c.value; -- outer joins -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value; -SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value; -SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value; +SELECT * FROM myinput1_n9 a LEFT OUTER JOIN myinput1_n9 b ON a.key<=>b.value; +SELECT * FROM myinput1_n9 a RIGHT OUTER JOIN myinput1_n9 b ON a.key<=>b.value; +SELECT * FROM myinput1_n9 a FULL OUTER JOIN myinput1_n9 b ON a.key<=>b.value; -- map joins -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value; -SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value; +SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n9 a JOIN myinput1_n9 b ON a.key<=>b.value; +SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n9 a JOIN myinput1_n9 b ON a.key<=>b.value; -CREATE TABLE smb_input(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input; -LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input; +CREATE TABLE smb_input_n1(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input_n1; +LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input_n1; ; -- smbs -CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +CREATE TABLE smb_input1_n4(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE smb_input2_n4(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -from smb_input -insert overwrite table smb_input1 select * -insert overwrite table smb_input2 select *; +from smb_input_n1 +insert overwrite table smb_input1_n4 select * +insert overwrite table smb_input2_n4 select *; SET hive.optimize.bucketmapjoin = true; SET hive.optimize.bucketmapjoin.sortedmerge = true; SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n4 a JOIN smb_input1_n4 b ON a.key <=> b.key; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n4 a JOIN smb_input1_n4 b ON a.key <=> b.key AND a.value <=> b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n4 a RIGHT OUTER JOIN smb_input1_n4 b ON a.key <=> b.key; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n4 a JOIN smb_input1_n4 b ON a.key <=> b.key; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n4 a LEFT OUTER JOIN smb_input1_n4 b ON a.key <=> b.key; -SELECT /*+ MAPJOIN(a) 
*/ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key <=> b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key <=> b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n4 a JOIN smb_input2_n4 b ON a.key <=> b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n4 a JOIN smb_input2_n4 b ON a.key <=> b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input1_n4 a LEFT OUTER JOIN smb_input2_n4 b ON a.key <=> b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input1_n4 a RIGHT OUTER JOIN smb_input2_n4 b ON a.key <=> b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value; -SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value <=> b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value; -SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value <=> b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input2_n4 a JOIN smb_input2_n4 b ON a.value <=> b.value; +SELECT /*+ MAPJOIN(a) */ * FROM smb_input2_n4 a RIGHT OUTER JOIN smb_input2_n4 b ON a.value <=> b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n4 a JOIN smb_input2_n4 b ON a.value <=> b.value; +SELECT /*+ MAPJOIN(b) */ * FROM smb_input2_n4 a LEFT OUTER JOIN smb_input2_n4 b ON a.value <=> b.value; --HIVE-3315 join predicate transitive -explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL; -select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL; +explain select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value AND a.key is NULL; +select * from myinput1_n9 a join myinput1_n9 b on a.key<=>b.value AND a.key is NULL; diff --git a/ql/src/test/queries/clientpositive/join_on_varchar.q b/ql/src/test/queries/clientpositive/join_on_varchar.q index d3c61f7d59..a60e68734f 100644 --- a/ql/src/test/queries/clientpositive/join_on_varchar.q +++ b/ql/src/test/queries/clientpositive/join_on_varchar.q @@ -2,14 +2,14 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -create table tbl1(c1 varchar(10), intcol int); -create table tbl2(c2 varchar(30)); -insert into table tbl1 select repeat('t', 10), 11 from src limit 1; -insert into table tbl1 select repeat('s', 10), 22 from src limit 1; -insert into table tbl2 select concat(repeat('t', 10), 'ppp') from src limit 1; -insert into table tbl2 select repeat('s', 10) from src limit 1; +create table tbl1_n3(c1 varchar(10), intcol int); +create table tbl2_n2(c2 varchar(30)); +insert into table tbl1_n3 select repeat('t', 10), 11 from src limit 1; +insert into table tbl1_n3 select repeat('s', 10), 22 from src limit 1; +insert into table tbl2_n2 select concat(repeat('t', 10), 'ppp') from src limit 1; +insert into table tbl2_n2 select repeat('s', 10) from src limit 1; set hive.auto.convert.join=true; explain -select /*+ MAPJOIN(tbl2) */ c1,c2 from tbl1 join tbl2 on (c1 = c2) order by c1,c2; -select /*+ MAPJOIN(tbl2) */ c1,c2 from tbl1 join tbl2 on (c1 = c2) order by c1,c2; +select /*+ MAPJOIN(tbl2_n2) */ c1,c2 from tbl1_n3 join tbl2_n2 on (c1 = c2) order by c1,c2; +select /*+ MAPJOIN(tbl2_n2) */ c1,c2 from tbl1_n3 join tbl2_n2 on (c1 = c2) order by c1,c2; diff --git a/ql/src/test/queries/clientpositive/join_reorder.q b/ql/src/test/queries/clientpositive/join_reorder.q index 9b87596b8f..e59b1db76e 100644 --- 
a/ql/src/test/queries/clientpositive/join_reorder.q +++ b/ql/src/test/queries/clientpositive/join_reorder.q @@ -5,70 +5,70 @@ set hive.cbo.enable=false; -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n37(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n24(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n8(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n37; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n24; +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n8; -- SORT_QUERY_RESULTS -EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key +EXPLAIN FROM T1_n37 a JOIN src c ON c.key+1=a.key SELECT a.key, a.val, c.key; -EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key +EXPLAIN FROM T1_n37 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key; -FROM T1 a JOIN src c ON c.key+1=a.key +FROM T1_n37 a JOIN src c ON c.key+1=a.key SELECT a.key, a.val, c.key; -FROM T1 a JOIN src c ON c.key+1=a.key +FROM T1_n37 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key; -EXPLAIN FROM T1 a - LEFT OUTER JOIN T2 b ON (b.key=a.key) - RIGHT OUTER JOIN T3 c ON (c.val = a.val) +EXPLAIN FROM T1_n37 a + LEFT OUTER JOIN T2_n24 b ON (b.key=a.key) + RIGHT OUTER JOIN T3_n8 c ON (c.val = a.val) SELECT a.key, b.key, a.val, c.val; -EXPLAIN FROM T1 a - LEFT OUTER JOIN T2 b ON (b.key=a.key) - RIGHT OUTER JOIN T3 c ON (c.val = a.val) +EXPLAIN FROM T1_n37 a + LEFT OUTER JOIN T2_n24 b ON (b.key=a.key) + RIGHT OUTER JOIN T3_n8 c ON (c.val = a.val) SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val; -FROM T1 a - LEFT OUTER JOIN T2 b ON (b.key=a.key) - RIGHT OUTER JOIN T3 c ON (c.val = a.val) +FROM T1_n37 a + LEFT OUTER JOIN T2_n24 b ON (b.key=a.key) + RIGHT OUTER JOIN T3_n8 c ON (c.val = a.val) SELECT a.key, b.key, a.val, c.val; -FROM T1 a - LEFT OUTER JOIN T2 b ON (b.key=a.key) - RIGHT OUTER JOIN T3 c ON (c.val = a.val) +FROM T1_n37 a + LEFT OUTER JOIN T2_n24 b ON (b.key=a.key) + RIGHT OUTER JOIN T3_n8 c ON (c.val = a.val) SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val; EXPLAIN FROM UNIQUEJOIN - PRESERVE T1 a (a.key, a.val), - PRESERVE T2 b (b.key, b.val), - PRESERVE T3 c (c.key, c.val) + PRESERVE T1_n37 a (a.key, a.val), + PRESERVE T2_n24 b (b.key, b.val), + PRESERVE T3_n8 c (c.key, c.val) SELECT a.key, b.key, c.key; EXPLAIN FROM UNIQUEJOIN - PRESERVE T1 a (a.key, a.val), - PRESERVE T2 b (b.key, b.val), - PRESERVE T3 c (c.key, c.val) + PRESERVE T1_n37 a (a.key, a.val), + PRESERVE T2_n24 b (b.key, b.val), + PRESERVE T3_n8 c (c.key, c.val) SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key; FROM UNIQUEJOIN - PRESERVE T1 a (a.key, a.val), - PRESERVE T2 b (b.key, b.val), - PRESERVE T3 c (c.key, c.val) + PRESERVE T1_n37 a (a.key, a.val), + PRESERVE T2_n24 b (b.key, b.val), + PRESERVE T3_n8 c (c.key, c.val) SELECT a.key, b.key, c.key; FROM UNIQUEJOIN - PRESERVE T1 a (a.key, a.val), - PRESERVE T2 b (b.key, b.val), - PRESERVE T3 c (c.key, c.val) + PRESERVE T1_n37 a (a.key, a.val), + PRESERVE T2_n24 b (b.key, b.val), + PRESERVE T3_n8 c (c.key, c.val) SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key; diff --git 
a/ql/src/test/queries/clientpositive/join_reorder2.q b/ql/src/test/queries/clientpositive/join_reorder2.q index 93a0dbabf7..924b2ee843 100644 --- a/ql/src/test/queries/clientpositive/join_reorder2.q +++ b/ql/src/test/queries/clientpositive/join_reorder2.q @@ -4,39 +4,39 @@ set hive.cbo.enable=false; -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n49(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n30(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n10(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T4_n3(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n49; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n30; +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n10; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n3; EXPLAIN SELECT /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key; +FROM T1_n49 a JOIN T2_n30 b ON a.key = b.key + JOIN T3_n10 c ON b.key = c.key + JOIN T4_n3 d ON c.key = d.key; SELECT /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key; +FROM T1_n49 a JOIN T2_n30 b ON a.key = b.key + JOIN T3_n10 c ON b.key = c.key + JOIN T4_n3 d ON c.key = d.key; EXPLAIN SELECT /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON a.val = c.val - JOIN T4 d ON a.key + 1 = d.key + 1; +FROM T1_n49 a JOIN T2_n30 b ON a.key = b.key + JOIN T3_n10 c ON a.val = c.val + JOIN T4_n3 d ON a.key + 1 = d.key + 1; SELECT /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON a.val = c.val - JOIN T4 d ON a.key + 1 = d.key + 1; +FROM T1_n49 a JOIN T2_n30 b ON a.key = b.key + JOIN T3_n10 c ON a.val = c.val + JOIN T4_n3 d ON a.key + 1 = d.key + 1; diff --git a/ql/src/test/queries/clientpositive/join_reorder3.q b/ql/src/test/queries/clientpositive/join_reorder3.q index 7d5a95d71e..1ebf2426c1 100644 --- a/ql/src/test/queries/clientpositive/join_reorder3.q +++ b/ql/src/test/queries/clientpositive/join_reorder3.q @@ -3,39 +3,39 @@ set hive.cbo.enable=false; -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n92(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n57(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n21(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T4_n10(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n92; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE 
T2_n57; +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n21; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n10; EXPLAIN SELECT /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key; +FROM T1_n92 a JOIN T2_n57 b ON a.key = b.key + JOIN T3_n21 c ON b.key = c.key + JOIN T4_n10 d ON c.key = d.key; SELECT /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key; +FROM T1_n92 a JOIN T2_n57 b ON a.key = b.key + JOIN T3_n21 c ON b.key = c.key + JOIN T4_n10 d ON c.key = d.key; EXPLAIN SELECT /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON a.val = c.val - JOIN T4 d ON a.key + 1 = d.key + 1; +FROM T1_n92 a JOIN T2_n57 b ON a.key = b.key + JOIN T3_n21 c ON a.val = c.val + JOIN T4_n10 d ON a.key + 1 = d.key + 1; SELECT /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON a.val = c.val - JOIN T4 d ON a.key + 1 = d.key + 1; +FROM T1_n92 a JOIN T2_n57 b ON a.key = b.key + JOIN T3_n21 c ON a.val = c.val + JOIN T4_n10 d ON a.key + 1 = d.key + 1; diff --git a/ql/src/test/queries/clientpositive/join_reorder4.q b/ql/src/test/queries/clientpositive/join_reorder4.q index 265980be59..16ed71cb54 100644 --- a/ql/src/test/queries/clientpositive/join_reorder4.q +++ b/ql/src/test/queries/clientpositive/join_reorder4.q @@ -1,18 +1,18 @@ -CREATE TABLE T1(key1 STRING, val1 STRING) STORED AS TEXTFILE; -CREATE TABLE T2(key2 STRING, val2 STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key3 STRING, val3 STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n134(key1 STRING, val1 STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n80(key2 STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n32(key3 STRING, val3 STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n134; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n80; +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n32; set hive.cbo.enable=false; -explain select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3; -select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3; +explain select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1_n134 a join T2_n80 b on a.key1=b.key2 join T3_n32 c on a.key1=c.key3; +select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1_n134 a join T2_n80 b on a.key1=b.key2 join T3_n32 c on a.key1=c.key3; -explain select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3; -select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3; +explain select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1_n134 a join T2_n80 b on a.key1=b.key2 join T3_n32 c on a.key1=c.key3; +select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1_n134 a join T2_n80 b on a.key1=b.key2 join T3_n32 c on a.key1=c.key3; -explain select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3; -select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3; +explain select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1_n134 a join T2_n80 b on a.key1=b.key2 
join T3_n32 c on a.key1=c.key3; +select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1_n134 a join T2_n80 b on a.key1=b.key2 join T3_n32 c on a.key1=c.key3; diff --git a/ql/src/test/queries/clientpositive/join_reordering_no_stats.q b/ql/src/test/queries/clientpositive/join_reordering_no_stats.q index 3ea9f0cfc1..e848c9f6fe 100644 --- a/ql/src/test/queries/clientpositive/join_reordering_no_stats.q +++ b/ql/src/test/queries/clientpositive/join_reordering_no_stats.q @@ -40,24 +40,24 @@ explain select count(1) from part_nostats,supplier_nostats,lineitem_nostats wher set hive.stats.estimate=false; explain select count(1) from part_nostats,supplier_nostats,lineitem_nostats where p_partkey = l_partkey and s_suppkey = l_suppkey; -CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) +CREATE TABLE Employee_Part_n1(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) row format delimited fields terminated by '|' stored as textfile; -LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA'); -LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK'); -LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part_n1 partition(employeeSalary='2000.0', country='USA'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n1 partition(employeeSalary='2000.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n1 partition(employeeSalary='3000.0', country='USA'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n1 partition(employeeSalary='4000.0', country='USA'); +LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n1 partition(employeeSalary='3500.0', country='UK'); +LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part_n1 partition(employeeSalary='3000.0', country='UK'); -- partitioned table set hive.stats.estimate=true; -explain select count(1) from Employee_Part,supplier_nostats,lineitem_nostats where employeeID= l_partkey and s_suppkey = l_suppkey; +explain select count(1) from Employee_Part_n1,supplier_nostats,lineitem_nostats where employeeID= l_partkey and s_suppkey = l_suppkey; set hive.stats.estimate=false; -explain select count(1) from Employee_Part,supplier_nostats,lineitem_nostats where employeeID= l_partkey and s_suppkey = l_suppkey; +explain select count(1) from Employee_Part_n1,supplier_nostats,lineitem_nostats where employeeID= l_partkey and s_suppkey = l_suppkey; -drop table Employee_Part; +drop table Employee_Part_n1; drop table supplier_nostats; drop table lineitem_nostats; drop table part_nostats; diff --git a/ql/src/test/queries/clientpositive/json_serde_tsformat.q b/ql/src/test/queries/clientpositive/json_serde_tsformat.q index 
c00450c01a..3cbd91db34 100644
--- a/ql/src/test/queries/clientpositive/json_serde_tsformat.q
+++ b/ql/src/test/queries/clientpositive/json_serde_tsformat.q
@@ -1,12 +1,12 @@
 add jar ${system:maven.local.repository}/org/apache/hive/hcatalog/hive-hcatalog-core/${system:hive.version}/hive-hcatalog-core-${system:hive.version}.jar;
-CREATE TABLE t1 (c1 int, c2 string, c3 timestamp)
+CREATE TABLE t1_n156 (c1 int, c2 string, c3 timestamp)
 ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
 WITH SERDEPROPERTIES ('timestamp.formats'='yyyy-MM-dd\'T\'HH:mm:ss')
 ;
-LOAD DATA LOCAL INPATH "../../data/files/tsformat.json" INTO TABLE t1;
+LOAD DATA LOCAL INPATH "../../data/files/tsformat.json" INTO TABLE t1_n156;
 select a.c1, a.c2, b.c3
-from t1 a join t1 b on a.c1 = b.c1;
+from t1_n156 a join t1_n156 b on a.c1 = b.c1;
-drop table t1;
+drop table t1_n156;
diff --git a/ql/src/test/queries/clientpositive/keyword_2.q b/ql/src/test/queries/clientpositive/keyword_2.q
index 7d3f20510a..9a33dc9186 100644
--- a/ql/src/test/queries/clientpositive/keyword_2.q
+++ b/ql/src/test/queries/clientpositive/keyword_2.q
@@ -1,14 +1,14 @@
 --! qt:dataset:src
-drop table varchar_udf_1;
+drop table varchar_udf_1_n1;
-create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20));
-insert overwrite table varchar_udf_1
+create table varchar_udf_1_n1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20));
+insert overwrite table varchar_udf_1_n1
 select key, value, key, value from src where key = '238' limit 1;
 select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1;
+from varchar_udf_1_n1 limit 1;
-drop table varchar_udf_1;
+drop table varchar_udf_1_n1;
diff --git a/ql/src/test/queries/clientpositive/lateral_view_multi_lateralviews.q b/ql/src/test/queries/clientpositive/lateral_view_multi_lateralviews.q
index 29e026a83b..f5050e03af 100644
--- a/ql/src/test/queries/clientpositive/lateral_view_multi_lateralviews.q
+++ b/ql/src/test/queries/clientpositive/lateral_view_multi_lateralviews.q
@@ -1,7 +1,7 @@
-CREATE TABLE t1(x5 STRUCT<x4:STRUCT<x3:ARRAY<STRUCT<x1:STRING,x2:ARRAY<STRING>>>> >);
-INSERT INTO t1 SELECT NAMED_STRUCT('x4', NAMED_STRUCT('x3', ARRAY(NAMED_STRUCT('x1', 'x1_1', 'x2', ARRAY('x2_1', 'x2_2')))));
+CREATE TABLE t1_n135(x5 STRUCT<x4:STRUCT<x3:ARRAY<STRUCT<x1:STRING,x2:ARRAY<STRING>>>> >);
+INSERT INTO t1_n135 SELECT NAMED_STRUCT('x4', NAMED_STRUCT('x3', ARRAY(NAMED_STRUCT('x1', 'x1_1', 'x2', ARRAY('x2_1', 'x2_2')))));
 SELECT c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16
-FROM t1
+FROM t1_n135
 LATERAL VIEW EXPLODE(x5.x4.x3) lv as c1
 LATERAL VIEW EXPLODE(c1.x2) lv as c2
 LATERAL VIEW EXPLODE(x5.x4.x3) lv as c3
diff --git a/ql/src/test/queries/clientpositive/lateral_view_onview.q b/ql/src/test/queries/clientpositive/lateral_view_onview.q
index 47134f1364..4d23634106 100644
--- a/ql/src/test/queries/clientpositive/lateral_view_onview.q
+++ b/ql/src/test/queries/clientpositive/lateral_view_onview.q
@@ -1,8 +1,8 @@
 --! qt:dataset:src
-CREATE TABLE lv_table( c1 STRING, c2 ARRAY<INT>, c3 INT, c4 CHAR(1));
-INSERT OVERWRITE TABLE lv_table SELECT 'abc ', array(1,2,3), 100, 't' FROM src;
+CREATE TABLE lv_table_n0( c1 STRING, c2 ARRAY<INT>, c3 INT, c4 CHAR(1));
+INSERT OVERWRITE TABLE lv_table_n0 SELECT 'abc ', array(1,2,3), 100, 't' FROM src;
-CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table;
+CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table_n0;
 EXPLAIN SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1;
 EXPLAIN SELECT myTable.* FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3;
@@ -23,9 +23,9 @@ SELECT SIZE(c2),c3,TRIM(c1),c4,myCol from lv_view LATERAL VIEW explode(array(1,2
 SELECT SIZE(c2),c3,TRIM(c1),c4,myCol from lv_view LATERAL VIEW explode(array(1,2,3)) myTab as myCol limit 3;
-CREATE TABLE lv_table1( c1 STRING, c3 INT, c4 CHAR(1), c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING);
+CREATE TABLE lv_table1_n0( c1 STRING, c3 INT, c4 CHAR(1), c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING);
 CREATE TABLE lv_table2( c1 STRING, c2 ARRAY<INT>);
-INSERT OVERWRITE TABLE lv_table1 SELECT 'abc ', 100, 't', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test' FROM src;
+INSERT OVERWRITE TABLE lv_table1_n0 SELECT 'abc ', 100, 't', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test' FROM src;
 INSERT OVERWRITE TABLE lv_table2 SELECT 'abc ', array(1,2,3) FROM src;
-EXPLAIN WITH lv_view1 AS (SELECT lv_table1.*, c2 FROM lv_table1 JOIN lv_table2 ON lv_table1.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1;
-WITH lv_view1 AS (SELECT lv_table1.*, c2 FROM lv_table1 JOIN lv_table2 ON lv_table1.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1;
\ No newline at end of file
+EXPLAIN WITH lv_view1 AS (SELECT lv_table1_n0.*, c2 FROM lv_table1_n0 JOIN lv_table2 ON lv_table1_n0.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1;
+WITH lv_view1 AS (SELECT lv_table1_n0.*, c2 FROM lv_table1_n0 JOIN lv_table2 ON lv_table1_n0.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/lateral_view_onview2.q b/ql/src/test/queries/clientpositive/lateral_view_onview2.q
index 2c4836ede9..46e2330308 100644
--- a/ql/src/test/queries/clientpositive/lateral_view_onview2.q
+++ b/ql/src/test/queries/clientpositive/lateral_view_onview2.q
@@ -1,7 +1,7 @@
 --! qt:dataset:src
-CREATE TABLE lv_table( c1 STRING, c2 ARRAY<INT>, c3 INT, c4 CHAR(1));
-INSERT OVERWRITE TABLE lv_table SELECT 'abc ', array(1,2,3), 100, 't' FROM src;
+CREATE TABLE lv_table_n1( c1 STRING, c2 ARRAY<INT>, c3 INT, c4 CHAR(1));
+INSERT OVERWRITE TABLE lv_table_n1 SELECT 'abc ', array(1,2,3), 100, 't' FROM src;
-CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table;
+CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table_n1;
 EXPLAIN SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9;
diff --git a/ql/src/test/queries/clientpositive/lb_fs_stats.q b/ql/src/test/queries/clientpositive/lb_fs_stats.q
index 08baae74bd..7cadaf95a3 100644
--- a/ql/src/test/queries/clientpositive/lb_fs_stats.q
+++ b/ql/src/test/queries/clientpositive/lb_fs_stats.q
@@ -9,10 +9,10 @@ set hive.stats.dbclass=fs;
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;
+CREATE TABLE test_tab_n0 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;
-ALTER TABLE test_tab SKEWED BY (key) ON ("484") STORED AS DIRECTORIES;
+ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484") STORED AS DIRECTORIES;
-INSERT OVERWRITE TABLE test_tab PARTITION (part = '1') SELECT * FROM src;
+INSERT OVERWRITE TABLE test_tab_n0 PARTITION (part = '1') SELECT * FROM src;
-describe formatted test_tab partition (part='1');
+describe formatted test_tab_n0 partition (part='1');
diff --git a/ql/src/test/queries/clientpositive/leftsemijoin.q b/ql/src/test/queries/clientpositive/leftsemijoin.q
index c4717f6c2e..ac6d96e62e 100644
--- a/ql/src/test/queries/clientpositive/leftsemijoin.q
+++ b/ql/src/test/queries/clientpositive/leftsemijoin.q
@@ -2,29 +2,29 @@ set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
-drop table sales;
-drop table things;
+drop table sales_n1;
+drop table things_n1;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-CREATE TABLE sales (name STRING, id INT)
+CREATE TABLE sales_n1 (name STRING, id INT)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
-CREATE TABLE things (id INT, name STRING) partitioned by (ds string)
+CREATE TABLE things_n1 (id INT, name STRING) partitioned by (ds string)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
-load data local inpath '../../data/files/sales.txt' INTO TABLE sales;
-load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23');
-load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24');
+load data local inpath '../../data/files/sales.txt' INTO TABLE sales_n1;
+load data local inpath '../../data/files/things.txt' INTO TABLE things_n1 partition(ds='2011-10-23');
+load data local inpath '../../data/files/things2.txt' INTO TABLE things_n1 partition(ds='2011-10-24');
-SELECT name,id FROM sales;
+SELECT name,id FROM sales_n1;
-SELECT id,name FROM things;
+SELECT id,name FROM things_n1;
-SELECT name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id);
+SELECT name,id FROM sales_n1 LEFT SEMI JOIN things_n1 ON (sales_n1.id = things_n1.id);
-drop table sales;
-drop table things;
+drop table sales_n1;
+drop table things_n1;
 -- HIVE-15458
 explain select part.p_type from part join (select p1.p_name from part p1, part p2 group by p1.p_name) pp ON pp.p_name = part.p_name;
diff --git a/ql/src/test/queries/clientpositive/leftsemijoin_mr.q b/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
index c9ebe0e8fa..c1304d95d3 100644
--- a/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
+++ b/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
@@ -1,20 +1,20 @@
-CREATE TABLE T1(key INT);
-LOAD DATA LOCAL INPATH '../../data/files/leftsemijoin_mr_t1.txt' INTO TABLE T1;
-CREATE TABLE T2(key INT);
-LOAD DATA LOCAL INPATH '../../data/files/leftsemijoin_mr_t2.txt' INTO TABLE T2;
+CREATE TABLE T1_n43(key INT);
+LOAD DATA LOCAL INPATH '../../data/files/leftsemijoin_mr_t1.txt' INTO TABLE T1_n43;
+CREATE TABLE T2_n27(key INT);
+LOAD DATA LOCAL INPATH '../../data/files/leftsemijoin_mr_t2.txt' INTO TABLE T2_n27;
 -- Run this query using TestMinimrCliDriver
-SELECT * FROM T1;
-SELECT * FROM T2;
+SELECT * FROM T1_n43;
+SELECT * FROM T2_n27;
 set hive.auto.convert.join=false;
 set mapred.reduce.tasks=2;
 set hive.join.emit.interval=100;
-SELECT T1.key FROM T1 LEFT SEMI JOIN (SELECT key FROM T2 SORT BY key) tmp ON (T1.key=tmp.key);
+SELECT T1_n43.key FROM T1_n43 LEFT SEMI JOIN (SELECT key FROM T2_n27 SORT BY key) tmp ON (T1_n43.key=tmp.key);
 set hive.join.emit.interval=1;
-SELECT T1.key FROM T1 LEFT SEMI JOIN (SELECT key FROM T2 SORT BY key) tmp ON (T1.key=tmp.key);
+SELECT T1_n43.key FROM T1_n43 LEFT SEMI JOIN (SELECT key FROM T2_n27 SORT BY key) tmp ON (T1_n43.key=tmp.key);
diff --git a/ql/src/test/queries/clientpositive/lineage2.q b/ql/src/test/queries/clientpositive/lineage2.q
index 4299e7478c..a3a98a03c8 100644
--- a/ql/src/test/queries/clientpositive/lineage2.q
+++ b/ql/src/test/queries/clientpositive/lineage2.q
@@ -1,54 +1,54 @@
---! qt:dataset:src1
---! qt:dataset:src
+--! qt:dataset:src1
+--! qt:dataset:src
 SET hive.vectorized.execution.enabled=false;
-set hive.mapred.mode=nonstrict;
-set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
+set hive.mapred.mode=nonstrict;
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
 drop table if exists src2;
-create table src2 as select key key2, value value2 from src1;
-
-select * from src1 where key is not null and value is not null limit 3;
-select * from src1 where key > 10 and value > 'val' order by key limit 5;
-
-drop table if exists dest1;
-create table dest1 as select * from src1;
-insert into table dest1 select * from src2;
-
-select key k, dest1.value from dest1;
-select key from src1 union select key2 from src2 order by key;
-select key k from src1 union select key2 from src2 order by k;
-
-select key, count(1) a from dest1 group by key;
-select key k, count(*) from dest1 group by key;
-select key k, count(value) from dest1 group by key;
-select value, max(length(key)) from dest1 group by value;
-select value, max(length(key)) from dest1 group by value order by value limit 5;
-
-select key, length(value) from dest1;
-select length(value) + 3 from dest1;
-select 5 from dest1;
-select 3 * 5 from dest1;
-
-drop table if exists dest2;
-create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2;
-insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2;
-insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2;
-insert into table dest2
-  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1;
-
-select * from src1 where length(key) > 2;
-select * from src1 where length(key) > 2 and value > 'a';
-
-drop table if exists dest3;
-create table dest3 as
-  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1;
-insert overwrite table dest2
-  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3;
-
-drop table if exists dest_l1;
-CREATE TABLE dest_l1(key INT, value STRING) STORED AS TEXTFILE;
-
-INSERT OVERWRITE TABLE dest_l1
+create table src2 as select key key2, value value2 from src1;
+
+select * from src1 where key is not null and value is not null limit 3;
+select * from src1 where key > 10 and value > 'val' order by key limit 5;
+
+drop table if exists dest1_n56;
+create table dest1_n56 as select * from src1;
+insert into table dest1_n56 select * from src2;
+
+select key k, dest1_n56.value from dest1_n56;
+select key from src1 union select key2 from src2 order by key;
+select key k from src1 union select key2 from src2 order by k;
+
+select key, count(1) a from dest1_n56 group by key;
+select key k, count(*) from dest1_n56 group by key;
+select key k, count(value) from dest1_n56 group by key;
+select value, max(length(key)) from dest1_n56 group by value;
+select value, max(length(key)) from dest1_n56 group by value order by value limit 5;
+
+select key, length(value) from dest1_n56;
+select length(value) + 3 from dest1_n56;
+select 5 from dest1_n56;
+select 3 * 5 from dest1_n56;
+
+drop table if exists dest2_n11;
+create table dest2_n11 as select * from src1 JOIN src2 ON src1.key = src2.key2;
+insert overwrite table dest2_n11 select * from src1 JOIN src2 ON src1.key = src2.key2;
+insert into table dest2_n11 select * from src1 JOIN src2 ON src1.key = src2.key2;
+insert into table dest2_n11
+  select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1;
+
+select * from src1 where length(key) > 2;
+select * from src1 where length(key) > 2 and value > 'a';
+
+drop table if exists dest3_n0;
+create table dest3_n0 as
+  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1;
+insert overwrite table dest2_n11
+  select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3;
+
+drop table if exists dest_l1_n0;
+CREATE TABLE dest_l1_n0(key INT, value STRING) STORED AS TEXTFILE;
+
+INSERT OVERWRITE TABLE dest_l1_n0
 SELECT j.*
 FROM (SELECT t1.key, p1.value
       FROM src1 t1
@@ -61,16 +61,16 @@ FROM (SELECT t1.key, p1.value
           ON (t2.key = p2.key)) j;
 drop table if exists emp;
-drop table if exists dept;
-drop table if exists project;
-drop table if exists tgt;
-create table emp(emp_id int, name string, mgr_id int, dept_id int);
-create table dept(dept_id int, dept_name string);
-create table project(project_id int, project_name string);
-create table tgt(dept_name string, name string,
-  emp_id int, mgr_id int, proj_id int, proj_name string);
-
-INSERT INTO TABLE tgt
+drop table if exists dept_n10;
+drop table if exists project_n10;
+drop table if exists tgt_n10;
+create table emp(emp_id int, name string, mgr_id int, dept_id int);
+create table dept_n10(dept_id int, dept_name string);
+create table project_n10(project_id int, project_name string);
+create table tgt_n10(dept_name string, name string,
+  emp_id int, mgr_id int, proj_id int, proj_name string);
+
+INSERT INTO TABLE tgt_n10
 SELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id,
   p.project_id, p.project_name
 FROM (
   SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id
@@ -78,43 +78,43 @@ FROM (
     SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id
     FROM emp e JOIN emp m ON e.emp_id = m.emp_id
   ) em
-  JOIN dept d ON d.dept_id = em.dept_id
-  ) emd JOIN project p ON emd.dept_id = p.project_id;
+  JOIN dept_n10 d ON d.dept_id = em.dept_id
+  ) emd JOIN project_n10 p ON emd.dept_id = p.project_id;
 drop table if exists dest_l2;
-create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile;
-insert into dest_l2 values(0, 1, 100, 10000);
+create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile;
+insert into dest_l2 values(0, 1, 100, 10000);
-select * from (
-  select c1 + c2 x from dest_l2
+select * from (
+  select c1 + c2 x from dest_l2
 union all
-  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x;
+  select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x;
 drop table if exists dest_l3;
-create table dest_l3 (id int, c1 string, c2 string, c3 int) stored as textfile;
-insert into dest_l3 values(0, "s1", "s2", 15);
+create table dest_l3 (id int, c1 string, c2 string, c3 int) stored as textfile;
+insert into dest_l3 values(0, "s1", "s2", 15);
-select sum(a.c1) over (partition by a.c1 order by a.id)
+select sum(a.c1) over (partition by a.c1 order by a.id)
 from dest_l2 a
 where a.c2 != 10
 group by a.c1, a.c2, a.id
-having count(a.c2) > 0;
+having count(a.c2) > 0;
-select sum(a.c1), count(b.c1), b.c2, b.c3
+select sum(a.c1), count(b.c1), b.c2, b.c3
 from dest_l2 a join dest_l3 b on (a.id = b.id)
 where a.c2 != 10 and b.c3 > 0
 group by a.c1, a.c2, a.id, b.c1, b.c2, b.c3
-having count(a.c2) > 0
-order by b.c3 limit 5;
+having count(a.c2) > 0
+order by b.c3 limit 5;
-drop table if exists t;
-create table t as
-select distinct a.c2, a.c3 from dest_l2 a
+drop table if exists t_n10;
+create table t_n10 as
+select distinct a.c2, a.c3 from dest_l2 a
 inner join dest_l3 b on (a.id = b.id)
 where a.id > 0 and b.c3 = 15;
-SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
-concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
+SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
+concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
 from src1
 GROUP BY substr(src1.key,1,1);
@@ -128,10 +128,10 @@ create table rels_exploded (identity char(32), type string,
 ep1_src_type string, ep1_type string,
 ep2_src_type string, ep2_type string,
 ep1_id char(32), ep2_id char(32));
-select identity, ep1_id from relations
+select identity, ep1_id from relations
 lateral view explode(ep1_ids) nav_rel as ep1_id;
-insert into rels_exploded select identity, type,
+insert into rels_exploded select identity, type,
 ep1_src_type, ep1_type, ep2_src_type, ep2_type, ep1_id, ep2_id
 from relations lateral view explode(ep1_ids) rel1 as ep1_id
 lateral view explode (ep2_ids) rel2 as ep2_id;
diff --git a/ql/src/test/queries/clientpositive/lineage3.q b/ql/src/test/queries/clientpositive/lineage3.q
index 86c40f06b7..5eb0c07f21 100644
--- a/ql/src/test/queries/clientpositive/lineage3.q
+++ b/ql/src/test/queries/clientpositive/lineage3.q
@@ -1,97 +1,97 @@
---! qt:dataset:src1
---! qt:dataset:alltypesorc
+--! qt:dataset:src1
+--! qt:dataset:alltypesorc
 SET hive.vectorized.execution.enabled=false;
-set hive.mapred.mode=nonstrict;
-set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
-set hive.metastore.disallow.incompatible.col.type.changes=false;
+set hive.mapred.mode=nonstrict;
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 drop table if exists d1;
-create table d1(a int);
+create table d1(a int);
-from (select a.ctinyint x, b.cstring1 y
-from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
-insert into table d1 select x + length(y);
+from (select a.ctinyint x, b.cstring1 y
+from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t_n20
+insert into table d1 select x + length(y);
 drop table if exists d2;
 create table d2(b varchar(128));
-from (select a.ctinyint x, b.cstring1 y
-from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
-insert into table d1 select x where y is null
-insert into table d2 select y where x > 0;
+from (select a.ctinyint x, b.cstring1 y
+from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t_n20
+insert into table d1 select x where y is null
+insert into table d2 select y where x > 0;
-drop table if exists t;
-create table t as
-select * from
-  (select * from
-     (select key from src1 limit 1) v1) v2;
+drop table if exists t_n20;
+create table t_n20 as
+select * from
+  (select * from
+     (select key from src1 limit 1) v1) v2;
-drop table if exists dest_l1;
-create table dest_l1(a int, b varchar(128))
+drop table if exists dest_l1_n2;
+create table dest_l1_n2(a int, b varchar(128))
   partitioned by (ds string)
   clustered by (a) into 2 buckets;
-insert into table dest_l1 partition (ds='today')
-select cint, cast(cstring1 as varchar(128)) as cs
+insert into table dest_l1_n2 partition (ds='today')
+select cint, cast(cstring1 as varchar(128)) as cs
 from alltypesorc
-where cint is not null and cint < 0 order by cint, cs limit 5;
+where cint is not null and cint < 0 order by cint, cs limit 5;
-insert into table dest_l1 partition (ds='tomorrow')
-select min(cint), cast(min(cstring1) as varchar(128)) as cs
+insert into table dest_l1_n2 partition (ds='tomorrow')
+select min(cint), cast(min(cstring1) as varchar(128)) as cs
 from alltypesorc
-where cint is not null and cboolean1 = true
-group by csmallint
-having min(cbigint) > 10;
+where cint is not null and cboolean1 = true
+group by csmallint
+having min(cbigint) > 10;
-select cint, rank() over(order by cint) from alltypesorc
-where cint > 10 and cint < 10000 limit 10;
+select cint, rank() over(order by cint) from alltypesorc
+where cint > 10 and cint < 10000 limit 10;
-select a.ctinyint, a.cint, count(a.cdouble)
-  over(partition by a.ctinyint order by a.cint desc
+select a.ctinyint, a.cint, count(a.cdouble)
+  over(partition by a.ctinyint order by a.cint desc
    rows between 1 preceding and 1 following)
-from alltypesorc a inner join alltypesorc b on a.cint = b.cbigint
-order by a.ctinyint, a.cint;
+from alltypesorc a inner join alltypesorc b on a.cint = b.cbigint
+order by a.ctinyint, a.cint;
 with v2 as
-  (select cdouble, count(cint) over() a,
-   sum(cint + cbigint) over(partition by cboolean1) b
-   from (select * from alltypesorc) v1)
-select cdouble, a, b, a + b, cdouble + a from v2
-where cdouble is not null
-order by cdouble, a, b limit 5;
-
-select a.cbigint, a.ctinyint, b.cint, b.ctinyint
+  (select cdouble, count(cint) over() a,
+   sum(cint + cbigint) over(partition by cboolean1) b
+   from (select * from alltypesorc) v1)
+select cdouble, a, b, a + b, cdouble + a from v2
+where cdouble is not null
+order by cdouble, a, b limit 5;
+
+select a.cbigint, a.ctinyint, b.cint, b.ctinyint
 from
-  (select ctinyint, cbigint from alltypesorc
+  (select ctinyint, cbigint from alltypesorc
 union all
-   select ctinyint, cbigint from alltypesorc) a
+   select ctinyint, cbigint from alltypesorc) a
 inner join alltypesorc b
-  on (a.ctinyint = b.ctinyint)
-where b.ctinyint < 100 and a.cbigint is not null and b.cint is not null
-order by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5;
+  on (a.ctinyint = b.ctinyint)
+where b.ctinyint < 100 and a.cbigint is not null and b.cint is not null
+order by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5;
-select x.ctinyint, x.cint, c.cbigint-100, c.cstring1
+select x.ctinyint, x.cint, c.cbigint-100, c.cstring1
 from alltypesorc c
 join (
-   select a.ctinyint ctinyint, b.cint cint
-   from (select * from alltypesorc a where cboolean1=false) a
-   join alltypesorc b on (a.cint = b.cbigint - 224870380)
-   ) x on (x.cint = c.cint)
-where x.ctinyint > 10
-and x.cint < 4.5
-and x.ctinyint + length(c.cstring2) < 1000;
-
-select c1, x2, x3
+   select a.ctinyint ctinyint, b.cint cint
+   from (select * from alltypesorc a where cboolean1=false) a
+   join alltypesorc b on (a.cint = b.cbigint - 224870380)
+   ) x on (x.cint = c.cint)
+where x.ctinyint > 10
+and x.cint < 4.5
+and x.ctinyint + length(c.cstring2) < 1000;
+
+select c1, x2, x3
 from (
-  select c1, min(c2) x2, sum(c3) x3
+  select c1, min(c2) x2, sum(c3) x3
   from (
-    select c1, c2, c3
+    select c1, c2, c3
     from (
-      select cint c1, ctinyint c2, min(cbigint) c3
+      select cint c1, ctinyint c2, min(cbigint) c3
       from alltypesorc
-      where cint is not null
-      group by cint, ctinyint
-      order by cint, ctinyint
-      limit 5
+      where cint is not null
+      group by cint, ctinyint
+      order by cint, ctinyint
+      limit 5
     ) x
   ) x2
 group by c1
@@ -99,53 +99,53 @@ from (
 where x2 > 0
 order by x2, c1 desc;
-select key, value from src1
-where key in (select key+18 from src1) order by key;
+select key, value from src1
+where key in (select key+18 from src1) order by key;
-select * from src1 a
+select * from src1 a
 where exists
-  (select cint from alltypesorc b
-   where a.key = b.ctinyint + 300)
+  (select cint from alltypesorc b
+   where a.key = b.ctinyint + 300)
 and key > 300;
-select key, value from src1
-where key not in (select key+18 from src1) order by key;
+select key, value from src1
+where key not in (select key+18 from src1) order by key;
-select * from src1 a
-where not exists
-  (select cint from alltypesorc b
-   where a.key = b.ctinyint + 300)
+select * from src1 a
+where not exists
+  (select cint from alltypesorc b
+   where a.key = b.ctinyint + 300)
 and key > 300;
-with t as (select key x, value y from src1 where key > '2')
-select x, y from t where y > 'v' order by x, y limit 5;
+with t_n20 as (select key x, value y from src1 where key > '2')
+select x, y from t_n20 where y > 'v' order by x, y limit 5;
-from (select key x, value
y from src1 where key > '2') t -select x, y where y > 'v' order by x, y limit 5; +from (select_n20 key x, value y from src1 where key > '2') t_n20 +select_n20 x, y where y > 'v' order by x, y limit_n20 5; drop view if exists dest_v1; create view dest_v1 as - select ctinyint, cint from alltypesorc where ctinyint is not null; + select_n20 ctinyint_n20, cint_n20 from alltypesorc where ctinyint_n20 is not_n20 null; -select * from dest_v1 order by ctinyint, cint limit 2; +select_n20 * from dest_v1 order by ctinyint_n20, cint_n20 limit_n20 2; -alter view dest_v1 as select ctinyint from alltypesorc; +alter view dest_v1 as select_n20 ctinyint_n20 from alltypesorc; -select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t -where ctinyint > 10 order by ctinyint limit 2; +select_n20 t.ctinyint_n20 from (select_n20 * from dest_v1 where ctinyint_n20 is not_n20 null) t_n20 +where ctinyint_n20 > 10 order by ctinyint_n20 limit_n20 2; drop view if exists dest_v2; -create view dest_v2 (a, b) as select c1, x2 +create view dest_v2 (a, b) as select_n20 c1, x2 from ( - select c1, min(c2) x2 + select_n20 c1, min(c2) x2 from ( - select c1, c2, c3 + select_n20 c1, c2, c3 from ( - select cint c1, ctinyint c2, min(cfloat) c3 + select_n20 cint_n20 c1, ctinyint_n20 c2, min(cfloat_n20) c3 from alltypesorc - group by cint, ctinyint - order by cint, ctinyint - limit 1 + group by cint_n20, ctinyint_n20 + order by cint_n20, ctinyint_n20 + limit_n20 1 ) x ) x2 group by c1 @@ -154,56 +154,56 @@ order by x2,c1 desc; drop view if exists dest_v3; create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as - select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1 + select_n20 x.csmallint_n20, x.cbigint_n20 bint1, x.ctinyint_n20, c.cbigint_n20 bint2, x.cint_n20, x.cfloat_n20, c.cstring1 from alltypesorc c join ( - select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2, - a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint - from ( select * from alltypesorc a where cboolean1=true ) a - join alltypesorc b on (a.csmallint = b.cint) - ) x on (x.ctinyint = c.cbigint) - where x.csmallint=11 - and x.cint > 899 - and x.cfloat > 4.5 + select_n20 a.csmallint_n20 csmallint_n20, a.ctinyint_n20 ctinyint_n20, a.cstring2 cstring2, + a.cint_n20 cint_n20, a.cstring1 ctring1, b.cfloat_n20 cfloat_n20, b.cbigint_n20 cbigint_n20 + from ( select_n20 * from alltypesorc a where cboolean1=true ) a + join alltypesorc b on (a.csmallint_n20 = b.cint_n20) + ) x on (x.ctinyint_n20 = c.cbigint_n20) + where x.csmallint_n20=11 + and x.cint_n20 > 899 + and x.cfloat_n20 > 4.5 and c.cstring1 < '7' - and x.cint + x.cfloat + length(c.cstring1) < 1000; + and x.cint_n20 + x.cfloat_n20 + length(c.cstring1) < 1000; alter view dest_v3 as - select * from ( - select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a, - count(b.cstring1) x, b.cboolean1 - from alltypesorc a join alltypesorc b on (a.cint = b.cint) - where a.cboolean2 = true and b.cfloat > 0 - group by a.ctinyint, a.csmallint, b.cboolean1 - having count(a.cint) > 10 - order by a, x, b.cboolean1 limit 10) t; + select_n20 * from ( + select_n20 sum(a.ctinyint_n20) over (partition by a.csmallint_n20 order by a.csmallint_n20) a, + count_n20(b.cstring1) x, b.cboolean1 + from alltypesorc a join alltypesorc b on (a.cint_n20 = b.cint_n20) + where a.cboolean2 = true and b.cfloat_n20 > 0 + group by a.ctinyint_n20, a.csmallint_n20, b.cboolean1 + having count_n20(a.cint_n20) > 10 + order by a, x, b.cboolean1 limit_n20 10) 
t_n20; -select * from dest_v3 limit 2; +select_n20 * from dest_v3 limit_n20 2; drop table if exists src_dp; -create table src_dp (first string, word string, year int, month int, day int); +create table src_dp (first_n20 string, word string, year int_n20, month int_n20, day int_n20); drop table if exists dest_dp1; -create table dest_dp1 (first string, word string) partitioned by (year int); +create table dest_dp1 (first_n20 string, word string) partitioned by (year int_n20); drop table if exists dest_dp2; -create table dest_dp2 (first string, word string) partitioned by (y int, m int); +create table dest_dp2 (first_n20 string, word string) partitioned by (y int_n20, m int_n20); drop table if exists dest_dp3; -create table dest_dp3 (first string, word string) partitioned by (y int, m int, d int); +create table dest_dp3 (first_n20 string, word string) partitioned by (y int_n20, m int_n20, d int_n20); -set hive.exec.dynamic.partition.mode=nonstrict; +set_n20 hive.exec.dynamic.partition.mode=nonstrict_n20; -insert into dest_dp1 partition (year) select first, word, year from src_dp; -insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp; -insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0; -insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0; +insert_n20 into dest_dp1 partition (year) select_n20 first_n20, word, year from src_dp; +insert_n20 into dest_dp2 partition (y, m) select_n20 first_n20, word, year, month from src_dp; +insert_n20 into dest_dp2 partition (y=0, m) select_n20 first_n20, word, month from src_dp where year=0; +insert_n20 into dest_dp3 partition (y=0, m, d) select_n20 first_n20, word, month m, day d from src_dp where year=0; drop table if exists src_dp1; -create table src_dp1 (f string, w string, m int); +create table src_dp1 (f string, w string, m int_n20); from src_dp, src_dp1 -insert into dest_dp1 partition (year) select first, word, year -insert into dest_dp2 partition (y, m) select first, word, year, month -insert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d where year=2 -insert into dest_dp2 partition (y=1, m) select f, w, m -insert into dest_dp1 partition (year=0) select f, w; +insert_n20 into dest_dp1 partition (year) select_n20 first_n20, word, year +insert_n20 into dest_dp2 partition (y, m) select_n20 first_n20, word, year, month +insert_n20 into dest_dp3 partition (y=2, m, d) select_n20 first_n20, word, month m, day d where year=2 +insert_n20 into dest_dp2 partition (y=1, m) select_n20 f, w, m +insert_n20 into dest_dp1 partition (year=0) select_n20 f, w; -reset hive.metastore.disallow.incompatible.col.type.changes; +reset_n20 hive.metastore.disallow.incompatible.col.type.changes; diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_1.q b/ql/src/test/queries/clientpositive/list_bucket_dml_1.q index 7196d6adc1..23e303fe5e 100644 --- a/ql/src/test/queries/clientpositive/list_bucket_dml_1.q +++ b/ql/src/test/queries/clientpositive/list_bucket_dml_1.q @@ -12,7 +12,7 @@ set mapred.input.dir.recursive=true; -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) -- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +create table list_bucketing_dynamic_part_n0 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES @@ -20,22 +20,22 @@ stored as DIRECTORIES -- list bucketing DML explain extended -insert overwrite table 
list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'; -insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'; +insert overwrite table list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'; +insert overwrite table list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'; -- check DML result -desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11'); -desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12'); +desc formatted list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr='11'); +desc formatted list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr='12'); select count(1) from srcpart where ds='2008-04-08'; -select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'; +select count(1) from list_bucketing_dynamic_part_n0 where ds='2008-04-08'; select key, value from srcpart where ds='2008-04-08' and hr='11' and key = "484"; set hive.optimize.listbucketing=true; explain extended -select key, value from list_bucketing_dynamic_part where ds='2008-04-08' and hr='11' and key = "484"; -select key, value from list_bucketing_dynamic_part where ds='2008-04-08' and hr='11' and key = "484"; +select key, value from list_bucketing_dynamic_part_n0 where ds='2008-04-08' and hr='11' and key = "484"; +select key, value from list_bucketing_dynamic_part_n0 where ds='2008-04-08' and hr='11' and key = "484"; -- clean up resources -drop table list_bucketing_dynamic_part; +drop table list_bucketing_dynamic_part_n0; diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_11.q b/ql/src/test/queries/clientpositive/list_bucket_dml_11.q index 7ebf856fbc..e0acf2aeec 100644 --- a/ql/src/test/queries/clientpositive/list_bucket_dml_11.q +++ b/ql/src/test/queries/clientpositive/list_bucket_dml_11.q @@ -11,7 +11,7 @@ set hive.merge.mapredfiles=false; -- list bucketing DML: static partition. multiple skewed columns. -- create a skewed table -create table list_bucketing_static_part (key String, value String) +create table list_bucketing_static_part_n3 (key String, value String) partitioned by (ds String, hr String) skewed by (value) on ('val_466','val_287','val_82') stored as DIRECTORIES @@ -19,19 +19,19 @@ create table list_bucketing_static_part (key String, value String) -- list bucketing DML without merge. use bucketize to generate a few small files. 
 explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n3 partition (ds = '2008-04-08', hr = '11')
 select key, value from src;
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n3 partition (ds = '2008-04-08', hr = '11')
 select key, value from src;
 -- check DML result
-show partitions list_bucketing_static_part;
-desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11');
+show partitions list_bucketing_static_part_n3;
+desc formatted list_bucketing_static_part_n3 partition (ds='2008-04-08', hr='11');
 set hive.optimize.listbucketing=true;
 explain extended
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466";
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466";
+select key, value from list_bucketing_static_part_n3 where ds='2008-04-08' and hr='11' and value = "val_466";
+select key, value from list_bucketing_static_part_n3 where ds='2008-04-08' and hr='11' and value = "val_466";
-drop table list_bucketing_static_part;
+drop table list_bucketing_static_part_n3;
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_12.q b/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
index daaffbf04f..d81355aa9b 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
@@ -10,7 +10,7 @@ set hive.merge.mapredfiles=false;
 -- SORT_QUERY_RESULTS
 -- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string)
+create table list_bucketing_mul_col_n0 (col1 String, col2 String, col3 String, col4 String, col5 string)
 partitioned by (ds String, hr String)
 skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
 stored as DIRECTORIES
@@ -18,27 +18,27 @@ create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4
 -- list bucketing DML
 explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_mul_col_n0 partition (ds = '2008-04-08', hr = '11')
 select 1, key, 1, value, 1 from src;
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_mul_col_n0 partition (ds = '2008-04-08', hr = '11')
 select 1, key, 1, value, 1 from src;
 -- check DML result
-show partitions list_bucketing_mul_col;
-desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11');
+show partitions list_bucketing_mul_col_n0;
+desc formatted list_bucketing_mul_col_n0 partition (ds='2008-04-08', hr='11');
 set hive.optimize.listbucketing=true;
 explain extended
-select * from list_bucketing_mul_col
+select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466";
-select * from list_bucketing_mul_col
+select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466";
 explain extended
-select * from list_bucketing_mul_col
+select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382";
-select * from list_bucketing_mul_col
+select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382";
-drop table list_bucketing_mul_col;
+drop table list_bucketing_mul_col_n0;
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_2.q b/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
index 948eabe4ad..b80e51d013 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
@@ -24,7 +24,7 @@ set hive.stats.reliable=true;
 -- 87 000001_0
 -- create a skewed table
-create table list_bucketing_static_part (key String, value String)
+create table list_bucketing_static_part_n4 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
 stored as DIRECTORIES
@@ -32,42 +32,42 @@ create table list_bucketing_static_part (key String, value String)
 -- list bucketing DML without merge. use bucketize to generate a few small files.
 explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n4 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n4 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 -- check DML result
-show partitions list_bucketing_static_part;
-desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11');
+show partitions list_bucketing_static_part_n4;
+desc formatted list_bucketing_static_part_n4 partition (ds='2008-04-08', hr='11');
 select count(1) from srcpart where ds = '2008-04-08';
-select count(*) from list_bucketing_static_part;
+select count(*) from list_bucketing_static_part_n4;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.optimize.listbucketing=true;
 explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';
-select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';
+select * from list_bucketing_static_part_n4 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';
+select * from list_bucketing_static_part_n4 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';
 select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484';
 -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
 -- but query should succeed for 51 or 51 and val_14
 select * from srcpart where ds = '2008-04-08' and key = '51';
-select * from list_bucketing_static_part where key = '51';
+select * from list_bucketing_static_part_n4 where key = '51';
 select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14';
-select * from list_bucketing_static_part where key = '51' and value = 'val_14';
+select * from list_bucketing_static_part_n4 where key = '51' and value = 'val_14';
 -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
 select count(1) from srcpart where ds = '2008-04-08' and key < '51';
-select count(1) from list_bucketing_static_part where key < '51';
+select count(1) from list_bucketing_static_part_n4 where key < '51';
 select count(1) from srcpart where ds = '2008-04-08' and key <= '51';
-select count(1) from list_bucketing_static_part where key <= '51';
+select count(1) from list_bucketing_static_part_n4 where key <= '51';
 select count(1) from srcpart where ds = '2008-04-08' and key > '51';
-select count(1) from list_bucketing_static_part where key > '51';
+select count(1) from list_bucketing_static_part_n4 where key > '51';
 select count(1) from srcpart where ds = '2008-04-08' and key >= '51';
-select count(1) from list_bucketing_static_part where key >= '51';
+select count(1) from list_bucketing_static_part_n4 where key >= '51';
 -- clean up
-drop table list_bucketing_static_part;
+drop table list_bucketing_static_part_n4;
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_3.q b/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
index e66861ec39..08c8ce2ff0 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
@@ -12,23 +12,23 @@ set mapred.input.dir.recursive=true;
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- create a skewed table
-create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES;
+create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES;
 -- list bucketing DML
 explain extended
-insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08';
-insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08';
+insert overwrite table list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08';
+insert overwrite table list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08';
 -- check DML result
-desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11');
+desc formatted list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11');
 select count(1) from srcpart where ds='2008-04-08';
-select count(1) from list_bucketing_static_part where ds='2008-04-08';
+select count(1) from list_bucketing_static_part_n1 where ds='2008-04-08';
 select key, value from srcpart where ds='2008-04-08' and hr='11' and key = "484";
 set hive.optimize.listbucketing=true;
 explain extended
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and key = "484";
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and key = "484";
+select key, value from list_bucketing_static_part_n1 where ds='2008-04-08' and hr='11' and key = "484";
+select key, value from list_bucketing_static_part_n1 where ds='2008-04-08' and hr='11' and key = "484";
 -- clean up resources
-drop table list_bucketing_static_part;
+drop table list_bucketing_static_part_n1;
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_4.q b/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
index c19ebe7417..a13915e18b 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
@@ -27,7 +27,7 @@ set hive.merge.mapredfiles=false;
 -- 118 000001_0
 -- create a skewed table
-create table list_bucketing_static_part (key String, value String)
+create table list_bucketing_static_part_n2 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
 stored as DIRECTORIES
@@ -35,39 +35,39 @@ create table list_bucketing_static_part (key String, value String)
 -- list bucketing DML without merge. use bucketize to generate a few small files.
 explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 -- check DML result
-show partitions list_bucketing_static_part;
-desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11');
+show partitions list_bucketing_static_part_n2;
+desc formatted list_bucketing_static_part_n2 partition (ds='2008-04-08', hr='11');
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 -- list bucketing DML with merge. use bucketize to generate a few small files.
 explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 -- check DML result
-show partitions list_bucketing_static_part;
-desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11');
+show partitions list_bucketing_static_part_n2;
+desc formatted list_bucketing_static_part_n2 partition (ds='2008-04-08', hr='11');
 select count(1) from srcpart where ds = '2008-04-08';
-select count(*) from list_bucketing_static_part;
+select count(*) from list_bucketing_static_part_n2;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.optimize.listbucketing=true;
 explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';
-select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';
+select * from list_bucketing_static_part_n2 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';
+select * from list_bucketing_static_part_n2 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';
 select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484';
 -- clean up
-drop table list_bucketing_static_part;
+drop table list_bucketing_static_part_n2;
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_5.q b/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
index bd6a5cb728..bbfb317d10 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
@@ -13,28 +13,28 @@ set mapred.input.dir.recursive=true;
 -- SORT_QUERY_RESULTS
 -- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String)
+create table list_bucketing_dynamic_part_n1 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
 stored as DIRECTORIES;
 -- list bucketing DML
 explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08';
-insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08';
+insert overwrite table list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08';
+insert overwrite table list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08';
 -- check DML result
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11');
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12');
+desc formatted list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr='11');
+desc formatted list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr='12');
 select count(1) from srcpart where ds='2008-04-08';
-select count(1) from list_bucketing_dynamic_part where ds='2008-04-08';
+select count(1) from list_bucketing_dynamic_part_n1 where ds='2008-04-08';
 select key, value from srcpart where ds='2008-04-08' and key = "103" and value ="val_103";
 set hive.optimize.listbucketing=true;
 explain extended
-select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103";
-select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103";
+select key, value, ds, hr from list_bucketing_dynamic_part_n1 where ds='2008-04-08' and key = "103" and value ="val_103";
+select key, value, ds, hr from list_bucketing_dynamic_part_n1 where ds='2008-04-08' and key = "103" and value ="val_103";
 -- clean up resources
-drop table list_bucketing_dynamic_part;
+drop table list_bucketing_dynamic_part_n1;
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_6.q b/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
index 6cb29cb2a3..b9a526b5ef 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
@@ -50,7 +50,7 @@ set hive.merge.mapredfiles=false;
 -- SORT_QUERY_RESULTS
 -- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String)
+create table list_bucketing_dynamic_part_n3 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
 stored as DIRECTORIES
@@ -58,42 +58,42 @@ create table list_bucketing_dynamic_part (key String, value String)
 -- list bucketing DML without merge. use bucketize to generate a few small files.
 explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08';
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08';
 -- check DML result
-show partitions list_bucketing_dynamic_part;
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1');
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1');
+show partitions list_bucketing_dynamic_part_n3;
+desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='a1');
+desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='b1');
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 -- list bucketing DML with merge. use bucketize to generate a few small files.
 explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08';
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08';
 -- check DML result
-show partitions list_bucketing_dynamic_part;
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1');
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1');
+show partitions list_bucketing_dynamic_part_n3;
+desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='a1');
+desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='b1');
 select count(1) from srcpart where ds = '2008-04-08';
-select count(*) from list_bucketing_dynamic_part;
+select count(*) from list_bucketing_dynamic_part_n3;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.optimize.listbucketing=true;
 explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484';
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484';
+select * from list_bucketing_dynamic_part_n3 where key = '484' and value = 'val_484';
+select * from list_bucketing_dynamic_part_n3 where key = '484' and value = 'val_484';
 select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484';
 -- clean up
-drop table list_bucketing_dynamic_part;
+drop table list_bucketing_dynamic_part_n3;
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
index f215852dcf..87f2624d4a 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
@@ -51,7 +51,7 @@ set hive.merge.mapredfiles=false;
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String)
+create table list_bucketing_dynamic_part_n2 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
 stored as DIRECTORIES
@@ -59,32 +59,32 @@ create table list_bucketing_dynamic_part (key String, value String)
 -- list bucketing DML without merge. use bucketize to generate a few small files.
 explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = '2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08';
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = '2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08';
 -- check DML result
-show partitions list_bucketing_dynamic_part;
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1');
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1');
+show partitions list_bucketing_dynamic_part_n2;
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='a1');
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1');
 -- concatenate the partition and it will merge files
-alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate;
+alter table list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1') concatenate;
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1');
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1');
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 select count(1) from srcpart where ds = '2008-04-08';
-select count(*) from list_bucketing_dynamic_part;
+select count(*) from list_bucketing_dynamic_part_n2;
 explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484';
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484';
+select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 'val_484';
+select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 'val_484';
 select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' order by hr;
 -- clean up
-drop table list_bucketing_dynamic_part;
+drop table list_bucketing_dynamic_part_n2;
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
index b9de4307a4..e130f05e82 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
@@ -27,7 +27,7 @@ set hive.merge.mapredfiles=false;
 -- 118 000001_0
 -- create a skewed table
-create table list_bucketing_static_part (key String, value String)
+create table list_bucketing_static_part_n0 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key) on ('484','103')
 stored as DIRECTORIES
@@ -35,39 +35,39 @@ create table list_bucketing_static_part (key String, value String)
 -- list bucketing DML without merge. use bucketize to generate a few small files.
explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08'; -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08'; -- check DML result -show partitions list_bucketing_static_part; -desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11'); +show partitions list_bucketing_static_part_n0; +desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', hr='11'); set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; -- list bucketing DML with merge. use bucketize to generate a few small files. explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08'; -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08'; -- check DML result -show partitions list_bucketing_static_part; -desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11'); +show partitions list_bucketing_static_part_n0; +desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', hr='11'); select count(1) from srcpart where ds = '2008-04-08'; -select count(*) from list_bucketing_static_part; +select count(*) from list_bucketing_static_part_n0; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.optimize.listbucketing=true; explain extended -select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484'; -select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484'; +select * from list_bucketing_static_part_n0 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484'; +select * from list_bucketing_static_part_n0 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484'; select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'; -- clean up -drop table list_bucketing_static_part; +drop table list_bucketing_static_part_n0; diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q index 9af9132055..a5f5522a46 100644 --- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q +++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q @@ -18,35 +18,35 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- 2. 
query result is right -- create a skewed table -create table fact_daily (key String, value String) +create table fact_daily_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES; -insert overwrite table fact_daily partition (ds = '1', hr = '4') +insert overwrite table fact_daily_n2 partition (ds = '1', hr = '4') select key, value from src; -describe formatted fact_daily PARTITION (ds = '1', hr='4'); +describe formatted fact_daily_n2 PARTITION (ds = '1', hr='4'); -SELECT count(1) FROM fact_daily WHERE ds='1' and hr='4'; +SELECT count(1) FROM fact_daily_n2 WHERE ds='1' and hr='4'; -- pruner only pick up default directory -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484'; +explain extended SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and value= 'val_484'; -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484'; +SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and value= 'val_484'; -- pruner only pick up default directory -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406'; +explain extended SELECT key FROM fact_daily_n2 WHERE ds='1' and hr='4' and key= '406'; -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406'; +SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and key= '406'; -- pruner only pick up skewed-value directory -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')); +explain extended SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')); -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')); +SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')); -- clean up -drop table fact_daily; +drop table fact_daily_n2; diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q index b68ec64ca5..4020063aea 100644 --- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q +++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q @@ -19,40 +19,40 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- 2. query result is right -- create a skewed table -create table fact_daily (key String, value String) +create table fact_daily_n3 (key String, value String) partitioned by (ds String, hr String) ; -- partition no skew -insert overwrite table fact_daily partition (ds = '1', hr = '1') +insert overwrite table fact_daily_n3 partition (ds = '1', hr = '1') select key, value from src; -describe formatted fact_daily PARTITION (ds = '1', hr='1'); +describe formatted fact_daily_n3 PARTITION (ds = '1', hr='1'); -- partition. 
skewed value is 484/238 -alter table fact_daily skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES; -insert overwrite table fact_daily partition (ds = '1', hr = '2') +alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES; +insert overwrite table fact_daily_n3 partition (ds = '1', hr = '2') select key, value from src; -describe formatted fact_daily PARTITION (ds = '1', hr='2'); +describe formatted fact_daily_n3 PARTITION (ds = '1', hr='2'); -- another partition. skewed value is 327 -alter table fact_daily skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES; -insert overwrite table fact_daily partition (ds = '1', hr = '3') +alter table fact_daily_n3 skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES; +insert overwrite table fact_daily_n3 partition (ds = '1', hr = '3') select key, value from src; -describe formatted fact_daily PARTITION (ds = '1', hr='3'); +describe formatted fact_daily_n3 PARTITION (ds = '1', hr='3'); -- query non-skewed partition explain extended -select * from fact_daily where ds = '1' and hr='1' and key='145'; -select * from fact_daily where ds = '1' and hr='1' and key='145'; +select * from fact_daily_n3 where ds = '1' and hr='1' and key='145'; +select * from fact_daily_n3 where ds = '1' and hr='1' and key='145'; explain extended -select count(*) from fact_daily where ds = '1' and hr='1'; -select count(*) from fact_daily where ds = '1' and hr='1'; +select count(*) from fact_daily_n3 where ds = '1' and hr='1'; +select count(*) from fact_daily_n3 where ds = '1' and hr='1'; -- query skewed partition explain extended -SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and value='val_484'); -SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and value='val_484'); +SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='2' and (key='484' and value='val_484'); +SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='2' and (key='484' and value='val_484'); -- query another skewed partition explain extended -SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and value='val_327'); -SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and value='val_327'); +SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='3' and (key='327' and value='val_327'); +SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='3' and (key='327' and value='val_327'); diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q index 9032542089..54ab75eec4 100644 --- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q +++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q @@ -16,19 +16,19 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- 1. pruner only pick up right directory -- 2. 
query result is right --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query --- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING); -CREATE TABLE fact_tz(x int) PARTITIONED BY (ds STRING, hr STRING) +-- create 2 tables: fact_daily_n4 and fact_tz_n1 +-- fact_daily_n4 will be used for list bucketing query +-- fact_tz_n1 is a table used to prepare data and test directories +CREATE TABLE fact_daily_n4(x int) PARTITIONED BY (ds STRING); +CREATE TABLE fact_tz_n1(x int) PARTITIONED BY (ds STRING, hr STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz'; -- create /fact_tz/ds=1/hr=1 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +INSERT OVERWRITE TABLE fact_tz_n1 PARTITION (ds='1', hr='1') SELECT key FROM src WHERE key=484; -- create /fact_tz/ds=1/hr=2 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +INSERT OVERWRITE TABLE fact_tz_n1 PARTITION (ds='1', hr='2') SELECT key+11 FROM src WHERE key=484; dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1; @@ -36,28 +36,28 @@ dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=1 ${hiveconf:hi dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=2 ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME; dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1; --- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484); -ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE'); -ALTER TABLE fact_daily ADD PARTITION (ds='1') +-- switch fact_daily_n4 to skewed table and point its location to /fact_tz/ds=1 +alter table fact_daily_n4 skewed by (x) on (484); +ALTER TABLE fact_daily_n4 SET TBLPROPERTIES('EXTERNAL'='TRUE'); +ALTER TABLE fact_daily_n4 ADD PARTITION (ds='1') LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1'; -- set List Bucketing location map -alter table fact_daily PARTITION (ds = '1') set skewed location (484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME'); -describe formatted fact_daily PARTITION (ds = '1'); +alter table fact_daily_n4 PARTITION (ds = '1') set skewed location (484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME'); +describe formatted fact_daily_n4 PARTITION (ds = '1'); -SELECT * FROM fact_daily WHERE ds='1'; +SELECT * FROM fact_daily_n4 WHERE ds='1'; -- pruner only pick up skewed-value directory -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and x=484; +explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=484; -- List Bucketing Query -SELECT x FROM fact_daily WHERE ds='1' and x=484; +SELECT x FROM fact_daily_n4 WHERE ds='1' and x=484; -- pruner only pick up default directory since x equal to non-skewed value -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and x=495; +explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=495; -- List Bucketing Query -SELECT x FROM fact_daily WHERE ds='1' and x=495; -explain extended SELECT x FROM fact_daily WHERE 
ds='1' and x=1; -SELECT x FROM fact_daily WHERE ds='1' and x=1; +SELECT x FROM fact_daily_n4 WHERE ds='1' and x=495; +explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=1; +SELECT x FROM fact_daily_n4 WHERE ds='1' and x=1; diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q index 1fa0638bf9..77974cf1df 100644 --- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q +++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q @@ -17,19 +17,19 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- 1. pruner only pick up right directory -- 2. query result is right --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query --- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int, y STRING) PARTITIONED BY (ds STRING); -CREATE TABLE fact_tz(x int, y STRING) PARTITIONED BY (ds STRING, hr STRING) +-- create 2 tables: fact_daily_n5 and fact_tz_n2 +-- fact_daily_n5 will be used for list bucketing query +-- fact_tz_n2 is a table used to prepare data and test directories +CREATE TABLE fact_daily_n5(x int, y STRING) PARTITIONED BY (ds STRING); +CREATE TABLE fact_tz_n2(x int, y STRING) PARTITIONED BY (ds STRING, hr STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz'; -- create /fact_tz/ds=1/hr=1 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +INSERT OVERWRITE TABLE fact_tz_n2 PARTITION (ds='1', hr='1') SELECT key, value FROM src WHERE key=484; -- create /fact_tz/ds=1/hr=2 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +INSERT OVERWRITE TABLE fact_tz_n2 PARTITION (ds='1', hr='2') SELECT key+11, value FROM src WHERE key=484; dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1; @@ -37,39 +37,39 @@ dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=1 ${hiveconf:hi dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=2 ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME; dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1; --- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484); -ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE'); -ALTER TABLE fact_daily ADD PARTITION (ds='1') +-- switch fact_daily_n5 to skewed table and point its location to /fact_tz/ds=1 +alter table fact_daily_n5 skewed by (x) on (484); +ALTER TABLE fact_daily_n5 SET TBLPROPERTIES('EXTERNAL'='TRUE'); +ALTER TABLE fact_daily_n5 ADD PARTITION (ds='1') LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1'; -- set List Bucketing location map -alter table fact_daily PARTITION (ds = '1') set skewed location (484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME'); -describe formatted fact_daily PARTITION (ds = '1'); +alter table fact_daily_n5 PARTITION (ds = '1') set skewed location (484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME'); +describe formatted fact_daily_n5 PARTITION (ds = '1'); -SELECT * FROM fact_daily WHERE ds='1'; +SELECT * FROM fact_daily_n5 WHERE ds='1'; -- The first subquery -- explain plan shows 
which directory selected: Truncated Path -> Alias -explain extended select x from (select x from fact_daily where ds = '1') subq where x = 484; +explain extended select x from (select x from fact_daily_n5 where ds = '1') subq where x = 484; -- List Bucketing Query -select x from (select * from fact_daily where ds = '1') subq where x = 484; +select x from (select * from fact_daily_n5 where ds = '1') subq where x = 484; -- The second subquery -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484; +explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily_n5 where ds ='1') subq where x1 = 484; -- List Bucketing Query -select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484; +select x1, y1 from(select x as x1, y as y1 from fact_daily_n5 where ds ='1') subq where x1 = 484; -- The third subquery -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select y, count(1) from fact_daily where ds ='1' and x = 484 group by y; +explain extended select y, count(1) from fact_daily_n5 where ds ='1' and x = 484 group by y; -- List Bucketing Query -select y, count(1) from fact_daily where ds ='1' and x = 484 group by y; +select y, count(1) from fact_daily_n5 where ds ='1' and x = 484 group by y; -- The fourth subquery -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484;; +explain extended select x, c from (select x, count(1) as c from fact_daily_n5 where ds = '1' group by x) subq where x = 484;; -- List Bucketing Query -select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484; +select x, c from (select x, count(1) as c from fact_daily_n5 where ds = '1' group by x) subq where x = 484; diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q index 7ff3d8dfbf..bf6b2270ca 100644 --- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q +++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q @@ -17,10 +17,10 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- 1. pruner only pick up right directory -- 2. 
query result is right --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query +-- create 2 tables: fact_daily_n0 and fact_tz +-- fact_daily_n0 will be used for list bucketing query -- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int, y STRING, z STRING) PARTITIONED BY (ds STRING); +CREATE TABLE fact_daily_n0(x int, y STRING, z STRING) PARTITIONED BY (ds STRING); CREATE TABLE fact_tz(x int, y STRING, z STRING) PARTITIONED BY (ds STRING, hr STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz'; @@ -42,22 +42,22 @@ dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=2 ${hiveconf:hi dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=3 ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=238; dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1; --- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484,238); -ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE'); -ALTER TABLE fact_daily ADD PARTITION (ds='1') +-- switch fact_daily_n0 to skewed table and point its location to /fact_tz/ds=1 +alter table fact_daily_n0 skewed by (x) on (484,238); +ALTER TABLE fact_daily_n0 SET TBLPROPERTIES('EXTERNAL'='TRUE'); +ALTER TABLE fact_daily_n0 ADD PARTITION (ds='1') LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1'; -- set List Bucketing location map -alter table fact_daily PARTITION (ds = '1') set skewed location (484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484', +alter table fact_daily_n0 PARTITION (ds = '1') set skewed location (484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484', 238='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=238', 'HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME'); -describe formatted fact_daily PARTITION (ds = '1'); +describe formatted fact_daily_n0 PARTITION (ds = '1'); -SELECT * FROM fact_daily WHERE ds='1'; +SELECT * FROM fact_daily_n0 WHERE ds='1'; -- pruner pick up right directory -- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86); +explain extended SELECT x FROM fact_daily_n0 WHERE ds='1' and not (x = 86); -- List Bucketing Query -SELECT x FROM fact_daily WHERE ds='1' and not (x = 86); +SELECT x FROM fact_daily_n0 WHERE ds='1' and not (x = 86); diff --git a/ql/src/test/queries/clientpositive/llap_acid.q b/ql/src/test/queries/clientpositive/llap_acid.q index dfc7b0480b..754461d8ea 100644 --- a/ql/src/test/queries/clientpositive/llap_acid.q +++ b/ql/src/test/queries/clientpositive/llap_acid.q @@ -13,9 +13,9 @@ set hive.exec.dynamic.partition.mode=nonstrict; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -DROP TABLE orc_llap; +DROP TABLE orc_llap_n1; -CREATE TABLE orc_llap ( +CREATE TABLE orc_llap_n1 ( cint INT, cbigint BIGINT, cfloat FLOAT, @@ -23,37 +23,37 @@ CREATE TABLE orc_llap ( partitioned by (csmallint smallint) clustered by (cint) into 2 buckets stored as orc; -insert into table orc_llap partition (csmallint = 1) +insert into table orc_llap_n1 partition (csmallint = 1) select cint, cbigint, cfloat, cdouble from alltypesorc where cdouble is not null order by cdouble asc limit 10; -insert into table orc_llap partition (csmallint = 2) +insert into table orc_llap_n1 partition (csmallint 
diff --git a/ql/src/test/queries/clientpositive/llap_acid.q b/ql/src/test/queries/clientpositive/llap_acid.q
index dfc7b0480b..754461d8ea 100644
--- a/ql/src/test/queries/clientpositive/llap_acid.q
+++ b/ql/src/test/queries/clientpositive/llap_acid.q
@@ -13,9 +13,9 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n1;
-CREATE TABLE orc_llap (
+CREATE TABLE orc_llap_n1 (
 cint INT,
 cbigint BIGINT,
 cfloat FLOAT,
@@ -23,37 +23,37 @@ CREATE TABLE orc_llap (
 partitioned by (csmallint smallint)
 clustered by (cint) into 2 buckets stored as orc;
-insert into table orc_llap partition (csmallint = 1)
+insert into table orc_llap_n1 partition (csmallint = 1)
 select cint, cbigint, cfloat, cdouble from alltypesorc where cdouble is not null order by cdouble asc limit 10;
-insert into table orc_llap partition (csmallint = 2)
+insert into table orc_llap_n1 partition (csmallint = 2)
 select cint, cbigint, cfloat, cdouble from alltypesorc where cdouble is not null order by cdouble asc limit 10;
-alter table orc_llap SET TBLPROPERTIES ('transactional'='true');
+alter table orc_llap_n1 SET TBLPROPERTIES ('transactional'='true');
-insert into table orc_llap partition (csmallint = 3)
+insert into table orc_llap_n1 partition (csmallint = 3)
 select cint, cbigint, cfloat, cdouble from alltypesorc where cdouble is not null order by cdouble desc limit 10;
 SET hive.llap.io.enabled=true;
 explain vectorization only detail
-select cint, csmallint, cbigint from orc_llap where cint is not null order
+select cint, csmallint, cbigint from orc_llap_n1 where cint is not null order
 by csmallint, cint;
-select cint, csmallint, cbigint from orc_llap where cint is not null order
+select cint, csmallint, cbigint from orc_llap_n1 where cint is not null order
 by csmallint, cint;
-insert into table orc_llap partition (csmallint = 1) values (1, 1, 1, 1);
+insert into table orc_llap_n1 partition (csmallint = 1) values (1, 1, 1, 1);
 explain vectorization only detail
-update orc_llap set cbigint = 2 where cint = 1;
-update orc_llap set cbigint = 2 where cint = 1;
+update orc_llap_n1 set cbigint = 2 where cint = 1;
+update orc_llap_n1 set cbigint = 2 where cint = 1;
 explain vectorization only detail
-select cint, csmallint, cbigint from orc_llap where cint is not null order
+select cint, csmallint, cbigint from orc_llap_n1 where cint is not null order
 by csmallint, cint;
-select cint, csmallint, cbigint from orc_llap where cint is not null order
+select cint, csmallint, cbigint from orc_llap_n1 where cint is not null order
 by csmallint, cint;
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n1;
diff --git a/ql/src/test/queries/clientpositive/llap_acid2.q b/ql/src/test/queries/clientpositive/llap_acid2.q
index 108f00a84e..a409c26aff 100644
--- a/ql/src/test/queries/clientpositive/llap_acid2.q
+++ b/ql/src/test/queries/clientpositive/llap_acid2.q
@@ -13,9 +13,9 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n2;
-CREATE TABLE orc_llap (
+CREATE TABLE orc_llap_n2 (
 cint INT,
 cbigint BIGINT,
 cfloat FLOAT,
@@ -33,7 +33,7 @@ CREATE TABLE orc_llap (
 )
 stored as orc TBLPROPERTIES ('transactional'='true');
-insert into table orc_llap
+insert into table orc_llap_n2
 select cint, cbigint, cfloat, cdouble,
 cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
 cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
@@ -73,13 +73,13 @@ update orc_llap2 set cstring1 = 'testvalue' where cstring1 = 'N016jPED08o';
 SET hive.llap.io.enabled=true;
-select cstring1 from orc_llap;
-select cfloat2, cint from orc_llap;
-select * from orc_llap;
+select cstring1 from orc_llap_n2;
+select cfloat2, cint from orc_llap_n2;
+select * from orc_llap_n2;
 select cstring1 from orc_llap2;
 select cfloat2, cint from orc_llap2;
 select * from orc_llap2;
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n2;
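Both llap_acid.q and llap_acid2.q exercise the in-place upgrade of a plain ORC table to an ACID table, after which the pre-conversion files are read alongside new delta files. A minimal sketch of that conversion path, assuming the DbTxnManager settings from the hunks above and a hypothetical table acid_demo:

    set hive.support.concurrency=true;
    set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

    CREATE TABLE acid_demo (id INT, val STRING)
    CLUSTERED BY (id) INTO 2 BUCKETS STORED AS ORC;
    INSERT INTO acid_demo VALUES (1, 'pre-acid');        -- plain ORC file, written before conversion

    ALTER TABLE acid_demo SET TBLPROPERTIES ('transactional'='true');
    INSERT INTO acid_demo VALUES (2, 'post-acid');       -- lands in an ACID delta directory
    UPDATE acid_demo SET val = 'rewritten' WHERE id = 1; -- only legal once the table is transactional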
diff --git a/ql/src/test/queries/clientpositive/llap_nullscan.q b/ql/src/test/queries/clientpositive/llap_nullscan.q
index c123e9e22b..c0057a515a 100644
--- a/ql/src/test/queries/clientpositive/llap_nullscan.q
+++ b/ql/src/test/queries/clientpositive/llap_nullscan.q
@@ -8,24 +8,24 @@ set hive.auto.convert.join=false;
 set hive.vectorized.execution.enabled=true;
 set hive.llap.io.enabled=true;
-drop table if exists src_orc;
+drop table if exists src_orc_n1;
-create table src_orc stored as orc as select * from srcpart limit 10;
+create table src_orc_n1 stored as orc as select * from srcpart limit 10;
 explain extended
-select * from src_orc where 1=2;
-select * from src_orc where 1=2;
+select * from src_orc_n1 where 1=2;
+select * from src_orc_n1 where 1=2;
 explain
-select * from (select key from src_orc where false) a left outer join (select key from src_orc limit 0) b on a.key=b.key;
-select * from (select key from src_orc where false) a left outer join (select key from src_orc limit 0) b on a.key=b.key;
+select * from (select key from src_orc_n1 where false) a left outer join (select key from src_orc_n1 limit 0) b on a.key=b.key;
+select * from (select key from src_orc_n1 where false) a left outer join (select key from src_orc_n1 limit 0) b on a.key=b.key;
 explain
-select count(key) from src_orc where false union all select count(key) from src_orc ;
-select count(key) from src_orc where false union all select count(key) from src_orc ;
+select count(key) from src_orc_n1 where false union all select count(key) from src_orc_n1 ;
+select count(key) from src_orc_n1 where false union all select count(key) from src_orc_n1 ;
 explain
-select * from src_orc s1, src_orc s2 where false and s1.value = s2.value;
-select * from src_orc s1, src_orc s2 where false and s1.value = s2.value;
+select * from src_orc_n1 s1, src_orc_n1 s2 where false and s1.value = s2.value;
+select * from src_orc_n1 s1, src_orc_n1 s2 where false and s1.value = s2.value;
-drop table if exists src_orc;
+drop table if exists src_orc_n1;
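llap_nullscan.q checks that a provably empty scan never touches ORC data: with the null-scan optimization enabled (hive.optimize.null.scan, on by default), a constant-false predicate swaps the real input for a zero-row input format, and the test asserts this survives vectorization and LLAP IO. A minimal sketch of the pattern being verified, on a hypothetical table t:

    -- the extended plan should show a nullscan path instead of the table's real files
    EXPLAIN EXTENDED SELECT * FROM t WHERE 1 = 2;

    -- the same holds when the false branch feeds a join or union
    SELECT count(key) FROM t WHERE false
    UNION ALL
    SELECT count(key) FROM t;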
diff --git a/ql/src/test/queries/clientpositive/llap_partitioned.q b/ql/src/test/queries/clientpositive/llap_partitioned.q
index 2ba38ce02a..0bbd7f6a1c 100644
--- a/ql/src/test/queries/clientpositive/llap_partitioned.q
+++ b/ql/src/test/queries/clientpositive/llap_partitioned.q
@@ -48,7 +48,7 @@ SELECT csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring1, cstring1,
 INSERT OVERWRITE TABLE orc_llap_dim_part PARTITION (ctinyint)
 SELECT null, null, sum(cbigint) as cbigint, null, null, null, null, null, null, null, ctinyint FROM alltypesorc WHERE ctinyint > 10 AND ctinyint < 21 GROUP BY ctinyint;
-drop table llap_temp_table;
+drop table llap_temp_table_n0;
 set hive.cbo.enable=false;
 SET hive.llap.io.enabled=true;
@@ -57,14 +57,14 @@ SET hive.vectorized.execution.enabled=true;
 explain vectorization detail
 SELECT oft.ctinyint, oft.cint, oft.cchar1, oft.cvchar1 FROM orc_llap_part oft INNER JOIN orc_llap_dim_part od ON oft.ctinyint = od.ctinyint;
-create table llap_temp_table as
+create table llap_temp_table_n0 as
 SELECT oft.ctinyint, oft.cint, oft.cchar1, oft.cvchar1 FROM orc_llap_part oft INNER JOIN orc_llap_dim_part od ON oft.ctinyint = od.ctinyint;
 explain vectorization detail
-select sum(hash(*)) from llap_temp_table;
-select sum(hash(*)) from llap_temp_table;
-drop table llap_temp_table;
+select sum(hash(*)) from llap_temp_table_n0;
+select sum(hash(*)) from llap_temp_table_n0;
+drop table llap_temp_table_n0;
 DROP TABLE orc_llap_part;
diff --git a/ql/src/test/queries/clientpositive/llap_reader.q b/ql/src/test/queries/clientpositive/llap_reader.q
index ac0624daa6..53baf5d873 100644
--- a/ql/src/test/queries/clientpositive/llap_reader.q
+++ b/ql/src/test/queries/clientpositive/llap_reader.q
@@ -3,21 +3,21 @@ SET hive.llap.io.enabled=true;
 SET hive.map.aggr=false;
 SET hive.exec.post.hooks=;
-CREATE TABLE test(f1 int, f2 int, f3 int) stored as orc;
-INSERT INTO TABLE test VALUES (1,1,1), (2,2,2), (3,3,3), (4,4,4);
+CREATE TABLE test_n7(f1 int, f2 int, f3 int) stored as orc;
+INSERT INTO TABLE test_n7 VALUES (1,1,1), (2,2,2), (3,3,3), (4,4,4);
-ALTER TABLE test CHANGE f1 f1 bigint;
-ALTER TABLE test CHANGE f2 f2 bigint;
-ALTER TABLE test CHANGE f3 f3 bigint;
+ALTER TABLE test_n7 CHANGE f1 f1 bigint;
+ALTER TABLE test_n7 CHANGE f2 f2 bigint;
+ALTER TABLE test_n7 CHANGE f3 f3 bigint;
 -- llap counters with data and meta cache
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
-SELECT count(f1) FROM test GROUP BY f1;
-SELECT count(f1) FROM test GROUP BY f1;
+SELECT count(f1) FROM test_n7 GROUP BY f1;
+SELECT count(f1) FROM test_n7 GROUP BY f1;
 SET hive.exec.post.hooks=;
 CREATE TABLE test_bigint(f1 bigint, f2 bigint, f3 bigint) stored as orc;
-INSERT OVERWRITE TABLE test_bigint select * from test;
+INSERT OVERWRITE TABLE test_bigint select * from test_n7;
 ALTER TABLE test_bigint CHANGE f1 f1 double;
 ALTER TABLE test_bigint CHANGE f2 f2 double;
 ALTER TABLE test_bigint CHANGE f3 f3 double;
@@ -32,9 +32,9 @@ select count(f1) from test_bigint group by f1;
 SET hive.exec.post.hooks=;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.support.concurrency=true;
-CREATE TABLE test_acid (f1 int, f2 int, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-INSERT INTO TABLE test_acid VALUES (1,1,'b1'), (2,2,'b2'), (3,3,'b3'), (4,4,'b4');
+CREATE TABLE test_acid_n0 (f1 int, f2 int, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+INSERT INTO TABLE test_acid_n0 VALUES (1,1,'b1'), (2,2,'b2'), (3,3,'b3'), (4,4,'b4');
 -- should not have llap counters
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
-SELECT count(f1) FROM test_acid GROUP BY f1;
+SELECT count(f1) FROM test_acid_n0 GROUP BY f1;
diff --git a/ql/src/test/queries/clientpositive/llap_udf.q b/ql/src/test/queries/clientpositive/llap_udf.q
index 65d930e456..919e7ccc26 100644
--- a/ql/src/test/queries/clientpositive/llap_udf.q
+++ b/ql/src/test/queries/clientpositive/llap_udf.q
@@ -6,15 +6,15 @@ set hive.llap.execution.mode=all;
 set hive.fetch.task.conversion=none;
 set hive.llap.allow.permanent.fns=true;
-drop table if exists src_orc;
-create table src_orc stored as orc as select * from src;
+drop table if exists src_orc_n0;
+create table src_orc_n0 stored as orc as select * from src;
 -- Not using GenericUDFTestGetJavaBoolean; that is already registered when tests begin
 CREATE TEMPORARY FUNCTION test_udf0 AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFEvaluateNPE';
 set hive.llap.execution.mode=auto;
-EXPLAIN SELECT test_udf0(cast(key as string)) from src_orc;
+EXPLAIN SELECT test_udf0(cast(key as string)) from src_orc_n0;
 set hive.llap.execution.mode=all;
 CREATE FUNCTION test_udf2 AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString';
@@ -22,32 +22,32 @@ CREATE FUNCTION test_udf3 AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTe
 CREATE FUNCTION test_udf4 AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFEvaluateNPE';
 EXPLAIN
-SELECT test_udf2(cast(key as string)), test_udf3(cast(key as string)), test_udf4(cast(key as string)) from src_orc;
+SELECT test_udf2(cast(key as string)), test_udf3(cast(key as string)), test_udf4(cast(key as string)) from src_orc_n0;
 set hive.llap.execution.mode=auto;
 -- Verification is based on classes, so 0 would work based on 4.
EXPLAIN -SELECT test_udf0(cast(key as string)) from src_orc; +SELECT test_udf0(cast(key as string)) from src_orc_n0; DROP FUNCTION test_udf2; set hive.llap.execution.mode=all; -- ...verify that 3 still works EXPLAIN -SELECT test_udf3(cast(key as string)), test_udf4(cast(key as string)) from src_orc; +SELECT test_udf3(cast(key as string)), test_udf4(cast(key as string)) from src_orc_n0; DROP FUNCTION test_udf4; set hive.llap.execution.mode=auto; -- ...now 0 should stop working EXPLAIN -SELECT test_udf0(cast(key as string)) from src_orc; +SELECT test_udf0(cast(key as string)) from src_orc_n0; set hive.llap.allow.permanent.fns=false; EXPLAIN -SELECT test_udf3(cast(key as string)) from src_orc; +SELECT test_udf3(cast(key as string)) from src_orc_n0; -drop table if exists src_orc; +drop table if exists src_orc_n0; set hive.execution.mode=container; diff --git a/ql/src/test/queries/clientpositive/llap_uncompressed.q b/ql/src/test/queries/clientpositive/llap_uncompressed.q index 0282457e8c..875356c73c 100644 --- a/ql/src/test/queries/clientpositive/llap_uncompressed.q +++ b/ql/src/test/queries/clientpositive/llap_uncompressed.q @@ -7,12 +7,12 @@ SET hive.exec.orc.default.row.index.stride=1000; SET hive.optimize.index.filter=true; set hive.auto.convert.join=false; -DROP TABLE orc_llap; +DROP TABLE orc_llap_n0; set hive.auto.convert.join=true; SET hive.llap.io.enabled=false; -CREATE TABLE orc_llap( +CREATE TABLE orc_llap_n0( ctinyint TINYINT, csmallint SMALLINT, cint INT, @@ -27,7 +27,7 @@ CREATE TABLE orc_llap( cboolean2 BOOLEAN) STORED AS ORC tblproperties ("orc.compress"="NONE"); -insert into table orc_llap +insert into table orc_llap_n0 select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc; @@ -36,14 +36,14 @@ SET hive.llap.io.enabled=true; drop table llap_temp_table; explain -select * from orc_llap where cint > 10 and cbigint is not null; +select * from orc_llap_n0 where cint > 10 and cbigint is not null; create table llap_temp_table as -select * from orc_llap where cint > 10 and cbigint is not null; +select * from orc_llap_n0 where cint > 10 and cbigint is not null; select sum(hash(*)) from llap_temp_table; explain -select * from orc_llap where cint > 10 and cint < 5000000; -select * from orc_llap where cint > 10 and cint < 5000000; +select * from orc_llap_n0 where cint > 10 and cint < 5000000; +select * from orc_llap_n0 where cint > 10 and cint < 5000000; -DROP TABLE orc_llap; +DROP TABLE orc_llap_n0; drop table llap_temp_table; diff --git a/ql/src/test/queries/clientpositive/llapdecider.q b/ql/src/test/queries/clientpositive/llapdecider.q index 86f4c6e385..17a57bc982 100644 --- a/ql/src/test/queries/clientpositive/llapdecider.q +++ b/ql/src/test/queries/clientpositive/llapdecider.q @@ -8,63 +8,63 @@ set hive.llap.execution.mode=auto; -- simple query with multiple reduce stages EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt; -create table src_orc stored as orc as select * from src; +create table src_orc_n2 stored as orc as select * from src; -EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt; +EXPLAIN SELECT key, count(value) as cnt FROM src_orc_n2 GROUP BY key ORDER BY cnt; set hive.llap.auto.enforce.stats=false; -EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt; +EXPLAIN SELECT key, count(value) as cnt FROM src_orc_n2 GROUP BY key ORDER BY cnt; set hive.llap.auto.enforce.stats=true; -analyze table src_orc compute 
statistics for columns; +analyze table src_orc_n2 compute statistics for columns; -EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt; +EXPLAIN SELECT key, count(value) as cnt FROM src_orc_n2 GROUP BY key ORDER BY cnt; -EXPLAIN SELECT * from src_orc join src on (src_orc.key = src.key) order by src.value; +EXPLAIN SELECT * from src_orc_n2 join src on (src_orc_n2.key = src.key) order by src.value; -EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value; +EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) order by s2.value; set hive.llap.auto.enforce.tree=false; -EXPLAIN SELECT * from src_orc join src on (src_orc.key = src.key) order by src.value; +EXPLAIN SELECT * from src_orc_n2 join src on (src_orc_n2.key = src.key) order by src.value; set hive.llap.auto.enforce.tree=true; set hive.llap.auto.max.input.size=10; -EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value; +EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) order by s2.value; set hive.llap.auto.max.input.size=1000000000; set hive.llap.auto.max.output.size=10; -EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value; +EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) order by s2.value; set hive.llap.auto.max.output.size=1000000000; set hive.llap.execution.mode=map; -EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value; +EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) order by s2.value; set hive.llap.execution.mode=none; -EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value; +EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) order by s2.value; set hive.llap.execution.mode=all; -EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value; +EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) order by s2.value; CREATE TEMPORARY FUNCTION test_udf_get_java_string AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString'; set hive.llap.execution.mode=auto; -EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc where cast(key as int) > 1; -EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) + 1) from src_orc where cast(key as int) > 1; -EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc where cast(test_udf_get_java_string(cast(key as string)) as int) > 1; +EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc_n2 where cast(key as int) > 1; +EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) + 1) from src_orc_n2 where cast(key as int) > 1; +EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc_n2 where cast(test_udf_get_java_string(cast(key as string)) as int) > 1; set hive.llap.skip.compile.udf.check=true; -EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) + 1) from src_orc where cast(key as int) > 1; +EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) + 1) from src_orc_n2 where cast(key as int) > 1; set hive.execution.mode=container; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/load_binary_data.q b/ql/src/test/queries/clientpositive/load_binary_data.q index 653918afc0..81d41e8123 100644 --- a/ql/src/test/queries/clientpositive/load_binary_data.q +++ b/ql/src/test/queries/clientpositive/load_binary_data.q @@ -1,13 
+1,13 @@ -CREATE TABLE mytable(key binary, value int) +CREATE TABLE mytable_n2(key binary, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '9' STORED AS TEXTFILE; -- this query loads native binary data, stores in a table and then queries it. Note that string.txt contains binary data. Also uses transform clause and then length udf. -LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable; +LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable_n2; -create table dest1 (key binary, value int); +create table dest1_n155 (key binary, value int); -insert overwrite table dest1 select transform(*) using 'cat' as key binary, value int from mytable; +insert overwrite table dest1_n155 select transform(*) using 'cat' as key binary, value int from mytable_n2; -select key, value, length (key) from dest1; +select key, value, length (key) from dest1_n155; diff --git a/ql/src/test/queries/clientpositive/load_data_using_job.q b/ql/src/test/queries/clientpositive/load_data_using_job.q index 3659b6ec8e..b760d9bc7e 100644 --- a/ql/src/test/queries/clientpositive/load_data_using_job.q +++ b/ql/src/test/queries/clientpositive/load_data_using_job.q @@ -11,84 +11,84 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado -- Single partition -- Regular load happens. -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) STORED AS TEXTFILE; -explain load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -select * from srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string) STORED AS TEXTFILE; +explain load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n8 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n8 partition(ds='2008-04-08'); +select * from srcbucket_mapjoin_n8; -drop table srcbucket_mapjoin; +drop table srcbucket_mapjoin_n8; -- Triggers a Tez job as partition info is missing from load data. -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string) STORED AS TEXTFILE; +explain load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8; +load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8; +select * from srcbucket_mapjoin_n8; +drop table srcbucket_mapjoin_n8; -- Multi partitions -- Triggers a Tez job as partition info is missing from load data. 
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string, hr int) STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string, hr int) STORED AS TEXTFILE; +explain load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8; +load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8; +select * from srcbucket_mapjoin_n8; +drop table srcbucket_mapjoin_n8; -- Multi partitions and directory with files (no sub dirs) -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string, hr int) STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string, hr int) STORED AS TEXTFILE; +explain load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin_n8; +load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin_n8; +select * from srcbucket_mapjoin_n8; +drop table srcbucket_mapjoin_n8; -- Bucketing -CREATE TABLE srcbucket_mapjoin(key int, value string) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/bucketing.txt' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/bucketing.txt' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; +explain load data local inpath '../../data/files/load_data_job/bucketing.txt' INTO TABLE srcbucket_mapjoin_n8; +load data local inpath '../../data/files/load_data_job/bucketing.txt' INTO TABLE srcbucket_mapjoin_n8; +select * from srcbucket_mapjoin_n8; +drop table srcbucket_mapjoin_n8; -- Single partition and bucketing -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; +explain load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8; +load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8; +select * from srcbucket_mapjoin_n8; +drop table srcbucket_mapjoin_n8; -- Multiple partitions and 
bucketing -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; +explain load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8; +load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8; +select * from srcbucket_mapjoin_n8; +drop table srcbucket_mapjoin_n8; -- Multiple partitions, bucketing, and directory with files (no sub dirs) -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; +explain load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin_n8; +load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin_n8; +select * from srcbucket_mapjoin_n8; +drop table srcbucket_mapjoin_n8; -- Multiple partitions, bucketing, and directory with files and sub dirs -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/partitions' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/partitions' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE; +explain load data local inpath '../../data/files/load_data_job/partitions' INTO TABLE srcbucket_mapjoin_n8; +load data local inpath '../../data/files/load_data_job/partitions' INTO TABLE srcbucket_mapjoin_n8; +select * from srcbucket_mapjoin_n8; +drop table srcbucket_mapjoin_n8; -- Single partition, multiple buckets -CREATE TABLE srcbucket_mapjoin(key int, value string, ds string) partitioned by (hr int) clustered by (key, value) sorted by (key, value) into 5 buckets STORED AS TEXTFILE; -explain load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin; -select * from srcbucket_mapjoin; -drop table srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n8(key int, value string, ds string) partitioned by (hr int) clustered by 
(key, value) sorted by (key, value) into 5 buckets STORED AS TEXTFILE;
+explain load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8;
+load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 -- Load into ORC table using text files
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) STORED AS ORC;
-explain load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds string) STORED AS ORC;
+explain load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8
 INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
-load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin
+load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8
 INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
\ No newline at end of file
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
\ No newline at end of file
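The scenarios in load_data_using_job.q above all pivot on one behavior: a plain LOAD DATA is a file move, but as soon as the target needs work the files cannot provide on their own (missing partition columns, bucketing, or an ORC target fed from text), Hive rewrites the load as a Tez job over the staged files. A minimal sketch of the two ends of that spectrum, assuming a hypothetical table part_demo and input paths:

    CREATE TABLE part_demo (key INT, value STRING) PARTITIONED BY (ds STRING) STORED AS TEXTFILE;

    -- full partition spec given: a pure file move, no job launched
    LOAD DATA LOCAL INPATH '/tmp/demo/file1.txt' INTO TABLE part_demo PARTITION (ds = '2008-04-08');

    -- no partition spec: the ds values must be read out of the rows, so a Tez job runs
    LOAD DATA LOCAL INPATH '/tmp/demo/file2.txt' INTO TABLE part_demo;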
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part1.q b/ql/src/test/queries/clientpositive/load_dyn_part1.q
index bac1f1e73d..8004f89a11 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part1.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part1.q
@@ -7,28 +7,28 @@ show partitions srcpart;
-create table if not exists nzhang_part1 like srcpart;
-create table if not exists nzhang_part2 like srcpart;
-describe extended nzhang_part1;
+create table if not exists nzhang_part1_n0 like srcpart;
+create table if not exists nzhang_part2_n0 like srcpart;
+describe extended nzhang_part1_n0;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.exec.dynamic.partition=true;
 explain
 from srcpart
-insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+insert overwrite table nzhang_part1_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
 from srcpart
-insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+insert overwrite table nzhang_part1_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
-show partitions nzhang_part1;
-show partitions nzhang_part2;
+show partitions nzhang_part1_n0;
+show partitions nzhang_part2_n0;
-select * from nzhang_part1 where ds is not null and hr is not null;
-select * from nzhang_part2 where ds is not null and hr is not null;
+select * from nzhang_part1_n0 where ds is not null and hr is not null;
+select * from nzhang_part2_n0 where ds is not null and hr is not null;
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part11.q b/ql/src/test/queries/clientpositive/load_dyn_part11.q
index 9e46d7615f..1e61981d9f 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part11.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part11.q
@@ -2,17 +2,17 @@ show partitions srcpart;
-create table if not exists nzhang_part like srcpart;
-describe extended nzhang_part;
+create table if not exists nzhang_part_n0 like srcpart;
+describe extended nzhang_part_n0;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set hive.exec.compress.output=true;
 set hive.exec.dynamic.partition=true;
-insert overwrite table nzhang_part partition (ds="2010-03-03", hr) select key, value, hr from srcpart where ds is not null and hr is not null;
+insert overwrite table nzhang_part_n0 partition (ds="2010-03-03", hr) select key, value, hr from srcpart where ds is not null and hr is not null;
-select * from nzhang_part where ds = '2010-03-03' and hr = '11';
-select * from nzhang_part where ds = '2010-03-03' and hr = '12';
+select * from nzhang_part_n0 where ds = '2010-03-03' and hr = '11';
+select * from nzhang_part_n0 where ds = '2010-03-03' and hr = '12';
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part14.q b/ql/src/test/queries/clientpositive/load_dyn_part14.q
index 13bcc6fe96..74a74d1e15 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part14.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part14.q
@@ -1,16 +1,16 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
-create table if not exists nzhang_part14 (key string)
+create table if not exists nzhang_part14_n0 (key string)
 partitioned by (value string);
-describe extended nzhang_part14;
+describe extended nzhang_part14_n0;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 explain
-insert overwrite table nzhang_part14 partition(value)
+insert overwrite table nzhang_part14_n0 partition(value)
 select key, value from (
 select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a
 union all
@@ -19,7 +19,7 @@ select key, value from (
 select * from (select 'k3' as key, ' ' as value from src limit 2)c
 ) T;
-insert overwrite table nzhang_part14 partition(value)
+insert overwrite table nzhang_part14_n0 partition(value)
 select key, value from (
 select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a
 union all
@@ -29,8 +29,8 @@ select key, value from (
 ) T;
-show partitions nzhang_part14;
+show partitions nzhang_part14_n0;
-select * from nzhang_part14 where value <> 'a';
+select * from nzhang_part14_n0 where value <> 'a';
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part8.q b/ql/src/test/queries/clientpositive/load_dyn_part8.q
index 4330e0cb6b..a74be62a9d 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part8.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part8.q
@@ -5,8 +5,8 @@ show partitions srcpart;
-create table if not exists nzhang_part8 like srcpart;
-describe extended nzhang_part8;
+create table if not exists nzhang_part8_n0 like srcpart;
+describe extended nzhang_part8_n0;
 set hive.merge.mapfiles=false;
 set hive.exec.dynamic.partition=true;
@@ -14,14 +14,14 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 explain extended
 from srcpart
-insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+insert
overwrite table nzhang_part8_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part8_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'; from srcpart -insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' -insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'; +insert overwrite table nzhang_part8_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part8_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'; -show partitions nzhang_part8; +show partitions nzhang_part8_n0; -select * from nzhang_part8 where ds is not null and hr is not null; +select * from nzhang_part8_n0 where ds is not null and hr is not null; diff --git a/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q b/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q index 1ce482438b..922e13254a 100644 --- a/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q +++ b/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q @@ -1,6 +1,6 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile; -alter table hive_test_src add partition (pcol1 = 'test_part'); +create table hive_test_src_n1 ( col1 string ) partitioned by (pcol1 string) stored as textfile; +alter table hive_test_src_n1 add partition (pcol1 = 'test_part'); set hive.security.authorization.enabled=true; -grant Update on table hive_test_src to user hive_test_user; -load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part'); +grant Update on table hive_test_src_n1 to user hive_test_user; +load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n1 partition (pcol1 = 'test_part'); diff --git a/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q b/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q index 6bac47fb90..60a4a0d1a6 100644 --- a/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q +++ b/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q @@ -1,6 +1,6 @@ -- test for loading into tables with the file with space in the name -CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT); -LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name; -LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name; +CREATE TABLE load_file_with_space_in_the_name_n0(name STRING, age INT); +LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name_n0; +LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name_n0; diff --git a/ql/src/test/queries/clientpositive/load_fs2.q b/ql/src/test/queries/clientpositive/load_fs2.q index a75758a072..d247c0ec13 100644 --- a/ql/src/test/queries/clientpositive/load_fs2.q +++ b/ql/src/test/queries/clientpositive/load_fs2.q @@ -1,20 +1,20 @@ -- HIVE-3300 [jira] LOAD DATA INPATH fails if a hdfs file with same name is added to table -- 'loader' table is used only for uploading kv1.txt to HDFS (!hdfs -put is not working on minMRDriver) -create table 
result (key string, value string); +create table result_n2 (key string, value string); create table loader (key string, value string); load data local inpath '../../data/files/kv1.txt' into table loader; -load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result; -show table extended like result; +load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result_n2; +show table extended like result_n2; load data local inpath '../../data/files/kv1.txt' into table loader; -load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result; -show table extended like result; +load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result_n2; +show table extended like result_n2; load data local inpath '../../data/files/kv1.txt' into table loader; -load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result; -show table extended like result; +load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result_n2; +show table extended like result_n2; diff --git a/ql/src/test/queries/clientpositive/load_fs_overwrite.q b/ql/src/test/queries/clientpositive/load_fs_overwrite.q index 51a803130a..ec76b158b8 100644 --- a/ql/src/test/queries/clientpositive/load_fs_overwrite.q +++ b/ql/src/test/queries/clientpositive/load_fs_overwrite.q @@ -1,20 +1,20 @@ --HIVE 6209 -drop table target; +drop table target_n0; drop table temp; -create table target (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/target'; +create table target_n0 (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/target'; create table temp (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/temp'; set fs.pfile.impl.disable.cache=false; load data local inpath '../../data/files/kv1.txt' into table temp; -load data inpath '${system:test.tmp.dir}/temp/kv1.txt' overwrite into table target; -select count(*) from target; +load data inpath '${system:test.tmp.dir}/temp/kv1.txt' overwrite into table target_n0; +select count(*) from target_n0; load data local inpath '../../data/files/kv2.txt' into table temp; -load data inpath '${system:test.tmp.dir}/temp/kv2.txt' overwrite into table target; -select count(*) from target; +load data inpath '${system:test.tmp.dir}/temp/kv2.txt' overwrite into table target_n0; +select count(*) from target_n0; -drop table target; +drop table target_n0; drop table temp; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/load_non_hdfs_path.q b/ql/src/test/queries/clientpositive/load_non_hdfs_path.q index 824ce69611..d787278132 100644 --- a/ql/src/test/queries/clientpositive/load_non_hdfs_path.q +++ b/ql/src/test/queries/clientpositive/load_non_hdfs_path.q @@ -2,5 +2,5 @@ dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/non_hdfs_path; dfs -touchz ${system:test.tmp.dir}/non_hdfs_path/1.txt; dfs -chmod 555 ${system:test.tmp.dir}/non_hdfs_path/1.txt; -create table t1(i int); -load data inpath 'pfile:${system:test.tmp.dir}/non_hdfs_path/' overwrite into table t1; +create table t1_n129(i int); +load data inpath 'pfile:${system:test.tmp.dir}/non_hdfs_path/' overwrite into table t1_n129; diff --git a/ql/src/test/queries/clientpositive/load_orc.q b/ql/src/test/queries/clientpositive/load_orc.q index 2eaf0989ff..219d07d88a 100644 --- a/ql/src/test/queries/clientpositive/load_orc.q +++ b/ql/src/test/queries/clientpositive/load_orc.q @@ -1,10 +1,10 @@ set hive.default.fileformat=ORC; -create table 
orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp); -create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp); +create table orc_staging_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp); +create table orc_test_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp); -load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_staging/; +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging_n0; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_staging_n0/; -load data inpath '${hiveconf:hive.metastore.warehouse.dir}/orc_staging/orc_split_elim.orc' into table orc_test; -load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/; +load data inpath '${hiveconf:hive.metastore.warehouse.dir}/orc_staging_n0/orc_split_elim.orc' into table orc_test_n1; +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test_n1; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test_n1/; diff --git a/ql/src/test/queries/clientpositive/load_overwrite.q b/ql/src/test/queries/clientpositive/load_overwrite.q index e9e6280250..50edda36b6 100644 --- a/ql/src/test/queries/clientpositive/load_overwrite.q +++ b/ql/src/test/queries/clientpositive/load_overwrite.q @@ -1,16 +1,16 @@ --! qt:dataset:src -create table load_overwrite like src; +create table load_overwrite_n0 like src; -insert overwrite table load_overwrite select * from src; -show table extended like load_overwrite; -select count(*) from load_overwrite; +insert overwrite table load_overwrite_n0 select * from src; +show table extended like load_overwrite_n0; +select count(*) from load_overwrite_n0; -load data local inpath '../../data/files/kv1.txt' into table load_overwrite; -show table extended like load_overwrite; -select count(*) from load_overwrite; +load data local inpath '../../data/files/kv1.txt' into table load_overwrite_n0; +show table extended like load_overwrite_n0; +select count(*) from load_overwrite_n0; -load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite; -show table extended like load_overwrite; -select count(*) from load_overwrite; +load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite_n0; +show table extended like load_overwrite_n0; +select count(*) from load_overwrite_n0; diff --git a/ql/src/test/queries/clientpositive/load_part_authsuccess.q b/ql/src/test/queries/clientpositive/load_part_authsuccess.q index 868fd6cdbf..47c471e04f 100644 --- a/ql/src/test/queries/clientpositive/load_part_authsuccess.q +++ b/ql/src/test/queries/clientpositive/load_part_authsuccess.q @@ -1,5 +1,5 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider; -create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile; +create table hive_test_src_n0 ( col1 string ) partitioned by (pcol1 string) stored as textfile; set hive.security.authorization.enabled=true; -grant Update on table hive_test_src to user hive_test_user; -load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part'); +grant Update on table hive_test_src_n0 to user hive_test_user; +load data local inpath 
'../../data/files/test.dat' overwrite into table hive_test_src_n0 partition (pcol1 = 'test_part');
diff --git a/ql/src/test/queries/clientpositive/loadpart1.q b/ql/src/test/queries/clientpositive/loadpart1.q
index 735befef6f..7c11f24c6d 100644
--- a/ql/src/test/queries/clientpositive/loadpart1.q
+++ b/ql/src/test/queries/clientpositive/loadpart1.q
@@ -1,14 +1,14 @@
-create table hive_test_src ( col1 string ) stored as textfile ;
-load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ;
+create table hive_test_src_n2 ( col1 string ) stored as textfile ;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n2 ;
 create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile;
-insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src ;
+insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src_n2 ;
 select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part';
-insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src ;
+insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src_n2 ;
 select * from hive_test_dst where pcol1='test_part' and pcol2='test_part';
 select * from hive_test_dst where pcol1='test_part';
diff --git a/ql/src/test/queries/clientpositive/lock1.q b/ql/src/test/queries/clientpositive/lock1.q
index a6b2c5b4d4..c05a1180bf 100644
--- a/ql/src/test/queries/clientpositive/lock1.q
+++ b/ql/src/test/queries/clientpositive/lock1.q
@@ -1,30 +1,30 @@
 --! qt:dataset:src
-drop table tstsrc;
-create table tstsrc like src;
-insert overwrite table tstsrc select key, value from src;
+drop table tstsrc_n1;
+create table tstsrc_n1 like src;
+insert overwrite table tstsrc_n1 select key, value from src;
 SHOW LOCKS;
-SHOW LOCKS tstsrc;
+SHOW LOCKS tstsrc_n1;
-LOCK TABLE tstsrc shared;
+LOCK TABLE tstsrc_n1 shared;
 SHOW LOCKS;
-SHOW LOCKS tstsrc;
-SHOW LOCKS tstsrc extended;
+SHOW LOCKS tstsrc_n1;
+SHOW LOCKS tstsrc_n1 extended;
-UNLOCK TABLE tstsrc;
+UNLOCK TABLE tstsrc_n1;
 SHOW LOCKS;
 SHOW LOCKS extended;
-SHOW LOCKS tstsrc;
-lock TABLE tstsrc SHARED;
+SHOW LOCKS tstsrc_n1;
+lock TABLE tstsrc_n1 SHARED;
 SHOW LOCKS;
 SHOW LOCKS extended;
-SHOW LOCKS tstsrc;
-LOCK TABLE tstsrc SHARED;
+SHOW LOCKS tstsrc_n1;
+LOCK TABLE tstsrc_n1 SHARED;
 SHOW LOCKS;
 SHOW LOCKS extended;
-SHOW LOCKS tstsrc;
-UNLOCK TABLE tstsrc;
+SHOW LOCKS tstsrc_n1;
+UNLOCK TABLE tstsrc_n1;
 SHOW LOCKS;
 SHOW LOCKS extended;
-SHOW LOCKS tstsrc;
-drop table tstsrc;
+SHOW LOCKS tstsrc_n1;
+drop table tstsrc_n1;
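lock1.q above walks the explicit locking commands of the legacy lock manager: shared locks may be stacked by concurrent readers, and SHOW LOCKS reports what is held. A minimal sketch of the command shapes involved, assuming a hypothetical table t and a lock manager that supports explicit LOCK TABLE (the DbTxnManager rejects explicit lock requests):

    LOCK TABLE t SHARED;              -- concurrent readers allowed
    SHOW LOCKS t EXTENDED;            -- who holds what, with query and host details
    UNLOCK TABLE t;

    LOCK TABLE t PARTITION (ds='2008-04-08') EXCLUSIVE;  -- writers take exclusive locks
    SHOW LOCKS t PARTITION (ds='2008-04-08');
    UNLOCK TABLE t PARTITION (ds='2008-04-08');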
diff --git a/ql/src/test/queries/clientpositive/lock2.q b/ql/src/test/queries/clientpositive/lock2.q
index e990312268..81b995c39e 100644
--- a/ql/src/test/queries/clientpositive/lock2.q
+++ b/ql/src/test/queries/clientpositive/lock2.q
@@ -4,35 +4,35 @@ drop table tstsrc;
 create table tstsrc like src;
 insert overwrite table tstsrc select key, value from src;
-drop table tstsrcpart;
-create table tstsrcpart like srcpart;
+drop table tstsrcpart_n0;
+create table tstsrcpart_n0 like srcpart;
-insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11')
+insert overwrite table tstsrcpart_n0 partition (ds='2008-04-08', hr='11')
 select key, value from srcpart where ds='2008-04-08' and hr='11';
 LOCK TABLE tstsrc SHARED;
-LOCK TABLE tstsrcpart SHARED;
-LOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE;
+LOCK TABLE tstsrcpart_n0 SHARED;
+LOCK TABLE tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE;
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11');
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') extended;
+SHOW LOCKS tstsrcpart_n0;
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') extended;
 UNLOCK TABLE tstsrc;
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11');
+SHOW LOCKS tstsrcpart_n0;
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
-UNLOCK TABLE tstsrcpart;
+UNLOCK TABLE tstsrcpart_n0;
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11');
+SHOW LOCKS tstsrcpart_n0;
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
-UNLOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11');
+UNLOCK TABLE tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11');
+SHOW LOCKS tstsrcpart_n0;
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
 drop table tstsrc;
-drop table tstsrcpart;
+drop table tstsrcpart_n0;
diff --git a/ql/src/test/queries/clientpositive/lock3.q b/ql/src/test/queries/clientpositive/lock3.q
index dd1843f6b3..1da387f418 100644
--- a/ql/src/test/queries/clientpositive/lock3.q
+++ b/ql/src/test/queries/clientpositive/lock3.q
@@ -1,9 +1,9 @@
 --! qt:dataset:srcpart
-drop table tstsrcpart;
-create table tstsrcpart like srcpart;
+drop table tstsrcpart_n4;
+create table tstsrcpart_n4 like srcpart;
 from srcpart
-insert overwrite table tstsrcpart partition (ds='2008-04-08',hr='11')
+insert overwrite table tstsrcpart_n4 partition (ds='2008-04-08',hr='11')
 select key, value where ds='2008-04-08' and hr='11';
 set hive.exec.dynamic.partition.mode=nonstrict;
@@ -11,23 +11,23 @@ set hive.exec.dynamic.partition=true;
 from srcpart
-insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08';
+insert overwrite table tstsrcpart_n4 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08';
 from srcpart
-insert overwrite table tstsrcpart partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08';
+insert overwrite table tstsrcpart_n4 partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08';
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
+SHOW LOCKS tstsrcpart_n4;
-drop table tstsrcpart;
+drop table tstsrcpart_n4;
-drop table tst1;
-create table tst1 (key string, value string) partitioned by (a string, b string, c string, d string);
+drop table tst1_n4;
+create table tst1_n4 (key string, value string) partitioned by (a string, b string, c string, d string);
 from srcpart
-insert overwrite table tst1 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08';
+insert overwrite table tst1_n4 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08';
-drop table tst1;
+drop table tst1_n4;
diff --git a/ql/src/test/queries/clientpositive/lock4.q b/ql/src/test/queries/clientpositive/lock4.q
index f11365961d..256ca9deb4 100644
--- a/ql/src/test/queries/clientpositive/lock4.q
+++ b/ql/src/test/queries/clientpositive/lock4.q
@@ -1,10 +1,10 @@
 --!
qt:dataset:srcpart set hive.lock.mapred.only.operation=true; -drop table tstsrcpart; -create table tstsrcpart like srcpart; +drop table tstsrcpart_n3; +create table tstsrcpart_n3 like srcpart; from srcpart -insert overwrite table tstsrcpart partition (ds='2008-04-08',hr='11') +insert overwrite table tstsrcpart_n3 partition (ds='2008-04-08',hr='11') select key, value where ds='2008-04-08' and hr='11'; set hive.exec.dynamic.partition.mode=nonstrict; @@ -12,23 +12,23 @@ set hive.exec.dynamic.partition=true; from srcpart -insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'; +insert overwrite table tstsrcpart_n3 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'; from srcpart -insert overwrite table tstsrcpart partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08'; +insert overwrite table tstsrcpart_n3 partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08'; SHOW LOCKS; -SHOW LOCKS tstsrcpart; +SHOW LOCKS tstsrcpart_n3; -drop table tstsrcpart; +drop table tstsrcpart_n3; -drop table tst1; -create table tst1 (key string, value string) partitioned by (a string, b string, c string, d string); +drop table tst1_n3; +create table tst1_n3 (key string, value string) partitioned by (a string, b string, c string, d string); from srcpart -insert overwrite table tst1 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08'; +insert overwrite table tst1_n3 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08'; -drop table tst1; +drop table tst1_n3; diff --git a/ql/src/test/queries/clientpositive/mapjoin2.q b/ql/src/test/queries/clientpositive/mapjoin2.q index b132e49d21..e194bd0177 100644 --- a/ql/src/test/queries/clientpositive/mapjoin2.q +++ b/ql/src/test/queries/clientpositive/mapjoin2.q @@ -1,16 +1,16 @@ set hive.mapred.mode=nonstrict; set hive.auto.convert.join=true; -create table tbl (n bigint, t string); +create table tbl_n1 (n bigint, t string); -insert into tbl values (1, 'one'); -insert into tbl values(2, 'two'); +insert into tbl_n1 values (1, 'one'); +insert into tbl_n1 values(2, 'two'); -select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a left outer join (select * from tbl where 1 = 2) b on a.n = b.n; +select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl_n1 where n = 1) a left outer join (select * from tbl_n1 where 1 = 2) b on a.n = b.n; -select isnull(a.n), isnull(a.t), b.n, b.t from (select * from tbl where 2 = 1) a right outer join (select * from tbl where n = 2) b on a.n = b.n; +select isnull(a.n), isnull(a.t), b.n, b.t from (select * from tbl_n1 where 2 = 1) a right outer join (select * from tbl_n1 where n = 2) b on a.n = b.n; -select isnull(a.n), isnull(a.t), isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a full outer join (select * from tbl where n = 2) b on a.n = b.n; +select isnull(a.n), isnull(a.t), isnull(b.n), isnull(b.t) from (select * from tbl_n1 where n = 1) a full outer join (select * from tbl_n1 where n = 2) b on a.n = b.n; select a.key, a.a_one, b.b_one, a.a_zero, b.b_zero from ( SELECT 11 key, 0 confuse_you, 1 a_one, 0 a_zero ) a join ( SELECT 11 key, 0 confuse_you, 1 b_one, 0 b_zero ) b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/mapjoin46.q b/ql/src/test/queries/clientpositive/mapjoin46.q index b7aa092a2f..9de7113907 100644 --- a/ql/src/test/queries/clientpositive/mapjoin46.q +++ b/ql/src/test/queries/clientpositive/mapjoin46.q @@ 
-3,263 +3,263 @@ set hive.auto.convert.join=true; set hive.strict.checks.cartesian.product=false; set hive.join.emit.interval=2; -CREATE TABLE test1 (key INT, value INT, col_1 STRING); -INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +CREATE TABLE test1_n4 (key INT, value INT, col_1 STRING); +INSERT INTO test1_n4 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car'); -CREATE TABLE test2 (key INT, value INT, col_2 STRING); -INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), +CREATE TABLE test2_n2 (key INT, value INT, col_2 STRING); +INSERT INTO test2_n2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None'); -- Basic outer join EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value); -- Conjunction with pred on multiple inputs and single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102); -- Conjunction with pred on single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102); -- Conjunction with pred on multiple inputs and none (left outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value AND true); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value AND true); -- Condition on one input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102); -- Disjunction with pred on multiple inputs and single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key 
between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102); -- Keys plus residual (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)); -- Disjunction with pred on multiple inputs and single inputs (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR 
test2.key between 100 and 102); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102); -- Keys plus residual (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)); -- Disjunction with pred on multiple inputs and single inputs (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102); -- Keys plus residual (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)); -- Mixed ( FOJ (ROJ, LOJ) ) EXPLAIN SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT 
test1_n4.key AS key1, test1_n4.value AS value1, test1_n4.col_1 AS col_1, + test2_n2.key AS key2, test2_n2.value AS value2, test2_n2.col_2 AS col_2 + FROM test1_n4 RIGHT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key3, test1_n4.value AS value3, test1_n4.col_1 AS col_3, + test2_n2.key AS key4, test2_n2.value AS value4, test2_n2.col_2 AS col_4 + FROM test1_n4 LEFT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2); SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key1, test1_n4.value AS value1, test1_n4.col_1 AS col_1, + test2_n2.key AS key2, test2_n2.value AS value2, test2_n2.col_2 AS col_2 + FROM test1_n4 RIGHT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key3, test1_n4.value AS value3, test1_n4.col_1 AS col_3, + test2_n2.key AS key4, test2_n2.value AS value4, test2_n2.col_2 AS col_4 + FROM test1_n4 LEFT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2); diff --git a/ql/src/test/queries/clientpositive/mapjoin_addjar.q b/ql/src/test/queries/clientpositive/mapjoin_addjar.q index cc8bb8bd00..6b726787c7 100644 --- a/ql/src/test/queries/clientpositive/mapjoin_addjar.q +++ b/ql/src/test/queries/clientpositive/mapjoin_addjar.q @@ -4,12 +4,12 @@ set hive.auto.convert.join.use.nonstaged=false; ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar; -CREATE TABLE t1(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/kv1_cb.txt' INTO TABLE t1; +CREATE TABLE t1_n66(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/kv1_cb.txt' INTO TABLE t1_n66; -select * from t1 l join t1 r on l.key =r.key; +select * from t1_n66 l join t1_n66 r on l.key =r.key; -drop table t1; +drop table t1_n66; DELETE JAR ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar; set hive.auto.convert.join=false; diff --git 
a/ql/src/test/queries/clientpositive/mapjoin_decimal.q b/ql/src/test/queries/clientpositive/mapjoin_decimal.q index 29eae50f1e..c84267a049 100644 --- a/ql/src/test/queries/clientpositive/mapjoin_decimal.q +++ b/ql/src/test/queries/clientpositive/mapjoin_decimal.q @@ -7,7 +7,7 @@ set hive.auto.convert.join.noconditionaltask.size=10000000; -- SORT_QUERY_RESULTS -CREATE TABLE over1k(t tinyint, +CREATE TABLE over1k_n5(t tinyint, si smallint, i int, b bigint, @@ -21,20 +21,20 @@ CREATE TABLE over1k(t tinyint, ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k; +LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n5; -CREATE TABLE t1(`dec` decimal(4,2)) STORED AS ORC; -INSERT INTO TABLE t1 select `dec` from over1k; -CREATE TABLE t2(`dec` decimal(4,0)) STORED AS ORC; -INSERT INTO TABLE t2 select `dec` from over1k; +CREATE TABLE t1_n95(`dec` decimal(4,2)) STORED AS ORC; +INSERT INTO TABLE t1_n95 select `dec` from over1k_n5; +CREATE TABLE t2_n59(`dec` decimal(4,0)) STORED AS ORC; +INSERT INTO TABLE t2_n59 select `dec` from over1k_n5; explain -select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`) order by t1.`dec`; +select t1_n95.`dec`, t2_n59.`dec` from t1_n95 join t2_n59 on (t1_n95.`dec`=t2_n59.`dec`) order by t1_n95.`dec`; set hive.mapjoin.optimized.hashtable=false; -select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`) order by t1.`dec`; +select t1_n95.`dec`, t2_n59.`dec` from t1_n95 join t2_n59 on (t1_n95.`dec`=t2_n59.`dec`) order by t1_n95.`dec`; set hive.mapjoin.optimized.hashtable=true; -select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`) order by t1.`dec`; +select t1_n95.`dec`, t2_n59.`dec` from t1_n95 join t2_n59 on (t1_n95.`dec`=t2_n59.`dec`) order by t1_n95.`dec`; diff --git a/ql/src/test/queries/clientpositive/mapjoin_emit_interval.q b/ql/src/test/queries/clientpositive/mapjoin_emit_interval.q index fe2a32a9d5..5bbeb17662 100644 --- a/ql/src/test/queries/clientpositive/mapjoin_emit_interval.q +++ b/ql/src/test/queries/clientpositive/mapjoin_emit_interval.q @@ -2,8 +2,8 @@ set hive.auto.convert.join=true; set hive.strict.checks.cartesian.product=false; set hive.join.emit.interval=1; -CREATE TABLE test1 (key INT, value INT, col_1 STRING); -INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +CREATE TABLE test1_n0 (key INT, value INT, col_1 STRING); +INSERT INTO test1_n0 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car'); CREATE TABLE test2 (key INT, value INT, col_2 STRING); @@ -14,19 +14,19 @@ INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), -- Equi-condition and condition on one input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value AND test1.key between 100 and 102); +FROM test1_n0 LEFT OUTER JOIN test2 +ON (test1_n0.value=test2.value AND test1_n0.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value AND test1.key between 100 and 102); +FROM test1_n0 LEFT OUTER JOIN test2 +ON (test1_n0.value=test2.value AND test1_n0.key between 100 and 102); -- Condition on one input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n0 LEFT OUTER JOIN test2 +ON (test1_n0.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n0 LEFT OUTER JOIN 
test2 +ON (test1_n0.key between 100 and 102); diff --git a/ql/src/test/queries/clientpositive/mapjoin_hint.q b/ql/src/test/queries/clientpositive/mapjoin_hint.q index 21b4896b84..7189f070e1 100644 --- a/ql/src/test/queries/clientpositive/mapjoin_hint.q +++ b/ql/src/test/queries/clientpositive/mapjoin_hint.q @@ -16,41 +16,41 @@ set hive.stats.fetch.column.stats=true; set hive.tez.bloom.filter.factor=1.0f; -- Create Tables -create table srcpart_date (key string, value string) partitioned by (ds string ) stored as ORC; -CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC; +create table srcpart_date_n5 (key string, value string) partitioned by (ds string ) stored as ORC; +CREATE TABLE srcpart_small_n1(key1 STRING, value1 STRING) partitioned by (ds string) STORED as ORC; -- Add Partitions -alter table srcpart_date add partition (ds = "2008-04-08"); -alter table srcpart_date add partition (ds = "2008-04-09"); +alter table srcpart_date_n5 add partition (ds = "2008-04-08"); +alter table srcpart_date_n5 add partition (ds = "2008-04-09"); -alter table srcpart_small add partition (ds = "2008-04-08"); -alter table srcpart_small add partition (ds = "2008-04-09"); +alter table srcpart_small_n1 add partition (ds = "2008-04-08"); +alter table srcpart_small_n1 add partition (ds = "2008-04-09"); -- Load -insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; -insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; -insert overwrite table srcpart_small partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; +insert overwrite table srcpart_date_n5 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"; +insert overwrite table srcpart_date_n5 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"; +insert overwrite table srcpart_small_n1 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20; -analyze table srcpart_date compute statistics for columns; -analyze table srcpart_small compute statistics for columns; +analyze table srcpart_date_n5 compute statistics for columns; +analyze table srcpart_small_n1 compute statistics for columns; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=100000000000; --HIVE-17475 -EXPLAIN select /*+ mapjoin(None)*/ count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); -EXPLAIN select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = srcpart_small.key1); +EXPLAIN select /*+ mapjoin(None)*/ count(*) from srcpart_date_n5 join srcpart_small_n1 on (srcpart_date_n5.key = srcpart_small_n1.key1); +EXPLAIN select count(*) from srcpart_date_n5 join srcpart_small_n1 on (srcpart_date_n5.key = srcpart_small_n1.key1); -- Ensure that hint works even with CBO on, on a query with subquery. 
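-- (A short aside, kept as comments so the test output is unchanged. The
-- hinted/unhinted EXPLAIN pair above is the HIVE-17475 check: the mapjoin()
-- hint has to survive planning with CBO on even when it names no usable
-- alias. For reference, a minimal sketch of the usual form of the hint,
-- where the alias named is the side requested as the in-memory hash table
-- of the map join; the table names below are the ones created earlier in
-- this test:
--
--   EXPLAIN
--   SELECT /*+ MAPJOIN(s) */ count(*)
--   FROM srcpart_date_n5 d JOIN srcpart_small_n1 s ON (d.key = s.key1);
--
-- The tables that follow set up the subquery case mentioned above.)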
-create table tnull(i int, c char(2)); -insert into tnull values(NULL, NULL), (NULL, NULL); +create table tnull_n1(i int, c char(2)); +insert into tnull_n1 values(NULL, NULL), (NULL, NULL); -create table tempty(c char(2)); +create table tempty_n1(c char(2)); -CREATE TABLE part_null( +CREATE TABLE part_null_n1( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -64,9 +64,9 @@ p_comment STRING ROW FORMAT DELIMITED FIELDS TERMINATED BY "," ; -LOAD DATA LOCAL INPATH '../../data/files/part_tiny_nulls.txt' overwrite into table part_null; +LOAD DATA LOCAL INPATH '../../data/files/part_tiny_nulls.txt' overwrite into table part_null_n1; -insert into part_null values(78487,NULL,'Manufacturer#6','Brand#52','LARGE BRUSHED BRASS', 23, 'MED BAG',1464.48,'hely blith'); +insert into part_null_n1 values(78487,NULL,'Manufacturer#6','Brand#52','LARGE BRUSHED BRASS', 23, 'MED BAG',1464.48,'hely blith'); -explain select /*+ mapjoin(None)*/ * from part where p_name = (select p_name from part_null where p_name is null); -explain select * from part where p_name = (select p_name from part_null where p_name is null); +explain select /*+ mapjoin(None)*/ * from part where p_name = (select p_name from part_null_n1 where p_name is null); +explain select * from part where p_name = (select p_name from part_null_n1 where p_name is null); diff --git a/ql/src/test/queries/clientpositive/mapjoin_hook.q b/ql/src/test/queries/clientpositive/mapjoin_hook.q index 3dc479ca3d..c929356764 100644 --- a/ql/src/test/queries/clientpositive/mapjoin_hook.q +++ b/ql/src/test/queries/clientpositive/mapjoin_hook.q @@ -4,17 +4,17 @@ set hive.stats.column.autogather=false; set hive.exec.post.hooks = org.apache.hadoop.hive.ql.hooks.MapJoinCounterHook,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook; -drop table dest1; -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE; +drop table dest1_n171; +CREATE TABLE dest1_n171(key INT, value STRING) STORED AS TEXTFILE; set hive.auto.convert.join = true; -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n171 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key; FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest1_n171 SELECT src1.key, src3.value; set hive.mapjoin.localtask.max.memory.usage = 0.0001; @@ -23,12 +23,12 @@ set hive.auto.convert.join.noconditionaltask = false; FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n171 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11'); FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value; +INSERT OVERWRITE TABLE dest1_n171 SELECT src1.key, src3.value; diff --git a/ql/src/test/queries/clientpositive/mapjoin_test_outer.q b/ql/src/test/queries/clientpositive/mapjoin_test_outer.q index 96275aba44..3c6383ec46 100644 --- a/ql/src/test/queries/clientpositive/mapjoin_test_outer.q +++ b/ql/src/test/queries/clientpositive/mapjoin_test_outer.q @@ -9,50 +9,50 @@ create table dest_1 (key STRING, value STRING) stored as textfile; insert overwrite table dest_1 select * from src1 order by src1.value limit 8; insert into table dest_1 select "333444","555666" from src1 limit 1; -create table dest_2 (key STRING, 
value STRING) stored as textfile; +create table dest_2_n0 (key STRING, value STRING) stored as textfile; -insert into table dest_2 select * from dest_1; +insert into table dest_2_n0 select * from dest_1; SELECT * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; explain SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src1.key = src3.key) + JOIN dest_2_n0 src3 ON (src1.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; set hive.auto.convert.join = true; SELECT * FROM src1 LEFT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src1.key = src3.key) + JOIN dest_2_n0 src3 ON (src1.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; SELECT * FROM src1 LEFT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; explain SELECT * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; SELECT * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value; diff --git a/ql/src/test/queries/clientpositive/mapreduce1.q b/ql/src/test/queries/clientpositive/mapreduce1.q index 06c7c53451..e806d56d6e 100644 --- a/ql/src/test/queries/clientpositive/mapreduce1.q +++ b/ql/src/test/queries/clientpositive/mapreduce1.q @@ -1,10 +1,10 @@ --! 
qt:dataset:src set hive.explain.user=false; -CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n120(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n120 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey @@ -12,11 +12,11 @@ SORT BY ten, one; FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n120 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey SORT BY ten, one; -SELECT dest1.* FROM dest1; +SELECT dest1_n120.* FROM dest1_n120; diff --git a/ql/src/test/queries/clientpositive/mapreduce2.q b/ql/src/test/queries/clientpositive/mapreduce2.q index dfb8ec399e..b1b7bdc72e 100644 --- a/ql/src/test/queries/clientpositive/mapreduce2.q +++ b/ql/src/test/queries/clientpositive/mapreduce2.q @@ -1,20 +1,20 @@ --! qt:dataset:src set hive.mapred.mode=nonstrict; set hive.explain.user=false; -CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n162(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n162 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey; FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n162 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey; -SELECT * FROM (SELECT dest1.* FROM dest1 DISTRIBUTE BY key SORT BY key, ten, one, value) T ORDER BY key; +SELECT * FROM (SELECT dest1_n162.* FROM dest1_n162 DISTRIBUTE BY key SORT BY key, ten, one, value) T ORDER BY key; diff --git a/ql/src/test/queries/clientpositive/mapreduce3.q b/ql/src/test/queries/clientpositive/mapreduce3.q index ad9908ffb3..d5b3b4ec50 100644 --- a/ql/src/test/queries/clientpositive/mapreduce3.q +++ b/ql/src/test/queries/clientpositive/mapreduce3.q @@ -1,18 +1,18 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n23(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n23 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) SORT BY tvalue, tkey; FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n23 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) SORT BY tvalue, tkey; -SELECT dest1.* FROM dest1; +SELECT dest1_n23.* FROM dest1_n23; diff --git a/ql/src/test/queries/clientpositive/mapreduce4.q b/ql/src/test/queries/clientpositive/mapreduce4.q index 74e907eb53..d8f8a9badf 100644 --- a/ql/src/test/queries/clientpositive/mapreduce4.q +++ b/ql/src/test/queries/clientpositive/mapreduce4.q @@ -1,9 +1,9 @@ --! 
qt:dataset:src -CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n93(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n93 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey @@ -11,11 +11,11 @@ SORT BY ten DESC, one ASC; FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n93 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey SORT BY ten DESC, one ASC; -SELECT dest1.* FROM dest1; +SELECT dest1_n93.* FROM dest1_n93; diff --git a/ql/src/test/queries/clientpositive/mapreduce5.q b/ql/src/test/queries/clientpositive/mapreduce5.q index 841ead5421..ba6a6f8c2d 100644 --- a/ql/src/test/queries/clientpositive/mapreduce5.q +++ b/ql/src/test/queries/clientpositive/mapreduce5.q @@ -1,18 +1,18 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n133(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n133 SELECT src.key as c1, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value as c4 DISTRIBUTE BY c4, c1 SORT BY c2 DESC, c3 ASC; FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n133 SELECT src.key as c1, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value as c4 DISTRIBUTE BY c4, c1 SORT BY c2 DESC, c3 ASC; -SELECT dest1.* FROM dest1; +SELECT dest1_n133.* FROM dest1_n133; diff --git a/ql/src/test/queries/clientpositive/mapreduce6.q b/ql/src/test/queries/clientpositive/mapreduce6.q index 213db77e99..f035c46cd3 100644 --- a/ql/src/test/queries/clientpositive/mapreduce6.q +++ b/ql/src/test/queries/clientpositive/mapreduce6.q @@ -1,18 +1,18 @@ --! qt:dataset:src -CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n169(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n169 SELECT src.key, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value DISTRIBUTE BY value, key SORT BY c2 DESC, c3 ASC; FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n169 SELECT src.key, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value DISTRIBUTE BY value, key SORT BY c2 DESC, c3 ASC; -SELECT dest1.* FROM dest1; +SELECT dest1_n169.* FROM dest1_n169; diff --git a/ql/src/test/queries/clientpositive/mapreduce7.q b/ql/src/test/queries/clientpositive/mapreduce7.q index 77b82ef3bc..aaf08facf1 100644 --- a/ql/src/test/queries/clientpositive/mapreduce7.q +++ b/ql/src/test/queries/clientpositive/mapreduce7.q @@ -1,18 +1,18 @@ --! 
qt:dataset:src -CREATE TABLE dest1(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n37(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n37 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) SORT BY tvalue, tkey; FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n37 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) SORT BY tvalue, tkey; -SELECT dest1.* FROM dest1; +SELECT dest1_n37.* FROM dest1_n37; diff --git a/ql/src/test/queries/clientpositive/mapreduce8.q b/ql/src/test/queries/clientpositive/mapreduce8.q index 7db7510ea8..dd8abc3b1f 100644 --- a/ql/src/test/queries/clientpositive/mapreduce8.q +++ b/ql/src/test/queries/clientpositive/mapreduce8.q @@ -1,9 +1,9 @@ --! qt:dataset:src -CREATE TABLE dest1(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n158(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n158 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) DISTRIBUTE BY rand(3) @@ -11,10 +11,10 @@ SORT BY tvalue, tkey; FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n158 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) DISTRIBUTE BY rand(3) SORT BY tvalue, tkey; -SELECT dest1.* FROM dest1; +SELECT dest1_n158.* FROM dest1_n158; diff --git a/ql/src/test/queries/clientpositive/masking_1.q b/ql/src/test/queries/clientpositive/masking_1.q index 6e4f6a7680..858162c5ea 100644 --- a/ql/src/test/queries/clientpositive/masking_1.q +++ b/ql/src/test/queries/clientpositive/masking_1.q @@ -3,27 +3,27 @@ set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create table masking_test as select cast(key as int) as key, value from src; +create table masking_test_n8 as select cast(key as int) as key, value from src; -explain select * from masking_test; -select * from masking_test; +explain select * from masking_test_n8; +select * from masking_test_n8; -explain select * from masking_test where key > 0; -select * from masking_test where key > 0; +explain select * from masking_test_n8 where key > 0; +select * from masking_test_n8 where key > 0; -explain select key from masking_test where key > 0; -select key from masking_test where key > 0; +explain select key from masking_test_n8 where key > 0; +select key from masking_test_n8 where key > 0; -explain select value from masking_test where key > 0; -select value from masking_test where key > 0; +explain select value from masking_test_n8 where key > 0; +select value from masking_test_n8 where key > 0; -explain select * from masking_test join srcpart on (masking_test.key = srcpart.key); -select * from masking_test join srcpart on (masking_test.key = srcpart.key); +explain select * from masking_test_n8 join srcpart on (masking_test_n8.key = srcpart.key); +select * from masking_test_n8 join srcpart on (masking_test_n8.key = srcpart.key); -explain select * from 
default.masking_test where key > 0; -select * from default.masking_test where key > 0; +explain select * from default.masking_test_n8 where key > 0; +select * from default.masking_test_n8 where key > 0; -explain select * from masking_test where masking_test.key > 0; -select * from masking_test where masking_test.key > 0; +explain select * from masking_test_n8 where masking_test_n8.key > 0; +select * from masking_test_n8 where masking_test_n8.key > 0; explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0; diff --git a/ql/src/test/queries/clientpositive/masking_12.q b/ql/src/test/queries/clientpositive/masking_12.q index 707d53afd5..6bb941e87c 100644 --- a/ql/src/test/queries/clientpositive/masking_12.q +++ b/ql/src/test/queries/clientpositive/masking_12.q @@ -2,24 +2,24 @@ set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create table `masking_test` as select cast(key as int) as key, value from src; +create table `masking_test_n5` as select cast(key as int) as key, value from src; -create view `v0` as select * from `masking_test`; +create view `v0` as select * from `masking_test_n5`; explain select * from `v0`; select * from `v0`; -create table `masking_test_subq` as select cast(key as int) as key, value from src; +create table `masking_test_subq_n1` as select cast(key as int) as key, value from src; -create view `v1` as select * from `masking_test_subq`; +create view `v1_n9` as select * from `masking_test_subq_n1`; explain -select * from `v1` +select * from `v1_n9` limit 20; -select * from `v1` +select * from `v1_n9` limit 20; create view `masking_test_view` as select key from `v0`; diff --git a/ql/src/test/queries/clientpositive/masking_1_newdb.q b/ql/src/test/queries/clientpositive/masking_1_newdb.q index 7438349b82..6626facac8 100644 --- a/ql/src/test/queries/clientpositive/masking_1_newdb.q +++ b/ql/src/test/queries/clientpositive/masking_1_newdb.q @@ -6,13 +6,13 @@ create database newdb; use newdb; -create table masking_test as select cast(key as int) as key, value from default.src; +create table masking_test_n12 as select cast(key as int) as key, value from default.src; use default; -explain select * from newdb.masking_test; -select * from newdb.masking_test; +explain select * from newdb.masking_test_n12; +select * from newdb.masking_test_n12; -explain select * from newdb.masking_test where key > 0; -select * from newdb.masking_test where key > 0; +explain select * from newdb.masking_test_n12 where key > 0; +select * from newdb.masking_test_n12 where key > 0; diff --git a/ql/src/test/queries/clientpositive/masking_2.q b/ql/src/test/queries/clientpositive/masking_2.q index 3192992811..ece15186f9 100644 --- a/ql/src/test/queries/clientpositive/masking_2.q +++ b/ql/src/test/queries/clientpositive/masking_2.q @@ -2,17 +2,17 @@ set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create view masking_test as select cast(key as int) as key, value from src; +create view masking_test_n1 as select cast(key as int) as key, value from src; -explain select * from masking_test; -select * from masking_test; +explain select * from masking_test_n1; +select * from masking_test_n1; -explain select * from masking_test where key > 0; -select * from masking_test where key 
> 0; +explain select * from masking_test_n1 where key > 0; +select * from masking_test_n1 where key > 0; -explain select * from src a join masking_test b on a.key = b.value where b.key > 0; +explain select * from src a join masking_test_n1 b on a.key = b.value where b.key > 0; -explain select a.*, b.key from masking_test a join masking_test b on a.key = b.value where b.key > 0; +explain select a.*, b.key from masking_test_n1 a join masking_test_n1 b on a.key = b.value where b.key > 0; -explain select * from masking_test a union select b.* from masking_test b where b.key > 0; +explain select * from masking_test_n1 a union select b.* from masking_test_n1 b where b.key > 0; diff --git a/ql/src/test/queries/clientpositive/masking_3.q b/ql/src/test/queries/clientpositive/masking_3.q index 018f998791..42706d01e0 100644 --- a/ql/src/test/queries/clientpositive/masking_3.q +++ b/ql/src/test/queries/clientpositive/masking_3.q @@ -3,27 +3,27 @@ set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create table masking_test_subq as select cast(key as int) as key, value from src; +create table masking_test_subq_n3 as select cast(key as int) as key, value from src; -explain select * from masking_test_subq; -select * from masking_test_subq; +explain select * from masking_test_subq_n3; +select * from masking_test_subq_n3; -explain select * from masking_test_subq where key > 0; -select * from masking_test_subq where key > 0; +explain select * from masking_test_subq_n3 where key > 0; +select * from masking_test_subq_n3 where key > 0; -explain select key from masking_test_subq where key > 0; -select key from masking_test_subq where key > 0; +explain select key from masking_test_subq_n3 where key > 0; +select key from masking_test_subq_n3 where key > 0; -explain select value from masking_test_subq where key > 0; -select value from masking_test_subq where key > 0; +explain select value from masking_test_subq_n3 where key > 0; +select value from masking_test_subq_n3 where key > 0; -explain select * from masking_test_subq join srcpart on (masking_test_subq.key = srcpart.key); -select * from masking_test_subq join srcpart on (masking_test_subq.key = srcpart.key); +explain select * from masking_test_subq_n3 join srcpart on (masking_test_subq_n3.key = srcpart.key); +select * from masking_test_subq_n3 join srcpart on (masking_test_subq_n3.key = srcpart.key); -explain select * from default.masking_test_subq where key > 0; -select * from default.masking_test_subq where key > 0; +explain select * from default.masking_test_subq_n3 where key > 0; +select * from default.masking_test_subq_n3 where key > 0; -explain select * from masking_test_subq where masking_test_subq.key > 0; -select * from masking_test_subq where masking_test_subq.key > 0; +explain select * from masking_test_subq_n3 where masking_test_subq_n3.key > 0; +select * from masking_test_subq_n3 where masking_test_subq_n3.key > 0; explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0; diff --git a/ql/src/test/queries/clientpositive/masking_4.q b/ql/src/test/queries/clientpositive/masking_4.q index 49af1cc0ec..48fe33c5aa 100644 --- a/ql/src/test/queries/clientpositive/masking_4.q +++ b/ql/src/test/queries/clientpositive/masking_4.q @@ -2,8 +2,8 @@ set hive.mapred.mode=nonstrict; set 
hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create table masking_test as select cast(key as int) as key, value from src; -create table masking_test_subq as select cast(key as int) as key, value from src; +create table masking_test_n11 as select cast(key as int) as key, value from src; +create table masking_test_subq_n2 as select cast(key as int) as key, value from src; explain @@ -12,20 +12,20 @@ q2 as ( select key from src where key = '5') select * from (select key from q1) a; ---should mask masking_test +--should mask masking_test_n11 explain -with q1 as ( select * from masking_test where key = '5') +with q1 as ( select * from masking_test_n11 where key = '5') select * from q1; ---should not mask masking_test_subq +--should not mask masking_test_subq_n2 explain -with masking_test_subq as ( select * from masking_test where key = '5') -select * from masking_test_subq; +with masking_test_subq_n2 as ( select * from masking_test_n11 where key = '5') +select * from masking_test_subq_n2; ---should mask masking_test_subq +--should mask masking_test_subq_n2 explain -with q1 as ( select * from masking_test where key = '5') -select * from masking_test_subq; \ No newline at end of file +with q1 as ( select * from masking_test_n11 where key = '5') +select * from masking_test_subq_n2; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/masking_5.q b/ql/src/test/queries/clientpositive/masking_5.q index e8f392fb96..e24bb931d9 100644 --- a/ql/src/test/queries/clientpositive/masking_5.q +++ b/ql/src/test/queries/clientpositive/masking_5.q @@ -2,22 +2,22 @@ set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create table masking_test as select cast(key as int) as key, value from src; +create table masking_test_n6 as select cast(key as int) as key, value from src; -explain select * from masking_test tablesample (10 rows); -select * from masking_test tablesample (10 rows); +explain select * from masking_test_n6 tablesample (10 rows); +select * from masking_test_n6 tablesample (10 rows); explain -select * from masking_test tablesample(1 percent); -select * from masking_test tablesample(1 percent); +select * from masking_test_n6 tablesample(1 percent); +select * from masking_test_n6 tablesample(1 percent); -drop table masking_test; +drop table masking_test_n6; -CREATE TABLE masking_test(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS; +CREATE TABLE masking_test_n6(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS; -insert overwrite table masking_test +insert overwrite table masking_test_n6 select * from src; explain -select * from masking_test tablesample (bucket 1 out of 2) s; -select * from masking_test tablesample (bucket 1 out of 2) s; +select * from masking_test_n6 tablesample (bucket 1 out of 2) s; +select * from masking_test_n6 tablesample (bucket 1 out of 2) s; diff --git a/ql/src/test/queries/clientpositive/masking_6.q b/ql/src/test/queries/clientpositive/masking_6.q index f773ec1f20..ddbe476878 100644 --- a/ql/src/test/queries/clientpositive/masking_6.q +++ b/ql/src/test/queries/clientpositive/masking_6.q @@ -2,28 +2,28 @@ set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -drop view masking_test; +drop view masking_test_n0; -create 
view masking_test as select cast(key as int) as key, value, '12' from src; +create view masking_test_n0 as select cast(key as int) as key, value, '12' from src; -explain select * from masking_test; +explain select * from masking_test_n0; -select * from masking_test; +select * from masking_test_n0; -explain select * from masking_test where key > 0; +explain select * from masking_test_n0 where key > 0; -select * from masking_test where key > 0; +select * from masking_test_n0 where key > 0; -drop view masking_test; +drop view masking_test_n0; -create view masking_test as select cast(key as int) as key, '12', +create view masking_test_n0 as select cast(key as int) as key, '12', '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src; -explain select * from masking_test; +explain select * from masking_test_n0; -select * from masking_test; +select * from masking_test_n0; -explain select * from masking_test where key > 0; +explain select * from masking_test_n0 where key > 0; -select * from masking_test where key > 0; +select * from masking_test_n0 where key > 0; diff --git a/ql/src/test/queries/clientpositive/masking_7.q b/ql/src/test/queries/clientpositive/masking_7.q index 16552218df..e4d6387d2d 100644 --- a/ql/src/test/queries/clientpositive/masking_7.q +++ b/ql/src/test/queries/clientpositive/masking_7.q @@ -2,28 +2,28 @@ set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -drop view masking_test; +drop view masking_test_n14; -create view masking_test as select cast(key as int) as key, value, '12', ROW__ID from src; +create view masking_test_n14 as select cast(key as int) as key, value, '12', ROW__ID from src; -explain select * from masking_test; +explain select * from masking_test_n14; -select * from masking_test; +select * from masking_test_n14; -explain select * from masking_test where key > 0; +explain select * from masking_test_n14 where key > 0; -select * from masking_test where key > 0; +select * from masking_test_n14 where key > 0; -drop view masking_test; +drop view masking_test_n14; -create view masking_test as select cast(key as int) as key, '12', ROW__ID, +create view masking_test_n14 as select cast(key as int) as key, '12', ROW__ID, '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src; -explain select * from masking_test; +explain select * from masking_test_n14; -select * from masking_test; +select * from masking_test_n14; -explain select * from masking_test where key > 0; +explain select * from masking_test_n14 where key > 0; -select * from masking_test where key > 0; +select * from masking_test_n14 where key > 0; diff --git a/ql/src/test/queries/clientpositive/masking_8.q b/ql/src/test/queries/clientpositive/masking_8.q index 2db065275b..94e4106101 100644 --- a/ql/src/test/queries/clientpositive/masking_8.q +++ b/ql/src/test/queries/clientpositive/masking_8.q @@ -2,36 +2,36 @@ set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -drop table masking_test; +drop table masking_test_n2; -create table masking_test as select cast(key as int) as key, value, '12' from src; +create table masking_test_n2 as select cast(key as int) as key, value, '12' from src; -explain select *, ROW__ID from masking_test; +explain select *, ROW__ID from masking_test_n2; -select *, ROW__ID from masking_test; +select *, ROW__ID from 
masking_test_n2; -explain select * from masking_test; +explain select * from masking_test_n2; -select * from masking_test; +select * from masking_test_n2; -explain select INPUT__FILE__NAME, *, ROW__ID from masking_test; +explain select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2; -select INPUT__FILE__NAME, *, ROW__ID from masking_test; +select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2; -drop table masking_test; +drop table masking_test_n2; -create table masking_test as select cast(key as int) as key, '12' +create table masking_test_n2 as select cast(key as int) as key, '12' '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src; -explain select ROW__ID, * from masking_test; +explain select ROW__ID, * from masking_test_n2; -select ROW__ID, * from masking_test; +select ROW__ID, * from masking_test_n2; -drop table masking_test; +drop table masking_test_n2; -create table masking_test as select cast(key as int) as key, '12' +create table masking_test_n2 as select cast(key as int) as key, '12' '12', '12', '12', '12', '12', INPUT__FILE__NAME, '12', '12', '12', '12', '12' from src; -select INPUT__FILE__NAME, *, ROW__ID from masking_test; +select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2; diff --git a/ql/src/test/queries/clientpositive/masking_acid_no_masking.q b/ql/src/test/queries/clientpositive/masking_acid_no_masking.q index 2d19826bef..75de085e11 100644 --- a/ql/src/test/queries/clientpositive/masking_acid_no_masking.q +++ b/ql/src/test/queries/clientpositive/masking_acid_no_masking.q @@ -6,7 +6,7 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -create table nonacid (key int, value string) stored as orc; +create table nonacid_n0 (key int, value string) stored as orc; create table masking_acid_no_masking (key int, value string) clustered by (value) into 2 buckets stored as orc @@ -16,7 +16,7 @@ update masking_acid_no_masking set key=1 where value='ddd'; delete from masking_acid_no_masking where value='ddd'; -MERGE INTO masking_acid_no_masking as t using nonacid as s ON t.key = s.key +MERGE INTO masking_acid_no_masking as t using nonacid_n0 as s ON t.key = s.key WHEN MATCHED AND s.key < 5 THEN DELETE WHEN MATCHED AND s.key < 3 THEN UPDATE set key = 1 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.value); diff --git a/ql/src/test/queries/clientpositive/masking_disablecbo_1.q b/ql/src/test/queries/clientpositive/masking_disablecbo_1.q index aaf17e0bdc..0a4e09b77b 100644 --- a/ql/src/test/queries/clientpositive/masking_disablecbo_1.q +++ b/ql/src/test/queries/clientpositive/masking_disablecbo_1.q @@ -4,27 +4,27 @@ set hive.cbo.enable=false; set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create table masking_test as select cast(key as int) as key, value from src; +create table masking_test_n10 as select cast(key as int) as key, value from src; -explain select * from masking_test; -select * from masking_test; +explain select * from masking_test_n10; +select * from masking_test_n10; -explain select * from masking_test where key > 0; -select * from masking_test where key > 0; +explain select * from masking_test_n10 where key > 0; +select * from masking_test_n10 where key > 0; -explain select key from masking_test where key > 0; -select key from masking_test where key > 0; +explain select key from 
masking_test_n10 where key > 0; +select key from masking_test_n10 where key > 0; -explain select value from masking_test where key > 0; -select value from masking_test where key > 0; +explain select value from masking_test_n10 where key > 0; +select value from masking_test_n10 where key > 0; -explain select * from masking_test join srcpart on (masking_test.key = srcpart.key); -select * from masking_test join srcpart on (masking_test.key = srcpart.key); +explain select * from masking_test_n10 join srcpart on (masking_test_n10.key = srcpart.key); +select * from masking_test_n10 join srcpart on (masking_test_n10.key = srcpart.key); -explain select * from default.masking_test where key > 0; -select * from default.masking_test where key > 0; +explain select * from default.masking_test_n10 where key > 0; +select * from default.masking_test_n10 where key > 0; -explain select * from masking_test where masking_test.key > 0; -select * from masking_test where masking_test.key > 0; +explain select * from masking_test_n10 where masking_test_n10.key > 0; +select * from masking_test_n10 where masking_test_n10.key > 0; explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0; diff --git a/ql/src/test/queries/clientpositive/masking_disablecbo_2.q b/ql/src/test/queries/clientpositive/masking_disablecbo_2.q index 039ffe91f8..a4129c2975 100644 --- a/ql/src/test/queries/clientpositive/masking_disablecbo_2.q +++ b/ql/src/test/queries/clientpositive/masking_disablecbo_2.q @@ -3,17 +3,17 @@ set hive.cbo.enable=false; set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create view masking_test as select cast(key as int) as key, value from src; +create view masking_test_n13 as select cast(key as int) as key, value from src; -explain select * from masking_test; -select * from masking_test; +explain select * from masking_test_n13; +select * from masking_test_n13; -explain select * from masking_test where key > 0; -select * from masking_test where key > 0; +explain select * from masking_test_n13 where key > 0; +select * from masking_test_n13 where key > 0; -explain select * from src a join masking_test b on a.key = b.value where b.key > 0; +explain select * from src a join masking_test_n13 b on a.key = b.value where b.key > 0; -explain select a.*, b.key from masking_test a join masking_test b on a.key = b.value where b.key > 0; +explain select a.*, b.key from masking_test_n13 a join masking_test_n13 b on a.key = b.value where b.key > 0; -explain select * from masking_test a union select b.* from masking_test b where b.key > 0; +explain select * from masking_test_n13 a union select b.* from masking_test_n13 b where b.key > 0; diff --git a/ql/src/test/queries/clientpositive/masking_disablecbo_4.q b/ql/src/test/queries/clientpositive/masking_disablecbo_4.q index 9981969e3a..d81fd3b769 100644 --- a/ql/src/test/queries/clientpositive/masking_disablecbo_4.q +++ b/ql/src/test/queries/clientpositive/masking_disablecbo_4.q @@ -3,8 +3,8 @@ set hive.cbo.enable=false; set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create table masking_test as select cast(key as int) as key, value from src; -create table masking_test_subq as select cast(key as int) as key, value from src; +create table 
masking_test_n3 as select cast(key as int) as key, value from src; +create table masking_test_subq_n0 as select cast(key as int) as key, value from src; explain @@ -13,20 +13,20 @@ q2 as ( select key from src where key = '5') select * from (select key from q1) a; ---should mask masking_test +--should mask masking_test_n3 explain -with q1 as ( select * from masking_test where key = '5') +with q1 as ( select * from masking_test_n3 where key = '5') select * from q1; ---should not mask masking_test_subq +--should not mask masking_test_subq_n0 explain -with masking_test_subq as ( select * from masking_test where key = '5') -select * from masking_test_subq; +with masking_test_subq_n0 as ( select * from masking_test_n3 where key = '5') +select * from masking_test_subq_n0; ---should mask masking_test_subq +--should mask masking_test_subq_n0 explain -with q1 as ( select * from masking_test where key = '5') -select * from masking_test_subq; +with q1 as ( select * from masking_test_n3 where key = '5') +select * from masking_test_subq_n0; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create.q b/ql/src/test/queries/clientpositive/materialized_view_create.q index 8452d163ef..bca3bd7de7 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create.q @@ -1,25 +1,25 @@ set hive.vectorized.execution.enabled=false; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2)); +create table cmv_basetable_n4 (a int, b varchar(256), c decimal(10,2)); -insert into cmv_basetable values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8); +insert into cmv_basetable_n4 values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8); -create materialized view cmv_mat_view as select a, b, c from cmv_basetable; +create materialized view cmv_mat_view_n4 as select a, b, c from cmv_basetable_n4; -desc formatted cmv_mat_view; +desc formatted cmv_mat_view_n4; -select * from cmv_mat_view; +select * from cmv_mat_view_n4; -create materialized view if not exists cmv_mat_view2 as select a, c from cmv_basetable; +create materialized view if not exists cmv_mat_view2_n1 as select a, c from cmv_basetable_n4; -desc formatted cmv_mat_view2; +desc formatted cmv_mat_view2_n1; -select * from cmv_mat_view2; +select * from cmv_mat_view2_n1; -create materialized view if not exists cmv_mat_view3 as select * from cmv_basetable where a > 1; +create materialized view if not exists cmv_mat_view3 as select * from cmv_basetable_n4 where a > 1; select * from cmv_mat_view3; -create materialized view cmv_mat_view4 comment 'this is a comment' as select a, sum(c) from cmv_basetable group by a; +create materialized view cmv_mat_view4 comment 'this is a comment' as select a, sum(c) from cmv_basetable_n4 group by a; select * from cmv_mat_view4; @@ -29,14 +29,14 @@ create table cmv_basetable2 (d int, e varchar(256), f decimal(10,2)); insert into cmv_basetable2 values (4, 'alfred', 100.30),(4, 'bob', 6133.14),(5, 'bonnie', 172.2),(6, 'calvin', 8.76),(17, 'charlie', 13144339.8); -create materialized view cmv_mat_view5 tblproperties ('key'='value') as select a, b, d, c, f from cmv_basetable t1 join cmv_basetable2 t2 on (t1.b = t2.e); +create materialized view cmv_mat_view5 tblproperties ('key'='value') as select a, b, d, c, f from cmv_basetable_n4 t1 join cmv_basetable2 t2 on (t1.b = t2.e); select * from cmv_mat_view5; show tblproperties cmv_mat_view5; 
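-- (Aside, kept as comments so the golden output is unchanged: materialized
-- views are dropped with the dedicated DROP MATERIALIZED VIEW statement, as
-- below. The views in this test were created without ENABLE REWRITE; the
-- automatic-rewrite path is what the next test exercises, roughly:
--
--   CREATE MATERIALIZED VIEW mv ENABLE REWRITE
--   AS SELECT a, c FROM base_tbl WHERE a = 3;
--   EXPLAIN SELECT a, c FROM base_tbl WHERE a = 3;
--
-- where mv and base_tbl stand in for the real names, and with
-- hive.materializedview.rewriting=true plus a transactional base table the
-- plan is expected to read mv instead of base_tbl.)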
-drop materialized view cmv_mat_view; -drop materialized view cmv_mat_view2; +drop materialized view cmv_mat_view_n4; +drop materialized view cmv_mat_view2_n1; drop materialized view cmv_mat_view3; drop materialized view cmv_mat_view4; drop materialized view cmv_mat_view5; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q index ede548d8e0..b67b888552 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q @@ -6,82 +6,82 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_n10 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable values +insert into cmv_basetable_n10 values (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), (3, 'calvin', 978.76, 3), (3, 'charlie', 9.8, 1); -analyze table cmv_basetable compute statistics for columns; +analyze table cmv_basetable_n10 compute statistics for columns; -create materialized view cmv_mat_view enable rewrite -as select a, b, c from cmv_basetable where a = 2; +create materialized view cmv_mat_view_n10 enable rewrite +as select a, b, c from cmv_basetable_n10 where a = 2; -select * from cmv_mat_view; +select * from cmv_mat_view_n10; -show tblproperties cmv_mat_view; +show tblproperties cmv_mat_view_n10; -create materialized view if not exists cmv_mat_view2 enable rewrite -as select a, c from cmv_basetable where a = 3; +create materialized view if not exists cmv_mat_view2_n4 enable rewrite +as select a, c from cmv_basetable_n10 where a = 3; -select * from cmv_mat_view2; +select * from cmv_mat_view2_n4; -show tblproperties cmv_mat_view2; +show tblproperties cmv_mat_view2_n4; explain -select a, c from cmv_basetable where a = 3; +select a, c from cmv_basetable_n10 where a = 3; -select a, c from cmv_basetable where a = 3; +select a, c from cmv_basetable_n10 where a = 3; -alter materialized view cmv_mat_view2 disable rewrite; +alter materialized view cmv_mat_view2_n4 disable rewrite; explain select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n10 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n10 where d = 3) table2 on table1.a = table2.a); select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n10 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n10 where d = 3) table2 on table1.a = table2.a); explain -alter materialized view cmv_mat_view2 enable rewrite; +alter materialized view cmv_mat_view2_n4 enable rewrite; -alter materialized view cmv_mat_view2 enable rewrite; +alter materialized view cmv_mat_view2_n4 enable rewrite; explain select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n10 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n10 where d = 3) table2 on table1.a = table2.a); select * from ( - (select a, c from cmv_basetable where a = 3) table1 + 
(select a, c from cmv_basetable_n10 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n10 where d = 3) table2 on table1.a = table2.a); -drop materialized view cmv_mat_view2; +drop materialized view cmv_mat_view2_n4; explain select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n10 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n10 where d = 3) table2 on table1.a = table2.a); select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n10 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n10 where d = 3) table2 on table1.a = table2.a); -drop materialized view cmv_mat_view; +drop materialized view cmv_mat_view_n10; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q index 82c06e4ed9..21579d4dfe 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_2.q @@ -4,90 +4,90 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_n9 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable values +insert into cmv_basetable_n9 values (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), (3, 'calvin', 978.76, 3), (3, 'charlie', 9.8, 1); -analyze table cmv_basetable compute statistics for columns; +analyze table cmv_basetable_n9 compute statistics for columns; -create materialized view cmv_mat_view enable rewrite -as select b from cmv_basetable where c > 10.0 group by a, b, c; +create materialized view cmv_mat_view_n9 enable rewrite +as select b from cmv_basetable_n9 where c > 10.0 group by a, b, c; -- CANNOT BE TRIGGERED explain -select b from cmv_basetable where c > 20.0 group by a, b; +select b from cmv_basetable_n9 where c > 20.0 group by a, b; -select b from cmv_basetable where c > 20.0 group by a, b; +select b from cmv_basetable_n9 where c > 20.0 group by a, b; create materialized view cmv_mat_view_2 enable rewrite -as select b, c from cmv_basetable where c > 10.0 group by a, b, c; +as select b, c from cmv_basetable_n9 where c > 10.0 group by a, b, c; -- CANNOT BE TRIGGERED explain -select b from cmv_basetable where c > 20.0 group by a, b; +select b from cmv_basetable_n9 where c > 20.0 group by a, b; -select b from cmv_basetable where c > 20.0 group by a, b; +select b from cmv_basetable_n9 where c > 20.0 group by a, b; create materialized view cmv_mat_view_3 enable rewrite -as select a, b, c from cmv_basetable where c > 10.0 group by a, b, c; +as select a, b, c from cmv_basetable_n9 where c > 10.0 group by a, b, c; -- CAN BE TRIGGERED explain -select b from cmv_basetable where c > 20.0 group by a, b; +select b from cmv_basetable_n9 where c > 20.0 group by a, b; -select b from cmv_basetable where c > 20.0 group by a, b; +select b from cmv_basetable_n9 where c > 20.0 group by a, b; create materialized view cmv_mat_view_4 enable rewrite -as select a, b from 
cmv_basetable group by a, b; +as select a, b from cmv_basetable_n9 group by a, b; -- CAN BE TRIGGERED explain -select b from cmv_basetable group by b; +select b from cmv_basetable_n9 group by b; -select b from cmv_basetable group by b; +select b from cmv_basetable_n9 group by b; -create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_2_n4 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n4 values (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n4 compute statistics for columns; create materialized view cmv_mat_view_5 enable rewrite -as select cmv_basetable.a, cmv_basetable_2.c - from cmv_basetable join cmv_basetable_2 on (cmv_basetable.a = cmv_basetable_2.a) - where cmv_basetable_2.c > 10.0 - group by cmv_basetable.a, cmv_basetable_2.c; +as select cmv_basetable_n9.a, cmv_basetable_2_n4.c + from cmv_basetable_n9 join cmv_basetable_2_n4 on (cmv_basetable_n9.a = cmv_basetable_2_n4.a) + where cmv_basetable_2_n4.c > 10.0 + group by cmv_basetable_n9.a, cmv_basetable_2_n4.c; explain -select cmv_basetable.a -from cmv_basetable join cmv_basetable_2 on (cmv_basetable.a = cmv_basetable_2.a) -where cmv_basetable_2.c > 10.10 -group by cmv_basetable.a, cmv_basetable_2.c; +select cmv_basetable_n9.a +from cmv_basetable_n9 join cmv_basetable_2_n4 on (cmv_basetable_n9.a = cmv_basetable_2_n4.a) +where cmv_basetable_2_n4.c > 10.10 +group by cmv_basetable_n9.a, cmv_basetable_2_n4.c; -select cmv_basetable.a -from cmv_basetable join cmv_basetable_2 on (cmv_basetable.a = cmv_basetable_2.a) -where cmv_basetable_2.c > 10.10 -group by cmv_basetable.a, cmv_basetable_2.c; +select cmv_basetable_n9.a +from cmv_basetable_n9 join cmv_basetable_2_n4 on (cmv_basetable_n9.a = cmv_basetable_2_n4.a) +where cmv_basetable_2_n4.c > 10.10 +group by cmv_basetable_n9.a, cmv_basetable_2_n4.c; explain -select cmv_basetable.a -from cmv_basetable join cmv_basetable_2 on (cmv_basetable.a = cmv_basetable_2.a) -where cmv_basetable_2.c > 10.10 -group by cmv_basetable.a; +select cmv_basetable_n9.a +from cmv_basetable_n9 join cmv_basetable_2_n4 on (cmv_basetable_n9.a = cmv_basetable_2_n4.a) +where cmv_basetable_2_n4.c > 10.10 +group by cmv_basetable_n9.a; -select cmv_basetable.a -from cmv_basetable join cmv_basetable_2 on (cmv_basetable.a = cmv_basetable_2.a) -where cmv_basetable_2.c > 10.10 -group by cmv_basetable.a; +select cmv_basetable_n9.a +from cmv_basetable_n9 join cmv_basetable_2_n4 on (cmv_basetable_n9.a = cmv_basetable_2_n4.a) +where cmv_basetable_2_n4.c > 10.10 +group by cmv_basetable_n9.a; -drop materialized view cmv_mat_view; +drop materialized view cmv_mat_view_n9; drop materialized view cmv_mat_view_2; drop materialized view cmv_mat_view_3; drop materialized view cmv_mat_view_4; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q index ff1290bac7..ec0413ff15 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_4.q @@ -4,162 +4,162 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -create table 
cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_n5 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable values +insert into cmv_basetable_n5 values (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), (3, 'calvin', 978.76, 3), (3, 'charlie', 9.8, 1); -analyze table cmv_basetable compute statistics for columns; +analyze table cmv_basetable_n5 compute statistics for columns; -create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_2_n2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n2 values (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n2 compute statistics for columns; -- CREATE VIEW WITH REWRITE DISABLED EXPLAIN -CREATE MATERIALIZED VIEW cmv_mat_view TBLPROPERTIES ('transactional'='true') AS - SELECT cmv_basetable.a, cmv_basetable_2.c, sum(cmv_basetable_2.d) - FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) - WHERE cmv_basetable_2.c > 10.0 - GROUP BY cmv_basetable.a, cmv_basetable_2.c; +CREATE MATERIALIZED VIEW cmv_mat_view_n5 TBLPROPERTIES ('transactional'='true') AS + SELECT cmv_basetable_n5.a, cmv_basetable_2_n2.c, sum(cmv_basetable_2_n2.d) + FROM cmv_basetable_n5 JOIN cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) + WHERE cmv_basetable_2_n2.c > 10.0 + GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -CREATE MATERIALIZED VIEW cmv_mat_view TBLPROPERTIES ('transactional'='true') AS - SELECT cmv_basetable.a, cmv_basetable_2.c, sum(cmv_basetable_2.d) - FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) - WHERE cmv_basetable_2.c > 10.0 - GROUP BY cmv_basetable.a, cmv_basetable_2.c; +CREATE MATERIALIZED VIEW cmv_mat_view_n5 TBLPROPERTIES ('transactional'='true') AS + SELECT cmv_basetable_n5.a, cmv_basetable_2_n2.c, sum(cmv_basetable_2_n2.d) + FROM cmv_basetable_n5 JOIN cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) + WHERE cmv_basetable_2_n2.c > 10.0 + GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -analyze table cmv_mat_view compute statistics for columns; +analyze table cmv_mat_view_n5 compute statistics for columns; -DESCRIBE FORMATTED cmv_mat_view; +DESCRIBE FORMATTED cmv_mat_view_n5; -- CANNOT USE THE VIEW, IT IS DISABLED FOR REWRITE EXPLAIN -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 join cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 JOIN cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY 
cmv_basetable_n5.a, cmv_basetable_2_n2.c; -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n2 values (3, 'charlie', 15.8, 1); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n2 compute statistics for columns; -- ENABLE FOR REWRITE EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 ENABLE REWRITE; -ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 ENABLE REWRITE; -DESCRIBE FORMATTED cmv_mat_view; +DESCRIBE FORMATTED cmv_mat_view_n5; -- CANNOT USE THE VIEW, IT IS OUTDATED EXPLAIN -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 join cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 JOIN cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -- REBUILD EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 REBUILD; -DESCRIBE FORMATTED cmv_mat_view; +DESCRIBE FORMATTED cmv_mat_view_n5; -- NOW IT CAN BE USED AGAIN EXPLAIN -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 join cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 JOIN cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -- NOW AN UPDATE -UPDATE cmv_basetable_2 SET a=2 WHERE a=1; +UPDATE cmv_basetable_2_n2 SET a=2 WHERE a=1; -- INCREMENTAL REBUILD CANNOT BE TRIGGERED EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 REBUILD; -- MV CAN BE USED EXPLAIN -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 join cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, 
cmv_basetable_2_n2.c; -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 JOIN cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -- NOW A DELETE -DELETE FROM cmv_basetable_2 WHERE a=2; +DELETE FROM cmv_basetable_2_n2 WHERE a=2; -- INCREMENTAL REBUILD CANNOT BE TRIGGERED EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 REBUILD; -- MV CAN BE USED EXPLAIN -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 join cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 JOIN cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -- NOW AN INSERT -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n2 values (1, 'charlie', 15.8, 1); -- INCREMENTAL REBUILD CAN BE TRIGGERED AGAIN EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n5 REBUILD; -- MV CAN BE USED EXPLAIN -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 join cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -SELECT cmv_basetable.a, sum(cmv_basetable_2.d) -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n5.a, sum(cmv_basetable_2_n2.d) +FROM cmv_basetable_n5 JOIN cmv_basetable_2_n2 ON (cmv_basetable_n5.a = cmv_basetable_2_n2.a) +WHERE cmv_basetable_2_n2.c > 10.10 +GROUP BY cmv_basetable_n5.a, cmv_basetable_2_n2.c; -drop materialized view cmv_mat_view; +drop materialized view cmv_mat_view_n5; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q index 1d97325b8a..141c92ec7f 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q @@ -4,121 +4,121 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set 
hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable values +insert into cmv_basetable_n6 values (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), (3, 'calvin', 978.76, 3), (3, 'charlie', 9.8, 1); -analyze table cmv_basetable compute statistics for columns; +analyze table cmv_basetable_n6 compute statistics for columns; -create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n3 values (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n3 compute statistics for columns; -CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE +CREATE MATERIALIZED VIEW cmv_mat_view_n6 ENABLE REWRITE TBLPROPERTIES ('transactional'='true') AS - SELECT cmv_basetable.a, cmv_basetable_2.c - FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) - WHERE cmv_basetable_2.c > 10.0; -analyze table cmv_mat_view compute statistics for columns; + SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c + FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) + WHERE cmv_basetable_2_n3.c > 10.0; +analyze table cmv_mat_view_n6 compute statistics for columns; -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n3 values (3, 'charlie', 15.8, 1); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n3 compute statistics for columns; -- CANNOT USE THE VIEW, IT IS OUTDATED EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 join cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -- REBUILD EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; -DESCRIBE FORMATTED cmv_mat_view; +DESCRIBE FORMATTED cmv_mat_view_n6; -- NOW IT CAN BE USED AGAIN EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 join cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = 
cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -- NOW AN UPDATE -UPDATE cmv_basetable_2 SET a=2 WHERE a=1; +UPDATE cmv_basetable_2_n3 SET a=2 WHERE a=1; -- INCREMENTAL REBUILD CANNOT BE TRIGGERED EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; -- MV CAN BE USED EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 join cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -- NOW A DELETE -DELETE FROM cmv_basetable_2 WHERE a=2; +DELETE FROM cmv_basetable_2_n3 WHERE a=2; -- INCREMENTAL REBUILD CANNOT BE TRIGGERED EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; -- MV CAN BE USED EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 join cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -- NOW AN INSERT -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n3 values (1, 'charlie', 15.8, 1); -- INCREMENTAL REBUILD CAN BE TRIGGERED AGAIN EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; -- MV CAN BE USED EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 join cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10; +SELECT cmv_basetable_n6.a +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_2_n3.c > 10.10; -drop materialized view cmv_mat_view; +drop materialized view cmv_mat_view_n6; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_dummy.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_dummy.q index c9aeea81d0..be72d3b3f8 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_dummy.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_dummy.q @@ -7,49 +7,49 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set 
hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_n0 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable values +insert into cmv_basetable_n0 values (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), (3, 'calvin', 978.76, 3), (3, 'charlie', 9.8, 1); -analyze table cmv_basetable compute statistics for columns; +analyze table cmv_basetable_n0 compute statistics for columns; -create materialized view cmv_mat_view enable rewrite -as select a, b, c from cmv_basetable where a = 2; +create materialized view cmv_mat_view_n0 enable rewrite +as select a, b, c from cmv_basetable_n0 where a = 2; -select * from cmv_mat_view; +select * from cmv_mat_view_n0; -show tblproperties cmv_mat_view; +show tblproperties cmv_mat_view_n0; create materialized view if not exists cmv_mat_view2 enable rewrite -as select a, c from cmv_basetable where a = 3; +as select a, c from cmv_basetable_n0 where a = 3; select * from cmv_mat_view2; show tblproperties cmv_mat_view2; explain -select a, c from cmv_basetable where a = 3; +select a, c from cmv_basetable_n0 where a = 3; -select a, c from cmv_basetable where a = 3; +select a, c from cmv_basetable_n0 where a = 3; alter materialized view cmv_mat_view2 disable rewrite; explain select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n0 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n0 where d = 3) table2 on table1.a = table2.a); select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n0 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n0 where d = 3) table2 on table1.a = table2.a); explain @@ -59,30 +59,30 @@ alter materialized view cmv_mat_view2 enable rewrite; explain select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n0 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n0 where d = 3) table2 on table1.a = table2.a); select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n0 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n0 where d = 3) table2 on table1.a = table2.a); drop materialized view cmv_mat_view2; explain select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n0 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n0 where d = 3) table2 on table1.a = table2.a); select * from ( - (select a, c from cmv_basetable where a = 3) table1 + (select a, c from cmv_basetable_n0 where a = 3) table1 join - (select a, c from cmv_basetable where d = 3) table2 + (select a, c from cmv_basetable_n0 where d = 3) table2 on table1.a = table2.a); -drop materialized view cmv_mat_view; +drop materialized view cmv_mat_view_n0; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q index 9927e9d5bc..59341747c5 100644 --- 
a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_multi_db.q @@ -8,38 +8,38 @@ set hive.stats.column.autogather=true; create database db1; use db1; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_n7 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable values +insert into cmv_basetable_n7 values (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), (3, 'calvin', 978.76, 3), (3, 'charlie', 9.8, 1); -analyze table cmv_basetable compute statistics for columns; +analyze table cmv_basetable_n7 compute statistics for columns; create database db2; use db2; -create materialized view cmv_mat_view enable rewrite -as select a, b, c from db1.cmv_basetable where a = 2; +create materialized view cmv_mat_view_n7 enable rewrite +as select a, b, c from db1.cmv_basetable_n7 where a = 2; -select * from cmv_mat_view; +select * from cmv_mat_view_n7; -show tblproperties cmv_mat_view; +show tblproperties cmv_mat_view_n7; -create materialized view if not exists cmv_mat_view2 enable rewrite -as select a, c from db1.cmv_basetable where a = 3; +create materialized view if not exists cmv_mat_view2_n2 enable rewrite +as select a, c from db1.cmv_basetable_n7 where a = 3; -select * from cmv_mat_view2; +select * from cmv_mat_view2_n2; -show tblproperties cmv_mat_view2; +show tblproperties cmv_mat_view2_n2; create database db3; use db3; explain -select a, c from db1.cmv_basetable where a = 3; +select a, c from db1.cmv_basetable_n7 where a = 3; -select a, c from db1.cmv_basetable where a = 3; +select a, c from db1.cmv_basetable_n7 where a = 3; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_rebuild_dummy.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_rebuild_dummy.q index 86e5a1e18a..1c1a45f0bc 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_rebuild_dummy.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_rebuild_dummy.q @@ -5,145 +5,145 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_n1 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable values +insert into cmv_basetable_n1 values (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), (3, 'calvin', 978.76, 3), (3, 'charlie', 9.8, 1); -analyze table cmv_basetable compute statistics for columns; +analyze table cmv_basetable_n1 compute statistics for columns; -create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_2_n0 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n0 values (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n0 compute statistics for columns; EXPLAIN -CREATE 
MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE AS - SELECT cmv_basetable.a, cmv_basetable_2.c - FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) - WHERE cmv_basetable_2.c > 10.0 - GROUP BY cmv_basetable.a, cmv_basetable_2.c; - -CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE AS - SELECT cmv_basetable.a, cmv_basetable_2.c - FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) - WHERE cmv_basetable_2.c > 10.0 - GROUP BY cmv_basetable.a, cmv_basetable_2.c; +CREATE MATERIALIZED VIEW cmv_mat_view_n1 ENABLE REWRITE AS + SELECT cmv_basetable_n1.a, cmv_basetable_2_n0.c + FROM cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) + WHERE cmv_basetable_2_n0.c > 10.0 + GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; + +CREATE MATERIALIZED VIEW cmv_mat_view_n1 ENABLE REWRITE AS + SELECT cmv_basetable_n1.a, cmv_basetable_2_n0.c + FROM cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) + WHERE cmv_basetable_2_n0.c > 10.0 + GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -- USE THE VIEW EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 join cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n0 values (3, 'charlie', 15.8, 1); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n0 compute statistics for columns; -- CANNOT USE THE VIEW, IT IS OUTDATED EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 join cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -- REBUILD EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n1 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n1 REBUILD; -- NOW IT CAN BE USED AGAIN EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 join cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE 
cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -DELETE FROM cmv_basetable_2 WHERE a = 3; +DELETE FROM cmv_basetable_2_n0 WHERE a = 3; -- CANNOT USE THE VIEW, IT IS OUTDATED EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 join cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -- REBUILD -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n1 REBUILD; -- NOW IT CAN BE USED AGAIN EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 join cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -- IRRELEVANT OPERATIONS -create table cmv_irrelevant_table (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_irrelevant_table_n0 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_irrelevant_table values +insert into cmv_irrelevant_table_n0 values (1, 'alfred', 10.30, 2), (3, 'charlie', 9.8, 1); -analyze table cmv_irrelevant_table compute statistics for columns; +analyze table cmv_irrelevant_table_n0 compute statistics for columns; -- IT CAN STILL BE USED EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 join cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM 
cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; -drop materialized view cmv_mat_view; +drop materialized view cmv_mat_view_n1; -- NOT USED ANYMORE EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; - -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 join cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; + +SELECT cmv_basetable_n1.a +FROM cmv_basetable_n1 JOIN cmv_basetable_2_n0 ON (cmv_basetable_n1.a = cmv_basetable_2_n0.a) +WHERE cmv_basetable_2_n0.c > 10.10 +GROUP BY cmv_basetable_n1.a, cmv_basetable_2_n0.c; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q index fe3ddeb71a..492264c30c 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q @@ -4,97 +4,97 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.strict.checks.cartesian.product=false; set hive.materializedview.rewriting=true; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable values +insert into cmv_basetable_n3 values (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), (3, 'calvin', 978.76, 3), (3, 'charlie', 9.8, 1); -analyze table cmv_basetable compute statistics for columns; +analyze table cmv_basetable_n3 compute statistics for columns; -create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); +create table cmv_basetable_2_n1 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n1 values (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n1 compute statistics for columns; -- CREATE VIEW WITH REWRITE DISABLED EXPLAIN -CREATE MATERIALIZED VIEW cmv_mat_view TBLPROPERTIES('rewriting.time.window'='300s') AS - SELECT cmv_basetable.a, cmv_basetable_2.c - FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) - WHERE cmv_basetable_2.c > 10.0 - GROUP BY cmv_basetable.a, cmv_basetable_2.c; +CREATE MATERIALIZED VIEW cmv_mat_view_n3 TBLPROPERTIES('rewriting.time.window'='300s') AS + SELECT cmv_basetable_n3.a, cmv_basetable_2_n1.c + FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a) + WHERE cmv_basetable_2_n1.c > 10.0 + GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c; -CREATE MATERIALIZED VIEW cmv_mat_view TBLPROPERTIES('rewriting.time.window'='300s') AS - SELECT cmv_basetable.a, cmv_basetable_2.c - FROM 
cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) - WHERE cmv_basetable_2.c > 10.0 - GROUP BY cmv_basetable.a, cmv_basetable_2.c; +CREATE MATERIALIZED VIEW cmv_mat_view_n3 TBLPROPERTIES('rewriting.time.window'='300s') AS + SELECT cmv_basetable_n3.a, cmv_basetable_2_n1.c + FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a) + WHERE cmv_basetable_2_n1.c > 10.0 + GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c; -DESCRIBE FORMATTED cmv_mat_view; +DESCRIBE FORMATTED cmv_mat_view_n3; -- CANNOT USE THE VIEW, IT IS DISABLED FOR REWRITE EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n3.a +FROM cmv_basetable_n3 join cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a) +WHERE cmv_basetable_2_n1.c > 10.10 +GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n3.a +FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a) +WHERE cmv_basetable_2_n1.c > 10.10 +GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c; -insert into cmv_basetable_2 values +insert into cmv_basetable_2_n1 values (3, 'charlie', 15.8, 1); -analyze table cmv_basetable_2 compute statistics for columns; +analyze table cmv_basetable_2_n1 compute statistics for columns; -- ENABLE FOR REWRITE EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE; +ALTER MATERIALIZED VIEW cmv_mat_view_n3 ENABLE REWRITE; -ALTER MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE; +ALTER MATERIALIZED VIEW cmv_mat_view_n3 ENABLE REWRITE; -DESCRIBE FORMATTED cmv_mat_view; +DESCRIBE FORMATTED cmv_mat_view_n3; -- CAN USE THE MATERIALIZED VIEW, AS TIME WINDOW IS HUGE -- WE GET OUTDATED RESULTS EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n3.a +FROM cmv_basetable_n3 join cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a) +WHERE cmv_basetable_2_n1.c > 10.10 +GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c; -SELECT cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n3.a +FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a) +WHERE cmv_basetable_2_n1.c > 10.10 +GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c; -- REBUILD EXPLAIN -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n3 REBUILD; -ALTER MATERIALIZED VIEW cmv_mat_view REBUILD; +ALTER MATERIALIZED VIEW cmv_mat_view_n3 REBUILD; -DESCRIBE FORMATTED cmv_mat_view; +DESCRIBE FORMATTED cmv_mat_view_n3; -- CAN USE IT AGAIN EXPLAIN -SELECT cmv_basetable.a -FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n3.a +FROM cmv_basetable_n3 join cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a) +WHERE cmv_basetable_2_n1.c > 10.10 +GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c; -SELECT 
cmv_basetable.a -FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = cmv_basetable_2.a) -WHERE cmv_basetable_2.c > 10.10 -GROUP BY cmv_basetable.a, cmv_basetable_2.c; +SELECT cmv_basetable_n3.a +FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a) +WHERE cmv_basetable_2_n1.c > 10.10 +GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c; -drop materialized view cmv_mat_view; +drop materialized view cmv_mat_view_n3; diff --git a/ql/src/test/queries/clientpositive/materialized_view_describe.q b/ql/src/test/queries/clientpositive/materialized_view_describe.q index b5bd4671be..d137861ffb 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_describe.q +++ b/ql/src/test/queries/clientpositive/materialized_view_describe.q @@ -1,59 +1,59 @@ set hive.vectorized.execution.enabled=false; -create table cmv_basetable (a int, b varchar(256), c decimal(10,2)); +create table cmv_basetable_n8 (a int, b varchar(256), c decimal(10,2)); -insert into cmv_basetable values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8); +insert into cmv_basetable_n8 values (1, 'alfred', 10.30),(2, 'bob', 3.14),(2, 'bonnie', 172342.2),(3, 'calvin', 978.76),(3, 'charlie', 9.8); -create materialized view cmv_mat_view +create materialized view cmv_mat_view_n8 comment 'this is the first view' -tblproperties ('key'='foo') as select a, c from cmv_basetable; +tblproperties ('key'='foo') as select a, c from cmv_basetable_n8; -describe cmv_mat_view; +describe cmv_mat_view_n8; -describe extended cmv_mat_view; +describe extended cmv_mat_view_n8; -describe formatted cmv_mat_view; +describe formatted cmv_mat_view_n8; -show tblproperties cmv_mat_view; +show tblproperties cmv_mat_view_n8; -select a, c from cmv_mat_view; +select a, c from cmv_mat_view_n8; -drop materialized view cmv_mat_view; +drop materialized view cmv_mat_view_n8; -create materialized view cmv_mat_view2 +create materialized view cmv_mat_view2_n3 comment 'this is the second view' stored as textfile -tblproperties ('key'='alice','key2'='bob') as select a from cmv_basetable; +tblproperties ('key'='alice','key2'='bob') as select a from cmv_basetable_n8; -describe formatted cmv_mat_view2; +describe formatted cmv_mat_view2_n3; -select a from cmv_mat_view2; +select a from cmv_mat_view2_n3; -drop materialized view cmv_mat_view2; +drop materialized view cmv_mat_view2_n3; -create materialized view cmv_mat_view3 +create materialized view cmv_mat_view3_n0 comment 'this is the third view' row format delimited fields terminated by '\t' -as select * from cmv_basetable; +as select * from cmv_basetable_n8; -describe formatted cmv_mat_view3; +describe formatted cmv_mat_view3_n0; -select a, b, c from cmv_mat_view3; +select a, b, c from cmv_mat_view3_n0; -select distinct a from cmv_mat_view3; +select distinct a from cmv_mat_view3_n0; -drop materialized view cmv_mat_view3; +drop materialized view cmv_mat_view3_n0; dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/t; -create materialized view cmv_mat_view4 +create materialized view cmv_mat_view4_n0 comment 'this is the last view' stored as textfile location '${system:test.tmp.dir}/t' -as select a from cmv_basetable; +as select a from cmv_basetable_n8; -describe formatted cmv_mat_view4; +describe formatted cmv_mat_view4_n0; -select a from cmv_mat_view4; +select a from cmv_mat_view4_n0; -drop materialized view cmv_mat_view4; +drop materialized view cmv_mat_view4_n0; diff --git 
a/ql/src/test/queries/clientpositive/materialized_view_rewrite_1.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_1.q index 7b90f8010b..e2e438495a 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_1.q +++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_1.q @@ -7,178 +7,178 @@ set hive.strict.checks.cartesian.product=false; set hive.stats.fetch.column.stats=true; set hive.materializedview.rewriting=true; -create table emps ( +create table emps_n3 ( empid int, deptno int, name varchar(256), salary float, commission int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +insert into emps_n3 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250); -analyze table emps compute statistics for columns; +analyze table emps_n3 compute statistics for columns; -create table depts ( +create table depts_n2 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); -analyze table depts compute statistics for columns; +insert into depts_n2 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); +analyze table depts_n2 compute statistics for columns; -create table dependents ( +create table dependents_n2 ( empid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into dependents values (10, 'Michael'), (10, 'Jane'); -analyze table dependents compute statistics for columns; +insert into dependents_n2 values (10, 'Michael'), (10, 'Jane'); +analyze table dependents_n2 compute statistics for columns; -create table locations ( +create table locations_n2 ( locationid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into locations values (10, 'San Francisco'), (10, 'San Diego'); -analyze table locations compute statistics for columns; +insert into locations_n2 values (10, 'San Francisco'), (10, 'San Diego'); +analyze table locations_n2 compute statistics for columns; -alter table emps add constraint pk1 primary key (empid) disable novalidate rely; -alter table depts add constraint pk2 primary key (deptno) disable novalidate rely; -alter table dependents add constraint pk3 primary key (empid) disable novalidate rely; -alter table locations add constraint pk4 primary key (locationid) disable novalidate rely; +alter table emps_n3 add constraint pk1 primary key (empid) disable novalidate rely; +alter table depts_n2 add constraint pk2 primary key (deptno) disable novalidate rely; +alter table dependents_n2 add constraint pk3 primary key (empid) disable novalidate rely; +alter table locations_n2 add constraint pk4 primary key (locationid) disable novalidate rely; -alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely; -alter table depts add constraint fk2 foreign key (locationid) references locations(locationid) disable novalidate rely; +alter table emps_n3 add constraint fk1 foreign key (deptno) references depts_n2(deptno) disable novalidate rely; +alter table depts_n2 add constraint fk2 foreign key (locationid) references locations_n2(locationid) disable novalidate rely; -- EXAMPLE 1 -create materialized view mv1 enable rewrite as -select * from emps where empid < 150; -analyze table mv1 compute 
statistics for columns; +create materialized view mv1_n2 enable rewrite as +select * from emps_n3 where empid < 150; +analyze table mv1_n2 compute statistics for columns; explain select * -from (select * from emps where empid < 120) t -join depts using (deptno); +from (select * from emps_n3 where empid < 120) t +join depts_n2 using (deptno); select * -from (select * from emps where empid < 120) t -join depts using (deptno); +from (select * from emps_n3 where empid < 120) t +join depts_n2 using (deptno); -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 2 -create materialized view mv1 enable rewrite as +create materialized view mv1_n2 enable rewrite as select deptno, name, salary, commission -from emps; -analyze table mv1 compute statistics for columns; +from emps_n3; +analyze table mv1_n2 compute statistics for columns; explain -select emps.name, emps.salary, emps.commission -from emps -join depts using (deptno); +select emps_n3.name, emps_n3.salary, emps_n3.commission +from emps_n3 +join depts_n2 using (deptno); -select emps.name, emps.salary, emps.commission -from emps -join depts using (deptno); +select emps_n3.name, emps_n3.salary, emps_n3.commission +from emps_n3 +join depts_n2 using (deptno); -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 3 -create materialized view mv1 enable rewrite as -select empid deptno from emps -join depts using (deptno); -analyze table mv1 compute statistics for columns; +create materialized view mv1_n2 enable rewrite as +select empid deptno from emps_n3 +join depts_n2 using (deptno); +analyze table mv1_n2 compute statistics for columns; explain -select empid deptno from emps -join depts using (deptno) where empid = 1; +select empid deptno from emps_n3 +join depts_n2 using (deptno) where empid = 1; -select empid deptno from emps -join depts using (deptno) where empid = 1; +select empid deptno from emps_n3 +join depts_n2 using (deptno) where empid = 1; -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 4 -create materialized view mv1 enable rewrite as -select * from emps where empid < 200; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n2 enable rewrite as +select * from emps_n3 where empid < 200; +analyze table mv1_n2 compute statistics for columns; explain -select * from emps where empid > 120 -union all select * from emps where empid < 150; +select * from emps_n3 where empid > 120 +union all select * from emps_n3 where empid < 150; -select * from emps where empid > 120 -union all select * from emps where empid < 150; +select * from emps_n3 where empid > 120 +union all select * from emps_n3 where empid < 150; -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 5 - NO MV, ALREADY UNIQUE -create materialized view mv1 enable rewrite as -select empid, deptno from emps group by empid, deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n2 enable rewrite as +select empid, deptno from emps_n3 group by empid, deptno; +analyze table mv1_n2 compute statistics for columns; explain -select empid, deptno from emps group by empid, deptno; +select empid, deptno from emps_n3 group by empid, deptno; -select empid, deptno from emps group by empid, deptno; +select empid, deptno from emps_n3 group by empid, deptno; -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 5 - NO MV, ALREADY UNIQUE -create materialized view mv1 enable rewrite as -select empid, name from emps group by empid, name; -analyze 
table mv1 compute statistics for columns; +create materialized view mv1_n2 enable rewrite as +select empid, name from emps_n3 group by empid, name; +analyze table mv1_n2 compute statistics for columns; explain -select empid, name from emps group by empid, name; +select empid, name from emps_n3 group by empid, name; -select empid, name from emps group by empid, name; +select empid, name from emps_n3 group by empid, name; -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 5 -create materialized view mv1 enable rewrite as -select name, salary from emps group by name, salary; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n2 enable rewrite as +select name, salary from emps_n3 group by name, salary; +analyze table mv1_n2 compute statistics for columns; explain -select name, salary from emps group by name, salary; +select name, salary from emps_n3 group by name, salary; -select name, salary from emps group by name, salary; +select name, salary from emps_n3 group by name, salary; -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 6 -create materialized view mv1 enable rewrite as -select name, salary from emps group by name, salary; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n2 enable rewrite as +select name, salary from emps_n3 group by name, salary; +analyze table mv1_n2 compute statistics for columns; explain -select name from emps group by name; +select name from emps_n3 group by name; -select name from emps group by name; +select name from emps_n3 group by name; -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 7 -create materialized view mv1 enable rewrite as -select name, salary from emps where deptno = 10 group by name, salary; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n2 enable rewrite as +select name, salary from emps_n3 where deptno = 10 group by name, salary; +analyze table mv1_n2 compute statistics for columns; explain -select name from emps where deptno = 10 group by name; +select name from emps_n3 where deptno = 10 group by name; -select name from emps where deptno = 10 group by name; +select name from emps_n3 where deptno = 10 group by name; -drop materialized view mv1; +drop materialized view mv1_n2; -- EXAMPLE 9 -create materialized view mv1 enable rewrite as +create materialized view mv1_n2 enable rewrite as select name, salary, count(*) as c, sum(empid) as s -from emps group by name, salary; -analyze table mv1 compute statistics for columns; +from emps_n3 group by name, salary; +analyze table mv1_n2 compute statistics for columns; explain -select name from emps group by name; +select name from emps_n3 group by name; -select name from emps group by name; +select name from emps_n3 group by name; -drop materialized view mv1; +drop materialized view mv1_n2; diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_2.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_2.q index 6f66a856df..a49726cc01 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_2.q +++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_2.q @@ -7,158 +7,158 @@ set hive.strict.checks.cartesian.product=false; set hive.stats.fetch.column.stats=true; set hive.materializedview.rewriting=true; -create table emps ( +create table emps_n0 ( empid int, deptno int, name varchar(256), salary float, commission int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into emps 
values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +insert into emps_n0 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250); -analyze table emps compute statistics for columns; +analyze table emps_n0 compute statistics for columns; -create table depts ( +create table depts_n0 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); -analyze table depts compute statistics for columns; +insert into depts_n0 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); +analyze table depts_n0 compute statistics for columns; -create table dependents ( +create table dependents_n0 ( empid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into dependents values (10, 'Michael'), (10, 'Jane'); -analyze table dependents compute statistics for columns; +insert into dependents_n0 values (10, 'Michael'), (10, 'Jane'); +analyze table dependents_n0 compute statistics for columns; -create table locations ( +create table locations_n0 ( locationid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into locations values (10, 'San Francisco'), (10, 'San Diego'); -analyze table locations compute statistics for columns; +insert into locations_n0 values (10, 'San Francisco'), (10, 'San Diego'); +analyze table locations_n0 compute statistics for columns; -alter table emps add constraint pk1 primary key (empid) disable novalidate rely; -alter table depts add constraint pk2 primary key (deptno) disable novalidate rely; -alter table dependents add constraint pk3 primary key (empid) disable novalidate rely; -alter table locations add constraint pk4 primary key (locationid) disable novalidate rely; +alter table emps_n0 add constraint pk1 primary key (empid) disable novalidate rely; +alter table depts_n0 add constraint pk2 primary key (deptno) disable novalidate rely; +alter table dependents_n0 add constraint pk3 primary key (empid) disable novalidate rely; +alter table locations_n0 add constraint pk4 primary key (locationid) disable novalidate rely; -alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely; -alter table depts add constraint fk2 foreign key (locationid) references locations(locationid) disable novalidate rely; +alter table emps_n0 add constraint fk1 foreign key (deptno) references depts_n0(deptno) disable novalidate rely; +alter table depts_n0 add constraint fk2 foreign key (locationid) references locations_n0(locationid) disable novalidate rely; -- EXAMPLE 16 -create materialized view mv1 enable rewrite as -select empid, depts.deptno from emps -join depts using (deptno) where depts.deptno > 10 -group by empid, depts.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n0 enable rewrite as +select empid, depts_n0.deptno from emps_n0 +join depts_n0 using (deptno) where depts_n0.deptno > 10 +group by empid, depts_n0.deptno; +analyze table mv1_n0 compute statistics for columns; explain -select empid from emps -join depts using (deptno) where depts.deptno > 20 -group by empid, depts.deptno; +select empid from emps_n0 +join depts_n0 using (deptno) where depts_n0.deptno > 20 +group by empid, depts_n0.deptno; -select empid from emps -join depts using (deptno) where depts.deptno > 20 
-group by empid, depts.deptno; +select empid from emps_n0 +join depts_n0 using (deptno) where depts_n0.deptno > 20 +group by empid, depts_n0.deptno; -drop materialized view mv1; +drop materialized view mv1_n0; -- EXAMPLE 17 -create materialized view mv1 enable rewrite as -select depts.deptno, empid from depts -join emps using (deptno) where depts.deptno > 10 -group by empid, depts.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n0 enable rewrite as +select depts_n0.deptno, empid from depts_n0 +join emps_n0 using (deptno) where depts_n0.deptno > 10 +group by empid, depts_n0.deptno; +analyze table mv1_n0 compute statistics for columns; explain -select empid from emps -join depts using (deptno) where depts.deptno > 20 -group by empid, depts.deptno; +select empid from emps_n0 +join depts_n0 using (deptno) where depts_n0.deptno > 20 +group by empid, depts_n0.deptno; -select empid from emps -join depts using (deptno) where depts.deptno > 20 -group by empid, depts.deptno; +select empid from emps_n0 +join depts_n0 using (deptno) where depts_n0.deptno > 20 +group by empid, depts_n0.deptno; -drop materialized view mv1; +drop materialized view mv1_n0; -- EXAMPLE 18 -create materialized view mv1 enable rewrite as -select empid, depts.deptno from emps -join depts using (deptno) where emps.deptno > 10 -group by empid, depts.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n0 enable rewrite as +select empid, depts_n0.deptno from emps_n0 +join depts_n0 using (deptno) where emps_n0.deptno > 10 +group by empid, depts_n0.deptno; +analyze table mv1_n0 compute statistics for columns; explain -select empid from emps -join depts using (deptno) where depts.deptno > 20 -group by empid, depts.deptno; +select empid from emps_n0 +join depts_n0 using (deptno) where depts_n0.deptno > 20 +group by empid, depts_n0.deptno; -select empid from emps -join depts using (deptno) where depts.deptno > 20 -group by empid, depts.deptno; +select empid from emps_n0 +join depts_n0 using (deptno) where depts_n0.deptno > 20 +group by empid, depts_n0.deptno; -drop materialized view mv1; +drop materialized view mv1_n0; -- EXAMPLE 19 -create materialized view mv1 enable rewrite as -select depts.deptno, emps.empid from depts -join emps using (deptno) where emps.empid > 10 -group by depts.deptno, emps.empid; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n0 enable rewrite as +select depts_n0.deptno, emps_n0.empid from depts_n0 +join emps_n0 using (deptno) where emps_n0.empid > 10 +group by depts_n0.deptno, emps_n0.empid; +analyze table mv1_n0 compute statistics for columns; explain -select depts.deptno from depts -join emps using (deptno) where emps.empid > 15 -group by depts.deptno, emps.empid; +select depts_n0.deptno from depts_n0 +join emps_n0 using (deptno) where emps_n0.empid > 15 +group by depts_n0.deptno, emps_n0.empid; -select depts.deptno from depts -join emps using (deptno) where emps.empid > 15 -group by depts.deptno, emps.empid; +select depts_n0.deptno from depts_n0 +join emps_n0 using (deptno) where emps_n0.empid > 15 +group by depts_n0.deptno, emps_n0.empid; -drop materialized view mv1; +drop materialized view mv1_n0; -- EXAMPLE 20 -create materialized view mv1 enable rewrite as -select depts.deptno, emps.empid from depts -join emps using (deptno) where emps.empid > 10 -group by depts.deptno, emps.empid; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n0 enable rewrite as +select 
depts_n0.deptno, emps_n0.empid from depts_n0 +join emps_n0 using (deptno) where emps_n0.empid > 10 +group by depts_n0.deptno, emps_n0.empid; +analyze table mv1_n0 compute statistics for columns; explain -select depts.deptno from depts -join emps using (deptno) where emps.empid > 15 -group by depts.deptno; +select depts_n0.deptno from depts_n0 +join emps_n0 using (deptno) where emps_n0.empid > 15 +group by depts_n0.deptno; -select depts.deptno from depts -join emps using (deptno) where emps.empid > 15 -group by depts.deptno; +select depts_n0.deptno from depts_n0 +join emps_n0 using (deptno) where emps_n0.empid > 15 +group by depts_n0.deptno; -drop materialized view mv1; +drop materialized view mv1_n0; -- EXAMPLE 23 -create materialized view mv1 enable rewrite as -select depts.name, dependents.name as name2, emps.deptno, depts.deptno as deptno2, dependents.empid -from depts, dependents, emps -where depts.deptno > 10 -group by depts.name, dependents.name, emps.deptno, depts.deptno, dependents.empid; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n0 enable rewrite as +select depts_n0.name, dependents_n0.name as name2, emps_n0.deptno, depts_n0.deptno as deptno2, dependents_n0.empid +from depts_n0, dependents_n0, emps_n0 +where depts_n0.deptno > 10 +group by depts_n0.name, dependents_n0.name, emps_n0.deptno, depts_n0.deptno, dependents_n0.empid; +analyze table mv1_n0 compute statistics for columns; explain -select dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 10 -group by dependents.empid; - -select dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 10 -group by dependents.empid; - -drop materialized view mv1; +select dependents_n0.empid +from depts_n0 +join dependents_n0 on (depts_n0.name = dependents_n0.name) +join emps_n0 on (emps_n0.deptno = depts_n0.deptno) +where depts_n0.deptno > 10 +group by dependents_n0.empid; + +select dependents_n0.empid +from depts_n0 +join dependents_n0 on (depts_n0.name = dependents_n0.name) +join emps_n0 on (emps_n0.deptno = depts_n0.deptno) +where depts_n0.deptno > 10 +group by dependents_n0.empid; + +drop materialized view mv1_n0; diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_3.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_3.q index ba87d8040c..fe00860290 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_3.q +++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_3.q @@ -7,109 +7,109 @@ set hive.strict.checks.cartesian.product=false; set hive.stats.fetch.column.stats=true; set hive.materializedview.rewriting=true; -create table emps ( +create table emps_n9 ( empid int, deptno int, name varchar(256), salary float, commission int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +insert into emps_n9 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250); -analyze table emps compute statistics for columns; +analyze table emps_n9 compute statistics for columns; -create table depts ( +create table depts_n7 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 
20); -analyze table depts compute statistics for columns; +insert into depts_n7 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); +analyze table depts_n7 compute statistics for columns; -create table dependents ( +create table dependents_n5 ( empid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into dependents values (10, 'Michael'), (10, 'Jane'); -analyze table dependents compute statistics for columns; +insert into dependents_n5 values (10, 'Michael'), (10, 'Jane'); +analyze table dependents_n5 compute statistics for columns; -create table locations ( +create table locations_n5 ( locationid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into locations values (10, 'San Francisco'), (10, 'San Diego'); -analyze table locations compute statistics for columns; +insert into locations_n5 values (10, 'San Francisco'), (10, 'San Diego'); +analyze table locations_n5 compute statistics for columns; -alter table emps add constraint pk1 primary key (empid) disable novalidate rely; -alter table depts add constraint pk2 primary key (deptno) disable novalidate rely; -alter table dependents add constraint pk3 primary key (empid) disable novalidate rely; -alter table locations add constraint pk4 primary key (locationid) disable novalidate rely; +alter table emps_n9 add constraint pk1 primary key (empid) disable novalidate rely; +alter table depts_n7 add constraint pk2 primary key (deptno) disable novalidate rely; +alter table dependents_n5 add constraint pk3 primary key (empid) disable novalidate rely; +alter table locations_n5 add constraint pk4 primary key (locationid) disable novalidate rely; -alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely; -alter table depts add constraint fk2 foreign key (locationid) references locations(locationid) disable novalidate rely; +alter table emps_n9 add constraint fk1 foreign key (deptno) references depts_n7(deptno) disable novalidate rely; +alter table depts_n7 add constraint fk2 foreign key (locationid) references locations_n5(locationid) disable novalidate rely; -- EXAMPLE 34 -create materialized view mv1 enable rewrite as -select empid deptno from emps -join depts using (deptno); -analyze table mv1 compute statistics for columns; +create materialized view mv1_n5 enable rewrite as +select empid deptno from emps_n9 +join depts_n7 using (deptno); +analyze table mv1_n5 compute statistics for columns; explain -select empid deptno from emps -join depts using (deptno) where empid = 1; +select empid deptno from emps_n9 +join depts_n7 using (deptno) where empid = 1; -select empid deptno from emps -join depts using (deptno) where empid = 1; +select empid deptno from emps_n9 +join depts_n7 using (deptno) where empid = 1; -drop materialized view mv1; +drop materialized view mv1_n5; -- EXAMPLE 35 -create materialized view mv1 enable rewrite as -select cast(empid as BIGINT) from emps -join depts using (deptno); -analyze table mv1 compute statistics for columns; +create materialized view mv1_n5 enable rewrite as +select cast(empid as BIGINT) from emps_n9 +join depts_n7 using (deptno); +analyze table mv1_n5 compute statistics for columns; explain -select empid deptno from emps -join depts using (deptno) where empid > 1; +select empid deptno from emps_n9 +join depts_n7 using (deptno) where empid > 1; -select empid deptno from emps -join depts using (deptno) where empid > 1; +select empid deptno from emps_n9 +join depts_n7 using 
(deptno) where empid > 1; -drop materialized view mv1; +drop materialized view mv1_n5; -- EXAMPLE 36 -create materialized view mv1 enable rewrite as -select cast(empid as BIGINT) from emps -join depts using (deptno); -analyze table mv1 compute statistics for columns; +create materialized view mv1_n5 enable rewrite as +select cast(empid as BIGINT) from emps_n9 +join depts_n7 using (deptno); +analyze table mv1_n5 compute statistics for columns; explain -select empid deptno from emps -join depts using (deptno) where empid = 1; +select empid deptno from emps_n9 +join depts_n7 using (deptno) where empid = 1; -select empid deptno from emps -join depts using (deptno) where empid = 1; +select empid deptno from emps_n9 +join depts_n7 using (deptno) where empid = 1; -drop materialized view mv1; +drop materialized view mv1_n5; -- EXAMPLE 38 -create materialized view mv1 enable rewrite as -select depts.name -from emps -join depts on (emps.deptno = depts.deptno); -analyze table mv1 compute statistics for columns; +create materialized view mv1_n5 enable rewrite as +select depts_n7.name +from emps_n9 +join depts_n7 on (emps_n9.deptno = depts_n7.deptno); +analyze table mv1_n5 compute statistics for columns; explain -select dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join emps on (emps.deptno = depts.deptno); +select dependents_n5.empid +from depts_n7 +join dependents_n5 on (depts_n7.name = dependents_n5.name) +join emps_n9 on (emps_n9.deptno = depts_n7.deptno); -select dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join emps on (emps.deptno = depts.deptno); +select dependents_n5.empid +from depts_n7 +join dependents_n5 on (depts_n7.name = dependents_n5.name) +join emps_n9 on (emps_n9.deptno = depts_n7.deptno); -drop materialized view mv1; +drop materialized view mv1_n5; diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_4.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_4.q index eaba93d0f5..4b097cd622 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_4.q +++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_4.q @@ -7,172 +7,172 @@ set hive.strict.checks.cartesian.product=false; set hive.stats.fetch.column.stats=true; set hive.materializedview.rewriting=true; -create table emps ( +create table emps_n5 ( empid int, deptno int, name varchar(256), salary float, commission int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +insert into emps_n5 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250); -analyze table emps compute statistics for columns; +analyze table emps_n5 compute statistics for columns; -create table depts ( +create table depts_n4 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); -analyze table depts compute statistics for columns; +insert into depts_n4 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); +analyze table depts_n4 compute statistics for columns; -create table dependents ( +create table dependents_n3 ( empid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into dependents values (10, 'Michael'), (10, 'Jane'); -analyze table dependents compute 
statistics for columns; +insert into dependents_n3 values (10, 'Michael'), (10, 'Jane'); +analyze table dependents_n3 compute statistics for columns; -create table locations ( +create table locations_n3 ( locationid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into locations values (10, 'San Francisco'), (10, 'San Diego'); -analyze table locations compute statistics for columns; +insert into locations_n3 values (10, 'San Francisco'), (10, 'San Diego'); +analyze table locations_n3 compute statistics for columns; -alter table emps add constraint pk1 primary key (empid) disable novalidate rely; -alter table depts add constraint pk2 primary key (deptno) disable novalidate rely; -alter table dependents add constraint pk3 primary key (empid) disable novalidate rely; -alter table locations add constraint pk4 primary key (locationid) disable novalidate rely; +alter table emps_n5 add constraint pk1 primary key (empid) disable novalidate rely; +alter table depts_n4 add constraint pk2 primary key (deptno) disable novalidate rely; +alter table dependents_n3 add constraint pk3 primary key (empid) disable novalidate rely; +alter table locations_n3 add constraint pk4 primary key (locationid) disable novalidate rely; -alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely; -alter table depts add constraint fk2 foreign key (locationid) references locations(locationid) disable novalidate rely; +alter table emps_n5 add constraint fk1 foreign key (deptno) references depts_n4(deptno) disable novalidate rely; +alter table depts_n4 add constraint fk2 foreign key (locationid) references locations_n3(locationid) disable novalidate rely; -- EXAMPLE 10 -create materialized view mv1 enable rewrite as +create materialized view mv1_n3 enable rewrite as select name, salary, count(*) as c, sum(empid) as s -from emps group by name, salary; -analyze table mv1 compute statistics for columns; +from emps_n5 group by name, salary; +analyze table mv1_n3 compute statistics for columns; explain select name, count(*) as c, sum(empid) as s -from emps group by name; +from emps_n5 group by name; select name, count(*) as c, sum(empid) as s -from emps group by name; +from emps_n5 group by name; -drop materialized view mv1; +drop materialized view mv1_n3; -- EXAMPLE 11 -create materialized view mv1 enable rewrite as +create materialized view mv1_n3 enable rewrite as select name, salary, count(*) as c, sum(empid) as s -from emps group by name, salary; -analyze table mv1 compute statistics for columns; +from emps_n5 group by name, salary; +analyze table mv1_n3 compute statistics for columns; explain select salary, name, sum(empid) as s, count(*) as c -from emps group by name, salary; +from emps_n5 group by name, salary; select salary, name, sum(empid) as s, count(*) as c -from emps group by name, salary; +from emps_n5 group by name, salary; -drop materialized view mv1; +drop materialized view mv1_n3; -- EXAMPLE 25 -create materialized view mv1 enable rewrite as -select empid, emps.deptno, count(*) as c, sum(empid) as s -from emps join depts using (deptno) -group by empid, emps.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n3 enable rewrite as +select empid, emps_n5.deptno, count(*) as c, sum(empid) as s +from emps_n5 join depts_n4 using (deptno) +group by empid, emps_n5.deptno; +analyze table mv1_n3 compute statistics for columns; explain -select depts.deptno, count(*) as c, sum(empid) as s -from emps join 
depts using (deptno) -group by depts.deptno; +select depts_n4.deptno, count(*) as c, sum(empid) as s +from emps_n5 join depts_n4 using (deptno) +group by depts_n4.deptno; -select depts.deptno, count(*) as c, sum(empid) as s -from emps join depts using (deptno) -group by depts.deptno; +select depts_n4.deptno, count(*) as c, sum(empid) as s +from emps_n5 join depts_n4 using (deptno) +group by depts_n4.deptno; -drop materialized view mv1; +drop materialized view mv1_n3; -- EXAMPLE 27 -create materialized view mv1 enable rewrite as -select empid, emps.deptno, count(*) as c, sum(empid) as s -from emps join depts using (deptno) -where emps.deptno >= 10 group by empid, emps.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n3 enable rewrite as +select empid, emps_n5.deptno, count(*) as c, sum(empid) as s +from emps_n5 join depts_n4 using (deptno) +where emps_n5.deptno >= 10 group by empid, emps_n5.deptno; +analyze table mv1_n3 compute statistics for columns; explain -select depts.deptno, sum(empid) as s -from emps join depts using (deptno) -where emps.deptno > 10 group by depts.deptno; +select depts_n4.deptno, sum(empid) as s +from emps_n5 join depts_n4 using (deptno) +where emps_n5.deptno > 10 group by depts_n4.deptno; -select depts.deptno, sum(empid) as s -from emps join depts using (deptno) -where emps.deptno > 10 group by depts.deptno; +select depts_n4.deptno, sum(empid) as s +from emps_n5 join depts_n4 using (deptno) +where emps_n5.deptno > 10 group by depts_n4.deptno; -drop materialized view mv1; +drop materialized view mv1_n3; -- EXAMPLE 28 -create materialized view mv1 enable rewrite as -select empid, depts.deptno, count(*) + 1 as c, sum(empid) as s -from emps join depts using (deptno) -where depts.deptno >= 10 group by empid, depts.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n3 enable rewrite as +select empid, depts_n4.deptno, count(*) + 1 as c, sum(empid) as s +from emps_n5 join depts_n4 using (deptno) +where depts_n4.deptno >= 10 group by empid, depts_n4.deptno; +analyze table mv1_n3 compute statistics for columns; explain -select depts.deptno, sum(empid) + 1 as s -from emps join depts using (deptno) -where depts.deptno > 10 group by depts.deptno; +select depts_n4.deptno, sum(empid) + 1 as s +from emps_n5 join depts_n4 using (deptno) +where depts_n4.deptno > 10 group by depts_n4.deptno; -select depts.deptno, sum(empid) + 1 as s -from emps join depts using (deptno) -where depts.deptno > 10 group by depts.deptno; +select depts_n4.deptno, sum(empid) + 1 as s +from emps_n5 join depts_n4 using (deptno) +where depts_n4.deptno > 10 group by depts_n4.deptno; -drop materialized view mv1; +drop materialized view mv1_n3; -- EXAMPLE 29 -create materialized view mv1 enable rewrite as -select depts.name, sum(salary) as s -from emps -join depts on (emps.deptno = depts.deptno) -group by depts.name; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n3 enable rewrite as +select depts_n4.name, sum(salary) as s +from emps_n5 +join depts_n4 on (emps_n5.deptno = depts_n4.deptno) +group by depts_n4.name; +analyze table mv1_n3 compute statistics for columns; explain -select dependents.empid, sum(salary) as s -from emps -join depts on (emps.deptno = depts.deptno) -join dependents on (depts.name = dependents.name) -group by dependents.empid; +select dependents_n3.empid, sum(salary) as s +from emps_n5 +join depts_n4 on (emps_n5.deptno = depts_n4.deptno) +join dependents_n3 on (depts_n4.name = 
dependents_n3.name) +group by dependents_n3.empid; -select dependents.empid, sum(salary) as s -from emps -join depts on (emps.deptno = depts.deptno) -join dependents on (depts.name = dependents.name) -group by dependents.empid; +select dependents_n3.empid, sum(salary) as s +from emps_n5 +join depts_n4 on (emps_n5.deptno = depts_n4.deptno) +join dependents_n3 on (depts_n4.name = dependents_n3.name) +group by dependents_n3.empid; -drop materialized view mv1; +drop materialized view mv1_n3; -- EXAMPLE 32 -create materialized view mv1 enable rewrite as -select dependents.empid, emps.deptno, count(distinct salary) as s -from emps -join dependents on (emps.empid = dependents.empid) -group by dependents.empid, emps.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n3 enable rewrite as +select dependents_n3.empid, emps_n5.deptno, count(distinct salary) as s +from emps_n5 +join dependents_n3 on (emps_n5.empid = dependents_n3.empid) +group by dependents_n3.empid, emps_n5.deptno; +analyze table mv1_n3 compute statistics for columns; explain -select emps.deptno, count(distinct salary) as s -from emps -join dependents on (emps.empid = dependents.empid) -group by dependents.empid, emps.deptno; +select emps_n5.deptno, count(distinct salary) as s +from emps_n5 +join dependents_n3 on (emps_n5.empid = dependents_n3.empid) +group by dependents_n3.empid, emps_n5.deptno; -select emps.deptno, count(distinct salary) as s -from emps -join dependents on (emps.empid = dependents.empid) -group by dependents.empid, emps.deptno; +select emps_n5.deptno, count(distinct salary) as s +from emps_n5 +join dependents_n3 on (emps_n5.empid = dependents_n3.empid) +group by dependents_n3.empid, emps_n5.deptno; -drop materialized view mv1; +drop materialized view mv1_n3; diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q index 9ffae37785..2964c8390a 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q +++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q @@ -7,291 +7,291 @@ set hive.strict.checks.cartesian.product=false; set hive.stats.fetch.column.stats=true; set hive.materializedview.rewriting=true; -create table emps ( +create table emps_n2 ( empid int, deptno int, name varchar(256), salary float, commission int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +insert into emps_n2 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250); -analyze table emps compute statistics for columns; +analyze table emps_n2 compute statistics for columns; -create table depts ( +create table depts_n1 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); -analyze table depts compute statistics for columns; +insert into depts_n1 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); +analyze table depts_n1 compute statistics for columns; -create table dependents ( +create table dependents_n1 ( empid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into dependents values (10, 'Michael'), (10, 'Jane'); -analyze table dependents compute statistics for columns; +insert into dependents_n1 
values (10, 'Michael'), (10, 'Jane'); +analyze table dependents_n1 compute statistics for columns; -create table locations ( +create table locations_n1 ( locationid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into locations values (10, 'San Francisco'), (10, 'San Diego'); -analyze table locations compute statistics for columns; +insert into locations_n1 values (10, 'San Francisco'), (10, 'San Diego'); +analyze table locations_n1 compute statistics for columns; -alter table emps add constraint pk1 primary key (empid) disable novalidate rely; -alter table depts add constraint pk2 primary key (deptno) disable novalidate rely; -alter table dependents add constraint pk3 primary key (empid) disable novalidate rely; -alter table locations add constraint pk4 primary key (locationid) disable novalidate rely; +alter table emps_n2 add constraint pk1 primary key (empid) disable novalidate rely; +alter table depts_n1 add constraint pk2 primary key (deptno) disable novalidate rely; +alter table dependents_n1 add constraint pk3 primary key (empid) disable novalidate rely; +alter table locations_n1 add constraint pk4 primary key (locationid) disable novalidate rely; -alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely; -alter table depts add constraint fk2 foreign key (locationid) references locations(locationid) disable novalidate rely; +alter table emps_n2 add constraint fk1 foreign key (deptno) references depts_n1(deptno) disable novalidate rely; +alter table depts_n1 add constraint fk2 foreign key (locationid) references locations_n1(locationid) disable novalidate rely; -alter table emps change column deptno deptno int constraint nn1 not null disable novalidate rely; -alter table depts change column locationid locationid int constraint nn2 not null disable novalidate rely; +alter table emps_n2 change column deptno deptno int constraint nn1 not null disable novalidate rely; +alter table depts_n1 change column locationid locationid int constraint nn2 not null disable novalidate rely; -- EXAMPLE 8 -create materialized view mv1 enable rewrite as -select name, deptno, salary from emps where deptno > 15 group by name, deptno, salary; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select name, deptno, salary from emps_n2 where deptno > 15 group by name, deptno, salary; +analyze table mv1_n1 compute statistics for columns; explain -select name from emps where deptno >= 20 group by name; +select name from emps_n2 where deptno >= 20 group by name; -select name from emps where deptno >= 20 group by name; +select name from emps_n2 where deptno >= 20 group by name; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 12 -create materialized view mv1 enable rewrite as +create materialized view mv1_n1 enable rewrite as select name, deptno, salary, count(*) as c, sum(empid) as s -from emps where deptno >= 15 group by name, deptno, salary; -analyze table mv1 compute statistics for columns; +from emps_n2 where deptno >= 15 group by name, deptno, salary; +analyze table mv1_n1 compute statistics for columns; explain select name, sum(empid) as s -from emps where deptno > 15 group by name; +from emps_n2 where deptno > 15 group by name; select name, sum(empid) as s -from emps where deptno > 15 group by name; +from emps_n2 where deptno > 15 group by name; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 22 -create materialized view 
mv1 enable rewrite as -select depts.deptno, dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 10 and depts.deptno < 20 -group by depts.deptno, dependents.empid; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select depts_n1.deptno, dependents_n1.empid +from depts_n1 +join dependents_n1 on (depts_n1.name = dependents_n1.name) +join locations_n1 on (locations_n1.name = dependents_n1.name) +join emps_n2 on (emps_n2.deptno = depts_n1.deptno) +where depts_n1.deptno > 10 and depts_n1.deptno < 20 +group by depts_n1.deptno, dependents_n1.empid; +analyze table mv1_n1 compute statistics for columns; explain -select dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 11 and depts.deptno < 19 -group by dependents.empid; - -select dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 11 and depts.deptno < 19 -group by dependents.empid; - -drop materialized view mv1; +select dependents_n1.empid +from depts_n1 +join dependents_n1 on (depts_n1.name = dependents_n1.name) +join locations_n1 on (locations_n1.name = dependents_n1.name) +join emps_n2 on (emps_n2.deptno = depts_n1.deptno) +where depts_n1.deptno > 11 and depts_n1.deptno < 19 +group by dependents_n1.empid; + +select dependents_n1.empid +from depts_n1 +join dependents_n1 on (depts_n1.name = dependents_n1.name) +join locations_n1 on (locations_n1.name = dependents_n1.name) +join emps_n2 on (emps_n2.deptno = depts_n1.deptno) +where depts_n1.deptno > 11 and depts_n1.deptno < 19 +group by dependents_n1.empid; + +drop materialized view mv1_n1; -- EXAMPLE 24 -create materialized view mv1 enable rewrite as -select empid, depts.deptno, count(*) as c, sum(empid) as s -from emps join depts using (deptno) -group by empid, depts.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select empid, depts_n1.deptno, count(*) as c, sum(empid) as s +from emps_n2 join depts_n1 using (deptno) +group by empid, depts_n1.deptno; +analyze table mv1_n1 compute statistics for columns; explain -select deptno from emps group by deptno; +select deptno from emps_n2 group by deptno; -select deptno from emps group by deptno; +select deptno from emps_n2 group by deptno; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 26 -create materialized view mv1 enable rewrite as -select empid, depts.deptno, count(*) as c, sum(empid) as s -from emps join depts using (deptno) -group by empid, depts.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select empid, depts_n1.deptno, count(*) as c, sum(empid) as s +from emps_n2 join depts_n1 using (deptno) +group by empid, depts_n1.deptno; +analyze table mv1_n1 compute statistics for columns; explain select deptno, empid, sum(empid) as s, count(*) as c -from emps group by empid, deptno; +from emps_n2 group by empid, deptno; select deptno, empid, sum(empid) as s, count(*) as c -from emps group by empid, deptno; +from emps_n2 group by empid, deptno; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 30 
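-- A sketch of the rewrites that EXAMPLE 24 and EXAMPLE 26 above exercise against mv1_n1 (illustrative shapes only, not Hive's literal plan output):
--   EXAMPLE 26 groups on exactly the MV's keys (empid, deptno), so the precomputed aggregates are read back directly:
--     select deptno, empid, s, c from mv1_n1;
--   EXAMPLE 24 groups on deptno alone, so the MV is rolled up (a re-aggregated count(*) comes back as sum(c), and sum(empid) as sum(s)):
--     select deptno from mv1_n1 group by deptno;
-- Dropping the join to depts_n1 in both cases is sound only because fk1 and nn1 above are declared RELY: every emps_n2.deptno is non-null and matches exactly one depts_n1 row, so the join neither drops nor duplicates rows. Note also that the "analyze ... compute statistics for columns" calls matter: without column statistics the cost-based comparison can still discard a valid rewrite, as the EXAMPLE 40 comment in materialized_view_rewrite_7.q below points out.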
-create materialized view mv1 enable rewrite as -select dependents.empid, emps.deptno, sum(salary) as s -from emps -join dependents on (emps.empid = dependents.empid) -group by dependents.empid, emps.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select dependents_n1.empid, emps_n2.deptno, sum(salary) as s +from emps_n2 +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +group by dependents_n1.empid, emps_n2.deptno; +analyze table mv1_n1 compute statistics for columns; explain -select dependents.empid, sum(salary) as s -from emps -join depts on (emps.deptno = depts.deptno) -join dependents on (emps.empid = dependents.empid) -group by dependents.empid; +select dependents_n1.empid, sum(salary) as s +from emps_n2 +join depts_n1 on (emps_n2.deptno = depts_n1.deptno) +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +group by dependents_n1.empid; -select dependents.empid, sum(salary) as s -from emps -join depts on (emps.deptno = depts.deptno) -join dependents on (emps.empid = dependents.empid) -group by dependents.empid; +select dependents_n1.empid, sum(salary) as s +from emps_n2 +join depts_n1 on (emps_n2.deptno = depts_n1.deptno) +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +group by dependents_n1.empid; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 31 -create materialized view mv1 enable rewrite as -select dependents.empid, emps.deptno, sum(salary) as s -from emps -join dependents on (emps.empid = dependents.empid) -group by dependents.empid, emps.deptno; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select dependents_n1.empid, emps_n2.deptno, sum(salary) as s +from emps_n2 +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +group by dependents_n1.empid, emps_n2.deptno; +analyze table mv1_n1 compute statistics for columns; explain -select depts.name, sum(salary) as s -from emps -join depts on (emps.deptno = depts.deptno) -join dependents on (emps.empid = dependents.empid) -group by depts.name; +select depts_n1.name, sum(salary) as s +from emps_n2 +join depts_n1 on (emps_n2.deptno = depts_n1.deptno) +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +group by depts_n1.name; -select depts.name, sum(salary) as s -from emps -join depts on (emps.deptno = depts.deptno) -join dependents on (emps.empid = dependents.empid) -group by depts.name; +select depts_n1.name, sum(salary) as s +from emps_n2 +join depts_n1 on (emps_n2.deptno = depts_n1.deptno) +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +group by depts_n1.name; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 41 -create materialized view mv1 enable rewrite as +create materialized view mv1_n1 enable rewrite as select a.empid deptno from -(select * from emps where empid = 1) a -join depts on (a.deptno = depts.deptno) -join dependents on (a.empid = dependents.empid); -analyze table mv1 compute statistics for columns; +(select * from emps_n2 where empid = 1) a +join depts_n1 on (a.deptno = depts_n1.deptno) +join dependents_n1 on (a.empid = dependents_n1.empid); +analyze table mv1_n1 compute statistics for columns; explain select a.empid from -(select * from emps where empid = 1) a -join dependents on (a.empid = dependents.empid); +(select * from emps_n2 where empid = 1) a +join dependents_n1 on (a.empid = dependents_n1.empid); select a.empid from -(select * from emps where empid = 1) a -join dependents on 
(a.empid = dependents.empid); +(select * from emps_n2 where empid = 1) a +join dependents_n1 on (a.empid = dependents_n1.empid); -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 42 -create materialized view mv1 enable rewrite as +create materialized view mv1_n1 enable rewrite as select a.empid, a.deptno from -(select * from emps where empid = 1) a -join depts on (a.deptno = depts.deptno) -join dependents on (a.empid = dependents.empid); -analyze table mv1 compute statistics for columns; +(select * from emps_n2 where empid = 1) a +join depts_n1 on (a.deptno = depts_n1.deptno) +join dependents_n1 on (a.empid = dependents_n1.empid); +analyze table mv1_n1 compute statistics for columns; explain select a.empid from -(select * from emps where empid = 1) a -join dependents on (a.empid = dependents.empid); +(select * from emps_n2 where empid = 1) a +join dependents_n1 on (a.empid = dependents_n1.empid); select a.empid from -(select * from emps where empid = 1) a -join dependents on (a.empid = dependents.empid); +(select * from emps_n2 where empid = 1) a +join dependents_n1 on (a.empid = dependents_n1.empid); -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 43 -create materialized view mv1 enable rewrite as +create materialized view mv1_n1 enable rewrite as select empid deptno from -(select * from emps where empid = 1) a -join depts on (a.deptno = depts.deptno); -analyze table mv1 compute statistics for columns; +(select * from emps_n2 where empid = 1) a +join depts_n1 on (a.deptno = depts_n1.deptno); +analyze table mv1_n1 compute statistics for columns; explain -select empid from emps where empid = 1; +select empid from emps_n2 where empid = 1; -select empid from emps where empid = 1; +select empid from emps_n2 where empid = 1; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 44 -create materialized view mv1 enable rewrite as -select emps.empid, emps.deptno from emps -join depts on (emps.deptno = depts.deptno) -join dependents on (emps.empid = dependents.empid) -where emps.empid = 1; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select emps_n2.empid, emps_n2.deptno from emps_n2 +join depts_n1 on (emps_n2.deptno = depts_n1.deptno) +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.empid = 1; +analyze table mv1_n1 compute statistics for columns; explain -select emps.empid from emps -join dependents on (emps.empid = dependents.empid) -where emps.empid = 1; +select emps_n2.empid from emps_n2 +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.empid = 1; -select emps.empid from emps -join dependents on (emps.empid = dependents.empid) -where emps.empid = 1; +select emps_n2.empid from emps_n2 +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.empid = 1; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 45a -create materialized view mv1 enable rewrite as -select emps.empid, emps.deptno from emps -join depts a on (emps.deptno=a.deptno) -join depts b on (emps.deptno=b.deptno) -join dependents on (emps.empid = dependents.empid) -where emps.empid = 1; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select emps_n2.empid, emps_n2.deptno from emps_n2 +join depts_n1 a on (emps_n2.deptno=a.deptno) +join depts_n1 b on (emps_n2.deptno=b.deptno) +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.empid = 1; +analyze table 
mv1_n1 compute statistics for columns; explain -select emps.empid from emps -join dependents on (emps.empid = dependents.empid) -where emps.empid = 1; +select emps_n2.empid from emps_n2 +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.empid = 1; -select emps.empid from emps -join dependents on (emps.empid = dependents.empid) -where emps.empid = 1; +select emps_n2.empid from emps_n2 +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.empid = 1; -drop materialized view mv1; +drop materialized view mv1_n1; -- EXAMPLE 45b -create materialized view mv1 enable rewrite as -select emps.empid, emps.deptno from emps -join depts a on (emps.deptno=a.deptno) -join depts b on (emps.deptno=b.deptno) -join dependents on (emps.empid = dependents.empid) -where emps.name = 'Sebastian'; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n1 enable rewrite as +select emps_n2.empid, emps_n2.deptno from emps_n2 +join depts_n1 a on (emps_n2.deptno=a.deptno) +join depts_n1 b on (emps_n2.deptno=b.deptno) +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.name = 'Sebastian'; +analyze table mv1_n1 compute statistics for columns; explain -select emps.empid from emps -join dependents on (emps.empid = dependents.empid) -where emps.name = 'Sebastian'; +select emps_n2.empid from emps_n2 +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.name = 'Sebastian'; -select emps.empid from emps -join dependents on (emps.empid = dependents.empid) -where emps.name = 'Sebastian'; +select emps_n2.empid from emps_n2 +join dependents_n1 on (emps_n2.empid = dependents_n1.empid) +where emps_n2.name = 'Sebastian'; -drop materialized view mv1; +drop materialized view mv1_n1; diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q index e39c223ca0..7d2eb481d9 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q +++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q @@ -7,132 +7,132 @@ set hive.strict.checks.cartesian.product=false; set hive.stats.fetch.column.stats=true; set hive.materializedview.rewriting=true; -create table emps ( +create table emps_n8 ( empid int, deptno int, name varchar(256), salary float, commission int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +insert into emps_n8 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250); -analyze table emps compute statistics for columns; +analyze table emps_n8 compute statistics for columns; -create table depts ( +create table depts_n6 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); -analyze table depts compute statistics for columns; +insert into depts_n6 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); +analyze table depts_n6 compute statistics for columns; -create table dependents ( +create table dependents_n4 ( empid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into dependents values (10, 'Michael'), (10, 'Jane'); -analyze table dependents compute statistics for columns; +insert into dependents_n4 values (10, 'Michael'), (10, 'Jane'); +analyze table 
dependents_n4 compute statistics for columns; -create table locations ( +create table locations_n4 ( locationid int, name varchar(256)) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into locations values (10, 'San Francisco'), (10, 'San Diego'); -analyze table locations compute statistics for columns; +insert into locations_n4 values (10, 'San Francisco'), (10, 'San Diego'); +analyze table locations_n4 compute statistics for columns; -alter table emps add constraint pk1 primary key (empid) disable novalidate rely; -alter table depts add constraint pk2 primary key (deptno) disable novalidate rely; -alter table dependents add constraint pk3 primary key (empid) disable novalidate rely; -alter table locations add constraint pk4 primary key (locationid) disable novalidate rely; +alter table emps_n8 add constraint pk1 primary key (empid) disable novalidate rely; +alter table depts_n6 add constraint pk2 primary key (deptno) disable novalidate rely; +alter table dependents_n4 add constraint pk3 primary key (empid) disable novalidate rely; +alter table locations_n4 add constraint pk4 primary key (locationid) disable novalidate rely; -alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely; -alter table depts add constraint fk2 foreign key (locationid) references locations(locationid) disable novalidate rely; +alter table emps_n8 add constraint fk1 foreign key (deptno) references depts_n6(deptno) disable novalidate rely; +alter table depts_n6 add constraint fk2 foreign key (locationid) references locations_n4(locationid) disable novalidate rely; -alter table emps change column deptno deptno int constraint nn1 not null disable novalidate rely; -alter table depts change column locationid locationid int constraint nn2 not null disable novalidate rely; +alter table emps_n8 change column deptno deptno int constraint nn1 not null disable novalidate rely; +alter table depts_n6 change column locationid locationid int constraint nn2 not null disable novalidate rely; -- EXAMPLE 21 -- WORKS NOW -create materialized view mv1 enable rewrite as -select depts.deptno, dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 11 -group by depts.deptno, dependents.empid; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n4 enable rewrite as +select depts_n6.deptno, dependents_n4.empid +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join locations_n4 on (locations_n4.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno > 11 +group by depts_n6.deptno, dependents_n4.empid; +analyze table mv1_n4 compute statistics for columns; explain -select dependents.empid, depts.deptno -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 10 -group by dependents.empid, depts.deptno; - -select dependents.empid, depts.deptno -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 10 -group by dependents.empid, depts.deptno; - -drop materialized view mv1; +select dependents_n4.empid, depts_n6.deptno +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join 
locations_n4 on (locations_n4.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno > 10 +group by dependents_n4.empid, depts_n6.deptno; + +select dependents_n4.empid, depts_n6.deptno +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join locations_n4 on (locations_n4.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno > 10 +group by dependents_n4.empid, depts_n6.deptno; + +drop materialized view mv1_n4; -- EXAMPLE 33 -create materialized view mv1 enable rewrite as -select depts.deptno, dependents.empid, count(emps.salary) as s -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 11 and depts.deptno < 19 -group by depts.deptno, dependents.empid; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n4 enable rewrite as +select depts_n6.deptno, dependents_n4.empid, count(emps_n8.salary) as s +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join locations_n4 on (locations_n4.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno > 11 and depts_n6.deptno < 19 +group by depts_n6.deptno, dependents_n4.empid; +analyze table mv1_n4 compute statistics for columns; explain -select dependents.empid, count(emps.salary) + 1 -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 10 and depts.deptno < 20 -group by dependents.empid; - -select dependents.empid, count(emps.salary) + 1 -from depts -join dependents on (depts.name = dependents.name) -join locations on (locations.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 10 and depts.deptno < 20 -group by dependents.empid; - -drop materialized view mv1; +select dependents_n4.empid, count(emps_n8.salary) + 1 +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join locations_n4 on (locations_n4.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno > 10 and depts_n6.deptno < 20 +group by dependents_n4.empid; + +select dependents_n4.empid, count(emps_n8.salary) + 1 +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join locations_n4 on (locations_n4.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno > 10 and depts_n6.deptno < 20 +group by dependents_n4.empid; + +drop materialized view mv1_n4; -- EXAMPLE 40 -- REWRITING HAPPENS BUT DISCARDED -- DUE TO COST EXCEPT WITH HEURISTICS -create materialized view mv1 enable rewrite as -select depts.deptno, dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno >= 10; -analyze table mv1 compute statistics for columns; +create materialized view mv1_n4 enable rewrite as +select depts_n6.deptno, dependents_n4.empid +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno >= 10; +analyze table mv1_n4 compute statistics for columns; explain -select dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno 
> 0; - -select dependents.empid -from depts -join dependents on (depts.name = dependents.name) -join emps on (emps.deptno = depts.deptno) -where depts.deptno > 0; - -drop materialized view mv1; +select dependents_n4.empid +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno > 0; + +select dependents_n4.empid +from depts_n6 +join dependents_n4 on (depts_n6.name = dependents_n4.name) +join emps_n8 on (emps_n8.deptno = depts_n6.deptno) +where depts_n6.deptno > 0; + +drop materialized view mv1_n4; diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q index fda6d030bc..97700b0628 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q +++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q @@ -7,7 +7,7 @@ set hive.strict.checks.cartesian.product=false; set hive.stats.fetch.column.stats=true; set hive.materializedview.rewriting=true; -create table if not exists source_table_001 ( +create table if not exists source_table_001_n0 ( MY_DATE timestamp, MY_ID bigint, MY_ID2 bigint, @@ -17,56 +17,56 @@ UP_VOLUME bigint ) stored AS ORC TBLPROPERTIES("transactional"="true"); -insert into table source_table_001 +insert into table source_table_001_n0 values ('2010-10-10 00:00:00', 1, 1, 'env', 1, 1); -analyze table source_table_001 compute statistics for columns; +analyze table source_table_001_n0 compute statistics for columns; -CREATE MATERIALIZED VIEW source_table_001_mv ENABLE REWRITE AS +CREATE MATERIALIZED VIEW source_table_001_mv_n0 ENABLE REWRITE AS SELECT SUM(A.DOWN_VOLUME) AS DOWN_VOLUME_SUM, SUM(A.UP_VOLUME) AS UP_VOLUME_SUM, A.MY_ID,A.MY_DATE,A.MY_ID2,A.ENVIRONMENT -from source_table_001 AS A +from source_table_001_n0 AS A group by A.MY_ID,A.MY_ID2,A.ENVIRONMENT,A.MY_DATE; -analyze table source_table_001_mv compute statistics for columns; +analyze table source_table_001_mv_n0 compute statistics for columns; explain select SUM(A.DOWN_VOLUME) AS DOWNLOAD_VOLUME_BYTES, FLOOR(A.MY_DATE to hour),A.MY_ID2,A.ENVIRONMENT -FROM source_table_001 AS A +FROM source_table_001_n0 AS A group by A.MY_ID,A.MY_ID2,A.ENVIRONMENT,FLOOR(A.MY_DATE to hour); -DROP MATERIALIZED VIEW source_table_001_mv; +DROP MATERIALIZED VIEW source_table_001_mv_n0; -CREATE MATERIALIZED VIEW source_table_001_mv ENABLE REWRITE AS +CREATE MATERIALIZED VIEW source_table_001_mv_n0 ENABLE REWRITE AS SELECT SUM(A.DOWN_VOLUME) AS DOWN_VOLUME_SUM, SUM(A.UP_VOLUME) AS UP_VOLUME_SUM, A.MY_ID,FLOOR(A.MY_DATE to hour),A.MY_ID2,A.ENVIRONMENT -from source_table_001 AS A +from source_table_001_n0 AS A group by A.MY_ID,A.MY_ID2,A.ENVIRONMENT,FLOOR(A.MY_DATE to hour); -analyze table source_table_001_mv compute statistics for columns; +analyze table source_table_001_mv_n0 compute statistics for columns; explain select SUM(A.DOWN_VOLUME) AS DOWNLOAD_VOLUME_BYTES, FLOOR(A.MY_DATE to day),A.MY_ID2,A.ENVIRONMENT -FROM source_table_001 AS A +FROM source_table_001_n0 AS A group by A.MY_ID,A.MY_ID2,A.ENVIRONMENT,FLOOR(A.MY_DATE to day); explain select SUM(A.DOWN_VOLUME) AS DOWNLOAD_VOLUME_BYTES, FLOOR(A.MY_DATE to hour),A.MY_ID2,A.ENVIRONMENT -FROM source_table_001 AS A +FROM source_table_001_n0 AS A group by A.MY_ID,A.MY_ID2,A.ENVIRONMENT,FLOOR(A.MY_DATE to hour); explain select SUM(A.DOWN_VOLUME) AS DOWNLOAD_VOLUME_BYTES, FLOOR(A.MY_DATE to second),A.MY_ID2,A.ENVIRONMENT -FROM source_table_001 AS A +FROM source_table_001_n0 AS A group 
 
-DROP MATERIALIZED VIEW source_table_001_mv;
+DROP MATERIALIZED VIEW source_table_001_mv_n0;
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
index ca9157ed77..880f4f5433 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
@@ -7,7 +7,7 @@ set hive.strict.checks.cartesian.product=false;
 set hive.materializedview.rewriting=true;
 set hive.stats.column.autogather=true;
 
-CREATE TABLE `customer_ext`(
+CREATE TABLE `customer_ext_n0`(
 `c_custkey` bigint,
 `c_name` string,
 `c_address` string,
@@ -20,9 +20,9 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/ssb/customer/' into table `customer_ext`;
+LOAD DATA LOCAL INPATH '../../data/files/ssb/customer/' into table `customer_ext_n0`;
 
-CREATE TABLE `customer`(
+CREATE TABLE `customer_n1`(
 `c_custkey` bigint,
 `c_name` string,
 `c_address` string,
@@ -35,10 +35,10 @@ CREATE TABLE `customer`(
 STORED AS ORC
 TBLPROPERTIES ('transactional'='true');
 
-INSERT INTO `customer`
-SELECT * FROM `customer_ext`;
+INSERT INTO `customer_n1`
+SELECT * FROM `customer_ext_n0`;
 
-CREATE TABLE `dates_ext`(
+CREATE TABLE `dates_ext_n0`(
 `d_datekey` bigint,
 `d_date` string,
 `d_dayofweek` string,
@@ -60,9 +60,9 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/ssb/date/' into table `dates_ext`;
+LOAD DATA LOCAL INPATH '../../data/files/ssb/date/' into table `dates_ext_n0`;
 
-CREATE TABLE `dates`(
+CREATE TABLE `dates_n0`(
 `d_datekey` bigint,
 `d_date` string,
 `d_dayofweek` string,
@@ -85,10 +85,10 @@ CREATE TABLE `dates`(
 STORED AS ORC
 TBLPROPERTIES ('transactional'='true');
 
-INSERT INTO `dates`
-SELECT * FROM `dates_ext`;
+INSERT INTO `dates_n0`
+SELECT * FROM `dates_ext_n0`;
 
-CREATE TABLE `ssb_part_ext`(
+CREATE TABLE `ssb_part_ext_n0`(
 `p_partkey` bigint,
 `p_name` string,
 `p_mfgr` string,
@@ -102,9 +102,9 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/ssb/part/' into table `ssb_part_ext`;
+LOAD DATA LOCAL INPATH '../../data/files/ssb/part/' into table `ssb_part_ext_n0`;
 
-CREATE TABLE `ssb_part`(
+CREATE TABLE `ssb_part_n0`(
 `p_partkey` bigint,
 `p_name` string,
 `p_mfgr` string,
@@ -118,10 +118,10 @@ CREATE TABLE `ssb_part`(
 STORED AS ORC
 TBLPROPERTIES ('transactional'='true');
 
-INSERT INTO `ssb_part`
-SELECT * FROM `ssb_part_ext`;
+INSERT INTO `ssb_part_n0`
+SELECT * FROM `ssb_part_ext_n0`;
 
-CREATE TABLE `supplier_ext`(
+CREATE TABLE `supplier_ext_n0`(
 `s_suppkey` bigint,
 `s_name` string,
 `s_address` string,
@@ -133,9 +133,9 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/ssb/supplier/' into table `supplier_ext`;
+LOAD DATA LOCAL INPATH '../../data/files/ssb/supplier/' into table `supplier_ext_n0`;
 
-CREATE TABLE `supplier`(
+CREATE TABLE `supplier_n0`(
 `s_suppkey` bigint,
 `s_name` string,
 `s_address` string,
@@ -147,10 +147,10 @@ CREATE TABLE `supplier`(
 STORED AS ORC
 TBLPROPERTIES ('transactional'='true');
 
-INSERT INTO `supplier`
-SELECT * FROM `supplier_ext`;
+INSERT INTO `supplier_n0`
+SELECT * FROM `supplier_ext_n0`;
 
-CREATE TABLE `lineorder_ext`(
+CREATE TABLE `lineorder_ext_n0`(
 `lo_orderkey` bigint,
 `lo_linenumber` int,
 `lo_custkey` bigint not null disable rely,
@@ -172,9 +172,9 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/ssb/lineorder/' into table `lineorder_ext`;
+LOAD DATA LOCAL INPATH '../../data/files/ssb/lineorder/' into table `lineorder_ext_n0`;
 
-CREATE TABLE `lineorder`(
+CREATE TABLE `lineorder_n0`(
 `lo_orderkey` bigint,
 `lo_linenumber` int,
 `lo_custkey` bigint not null disable rely,
@@ -193,23 +193,23 @@ CREATE TABLE `lineorder`(
 `lo_commitdate` bigint,
 `lo_shipmode` string,
 primary key (`lo_orderkey`) disable rely,
- constraint fk1 foreign key (`lo_custkey`) references `customer`(`c_custkey`) disable rely,
- constraint fk2 foreign key (`lo_orderdate`) references `dates`(`d_datekey`) disable rely,
- constraint fk3 foreign key (`lo_partkey`) references `ssb_part`(`p_partkey`) disable rely,
- constraint fk4 foreign key (`lo_suppkey`) references `supplier`(`s_suppkey`) disable rely)
+ constraint fk1 foreign key (`lo_custkey`) references `customer_n1`(`c_custkey`) disable rely,
+ constraint fk2 foreign key (`lo_orderdate`) references `dates_n0`(`d_datekey`) disable rely,
+ constraint fk3 foreign key (`lo_partkey`) references `ssb_part_n0`(`p_partkey`) disable rely,
+ constraint fk4 foreign key (`lo_suppkey`) references `supplier_n0`(`s_suppkey`) disable rely)
 STORED AS ORC
 TBLPROPERTIES ('transactional'='true');
 
-INSERT INTO `lineorder`
-SELECT * FROM `lineorder_ext`;
+INSERT INTO `lineorder_n0`
+SELECT * FROM `lineorder_ext_n0`;
 
-analyze table customer compute statistics for columns;
-analyze table dates compute statistics for columns;
-analyze table ssb_part compute statistics for columns;
-analyze table supplier compute statistics for columns;
-analyze table lineorder compute statistics for columns;
+analyze table customer_n1 compute statistics for columns;
+analyze table dates_n0 compute statistics for columns;
+analyze table ssb_part_n0 compute statistics for columns;
+analyze table supplier_n0 compute statistics for columns;
+analyze table lineorder_n0 compute statistics for columns;
 
-CREATE MATERIALIZED VIEW `ssb_mv` ENABLE REWRITE
+CREATE MATERIALIZED VIEW `ssb_mv_n0` ENABLE REWRITE
 AS
 SELECT
 c_city,
@@ -231,7 +231,7 @@ SELECT
     lo_extendedprice * lo_discount discounted_price,
     lo_revenue - lo_supplycost net_revenue
 FROM
-    customer, dates, lineorder, ssb_part, supplier
+    customer_n1, dates_n0, lineorder_n0, ssb_part_n0, supplier_n0
 where
     lo_orderdate = d_datekey
     and lo_partkey = p_partkey
@@ -243,7 +243,7 @@ explain
 select
     sum(lo_extendedprice*lo_discount) as revenue
 from
-    lineorder, dates
+    lineorder_n0, dates_n0
 where
     lo_orderdate = d_datekey
     and d_year = 1993
@@ -255,7 +255,7 @@ explain
 select
     sum(lo_extendedprice*lo_discount) as revenue
 from
-    lineorder, dates
+    lineorder_n0, dates_n0
 where
    lo_orderdate = d_datekey
    and d_yearmonthnum = 199401
@@ -267,7 +267,7 @@ explain
 select
     sum(lo_extendedprice*lo_discount) as revenue
 from
-    lineorder, dates
+    lineorder_n0, dates_n0
 where
    lo_orderdate = d_datekey
    and d_weeknuminyear = 6
@@ -280,7 +280,7 @@ explain
 select
     sum(lo_revenue) as lo_revenue, d_year, p_brand1
 from
-    lineorder, dates, ssb_part, supplier
+    lineorder_n0, dates_n0, ssb_part_n0, supplier_n0
 where
    lo_orderdate = d_datekey
    and lo_partkey = p_partkey
@@ -297,7 +297,7 @@ explain
 select
     sum(lo_revenue) as lo_revenue, d_year, p_brand1
 from
-    lineorder, dates, ssb_part, supplier
+    lineorder_n0, dates_n0, ssb_part_n0, supplier_n0
 where
    lo_orderdate = d_datekey
    and lo_partkey = p_partkey
@@ -314,7 +314,7 @@ explain
 select
     sum(lo_revenue) as lo_revenue, d_year, p_brand1
 from
-    lineorder, dates, ssb_part, supplier
+    lineorder_n0, dates_n0, ssb_part_n0, supplier_n0
 where
    lo_orderdate = d_datekey
    and lo_partkey = p_partkey
@@ -332,7 +332,7 @@ select
    c_nation, s_nation, d_year,
    sum(lo_revenue) as lo_revenue
 from
-    customer, lineorder, supplier, dates
+    customer_n1, lineorder_n0, supplier_n0, dates_n0
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -350,7 +350,7 @@ explain
 select
    c_city, s_city, d_year, sum(lo_revenue) as lo_revenue
 from
-    customer, lineorder, supplier, dates
+    customer_n1, lineorder_n0, supplier_n0, dates_n0
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -368,7 +368,7 @@ explain
 select
    c_city, s_city, d_year, sum(lo_revenue) as lo_revenue
 from
-    customer, lineorder, supplier, dates
+    customer_n1, lineorder_n0, supplier_n0, dates_n0
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -386,7 +386,7 @@ explain
 select
    c_city, s_city, d_year, sum(lo_revenue) as lo_revenue
 from
-    customer, lineorder, supplier, dates
+    customer_n1, lineorder_n0, supplier_n0, dates_n0
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -405,7 +405,7 @@ select
    d_year, c_nation,
    sum(lo_revenue - lo_supplycost) as profit
 from
-    dates, customer, supplier, ssb_part, lineorder
+    dates_n0, customer_n1, supplier_n0, ssb_part_n0, lineorder_n0
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -425,7 +425,7 @@ select
    d_year, s_nation, p_category,
    sum(lo_revenue - lo_supplycost) as profit
 from
-    dates, customer, supplier, ssb_part, lineorder
+    dates_n0, customer_n1, supplier_n0, ssb_part_n0, lineorder_n0
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -446,7 +446,7 @@ select
    d_year, s_city, p_brand1,
    sum(lo_revenue - lo_supplycost) as profit
 from
-    dates, customer, supplier, ssb_part, lineorder
+    dates_n0, customer_n1, supplier_n0, ssb_part_n0, lineorder_n0
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -461,4 +461,4 @@ group by
 order by
    d_year, s_city, p_brand1;
 
-DROP MATERIALIZED VIEW `ssb_mv`;
+DROP MATERIALIZED VIEW `ssb_mv_n0`;
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q
index 6752f873bd..6a92d9a52f 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q
@@ -22,7 +22,7 @@ STORED AS TEXTFILE;
 
 LOAD DATA LOCAL INPATH '../../data/files/ssb/customer/' into table `customer_ext`;
 
-CREATE TABLE `customer`(
+CREATE TABLE `customer_n0`(
 `c_custkey` bigint,
 `c_name` string,
 `c_address` string,
@@ -35,7 +35,7 @@ CREATE TABLE `customer`(
 STORED AS ORC
 TBLPROPERTIES ('transactional'='true');
 
-INSERT INTO `customer`
+INSERT INTO `customer_n0`
 SELECT * FROM `customer_ext`;
 
 CREATE TABLE `dates_ext`(
@@ -193,7 +193,7 @@ CREATE TABLE `lineorder`(
 `lo_commitdate` bigint,
 `lo_shipmode` string,
 primary key (`lo_orderkey`) disable rely,
- constraint fk1 foreign key (`lo_custkey`) references `customer`(`c_custkey`) disable rely,
+ constraint fk1 foreign key (`lo_custkey`) references `customer_n0`(`c_custkey`) disable rely,
  constraint fk2 foreign key (`lo_orderdate`) references `dates`(`d_datekey`) disable rely,
  constraint fk3 foreign key (`lo_partkey`) references `ssb_part`(`p_partkey`) disable rely,
  constraint fk4 foreign key (`lo_suppkey`) references `supplier`(`s_suppkey`) disable rely)
@@ -203,7 +203,7 @@ TBLPROPERTIES ('transactional'='true');
 
 INSERT INTO `lineorder`
 SELECT * FROM `lineorder_ext`;
 
-analyze table customer compute statistics for columns;
+analyze table customer_n0 compute statistics for columns;
 analyze table dates compute statistics for columns;
 analyze table ssb_part compute statistics for columns;
 analyze table supplier compute statistics for columns;
@@ -232,7 +232,7 @@ SELECT
    lo_extendedprice * lo_discount discounted_price,
    lo_revenue - lo_supplycost net_revenue
 FROM
-    customer, dates, lineorder, ssb_part, supplier
+    customer_n0, dates, lineorder, ssb_part, supplier
 where
    lo_orderdate = d_datekey
    and lo_partkey = p_partkey
@@ -333,7 +333,7 @@ select
    c_nation, s_nation, d_year,
    sum(lo_revenue) as lo_revenue
 from
-    customer, lineorder, supplier, dates
+    customer_n0, lineorder, supplier, dates
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -351,7 +351,7 @@ explain
 select
    c_city, s_city, d_year, sum(lo_revenue) as lo_revenue
 from
-    customer, lineorder, supplier, dates
+    customer_n0, lineorder, supplier, dates
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -369,7 +369,7 @@ explain
 select
    c_city, s_city, d_year, sum(lo_revenue) as lo_revenue
 from
-    customer, lineorder, supplier, dates
+    customer_n0, lineorder, supplier, dates
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -387,7 +387,7 @@ explain
 select
    c_city, s_city, d_year, sum(lo_revenue) as lo_revenue
 from
-    customer, lineorder, supplier, dates
+    customer_n0, lineorder, supplier, dates
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -406,7 +406,7 @@ select
    d_year, c_nation,
    sum(lo_revenue - lo_supplycost) as profit
 from
-    dates, customer, supplier, ssb_part, lineorder
+    dates, customer_n0, supplier, ssb_part, lineorder
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -426,7 +426,7 @@ select
    d_year, s_nation, p_category,
    sum(lo_revenue - lo_supplycost) as profit
 from
-    dates, customer, supplier, ssb_part, lineorder
+    dates, customer_n0, supplier, ssb_part, lineorder
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
@@ -447,7 +447,7 @@ select
    d_year, s_city, p_brand1,
    sum(lo_revenue - lo_supplycost) as profit
 from
-    dates, customer, supplier, ssb_part, lineorder
+    dates, customer_n0, supplier, ssb_part, lineorder
 where
    lo_custkey = c_custkey
    and lo_suppkey = s_suppkey
diff --git a/ql/src/test/queries/clientpositive/merge1.q b/ql/src/test/queries/clientpositive/merge1.q
index bd8a8570fc..8c2e6b30bd 100644
--- a/ql/src/test/queries/clientpositive/merge1.q
+++ b/ql/src/test/queries/clientpositive/merge1.q
@@ -6,30 +6,30 @@ set hive.merge.sparkfiles=true;
 
 -- SORT_QUERY_RESULTS
 
-create table dest1(key int, val int);
+create table dest1_n145(key int, val int);
 
 explain
-insert overwrite table dest1
+insert overwrite table dest1_n145
 select key, count(1) from src group by key;
 
-insert overwrite table dest1
+insert overwrite table dest1_n145
 select key, count(1) from src group by key;
 
-select * from dest1;
+select * from dest1_n145;
 
-drop table dest1;
+drop table dest1_n145;
 
-create table test_src(key string, value string) partitioned by (ds string);
-create table dest1(key string);
+create table test_src_n2(key string, value string) partitioned by (ds string);
+create table dest1_n145(key string);
 
-insert overwrite table test_src partition(ds='101') select * from src;
-insert overwrite table test_src partition(ds='102') select * from src;
+insert overwrite table test_src_n2 partition(ds='101') select * from src;
+insert overwrite table test_src_n2 partition(ds='102') select * from src;
 
 explain
-insert overwrite table dest1 select key from test_src;
-insert overwrite table dest1 select key from test_src;
+insert overwrite table dest1_n145 select key from test_src_n2;
+insert overwrite table dest1_n145 select key from test_src_n2;
 
 set hive.merge.smallfiles.avgsize=16;
 explain
-insert overwrite table dest1 select key from test_src;
-insert overwrite table dest1 select key from test_src;
+insert overwrite table dest1_n145 select key from test_src_n2;
+insert overwrite table dest1_n145 select key from test_src_n2;
diff --git a/ql/src/test/queries/clientpositive/merge2.q b/ql/src/test/queries/clientpositive/merge2.q
index e3ec76a953..a95cc889ad 100644
--- a/ql/src/test/queries/clientpositive/merge2.q
+++ b/ql/src/test/queries/clientpositive/merge2.q
@@ -11,31 +11,31 @@ set mapred.max.split.size=256;
 
 -- SORT_QUERY_RESULTS
 
-create table test1(key int, val int);
+create table test1_n10(key int, val int);
 
 explain
-insert overwrite table test1
+insert overwrite table test1_n10
 select key, count(1) from src group by key;
 
-insert overwrite table test1
+insert overwrite table test1_n10
 select key, count(1) from src group by key;
 
-select * from test1;
+select * from test1_n10;
 
-drop table test1;
+drop table test1_n10;
 
-create table test_src(key string, value string) partitioned by (ds string);
-create table test1(key string);
+create table test_src_n0(key string, value string) partitioned by (ds string);
+create table test1_n10(key string);
 
-insert overwrite table test_src partition(ds='101') select * from src;
-insert overwrite table test_src partition(ds='102') select * from src;
+insert overwrite table test_src_n0 partition(ds='101') select * from src;
+insert overwrite table test_src_n0 partition(ds='102') select * from src;
 
 explain
-insert overwrite table test1 select key from test_src;
-insert overwrite table test1 select key from test_src;
+insert overwrite table test1_n10 select key from test_src_n0;
+insert overwrite table test1_n10 select key from test_src_n0;
 
 set hive.merge.smallfiles.avgsize=16;
 explain
-insert overwrite table test1 select key from test_src;
-insert overwrite table test1 select key from test_src;
+insert overwrite table test1_n10 select key from test_src_n0;
+insert overwrite table test1_n10 select key from test_src_n0;
diff --git a/ql/src/test/queries/clientpositive/merge_dynamic_partition.q b/ql/src/test/queries/clientpositive/merge_dynamic_partition.q
index 26a793196b..adcbf974de 100644
--- a/ql/src/test/queries/clientpositive/merge_dynamic_partition.q
+++ b/ql/src/test/queries/clientpositive/merge_dynamic_partition.q
@@ -8,14 +8,14 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 
 -- SORT_QUERY_RESULTS
 
-create table srcpart_merge_dp like srcpart;
+create table srcpart_merge_dp_n1 like srcpart;
 
-create table merge_dynamic_part like srcpart;
+create table merge_dynamic_part_n1 like srcpart;
 
-load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11);
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.merge.mapfiles=false;
@@ -23,11 +23,11 @@ set hive.merge.mapredfiles=false;
 set hive.merge.smallfiles.avgsize=1000000000;
 set hive.optimize.sort.dynamic.partition=false;
 explain
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
+insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n1 where ds='2008-04-08';
+insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n1 where ds='2008-04-08';
 
-select * from merge_dynamic_part;
-show table extended like `merge_dynamic_part`;
+select * from merge_dynamic_part_n1;
+show table extended like `merge_dynamic_part_n1`;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
@@ -35,20 +35,20 @@ set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1000000000;
 explain
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp where ds='2008-04-08';
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp where ds='2008-04-08';
+insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp_n1 where ds='2008-04-08';
+insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp_n1 where ds='2008-04-08';
 
-select * from merge_dynamic_part;
-show table extended like `merge_dynamic_part`;
+select * from merge_dynamic_part_n1;
+show table extended like `merge_dynamic_part_n1`;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1000000000;
 explain
-insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds='2008-04-08' and hr=11;
-insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds='2008-04-08' and hr=11;;
+insert overwrite table merge_dynamic_part_n1 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n1 where ds='2008-04-08' and hr=11;
+insert overwrite table merge_dynamic_part_n1 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n1 where ds='2008-04-08' and hr=11;
 
-select * from merge_dynamic_part;
-show table extended like `merge_dynamic_part`;
+select * from merge_dynamic_part_n1;
+show table extended like `merge_dynamic_part_n1`;
diff --git a/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q b/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q
index 6bfa5d94dc..94c5c975de 100644
--- a/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q
+++ b/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q
@@ -5,16 +5,16 @@ set hive.strict.checks.bucketing=false;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
-create table srcpart_merge_dp like srcpart;
+create table srcpart_merge_dp_n0 like srcpart;
 
-create table merge_dynamic_part like srcpart;
+create table merge_dynamic_part_n0 like srcpart;
 
-load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket0.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../../data/files/srcbucket1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket0.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket1.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=12);
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -25,8 +25,8 @@ set hive.exec.compress.output=false;
 set hive.optimize.sort.dynamic.partition=false;
 
 explain
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
+insert overwrite table merge_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n0 where ds='2008-04-08';
+insert overwrite table merge_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n0 where ds='2008-04-08';
 
-show table extended like `merge_dynamic_part`;
+show table extended like `merge_dynamic_part_n0`;
diff --git a/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q b/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q
index e3070b16c2..8915efadec 100644
--- a/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q
+++ b/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q
@@ -7,26 +7,26 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 
 -- SORT_QUERY_RESULTS
 
-create table srcpart_merge_dp like srcpart;
+create table srcpart_merge_dp_n2 like srcpart;
 
-create table merge_dynamic_part like srcpart;
+create table merge_dynamic_part_n2 like srcpart;
 
-load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11);
 
-load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12);
 
-load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11);
-load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11);
-load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12);
-load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12);
+load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=11);
+load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=11);
+load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=12);
+load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=12);
 
-show partitions srcpart_merge_dp;
+show partitions srcpart_merge_dp_n2;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.merge.mapfiles=true;
@@ -35,10 +35,10 @@ set hive.merge.smallfiles.avgsize=3000;
 set hive.exec.compress.output=false;
 
 explain
-insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds>='2008-04-08';
+insert overwrite table merge_dynamic_part_n2 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n2 where ds>='2008-04-08';
 
-insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds>='2008-04-08';
+insert overwrite table merge_dynamic_part_n2 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n2 where ds>='2008-04-08';
 
-select ds, hr, count(1) from merge_dynamic_part where ds>='2008-04-08' group by ds, hr;
+select ds, hr, count(1) from merge_dynamic_part_n2 where ds>='2008-04-08' group by ds, hr;
 
-show table extended like `merge_dynamic_part`;
+show table extended like `merge_dynamic_part_n2`;
diff --git a/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q b/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q
index f94db72c56..813ad8c5f0 100644
--- a/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q
+++ b/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q
@@ -6,26 +6,26 @@ set hive.mapred.mode=nonstrict;
 -- this test verifies that the block merge task that can follow a query to generate dynamic
 -- partitions does not produce incorrect results by dropping partitions
 
-create table srcpart_merge_dp like srcpart;
+create table srcpart_merge_dp_n4 like srcpart;
 
-create table srcpart_merge_dp_rc like srcpart;
-alter table srcpart_merge_dp_rc set fileformat RCFILE;
+create table srcpart_merge_dp_rc_n1 like srcpart;
+alter table srcpart_merge_dp_rc_n1 set fileformat RCFILE;
 
-create table merge_dynamic_part like srcpart;
-alter table merge_dynamic_part set fileformat RCFILE;
+create table merge_dynamic_part_n3 like srcpart;
+alter table merge_dynamic_part_n3 set fileformat RCFILE;
 
-load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11);
 
-load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12);
 
-insert overwrite table srcpart_merge_dp_rc partition (ds = '2008-04-08', hr)
-select key, value, hr from srcpart_merge_dp where ds = '2008-04-08';
+insert overwrite table srcpart_merge_dp_rc_n1 partition (ds = '2008-04-08', hr)
+select key, value, hr from srcpart_merge_dp_n4 where ds = '2008-04-08';
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.merge.mapfiles=true;
@@ -36,12 +36,12 @@ set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 explain
-insert overwrite table merge_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc where ds = '2008-04-08';
+insert overwrite table merge_dynamic_part_n3 partition (ds = '2008-04-08', hr)
+select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc_n1 where ds = '2008-04-08';
 
-insert overwrite table merge_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc where ds = '2008-04-08';
+insert overwrite table merge_dynamic_part_n3 partition (ds = '2008-04-08', hr)
+select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc_n1 where ds = '2008-04-08';
 
-show partitions merge_dynamic_part;
+show partitions merge_dynamic_part_n3;
 
-select count(*) from merge_dynamic_part;
+select count(*) from merge_dynamic_part_n3;
diff --git a/ql/src/test/queries/clientpositive/merge_empty.q b/ql/src/test/queries/clientpositive/merge_empty.q
index 83c44b56ec..40e694274a 100644
--- a/ql/src/test/queries/clientpositive/merge_empty.q
+++ b/ql/src/test/queries/clientpositive/merge_empty.q
@@ -4,12 +4,12 @@ set hive.merge.sparkfiles=true;
 set hive.auto.convert.join=false;
 set mapreduce.job.reduces=1000;
 
-create table dummy (a string);
-insert overwrite directory '/tmp/test' select src.key from src join dummy on src.key = dummy.a;
+create table dummy_n3 (a string);
+insert overwrite directory '/tmp/test' select src.key from src join dummy_n3 on src.key = dummy_n3.a;
 dfs -ls /tmp/test;
 
 -- verify that this doesn't merge for bucketed tables
-create table foo (a bigint, b string) clustered by (a) into 256 buckets;
-create table bar (a bigint, b string);
-insert overwrite table foo select * from bar;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/foo;
+create table foo_n6 (a bigint, b string) clustered by (a) into 256 buckets;
+create table bar_n1 (a bigint, b string);
+insert overwrite table foo_n6 select * from bar_n1;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/foo_n6;
diff --git a/ql/src/test/queries/clientpositive/mergejoin.q b/ql/src/test/queries/clientpositive/mergejoin.q
index 0f37002ab8..8636f1320e 100644
--- a/ql/src/test/queries/clientpositive/mergejoin.q
+++ b/ql/src/test/queries/clientpositive/mergejoin.q
@@ -25,136 +25,136 @@ select * from src a join src1 b on a.key = b.key;
 
 select * from src a join src1 b on a.key = b.key;
 
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n16(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n10 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE;
+CREATE TABLE srcbucket_mapjoin_part_n17 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n16 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n16 partition(ds='2008-04-08');
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08');
 
 set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n10 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n17;
 
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n9(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE;
+insert overwrite table tab_n9 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n16;
 
 explain vectorization detail
 select count(*)
-from tab a join tab_part b on a.key = b.key;
+from tab_n9 a join tab_part_n10 b on a.key = b.key;
 
-select * from tab a join tab_part b on a.key = b.key;
+select * from tab_n9 a join tab_part_n10 b on a.key = b.key;
 
 set hive.join.emit.interval=2;
 
-select * from tab a join tab_part b on a.key = b.key;
+select * from tab_n9 a join tab_part_n10 b on a.key = b.key;
 
 explain vectorization detail
 select count(*)
-from tab a left outer join tab_part b on a.key = b.key;
+from tab_n9 a left outer join tab_part_n10 b on a.key = b.key;
 
 select count(*)
-from tab a left outer join tab_part b on a.key = b.key;
+from tab_n9 a left outer join tab_part_n10 b on a.key = b.key;
 
 explain vectorization detail
 select count (*)
-from tab a right outer join tab_part b on a.key = b.key;
+from tab_n9 a right outer join tab_part_n10 b on a.key = b.key;
 
 select count (*)
-from tab a right outer join tab_part b on a.key = b.key;
+from tab_n9 a right outer join tab_part_n10 b on a.key = b.key;
 
 explain vectorization detail
 select count(*)
-from tab a full outer join tab_part b on a.key = b.key;
+from tab_n9 a full outer join tab_part_n10 b on a.key = b.key;
 
 select count(*)
-from tab a full outer join tab_part b on a.key = b.key;
+from tab_n9 a full outer join tab_part_n10 b on a.key = b.key;
 
 explain vectorization detail
-select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value;
-select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value;
+select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value;
+select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value;
 
 explain vectorization detail
-select count(*) from tab a join tab_part b on a.value = b.value;
-select count(*) from tab a join tab_part b on a.value = b.value;
+select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value;
+select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value;
 
 explain vectorization detail
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+select count(*) from (select s1.key as key, s1.value as value from tab_n9 s1 join tab_n9 s3 on s1.key=s3.key
 UNION ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key);
+select s2.key as key, s2.value as value from tab_n9 s2
+) a join tab_part_n10 b on (a.key = b.key);
 
 explain vectorization detail
-select count(*) from tab a join tab_part b on a.value = b.value;
-select count(*) from tab a join tab_part b on a.value = b.value;
+select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value;
+select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value;
 
 explain vectorization detail
-select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value;
-select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value;
+select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value;
+select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value;
 
 explain vectorization detail
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+select count(*) from (select s1.key as key, s1.value as value from tab_n9 s1 join tab_n9 s3 on s1.key=s3.key
 UNION ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key);
+select s2.key as key, s2.value as value from tab_n9 s2
+) a join tab_part_n10 b on (a.key = b.key);
 
 explain vectorization detail
 select count(*) from
 (select rt1.id from
-(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+(select t1.key as id, t1.value as od from tab_n9 t1 order by id, od) rt1) vt1
 join
 (select rt2.id from
-(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+(select t2.key as id, t2.value as od from tab_part_n10 t2 order by id, od) rt2) vt2
 where vt1.id=vt2.id;
 
 select count(*) from
 (select rt1.id from
-(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+(select t1.key as id, t1.value as od from tab_n9 t1 order by id, od) rt1) vt1
 join
 (select rt2.id from
-(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+(select t2.key as id, t2.value as od from tab_part_n10 t2 order by id, od) rt2) vt2
 where vt1.id=vt2.id;
 
 set mapred.reduce.tasks=3;
-select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key;
-select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key;
+select * from (select * from tab_n9 where tab_n9.key = 0)a full outer join (select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key;
+select * from (select * from tab_n9 where tab_n9.key = 0)a right outer join (select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key;
 
 select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key;
+(select * from tab_part_n10 where tab_part_n10.key = 98)b join tab_part_n10 c on a.key = b.key and b.key = c.key;
 
 select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b on a.key = b.key join tab_part c on b.key = c.key;
+(select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key join tab_part_n10 c on b.key = c.key;
 
 select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key;
+(select * from tab_part_n10 where tab_part_n10.key = 98)b full outer join tab_part_n10 c on a.key = b.key and b.key = c.key;
 
 select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b on a.key = b.key full outer join tab_part c on b.key = c.key;
+(select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key full outer join tab_part_n10 c on b.key = c.key;
 
 set hive.cbo.enable = false;
 select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key;
+(select * from tab_part_n10 where tab_part_n10.key = 98)b join tab_part_n10 c on a.key = b.key and b.key = c.key;
 
 select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key;
+(select * from tab_part_n10 where tab_part_n10.key = 98)b full outer join tab_part_n10 c on a.key = b.key and b.key = c.key;
diff --git a/ql/src/test/queries/clientpositive/mergejoins.q b/ql/src/test/queries/clientpositive/mergejoins.q
index 2b1ecbac54..fb5c9ac975 100644
--- a/ql/src/test/queries/clientpositive/mergejoins.q
+++ b/ql/src/test/queries/clientpositive/mergejoins.q
@@ -1,12 +1,12 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
-create table a (val1 int, val2 int);
-create table b (val1 int, val2 int);
-create table c (val1 int, val2 int);
-create table d (val1 int, val2 int);
-create table e (val1 int, val2 int);
+create table a_n0 (val1 int, val2 int);
+create table b_n0 (val1 int, val2 int);
+create table c_n0 (val1 int, val2 int);
+create table d_n0 (val1 int, val2 int);
+create table e_n0 (val1 int, val2 int);
 
-explain select * from a join b on a.val1=b.val1 join c on a.val1=c.val1 join d on a.val1=d.val1 join e on a.val2=e.val2;
+explain select * from a_n0 join b_n0 on a_n0.val1=b_n0.val1 join c_n0 on a_n0.val1=c_n0.val1 join d_n0 on a_n0.val1=d_n0.val1 join e_n0 on a_n0.val2=e_n0.val2;
 
 --HIVE-3070 filter on outer join condition removed while merging join tree
-explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10;
+explain select * from src a_n0 join src b_n0 on a_n0.key=b_n0.key left outer join src c_n0 on b_n0.key=c_n0.key and b_n0.key<10;
diff --git a/ql/src/test/queries/clientpositive/mergejoins_mixed.q b/ql/src/test/queries/clientpositive/mergejoins_mixed.q
index 51f04cdd9e..f2fbfa92e0 100644
--- a/ql/src/test/queries/clientpositive/mergejoins_mixed.q
+++ b/ql/src/test/queries/clientpositive/mergejoins_mixed.q
@@ -1,44 +1,44 @@
 set hive.mapred.mode=nonstrict;
 -- HIVE-3464
-create table a (key string, value string);
+create table a_n5 (key string, value string);
 
--- (a-b-c-d)
+-- (a_n5-b-c-d)
 explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) left outer join a_n5 c on (b.key=c.key) left outer join a_n5 d on (a_n5.key=d.key);
 
 explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) right outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) left outer join a_n5 c on (b.key=c.key) right outer join a_n5 d on (a_n5.key=d.key);
 
 explain
-select * from a join a b on (a.key=b.key) right outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) right outer join a_n5 c on (b.key=c.key) left outer join a_n5 d on (a_n5.key=d.key);
 
 explain
-select * from a join a b on (a.key=b.key) right outer join a c on (b.key=c.key) right outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) right outer join a_n5 c on (b.key=c.key) right outer join a_n5 d on (a_n5.key=d.key);
 
--- ((a-b-d)-c) (reordered)
+-- ((a_n5-b-d)-c) (reordered)
 explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) left outer join a_n5 c on (b.value=c.key) left outer join a_n5 d on (a_n5.key=d.key);
 
 explain
-select * from a join a b on (a.key=b.key) right outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) right outer join a_n5 c on (b.value=c.key) right outer join a_n5 d on (a_n5.key=d.key);
 
 explain
-select * from a join a b on (a.key=b.key) full outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) full outer join a_n5 c on (b.value=c.key) full outer join a_n5 d on (a_n5.key=d.key);
 
--- (((a-b)-c)-d)
+-- (((a_n5-b)-c)-d)
 explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) left outer join a_n5 c on (b.value=c.key) right outer join a_n5 d on (a_n5.key=d.key);
 
 explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) left outer join a_n5 c on (b.value=c.key) full outer join a_n5 d on (a_n5.key=d.key);
 
 explain
-select * from a join a b on (a.key=b.key) right outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) right outer join a_n5 c on (b.value=c.key) left outer join a_n5 d on (a_n5.key=d.key);
 
 explain
-select * from a join a b on (a.key=b.key) right outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) right outer join a_n5 c on (b.value=c.key) full outer join a_n5 d on (a_n5.key=d.key);
 
--- ((a-b)-c-d)
+-- ((a_n5-b)-c-d)
 explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (c.key=d.key);
+select * from a_n5 join a_n5 b on (a_n5.key=b.key) left outer join a_n5 c on (b.value=c.key) left outer join a_n5 d on (c.key=d.key);
diff --git a/ql/src/test/queries/clientpositive/metadata_only_queries.q b/ql/src/test/queries/clientpositive/metadata_only_queries.q
index bcf320b0c5..cdbb76e4d9 100644
--- a/ql/src/test/queries/clientpositive/metadata_only_queries.q
+++ b/ql/src/test/queries/clientpositive/metadata_only_queries.q
@@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.compute.query.using.stats=true;
 set hive.stats.autogather=true;
-create table over10k(
+create table over10k_n12(
 t tinyint,
 si smallint,
 i int,
@@ -18,7 +18,7 @@ create table over10k(
 row format delimited
 fields terminated by '|';
 
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n12;
 
 create table stats_tbl(
 t tinyint,
@@ -47,11 +47,11 @@ create table stats_tbl_part(
 bin binary) partitioned by (dt string);
 
-insert overwrite table stats_tbl select * from over10k;
+insert overwrite table stats_tbl select * from over10k_n12;
 
-insert into table stats_tbl_part partition (dt='2010') select * from over10k where t>0 and t<30;
-insert into table stats_tbl_part partition (dt='2011') select * from over10k where t>30 and t<60;
-insert into table stats_tbl_part partition (dt='2012') select * from over10k where t>60;
+insert into table stats_tbl_part partition (dt='2010') select * from over10k_n12 where t>0 and t<30;
+insert into table stats_tbl_part partition (dt='2011') select * from over10k_n12 where t>30 and t<60;
+insert into table stats_tbl_part partition (dt='2012') select * from over10k_n12 where t>60;
 
 explain
 select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl;
diff --git a/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q b/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
index 692c414354..ed75c57d96 100644
--- a/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
+++ b/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
@@ -2,7 +2,7 @@ set hive.stats.column.autogather=false;
 set hive.stats.dbclass=fs;
 set hive.compute.query.using.stats=true;
 set hive.explain.user=false;
-create table over10k(
+create table over10k_n23(
 t tinyint,
 si smallint,
 i int,
@@ -17,9 +17,9 @@ create table over10k(
 row format delimited
 fields terminated by '|';
 
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n23;
 
-create table stats_tbl_part(
+create table stats_tbl_part_n0(
 t tinyint,
 si smallint,
 i int,
@@ -33,22 +33,22 @@ create table stats_tbl_part(
 bin binary) partitioned by (dt int);
 
-from over10k
-insert overwrite table stats_tbl_part partition (dt=2010) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t>0 and t<30
-insert overwrite table stats_tbl_part partition (dt=2014) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t > 30 and t<60;
+from over10k_n23
+insert overwrite table stats_tbl_part_n0 partition (dt=2010) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t>0 and t<30
+insert overwrite table stats_tbl_part_n0 partition (dt=2014) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t > 30 and t<60;
 
-analyze table stats_tbl_part partition(dt) compute statistics;
-analyze table stats_tbl_part partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin;
-analyze table stats_tbl_part partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin;
+analyze table stats_tbl_part_n0 partition(dt) compute statistics;
+analyze table stats_tbl_part_n0 partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin;
+analyze table stats_tbl_part_n0 partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin;
 
 explain
-select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010;
-select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010;
+select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt = 2010;
+select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt = 2010;
 
 explain
-select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
-select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
+select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt > 2010;
+select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt > 2010;
 
-select count(*) from stats_tbl_part;
-select count(*)/2 from stats_tbl_part;
-drop table stats_tbl_part;
+select count(*) from stats_tbl_part_n0;
+select count(*)/2 from stats_tbl_part_n0;
+drop table stats_tbl_part_n0;
 set hive.compute.query.using.stats=false;
diff --git a/ql/src/test/queries/clientpositive/metadataonly1.q b/ql/src/test/queries/clientpositive/metadataonly1.q
index e3bf819929..e247c6ab81 100644
--- a/ql/src/test/queries/clientpositive/metadataonly1.q
+++ b/ql/src/test/queries/clientpositive/metadataonly1.q
@@ -1,48 +1,48 @@
 --! qt:dataset:srcpart
 set hive.mapred.mode=nonstrict;
 set hive.optimize.metadataonly=true;
 
-CREATE TABLE TEST1(A INT, B DOUBLE) partitioned by (ds string);
-explain extended select max(ds) from TEST1;
-select max(ds) from TEST1;
+CREATE TABLE TEST1_n12(A INT, B DOUBLE) partitioned by (ds string);
+explain extended select max(ds) from TEST1_n12;
+select max(ds) from TEST1_n12;
 
-alter table TEST1 add partition (ds='1');
-explain extended select max(ds) from TEST1;
-select max(ds) from TEST1;
+alter table TEST1_n12 add partition (ds='1');
+explain extended select max(ds) from TEST1_n12;
+select max(ds) from TEST1_n12;
 
-explain extended select count(distinct ds) from TEST1;
-select count(distinct ds) from TEST1;
+explain extended select count(distinct ds) from TEST1_n12;
+select count(distinct ds) from TEST1_n12;
 
-explain extended select count(ds) from TEST1;
-select count(ds) from TEST1;
+explain extended select count(ds) from TEST1_n12;
+select count(ds) from TEST1_n12;
 
-alter table TEST1 add partition (ds='2');
+alter table TEST1_n12 add partition (ds='2');
 explain extended
-select count(*) from TEST1 a2 join (select max(ds) m from TEST1) b on a2.ds=b.m;
-select count(*) from TEST1 a2 join (select max(ds) m from TEST1) b on a2.ds=b.m;
+select count(*) from TEST1_n12 a2 join (select max(ds) m from TEST1_n12) b on a2.ds=b.m;
+select count(*) from TEST1_n12 a2 join (select max(ds) m from TEST1_n12) b on a2.ds=b.m;
 
-CREATE TABLE TEST2(A INT, B DOUBLE) partitioned by (ds string, hr string);
-alter table TEST2 add partition (ds='1', hr='1');
-alter table TEST2 add partition (ds='1', hr='2');
-alter table TEST2 add partition (ds='1', hr='3');
+CREATE TABLE TEST2_n8(A INT, B DOUBLE) partitioned by (ds string, hr string);
+alter table TEST2_n8 add partition (ds='1', hr='1');
+alter table TEST2_n8 add partition (ds='1', hr='2');
+alter table TEST2_n8 add partition (ds='1', hr='3');
 
-explain extended select ds, count(distinct hr) from TEST2 group by ds;
-select ds, count(distinct hr) from TEST2 group by ds;
+explain extended select ds, count(distinct hr) from TEST2_n8 group by ds;
+select ds, count(distinct hr) from TEST2_n8 group by ds;
 
-explain extended select ds, count(hr) from TEST2 group by ds;
-select ds, count(hr) from TEST2 group by ds;
+explain extended select ds, count(hr) from TEST2_n8 group by ds;
+select ds, count(hr) from TEST2_n8 group by ds;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 
-explain extended select max(ds) from TEST1;
-select max(ds) from TEST1;
+explain extended select max(ds) from TEST1_n12;
+select max(ds) from TEST1_n12;
 
 select distinct ds from srcpart;
 select min(ds),max(ds) from srcpart;
 
 -- HIVE-3594 URI encoding for temporary path
-alter table TEST2 add partition (ds='01:10:10', hr='01');
-alter table TEST2 add partition (ds='01:10:20', hr='02');
+alter table TEST2_n8 add partition (ds='01:10:10', hr='01');
+alter table TEST2_n8 add partition (ds='01:10:20', hr='02');
 
-explain extended select ds, count(distinct hr) from TEST2 group by ds;
-select ds, count(distinct hr) from TEST2 group by ds;
+explain extended select ds, count(distinct hr) from TEST2_n8 group by ds;
+select ds, count(distinct hr) from TEST2_n8 group by ds;
diff --git a/ql/src/test/queries/clientpositive/misc_json.q b/ql/src/test/queries/clientpositive/misc_json.q
index 541e36933a..95a3e5bc16 100644
--- a/ql/src/test/queries/clientpositive/misc_json.q
+++ b/ql/src/test/queries/clientpositive/misc_json.q
@@ -1,10 +1,10 @@
 set hive.ddl.output.format=json;
 
-CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE;
EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE; +CREATE TABLE IF NOT EXISTS jsontable_n0 (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE; -ALTER TABLE jsontable ADD COLUMNS (name STRING COMMENT 'a new column'); +ALTER TABLE jsontable_n0 ADD COLUMNS (name STRING COMMENT 'a new column'); -ALTER TABLE jsontable RENAME TO jsontable2; +ALTER TABLE jsontable_n0 RENAME TO jsontable2; SHOW TABLE EXTENDED LIKE jsontable2; diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index 7d0955bfbb..61dd3e7475 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -13,32 +13,32 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -- Force multiple writers when reading -drop table intermediate; -create table intermediate(key int) partitioned by (p int) stored as orc; -insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; -insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; - - -drop table part_mm; -create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); -explain insert into table part_mm partition(key_mm=455) select key from intermediate; -insert into table part_mm partition(key_mm=455) select key from intermediate; -insert into table part_mm partition(key_mm=456) select key from intermediate; -insert into table part_mm partition(key_mm=455) select key from intermediate; -select * from part_mm order by key, key_mm; - --- TODO: doesn't work truncate table part_mm partition(key_mm=455); -select * from part_mm order by key, key_mm; -truncate table part_mm; -select * from part_mm order by key, key_mm; -drop table part_mm; +drop table intermediate_n0; +create table intermediate_n0(key int) partitioned by (p int) stored as orc; +insert into table intermediate_n0 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate_n0 partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate_n0 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; + + +drop table part_mm_n0; +create table part_mm_n0(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +explain insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0; +insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0; +insert into table part_mm_n0 partition(key_mm=456) select key from intermediate_n0; +insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0; +select * from part_mm_n0 order by key, key_mm; + +-- TODO: doesn't work truncate table part_mm_n0 partition(key_mm=455); +select * from part_mm_n0 order by key, key_mm; +truncate table part_mm_n0; +select * from part_mm_n0 order by key, key_mm; +drop table part_mm_n0; drop table simple_mm; create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); -insert into 
table simple_mm select key from intermediate; +insert into table simple_mm select key from intermediate_n0; select * from simple_mm order by key; -insert into table simple_mm select key from intermediate; +insert into table simple_mm select key from intermediate_n0; select * from simple_mm order by key; truncate table simple_mm; select * from simple_mm; @@ -57,7 +57,7 @@ set hive.merge.tezfiles=false; create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only"); -insert into table dp_mm partition (key1='123', key2) select key, key from intermediate; +insert into table dp_mm partition (key1='123', key2) select key, key from intermediate_n0; select * from dp_mm order by key; @@ -69,35 +69,35 @@ drop table dp_mm; create table union_mm(id int) tblproperties ("transactional"="true", "transactional_properties"="insert_only"); insert into table union_mm select temps.p from ( -select key as p from intermediate +select key as p from intermediate_n0 union all -select key + 1 as p from intermediate ) temps; +select key + 1 as p from intermediate_n0 ) temps; select * from union_mm order by id; insert into table union_mm select p from ( -select key + 1 as p from intermediate +select key + 1 as p from intermediate_n0 union all -select key from intermediate +select key from intermediate_n0 ) tab group by p union all -select key + 2 as p from intermediate; +select key + 2 as p from intermediate_n0; select * from union_mm order by id; insert into table union_mm SELECT p FROM ( - SELECT key + 1 as p FROM intermediate + SELECT key + 1 as p FROM intermediate_n0 UNION ALL SELECT key as p FROM ( SELECT distinct key FROM ( SELECT key FROM ( - SELECT key + 2 as key FROM intermediate + SELECT key + 2 as key FROM intermediate_n0 UNION ALL - SELECT key FROM intermediate + SELECT key FROM intermediate_n0 )t1 group by key)t2 )t3 @@ -112,9 +112,9 @@ drop table union_mm; create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only"); insert into table partunion_mm partition(key) select temps.* from ( -select key as p, key from intermediate +select key as p, key from intermediate_n0 union all -select key + 1 as p, key + 1 from intermediate ) temps; +select key + 1 as p, key + 1 from intermediate_n0 ) temps; select * from partunion_mm order by id; drop table partunion_mm; @@ -127,7 +127,7 @@ create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1), stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only"); insert into table skew_mm -select key, key, key from intermediate; +select key, key, key from intermediate_n0; select * from skew_mm order by k2, k1, k4; drop table skew_mm; @@ -137,9 +137,9 @@ create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only"); insert into table skew_dp_union_mm partition (k3) -select key as i, key as j, key as k, key as l from intermediate +select key as i, key as j, key as k, key as l from intermediate_n0 union all -select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate; +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate_n0; select * from skew_dp_union_mm order by k2, k1, k4; @@ -155,11 +155,11 @@ set hive.merge.mapredfiles=true; create 
table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only"); -insert into table merge0_mm select key from intermediate; +insert into table merge0_mm select key from intermediate_n0; select * from merge0_mm; set tez.grouping.split-count=1; -insert into table merge0_mm select key from intermediate; +insert into table merge0_mm select key from intermediate_n0; set tez.grouping.split-count=0; select * from merge0_mm; @@ -168,11 +168,11 @@ drop table merge0_mm; create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only"); -insert into table merge2_mm select key from intermediate; +insert into table merge2_mm select key from intermediate_n0; select * from merge2_mm; set tez.grouping.split-count=1; -insert into table merge2_mm select key from intermediate; +insert into table merge2_mm select key from intermediate_n0; set tez.grouping.split-count=0; select * from merge2_mm; @@ -181,11 +181,11 @@ drop table merge2_mm; create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only"); -insert into table merge1_mm partition (key) select key, key from intermediate; +insert into table merge1_mm partition (key) select key, key from intermediate_n0; select * from merge1_mm order by id, key; set tez.grouping.split-count=1; -insert into table merge1_mm partition (key) select key, key from intermediate; +insert into table merge1_mm partition (key) select key, key from intermediate_n0; set tez.grouping.split-count=0; select * from merge1_mm order by id, key; @@ -199,13 +199,13 @@ set hive.merge.mapredfiles=false; drop table ctas0_mm; -create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate; +create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate_n0; select * from ctas0_mm; drop table ctas0_mm; drop table ctas1_mm; create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as - select * from intermediate union all select * from intermediate; + select * from intermediate_n0 union all select * from intermediate_n0; select * from ctas1_mm; drop table ctas1_mm; @@ -215,7 +215,7 @@ drop table multi0_2_mm; create table multi0_1_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only"); create table multi0_2_mm (key int, key2 int) tblproperties("transactional"="true", "transactional_properties"="insert_only"); -from intermediate +from intermediate_n0 insert overwrite table multi0_1_mm select key, p insert overwrite table multi0_2_mm select p, key; @@ -226,7 +226,7 @@ set hive.merge.mapredfiles=true; set hive.merge.sparkfiles=true; set hive.merge.tezfiles=true; -from intermediate +from intermediate_n0 insert into table multi0_1_mm select p, key insert overwrite table multi0_2_mm select key, p; @@ -243,25 +243,25 @@ drop table multi0_2_mm; drop table multi1_mm; create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only"); -from intermediate +from intermediate_n0 insert into table multi1_mm partition(p=1) select p, key insert into table multi1_mm partition(p=2) select key, p; select * from multi1_mm order by key, key2, p; -from intermediate +from intermediate_n0 insert into table multi1_mm partition(p=2) 
select p, key insert overwrite table multi1_mm partition(p=1) select key, p; select * from multi1_mm order by key, key2, p; -from intermediate +from intermediate_n0 insert into table multi1_mm partition(p) select p, key, p insert into table multi1_mm partition(p=1) select key, p; select key, key2, p from multi1_mm order by key, key2, p; -from intermediate +from intermediate_n0 insert into table multi1_mm partition(p) select p, key, 1 insert into table multi1_mm partition(p=1) select key, p; @@ -277,11 +277,11 @@ set hive.stats.autogather=true; drop table stats_mm; create table stats_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only"); ---insert overwrite table stats_mm select key from intermediate; -insert into table stats_mm select key from intermediate; +--insert overwrite table stats_mm select key from intermediate_n0; +insert into table stats_mm select key from intermediate_n0; desc formatted stats_mm; -insert into table stats_mm select key from intermediate; +insert into table stats_mm select key from intermediate_n0; desc formatted stats_mm; drop table stats_mm; @@ -321,12 +321,12 @@ set hive.auto.convert.join=true; DROP TABLE IF EXISTS temp1; CREATE TEMPORARY TABLE temp1 (a int) TBLPROPERTIES ("transactional"="true", "transactional_properties"="insert_only"); -INSERT INTO temp1 SELECT key FROM intermediate; +INSERT INTO temp1 SELECT key FROM intermediate_n0; DESC EXTENDED temp1; SELECT * FROM temp1; -drop table intermediate; +drop table intermediate_n0; diff --git a/ql/src/test/queries/clientpositive/mm_buckets.q b/ql/src/test/queries/clientpositive/mm_buckets.q index 3e91cdd713..12dc4fe3dd 100644 --- a/ql/src/test/queries/clientpositive/mm_buckets.q +++ b/ql/src/test/queries/clientpositive/mm_buckets.q @@ -12,11 +12,11 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -- Bucketing tests are slow and some tablesample ones don't work w/o MM -- Force multiple writers when reading -drop table intermediate; -create table intermediate(key int) partitioned by (p int) stored as orc; -insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; -insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; +drop table intermediate_n2; +create table intermediate_n2(key int) partitioned by (p int) stored as orc; +insert into table intermediate_n2 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate_n2 partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate_n2 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; @@ -24,11 +24,11 @@ drop table bucket0_mm; create table bucket0_mm(key int, id int) clustered by (key) into 2 buckets tblproperties("transactional"="true", "transactional_properties"="insert_only"); -insert into table bucket0_mm select key, key from intermediate; +insert into table bucket0_mm select key, key from intermediate_n2; select * from bucket0_mm order by key, id; select * from bucket0_mm tablesample (bucket 1 out of 2) s; select * from bucket0_mm tablesample (bucket 2 out of 2) s; -insert into table bucket0_mm select key, key from intermediate; +insert into table bucket0_mm select key, key from 
intermediate_n2; select * from bucket0_mm order by key, id; select * from bucket0_mm tablesample (bucket 1 out of 2) s; select * from bucket0_mm tablesample (bucket 2 out of 2) s; @@ -40,9 +40,9 @@ create table bucket1_mm(key int, id int) partitioned by (key2 int) clustered by (key) sorted by (key) into 2 buckets tblproperties("transactional"="true", "transactional_properties"="insert_only"); insert into table bucket1_mm partition (key2) -select key + 1, key, key - 1 from intermediate +select key + 1, key, key - 1 from intermediate_n2 union all -select key - 1, key, key + 1 from intermediate; +select key - 1, key, key + 1 from intermediate_n2; select * from bucket1_mm order by key, id; select * from bucket1_mm tablesample (bucket 1 out of 2) s order by key, id; select * from bucket1_mm tablesample (bucket 2 out of 2) s order by key, id; @@ -54,14 +54,14 @@ drop table bucket2_mm; create table bucket2_mm(key int, id int) clustered by (key) into 10 buckets tblproperties("transactional"="true", "transactional_properties"="insert_only"); -insert into table bucket2_mm select key, key from intermediate where key == 0; +insert into table bucket2_mm select key, key from intermediate_n2 where key == 0; select * from bucket2_mm order by key, id; select * from bucket2_mm tablesample (bucket 1 out of 10) s order by key, id; select * from bucket2_mm tablesample (bucket 4 out of 10) s order by key, id; -insert into table bucket2_mm select key, key from intermediate where key in (0, 103); +insert into table bucket2_mm select key, key from intermediate_n2 where key in (0, 103); select * from bucket2_mm; select * from bucket2_mm tablesample (bucket 1 out of 10) s order by key, id; select * from bucket2_mm tablesample (bucket 4 out of 10) s order by key, id; drop table bucket2_mm; -drop table intermediate; \ No newline at end of file +drop table intermediate_n2; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/mm_cttas.q b/ql/src/test/queries/clientpositive/mm_cttas.q index da8b84df50..8f1274c610 100644 --- a/ql/src/test/queries/clientpositive/mm_cttas.q +++ b/ql/src/test/queries/clientpositive/mm_cttas.q @@ -3,19 +3,19 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -drop table intermediate; -create table intermediate(key int) partitioned by (p int) stored as orc; -insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; -insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; +drop table intermediate_n1; +create table intermediate_n1(key int) partitioned by (p int) stored as orc; +insert into table intermediate_n1 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate_n1 partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate_n1 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; drop table cttas1_mm; -create temporary table cttas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate; +create temporary table cttas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate_n1; 
select * from cttas1_mm; drop table cttas1_mm; -drop table intermediate; +drop table intermediate_n1; diff --git a/ql/src/test/queries/clientpositive/mm_default.q b/ql/src/test/queries/clientpositive/mm_default.q index 0fa46a96f2..8e34bf3a0b 100644 --- a/ql/src/test/queries/clientpositive/mm_default.q +++ b/ql/src/test/queries/clientpositive/mm_default.q @@ -14,10 +14,10 @@ create table acid0 (key string) stored as ORC tblproperties("transactional"="tr set hive.create.as.insert.only=true; create table mm1 like non_mm0; create table mm2 like mm0; -create table acid1 like acid0; +create table acid1_n0 like acid0; create table mm3 as select key from src limit 1; create table mm4 (key string); -create table acid2 (key string) stored as ORC tblproperties("transactional"="true"); +create table acid2_n0 (key string) stored as ORC tblproperties("transactional"="true"); create table non_mm1 tblproperties("transactional"="false") as select key from src limit 1; @@ -28,8 +28,8 @@ desc formatted mm2; desc formatted mm3; desc formatted mm4; desc formatted non_mm1; -desc formatted acid1; -desc formatted acid2; +desc formatted acid1_n0; +desc formatted acid2_n0; drop table non_mm0; @@ -40,8 +40,8 @@ drop table mm2; drop table mm3; drop table mm4; drop table acid0; -drop table acid1; -drop table acid2; +drop table acid1_n0; +drop table acid2_n0; diff --git a/ql/src/test/queries/clientpositive/mm_exim.q b/ql/src/test/queries/clientpositive/mm_exim.q index a2b6e08603..9870bf4ed8 100644 --- a/ql/src/test/queries/clientpositive/mm_exim.q +++ b/ql/src/test/queries/clientpositive/mm_exim.q @@ -10,27 +10,27 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -drop table intermediate; -create table intermediate(key int) partitioned by (p int) stored as orc tblproperties("transactional"="false"); -insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; -insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; +drop table intermediate_n4; +create table intermediate_n4(key int) partitioned by (p int) stored as orc tblproperties("transactional"="false"); +insert into table intermediate_n4 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate_n4 partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate_n4 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; drop table intermediate_nonpart; drop table intermmediate_part; drop table intermmediate_nonpart; create table intermediate_nonpart(key int, p int) tblproperties("transactional"="false"); -insert into intermediate_nonpart select * from intermediate; +insert into intermediate_nonpart select * from intermediate_n4; create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only"); -insert into intermmediate_nonpart select * from intermediate; +insert into intermmediate_nonpart select * from intermediate_n4; create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only"); -insert into table intermmediate partition(p) select key, p from intermediate; 
+insert into table intermmediate partition(p) select key, p from intermediate_n4; set hive.exim.test.mode=true; export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'; export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'; -export table intermediate to 'ql/test/data/exports/intermediate_part'; +export table intermediate_n4 to 'ql/test/data/exports/intermediate_part'; export table intermmediate to 'ql/test/data/exports/intermmediate_part'; drop table intermediate_nonpart; diff --git a/ql/src/test/queries/clientpositive/msck_repair_0.q b/ql/src/test/queries/clientpositive/msck_repair_0.q index cb291fee41..aeb4820af5 100644 --- a/ql/src/test/queries/clientpositive/msck_repair_0.q +++ b/ql/src/test/queries/clientpositive/msck_repair_0.q @@ -1,36 +1,36 @@ set hive.msck.repair.batch.size=1; set hive.mv.files.thread=0; -DROP TABLE IF EXISTS repairtable; +DROP TABLE IF EXISTS repairtable_n5; -CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); +CREATE TABLE repairtable_n5(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); -MSCK TABLE repairtable; +MSCK TABLE repairtable_n5; -show partitions repairtable; +show partitions repairtable_n5; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=c/p2=a/p3=b; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=c/p2=a/p3=b/datafile; -MSCK TABLE default.repairtable; +MSCK TABLE default.repairtable_n5; -show partitions default.repairtable; +show partitions default.repairtable_n5; -MSCK REPAIR TABLE default.repairtable; +MSCK REPAIR TABLE default.repairtable_n5; -show partitions default.repairtable; +show partitions default.repairtable_n5; -MSCK TABLE repairtable; +MSCK TABLE repairtable_n5; -show partitions repairtable; +show partitions repairtable_n5; set hive.mapred.mode=strict; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=e/p2=f/p3=g; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=e/p2=f/p3=g/datafile; -MSCK REPAIR TABLE default.repairtable; +MSCK REPAIR TABLE default.repairtable_n5; -show partitions default.repairtable; +show partitions default.repairtable_n5; -DROP TABLE default.repairtable; +DROP TABLE default.repairtable_n5; diff --git a/ql/src/test/queries/clientpositive/msck_repair_2.q b/ql/src/test/queries/clientpositive/msck_repair_2.q index 77785e9503..be745b2d60 100644 --- a/ql/src/test/queries/clientpositive/msck_repair_2.q +++ b/ql/src/test/queries/clientpositive/msck_repair_2.q @@ -1,25 +1,25 @@ set hive.msck.repair.batch.size=1; set hive.msck.path.validation=skip; -DROP TABLE IF EXISTS repairtable; +DROP TABLE IF EXISTS repairtable_n2; -CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); +CREATE TABLE repairtable_n2(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); -MSCK TABLE repairtable; +MSCK TABLE repairtable_n2; -show partitions repairtable; +show partitions repairtable_n2; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=c/p2=a/p3=b; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=c/p2=a/p3=b/datafile; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=c/datafile; -MSCK TABLE default.repairtable; -show partitions repairtable; +MSCK TABLE default.repairtable_n2; +show partitions repairtable_n2; -MSCK REPAIR TABLE default.repairtable; -show partitions repairtable; +MSCK REPAIR TABLE default.repairtable_n2; +show partitions repairtable_n2; -MSCK TABLE repairtable; -show partitions repairtable; +MSCK TABLE repairtable_n2; +show partitions 
repairtable_n2; -DROP TABLE default.repairtable; +DROP TABLE default.repairtable_n2; diff --git a/ql/src/test/queries/clientpositive/msck_repair_3.q b/ql/src/test/queries/clientpositive/msck_repair_3.q index f42443f547..140a6904dd 100644 --- a/ql/src/test/queries/clientpositive/msck_repair_3.q +++ b/ql/src/test/queries/clientpositive/msck_repair_3.q @@ -1,21 +1,21 @@ set hive.msck.repair.batch.size=1; -DROP TABLE IF EXISTS repairtable; +DROP TABLE IF EXISTS repairtable_n3; -CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); +CREATE TABLE repairtable_n3(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); -MSCK TABLE repairtable; -show partitions repairtable; +MSCK TABLE repairtable_n3; +show partitions repairtable_n3; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=c/p2=a/p3=b; -MSCK TABLE default.repairtable; -show partitions repairtable; +MSCK TABLE default.repairtable_n3; +show partitions repairtable_n3; -MSCK REPAIR TABLE default.repairtable; -show partitions repairtable; +MSCK REPAIR TABLE default.repairtable_n3; +show partitions repairtable_n3; -MSCK TABLE repairtable; -show partitions repairtable; +MSCK TABLE repairtable_n3; +show partitions repairtable_n3; -DROP TABLE default.repairtable; +DROP TABLE default.repairtable_n3; diff --git a/ql/src/test/queries/clientpositive/msck_repair_batchsize.q b/ql/src/test/queries/clientpositive/msck_repair_batchsize.q index a44c00eea9..5a7afcca5b 100644 --- a/ql/src/test/queries/clientpositive/msck_repair_batchsize.q +++ b/ql/src/test/queries/clientpositive/msck_repair_batchsize.q @@ -1,10 +1,10 @@ set hive.msck.repair.batch.size=2; -DROP TABLE IF EXISTS repairtable; +DROP TABLE IF EXISTS repairtable_n0; -CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); +CREATE TABLE repairtable_n0(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); -MSCK TABLE repairtable; +MSCK TABLE repairtable_n0; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=a/p2=a; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=b/p2=a; @@ -13,24 +13,24 @@ dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=a/p2=a/datafile; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=b/p2=a/datafile; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=c/p2=a/datafile; -MSCK TABLE default.repairtable; -show partitions default.repairtable; +MSCK TABLE default.repairtable_n0; +show partitions default.repairtable_n0; -MSCK REPAIR TABLE default.repairtable; -show partitions default.repairtable; +MSCK REPAIR TABLE default.repairtable_n0; +show partitions default.repairtable_n0; -MSCK TABLE repairtable; -show partitions repairtable; +MSCK TABLE repairtable_n0; +show partitions repairtable_n0; -DROP TABLE default.repairtable; +DROP TABLE default.repairtable_n0; dfs ${system:test.dfs.mkdir} -p ${system:test.tmp.dir}/apps/hive/warehouse/test.db/repairtable/p1=c/p2=a/p3=b; -CREATE TABLE `repairtable`( `col` string) PARTITIONED BY ( `p1` string, `p2` string) location '${system:test.tmp.dir}/apps/hive/warehouse/test.db/repairtable/'; +CREATE TABLE `repairtable_n0`( `col` string) PARTITIONED BY ( `p1` string, `p2` string) location '${system:test.tmp.dir}/apps/hive/warehouse/test.db/repairtable/'; dfs -touchz ${system:test.tmp.dir}/apps/hive/warehouse/test.db/repairtable/p1=c/p2=a/p3=b/datafile; set hive.mv.files.thread=1; -MSCK TABLE repairtable; -show partitions repairtable; +MSCK TABLE repairtable_n0; +show partitions repairtable_n0; -DROP TABLE default.repairtable; +DROP 
TABLE default.repairtable_n0; diff --git a/ql/src/test/queries/clientpositive/msck_repair_drop.q b/ql/src/test/queries/clientpositive/msck_repair_drop.q index bc14d98172..9923fb50cb 100644 --- a/ql/src/test/queries/clientpositive/msck_repair_drop.q +++ b/ql/src/test/queries/clientpositive/msck_repair_drop.q @@ -1,9 +1,9 @@ set hive.mv.files.thread=0; -DROP TABLE IF EXISTS repairtable; +DROP TABLE IF EXISTS repairtable_n1; -CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); +CREATE TABLE repairtable_n1(col STRING) PARTITIONED BY (p1 STRING, p2 STRING); --- repairtable will have partitions created with part keys p1=1, p1=2, p1=3, p1=4 and p1=5 +-- repairtable_n1 will have partitions created with part keys p1=1, p1=2, p1=3, p1=4 and p1=5 -- p1=2 will be used to test drop in 3 tests -- 1. each partition is dropped individually: set hive.msck.repair.batch.size=1; -- 2. partition are dropped in groups of 3: set hive.msck.repair.batch.size=3; @@ -37,20 +37,20 @@ dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=2/p2=29/p3=291/datafile; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=2/p2=210/p3=2101; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=2/p2=210/p3=2101/datafile; -MSCK TABLE default.repairtable; -MSCK REPAIR TABLE default.repairtable; +MSCK TABLE default.repairtable_n1; +MSCK REPAIR TABLE default.repairtable_n1; -- Now all 12 partitions are in -show partitions default.repairtable; +show partitions default.repairtable_n1; -- Remove all p1=2 partitions from file system dfs -rmr ${system:test.warehouse.dir}/repairtable/p1=2; -- test 1: each partition is dropped individually set hive.msck.repair.batch.size=1; -MSCK TABLE default.repairtable DROP PARTITIONS; -MSCK REPAIR TABLE default.repairtable DROP PARTITIONS; -show partitions default.repairtable; +MSCK TABLE default.repairtable_n1 DROP PARTITIONS; +MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS; +show partitions default.repairtable_n1; -- Recreate p1=2 partitions dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=2/p2=21/p3=211; @@ -74,20 +74,20 @@ dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=2/p2=29/p3=291/datafile; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=2/p2=210/p3=2101; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=2/p2=210/p3=2101/datafile; -MSCK TABLE default.repairtable; -MSCK REPAIR TABLE default.repairtable; +MSCK TABLE default.repairtable_n1; +MSCK REPAIR TABLE default.repairtable_n1; -- Now all 12 partitions are in -show partitions default.repairtable; +show partitions default.repairtable_n1; -- Remove all p1=2 partitions from file system dfs -rmr ${system:test.warehouse.dir}/repairtable/p1=2; -- test 2: partition are dropped in groups of 3 set hive.msck.repair.batch.size=3; -MSCK TABLE default.repairtable DROP PARTITIONS; -MSCK REPAIR TABLE default.repairtable DROP PARTITIONS; -show partitions default.repairtable; +MSCK TABLE default.repairtable_n1 DROP PARTITIONS; +MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS; +show partitions default.repairtable_n1; -- Recreate p1=2 partitions dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=2/p2=21/p3=211; @@ -111,20 +111,20 @@ dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=2/p2=29/p3=291/datafile; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=2/p2=210/p3=2101; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=2/p2=210/p3=2101/datafile; -MSCK TABLE 
default.repairtable; -MSCK REPAIR TABLE default.repairtable; +MSCK TABLE default.repairtable_n1; +MSCK REPAIR TABLE default.repairtable_n1; -- Now all 12 partitions are in -show partitions default.repairtable; +show partitions default.repairtable_n1; -- Remove all p1=2 partitions from file system dfs -rmr ${system:test.warehouse.dir}/repairtable/p1=2; -- test 3. all partitions are dropped in 1 shot set hive.msck.repair.batch.size=0; -MSCK TABLE default.repairtable DROP PARTITIONS; -MSCK REPAIR TABLE default.repairtable DROP PARTITIONS; -show partitions default.repairtable; +MSCK TABLE default.repairtable_n1 DROP PARTITIONS; +MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS; +show partitions default.repairtable_n1; -- test add parition keyword: begin dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=3/p2=31/p3=311; @@ -132,9 +132,9 @@ dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=3/p2=31/p3=311/datafile; dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=3/p2=32/p3=321; dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=3/p2=32/p3=321/datafile; -MSCK TABLE default.repairtable; -MSCK REPAIR TABLE default.repairtable; -show partitions default.repairtable; +MSCK TABLE default.repairtable_n1; +MSCK REPAIR TABLE default.repairtable_n1; +show partitions default.repairtable_n1; -- Create p1=4 in filesystem dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=4/p2=41/p3=411; @@ -148,9 +148,9 @@ dfs -rmr ${system:test.warehouse.dir}/repairtable/p1=3; -- Status: p1=3 dropped from filesystem, but exists in metastore -- p1=4 exists in filesystem but not in metastore -- test add partition: only brings in p1=4 and doesn't remove p1=3 -MSCK TABLE default.repairtable ADD PARTITIONS; -MSCK REPAIR TABLE default.repairtable ADD PARTITIONS; -show partitions default.repairtable; +MSCK TABLE default.repairtable_n1 ADD PARTITIONS; +MSCK REPAIR TABLE default.repairtable_n1 ADD PARTITIONS; +show partitions default.repairtable_n1; -- test add partition keyword: end -- test drop partition keyword: begin @@ -162,9 +162,9 @@ dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=5/p2=52/p3=521/datafile; -- Status: p1=3 removed from filesystem, but exists in metastore (as part of add test) -- p1=5 exists in filesystem but not in metastore -- test drop partition: only removes p1=3 from metastore but doesn't update metadata for p1=5 -MSCK TABLE default.repairtable DROP PARTITIONS; -MSCK REPAIR TABLE default.repairtable DROP PARTITIONS; -show partitions default.repairtable; +MSCK TABLE default.repairtable_n1 DROP PARTITIONS; +MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS; +show partitions default.repairtable_n1; -- test drop partition keyword: end -- test sync partition keyword: begin @@ -174,7 +174,7 @@ dfs -rmr ${system:test.warehouse.dir}/repairtable/p1=4; -- Status: p1=4 dropped from filesystem, but exists in metastore -- p1=5 exists in filesystem but not in metastore (as part of drop test) -- test sync partition: removes p1=4 from metastore and updates metadata for p1=5 -MSCK TABLE default.repairtable SYNC PARTITIONS; -MSCK REPAIR TABLE default.repairtable SYNC PARTITIONS; -show partitions default.repairtable; +MSCK TABLE default.repairtable_n1 SYNC PARTITIONS; +MSCK REPAIR TABLE default.repairtable_n1 SYNC PARTITIONS; +show partitions default.repairtable_n1; -- test sync partition keyword: end diff --git a/ql/src/test/queries/clientpositive/multiMapJoin2.q b/ql/src/test/queries/clientpositive/multiMapJoin2.q index 
efaadcb597..166cb09271 100644 --- a/ql/src/test/queries/clientpositive/multiMapJoin2.q +++ b/ql/src/test/queries/clientpositive/multiMapJoin2.q @@ -179,18 +179,18 @@ GROUP BY tmp1.key ORDER BY key, cnt; -- Check if we can correctly handle partitioned table. -CREATE TABLE part_table(key string, value string) PARTITIONED BY (partitionId int); -INSERT OVERWRITE TABLE part_table PARTITION (partitionId=1) +CREATE TABLE part_table_n0(key string, value string) PARTITIONED BY (partitionId int); +INSERT OVERWRITE TABLE part_table_n0 PARTITION (partitionId=1) SELECT key, value FROM src ORDER BY key, value LIMIT 100; -INSERT OVERWRITE TABLE part_table PARTITION (partitionId=2) +INSERT OVERWRITE TABLE part_table_n0 PARTITION (partitionId=2) SELECT key, value FROM src1 ORDER BY key, value; EXPLAIN SELECT count(*) -FROM part_table x JOIN src1 y ON (x.key = y.key); +FROM part_table_n0 x JOIN src1 y ON (x.key = y.key); SELECT count(*) -FROM part_table x JOIN src1 y ON (x.key = y.key); +FROM part_table_n0 x JOIN src1 y ON (x.key = y.key); set hive.auto.convert.join.noconditionaltask.size=10000000; set hive.optimize.correlation=false; diff --git a/ql/src/test/queries/clientpositive/multi_column_in.q b/ql/src/test/queries/clientpositive/multi_column_in.q index 288406c77c..44fb1d9519 100644 --- a/ql/src/test/queries/clientpositive/multi_column_in.q +++ b/ql/src/test/queries/clientpositive/multi_column_in.q @@ -1,72 +1,72 @@ set hive.mapred.mode=nonstrict; -drop table emps; +drop table emps_n1; -create table emps (empno int, deptno int, empname string); +create table emps_n1 (empno int, deptno int, empname string); -insert into table emps values (1,2,"11"),(1,2,"11"),(3,4,"33"),(1,3,"11"),(2,5,"22"),(2,5,"22"); +insert into table emps_n1 values (1,2,"11"),(1,2,"11"),(3,4,"33"),(1,3,"11"),(2,5,"22"),(2,5,"22"); -select * from emps; +select * from emps_n1; -select * from emps where (int(empno+deptno/2), int(deptno/3)) in ((2,0),(3,2)); +select * from emps_n1 where (int(empno+deptno/2), int(deptno/3)) in ((2,0),(3,2)); -select * from emps where (int(empno+deptno/2), int(deptno/3)) not in ((2,0),(3,2)); +select * from emps_n1 where (int(empno+deptno/2), int(deptno/3)) not in ((2,0),(3,2)); -select * from emps where (empno,deptno) in ((1,2),(3,2)); +select * from emps_n1 where (empno,deptno) in ((1,2),(3,2)); -select * from emps where (empno,deptno) not in ((1,2),(3,2)); +select * from emps_n1 where (empno,deptno) not in ((1,2),(3,2)); -select * from emps where (empno,deptno) in ((1,2),(1,3)); +select * from emps_n1 where (empno,deptno) in ((1,2),(1,3)); -select * from emps where (empno,deptno) not in ((1,2),(1,3)); +select * from emps_n1 where (empno,deptno) not in ((1,2),(1,3)); explain -select * from emps where (empno+1,deptno) in ((1,2),(3,2)); +select * from emps_n1 where (empno+1,deptno) in ((1,2),(3,2)); explain -select * from emps where (empno+1,deptno) not in ((1,2),(3,2)); +select * from emps_n1 where (empno+1,deptno) not in ((1,2),(3,2)); -select * from emps where empno in (1,2); +select * from emps_n1 where empno in (1,2); -select * from emps where empno in (1,2) and deptno > 2; +select * from emps_n1 where empno in (1,2) and deptno > 2; -select * from emps where (empno) in (1,2) and deptno > 2; +select * from emps_n1 where (empno) in (1,2) and deptno > 2; -select * from emps where ((empno) in (1,2) and deptno > 2); +select * from emps_n1 where ((empno) in (1,2) and deptno > 2); -explain select * from emps where ((empno*2)|1,deptno) in ((empno+1,2),(empno+2,2)); +explain select * from emps_n1 where 
((empno*2)|1,deptno) in ((empno+1,2),(empno+2,2)); -select * from emps where ((empno*2)|1,deptno) in ((empno+1,2),(empno+2,2)); +select * from emps_n1 where ((empno*2)|1,deptno) in ((empno+1,2),(empno+2,2)); -select (empno*2)|1,substr(empname,1,1) from emps; +select (empno*2)|1,substr(empname,1,1) from emps_n1; -select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+2,'2')); +select * from emps_n1 where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+2,'2')); -select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+1,'2'),(empno+2,'2')); +select * from emps_n1 where ((empno*2)|1,substr(empname,1,1)) not in ((empno+1,'2'),(empno+2,'2')); -select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')); +select * from emps_n1 where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')); -select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+1,'2'),(empno+3,'2')); +select * from emps_n1 where ((empno*2)|1,substr(empname,1,1)) not in ((empno+1,'2'),(empno+3,'2')); -select sum(empno), empname from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')) +select sum(empno), empname from emps_n1 where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')) group by empname; -select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')) +select * from emps_n1 where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')) union -select * from emps where (empno,deptno) in ((1,2),(3,2)); +select * from emps_n1 where (empno,deptno) in ((1,2),(3,2)); -drop view v; +drop view v_n2; -create view v as +create view v_n2 as select * from( -select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')) +select * from emps_n1 where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')) union -select * from emps where (empno,deptno) in ((1,2),(3,2)))subq order by empno desc; +select * from emps_n1 where (empno,deptno) in ((1,2),(3,2)))subq order by empno desc; -select * from v; +select * from v_n2; select subq.e1 from -(select (empno*2)|1 as e1, substr(empname,1,1) as n1 from emps)subq +(select (empno*2)|1 as e1, substr(empname,1,1) as n1 from emps_n1)subq join -(select empno as e2 from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')))subq2 +(select empno as e2 from emps_n1 where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')))subq2 on e1=e2+1; diff --git a/ql/src/test/queries/clientpositive/multi_column_in_single.q b/ql/src/test/queries/clientpositive/multi_column_in_single.q index 7151fc4792..32a2167728 100644 --- a/ql/src/test/queries/clientpositive/multi_column_in_single.q +++ b/ql/src/test/queries/clientpositive/multi_column_in_single.q @@ -3,66 +3,66 @@ set hive.mapred.mode=nonstrict; select * from src where (key, value) in (('238','val_238')); -drop table emps; +drop table emps_n7; -create table emps (empno int, deptno int, empname string); +create table emps_n7 (empno int, deptno int, empname string); -insert into table emps values (1,2,"11"),(1,2,"11"),(3,4,"33"),(1,3,"11"),(2,5,"22"),(2,5,"22"); +insert into table emps_n7 values (1,2,"11"),(1,2,"11"),(3,4,"33"),(1,3,"11"),(2,5,"22"),(2,5,"22"); -select * from emps; +select * from emps_n7; -select * from emps where (int(empno+deptno/2), int(deptno/3)) in ((3,2)); +select * from emps_n7 where (int(empno+deptno/2), int(deptno/3)) in ((3,2)); -select * from emps where 
(int(empno+deptno/2), int(deptno/3)) not in ((3,2)); +select * from emps_n7 where (int(empno+deptno/2), int(deptno/3)) not in ((3,2)); -select * from emps where (empno,deptno) in ((3,2)); +select * from emps_n7 where (empno,deptno) in ((3,2)); -select * from emps where (empno,deptno) not in ((3,2)); +select * from emps_n7 where (empno,deptno) not in ((3,2)); -select * from emps where (empno,deptno) in ((1,3)); +select * from emps_n7 where (empno,deptno) in ((1,3)); -select * from emps where (empno,deptno) not in ((1,3)); +select * from emps_n7 where (empno,deptno) not in ((1,3)); explain -select * from emps where (empno+1,deptno) in ((3,2)); +select * from emps_n7 where (empno+1,deptno) in ((3,2)); explain -select * from emps where (empno+1,deptno) not in ((3,2)); +select * from emps_n7 where (empno+1,deptno) not in ((3,2)); -explain select * from emps where ((empno*2)|1,deptno) in ((empno+2,2)); +explain select * from emps_n7 where ((empno*2)|1,deptno) in ((empno+2,2)); -select * from emps where ((empno*2)|1,deptno) in ((empno+2,2)); +select * from emps_n7 where ((empno*2)|1,deptno) in ((empno+2,2)); -select (empno*2)|1,substr(empname,1,1) from emps; +select (empno*2)|1,substr(empname,1,1) from emps_n7; -select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+2,'2')); +select * from emps_n7 where ((empno*2)|1,substr(empname,1,1)) in ((empno+2,'2')); -select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+2,'2')); +select * from emps_n7 where ((empno*2)|1,substr(empname,1,1)) not in ((empno+2,'2')); -select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')); +select * from emps_n7 where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')); -select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+3,'2')); +select * from emps_n7 where ((empno*2)|1,substr(empname,1,1)) not in ((empno+3,'2')); -select sum(empno), empname from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')) +select sum(empno), empname from emps_n7 where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')) group by empname; -select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')) +select * from emps_n7 where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')) union -select * from emps where (empno,deptno) in ((3,2)); +select * from emps_n7 where (empno,deptno) in ((3,2)); -drop view v; +drop view v_n11; -create view v as +create view v_n11 as select * from( -select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')) +select * from emps_n7 where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')) union -select * from emps where (empno,deptno) in ((3,2)))subq order by empno desc; +select * from emps_n7 where (empno,deptno) in ((3,2)))subq order by empno desc; -select * from v; +select * from v_n11; select subq.e1 from -(select (empno*2)|1 as e1, substr(empname,1,1) as n1 from emps)subq +(select (empno*2)|1 as e1, substr(empname,1,1) as n1 from emps_n7)subq join -(select empno as e2 from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')))subq2 +(select empno as e2 from emps_n7 where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')))subq2 on e1=e2+1; diff --git a/ql/src/test/queries/clientpositive/multi_count_distinct.q b/ql/src/test/queries/clientpositive/multi_count_distinct.q index 038be3ec9a..6a9cbc90f8 100644 --- a/ql/src/test/queries/clientpositive/multi_count_distinct.q +++ b/ql/src/test/queries/clientpositive/multi_count_distinct.q @@ -1,36 +1,36 @@ SET 
hive.vectorized.execution.enabled=false; set hive.mapred.mode=nonstrict; -drop table employee; +drop table employee_n1; -create table employee (department_id int, gender varchar(10), education_level int); +create table employee_n1 (department_id int, gender varchar(10), education_level int); -insert into employee values (1, 'M', 1),(1, 'M', 1),(2, 'F', 1),(1, 'F', 3),(1, 'M', 2),(4, 'M', 1),(2, 'F', 1),(2, 'F', 3),(3, 'M', 2); +insert into employee_n1 values (1, 'M', 1),(1, 'M', 1),(2, 'F', 1),(1, 'F', 3),(1, 'M', 2),(4, 'M', 1),(2, 'F', 1),(2, 'F', 3),(3, 'M', 2); -explain select count(distinct department_id), count(distinct gender), count(distinct education_level) from employee; +explain select count(distinct department_id), count(distinct gender), count(distinct education_level) from employee_n1; -select count(distinct department_id), count(distinct gender), count(distinct education_level) from employee; +select count(distinct department_id), count(distinct gender), count(distinct education_level) from employee_n1; -select count(distinct department_id), count(distinct gender), count(distinct education_level), count(distinct education_level) from employee; +select count(distinct department_id), count(distinct gender), count(distinct education_level), count(distinct education_level) from employee_n1; select count(distinct department_id), count(distinct gender), count(distinct education_level), -count(distinct education_level, department_id) from employee; +count(distinct education_level, department_id) from employee_n1; select count(distinct gender), count(distinct department_id), count(distinct gender), count(distinct education_level), -count(distinct education_level, department_id), count(distinct department_id, education_level) from employee; +count(distinct education_level, department_id), count(distinct department_id, education_level) from employee_n1; explain select count(distinct gender), count(distinct department_id), count(distinct gender), count(distinct education_level), -count(distinct education_level, department_id), count(distinct department_id, education_level), count(distinct department_id, education_level, gender) from employee; +count(distinct education_level, department_id), count(distinct department_id, education_level), count(distinct department_id, education_level, gender) from employee_n1; select count(distinct gender), count(distinct department_id), count(distinct gender), count(distinct education_level), -count(distinct education_level, department_id), count(distinct department_id, education_level), count(distinct department_id, education_level, gender) from employee; +count(distinct education_level, department_id), count(distinct department_id, education_level), count(distinct department_id, education_level, gender) from employee_n1; select count(case i when 3 then 1 else null end) as c0, count(case i when 5 then 1 else null end) as c1, count(case i when 6 then 1 else null end) as c2 from (select grouping__id as i, department_id, gender, -education_level from employee group by department_id, gender, education_level grouping sets +education_level from employee_n1 group by department_id, gender, education_level grouping sets (department_id, gender, education_level))subq; -select grouping__id as i, department_id, gender, education_level from employee +select grouping__id as i, department_id, gender, education_level from employee_n1 group by department_id, gender, education_level grouping sets (department_id, gender, education_level, (education_level, 
department_id)); diff --git a/ql/src/test/queries/clientpositive/multi_insert.q b/ql/src/test/queries/clientpositive/multi_insert.q index 9e33a84361..634bdb49f2 100644 --- a/ql/src/test/queries/clientpositive/multi_insert.q +++ b/ql/src/test/queries/clientpositive/multi_insert.q @@ -1,23 +1,23 @@ --! qt:dataset:src -- SORT_QUERY_RESULTS -create table src_multi1 like src; -create table src_multi2 like src; +create table src_multi1_n5 like src; +create table src_multi2_n6 like src; set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; set hive.stats.dbclass=fs; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=true; @@ -25,45 +25,45 @@ set hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=false; set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; @@ -72,15 +72,15 @@ set 
hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n5 select * where key < 10 group by key, value +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20 group by key, value; from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n5 select * where key < 10 group by key, value +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20 group by key, value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=false; @@ -88,30 +88,30 @@ set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n5 select * where key < 10 group by key, value +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20 group by key, value; from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n5 select * where key < 10 group by key, value +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20 group by key, value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n5 select * where key < 10 group by key, value +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20 group by key, value; from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n5 select * where key < 10 group by key, value +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20 group by key, value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=true; @@ -119,15 +119,15 @@ set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n5 select * where key < 10 group by key, value +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20 group by key, value; from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n5 select * where key < 10 group by key, value +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20 group by key, value; -select * from src_multi1; -select * from 
src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; @@ -137,60 +137,60 @@ set hive.merge.mapredfiles=false; explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=false; explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=false; set hive.merge.mapredfiles=true; explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n5 select * where key < 10 +insert overwrite table src_multi2_n6 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n5; +select * from src_multi2_n6; diff --git a/ql/src/test/queries/clientpositive/multi_insert_gby.q b/ql/src/test/queries/clientpositive/multi_insert_gby.q index 
7c9e8dee9c..0ec2b35151 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_gby.q +++ b/ql/src/test/queries/clientpositive/multi_insert_gby.q @@ -2,35 +2,35 @@ -- SORT_QUERY_RESULTS --HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables -create table e1 (key string, count int); -create table e2 (key string, count int); +create table e1_n0 (key string, count int); +create table e2_n1 (key string, count int); explain FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) WHERE key>500 GROUP BY key; FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) WHERE key>500 GROUP BY key; -select * from e1; -select * from e2; +select * from e1_n0; +select * from e2_n1; explain FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) GROUP BY key; FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) GROUP BY key; -select * from e1; -select * from e2; +select * from e1_n0; +select * from e2_n1; diff --git a/ql/src/test/queries/clientpositive/multi_insert_gby2.q b/ql/src/test/queries/clientpositive/multi_insert_gby2.q index a477f839c8..67077ba043 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_gby2.q +++ b/ql/src/test/queries/clientpositive/multi_insert_gby2.q @@ -2,20 +2,20 @@ set hive.mapred.mode=nonstrict; --HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables create table e1 (count int); -create table e2 (percentile double); +create table e2_n0 (percentile double); set hive.stats.dbclass=fs; explain FROM (select key, cast(key as double) as value from src order by key) a INSERT OVERWRITE TABLE e1 SELECT COUNT(*) -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n0 SELECT percentile_approx(value, 0.5); FROM (select key, cast(key as double) as value from src order by key) a INSERT OVERWRITE TABLE e1 SELECT COUNT(*) -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n0 SELECT percentile_approx(value, 0.5); select * from e1; -select * from e2; +select * from e2_n0; diff --git a/ql/src/test/queries/clientpositive/multi_insert_gby3.q b/ql/src/test/queries/clientpositive/multi_insert_gby3.q index 6bed476fff..de60fa7c59 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_gby3.q +++ b/ql/src/test/queries/clientpositive/multi_insert_gby3.q @@ -1,45 +1,45 @@ --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -create table e1 (key string, keyD double); -create table e2 (key string, keyD double, value string); +create table e1_n2 (key string, keyD double); +create table e2_n3 (key string, keyD double, value string); create table e3 (key string, keyD double); set hive.stats.dbclass=fs; explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value; explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key; FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value; -select * from e1; -select * from e2; +select * from e1_n2; +select * from e2_n3; FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key; -select * from e1; -select * from e2; +select * from e1_n2; +select * from e2_n3; explain from src -insert overwrite table e1 +insert overwrite table e1_n2 select key, count(distinct value) group by key insert overwrite table e3 select value, count(distinct key) group by value; @@ -47,9 +47,9 @@ select value, count(distinct key) group by value; explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value INSERT overwrite table e3 SELECT key, COUNT(distinct keyD) group by key, keyD, value; diff --git a/ql/src/test/queries/clientpositive/multi_insert_gby4.q b/ql/src/test/queries/clientpositive/multi_insert_gby4.q index d4858137d8..b89d6743a7 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_gby4.q +++ b/ql/src/test/queries/clientpositive/multi_insert_gby4.q @@ -1,27 +1,27 @@ --! 
qt:dataset:src -- SORT_QUERY_RESULTS -create table e1 (key string, count int); -create table e2 (key string, count int); -create table e3 (key string, count int); +create table e1_n4 (key string, count int); +create table e2_n5 (key string, count int); +create table e3_n0 (key string, count int); explain FROM (SELECT key, value FROM src) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n4 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n5 SELECT key, COUNT(*) WHERE key>500 GROUP BY key -INSERT OVERWRITE TABLE e3 +INSERT OVERWRITE TABLE e3_n0 SELECT key, COUNT(*) WHERE key>490 GROUP BY key; FROM (SELECT key, value FROM src) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n4 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n5 SELECT key, COUNT(*) WHERE key>500 GROUP BY key -INSERT OVERWRITE TABLE e3 +INSERT OVERWRITE TABLE e3_n0 SELECT key, COUNT(*) WHERE key>490 GROUP BY key; -select * from e1; -select * from e2; -select * from e3; +select * from e1_n4; +select * from e2_n5; +select * from e3_n0; diff --git a/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q b/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q index 481bdf2095..3442e8246b 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q +++ b/ql/src/test/queries/clientpositive/multi_insert_lateral_view.q @@ -2,7 +2,7 @@ set hive.stats.dbclass=fs; -- SORT_QUERY_RESULTS -create table src_10 as select * from src limit 10; +create table src_10_n0 as select * from src limit 10; create table src_lv1 (key string, value string); create table src_lv2 (key string, value string); @@ -14,11 +14,11 @@ create table src_lv3 (key string, value string); -- -LVF[6]-SEL[7]-LVJ[10]-SEL[13]-FS[14] -- -SEL[8]-UDTF[9]-LVJ[10] explain -from src_10 +from src_10_n0 insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C; -from src_10 +from src_10_n0 insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C; @@ -31,11 +31,11 @@ select * from src_lv2; -- -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22] -- -SEL[8]-UDTF[9]-LVJ[10] explain -from src_10 +from src_10_n0 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key; -from src_10 +from src_10_n0 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key; @@ -48,12 +48,12 @@ select * from src_lv2; -- -FIL[12]-SEL[13]-RS[14]-FOR[15]-FIL[16]-GBY[17]-SEL[18]-FS[19] -- -FIL[20]-GBY[21]-SEL[22]-FS[23] explain -from src_10 +from src_10_n0 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, count(value) where key > 200 group by key insert overwrite table src_lv3 select key, count(value) where key < 200 group by key; -from src_10 +from src_10_n0 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group 
by key insert overwrite table src_lv2 select key, count(value) where key > 200 group by key insert overwrite table src_lv3 select key, count(value) where key < 200 group by key; @@ -70,12 +70,12 @@ select * from src_lv3; -- -SEL[8]-UDTF[9]-LVJ[10] -- -SEL[23]-GBY[24]-RS[25]-GBY[26]-SEL[27]-FS[28] explain -from src_10 +from src_10_n0 insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C insert overwrite table src_lv3 select value, sum(distinct key) group by value; -from src_10 +from src_10_n0 insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C insert overwrite table src_lv3 select value, sum(distinct key) group by value; @@ -88,13 +88,13 @@ create table src_lv4 (key string, value string); -- Common distincts optimization works across non-lateral view queries, but not across lateral view multi inserts explain -from src_10 +from src_10_n0 insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, sum(distinct C) lateral view explode(array(key+3, key+4)) A as C group by key insert overwrite table src_lv3 select value, sum(distinct key) where key > 200 group by value insert overwrite table src_lv4 select value, sum(distinct key) where key < 200 group by value; -from src_10 +from src_10_n0 insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, sum(distinct C) lateral view explode(array(key+3, key+4)) A as C group by key insert overwrite table src_lv3 select value, sum(distinct key) where key > 200 group by value diff --git a/ql/src/test/queries/clientpositive/multi_insert_mixed.q b/ql/src/test/queries/clientpositive/multi_insert_mixed.q index a5c1e8738c..9c8fb2ec0d 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_mixed.q +++ b/ql/src/test/queries/clientpositive/multi_insert_mixed.q @@ -1,7 +1,7 @@ --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -create table src_multi1 like src; -create table src_multi2 like src; +create table src_multi1_n2 like src; +create table src_multi2_n3 like src; create table src_multi3 like src; set hive.stats.dbclass=fs; -- Testing the case where a map work contains both shuffling (ReduceSinkOperator) @@ -9,15 +9,15 @@ set hive.stats.dbclass=fs; explain from src -insert overwrite table src_multi1 select key, count(1) group by key order by key -insert overwrite table src_multi2 select value, count(1) group by value order by value +insert overwrite table src_multi1_n2 select key, count(1) group by key order by key +insert overwrite table src_multi2_n3 select value, count(1) group by value order by value insert overwrite table src_multi3 select * where key < 10; from src -insert overwrite table src_multi1 select key, count(1) group by key order by key -insert overwrite table src_multi2 select value, count(1) group by value order by value +insert overwrite table src_multi1_n2 select key, count(1) group by key order by key +insert overwrite table src_multi2_n3 select value, count(1) group by value order by value insert overwrite table src_multi3 select * where key < 10; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n2; +select * from src_multi2_n3; select * from src_multi3; diff --git a/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q b/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q index 670211c6aa..94211717b0 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q +++ b/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q @@ -3,23 +3,23 @@ set hive.multi.insert.move.tasks.share.dependencies=true; set hive.stats.dbclass=fs; -- SORT_QUERY_RESULTS -create table src_multi1 like src; -create table src_multi2 like src; +create table src_multi1_n4 like src; +create table src_multi2_n5 like src; set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=true; @@ -27,45 +27,45 @@ set hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=false; set 
hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; @@ -74,15 +74,15 @@ set hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value; from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=false; @@ -90,30 +90,30 @@ set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value; from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table 
src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value; from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=true; @@ -121,15 +121,15 @@ set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value; from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value; +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; @@ -139,60 +139,60 @@ set hive.merge.mapredfiles=false; explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=false; explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=false; set hive.merge.mapredfiles=true; explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite 
table src_multi2_n5 select * where key > 10 and key < 20; from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20; +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; @@ -333,19 +333,19 @@ set hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local; dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local; @@ -355,19 +355,19 @@ set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 
10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local; dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local; @@ -377,19 +377,19 @@ set hive.merge.mapredfiles=false; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local; dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local; @@ -399,19 +399,19 @@ set hive.merge.mapredfiles=true; explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value; from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n4; +select * from src_multi2_n5; dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local; dfs -rmr 
${system:test.tmp.dir}/hive_test/multiins_local; diff --git a/ql/src/test/queries/clientpositive/multi_insert_partitioned.q b/ql/src/test/queries/clientpositive/multi_insert_partitioned.q index 0d78c72a61..d818613dfc 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_partitioned.q +++ b/ql/src/test/queries/clientpositive/multi_insert_partitioned.q @@ -4,46 +4,46 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; set hive.exec.dynamic.partition.mode=nonstrict; -drop table intermediate; +drop table intermediate_n3; -create table intermediate(key int) partitioned by (p int) stored as orc; -insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; -insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; +create table intermediate_n3(key int) partitioned by (p int) stored as orc; +insert into table intermediate_n3 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate_n3 partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate_n3 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; drop table multi_partitioned; create table multi_partitioned (key int, key2 int) partitioned by (p int); -from intermediate +from intermediate_n3 insert into table multi_partitioned partition(p=1) select p, key insert into table multi_partitioned partition(p=2) select key, p; select * from multi_partitioned order by key, key2, p; desc formatted multi_partitioned; -from intermediate +from intermediate_n3 insert overwrite table multi_partitioned partition(p=2) select p, key insert overwrite table multi_partitioned partition(p=1) select key, p; select * from multi_partitioned order by key, key2, p; desc formatted multi_partitioned; -from intermediate +from intermediate_n3 insert into table multi_partitioned partition(p=2) select p, key insert overwrite table multi_partitioned partition(p=1) select key, p; select * from multi_partitioned order by key, key2, p; desc formatted multi_partitioned; -from intermediate +from intermediate_n3 insert into table multi_partitioned partition(p) select p, key, p insert into table multi_partitioned partition(p=1) select key, p; select key, key2, p from multi_partitioned order by key, key2, p; desc formatted multi_partitioned; -from intermediate +from intermediate_n3 insert into table multi_partitioned partition(p) select p, key, 1 insert into table multi_partitioned partition(p=1) select key, p; @@ -52,6 +52,6 @@ desc formatted multi_partitioned; drop table multi_partitioned; -drop table intermediate; +drop table intermediate_n3; diff --git a/ql/src/test/queries/clientpositive/multi_insert_union_src.q b/ql/src/test/queries/clientpositive/multi_insert_union_src.q index ae4c8d4b60..4776e7ea92 100644 --- a/ql/src/test/queries/clientpositive/multi_insert_union_src.q +++ b/ql/src/test/queries/clientpositive/multi_insert_union_src.q @@ -1,23 +1,23 @@ --! qt:dataset:src1 --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -drop table if exists src2; -drop table if exists src_multi1; -drop table if exists src_multi2; +drop table if exists src2_n4; +drop table if exists src_multi1_n3; +drop table if exists src_multi2_n4; set hive.stats.dbclass=fs; -CREATE TABLE src2 as SELECT * FROM src; +CREATE TABLE src2_n4 as SELECT * FROM src; -create table src_multi1 like src; -create table src_multi2 like src; +create table src_multi1_n3 like src; +create table src_multi2_n4 like src; explain -from (select * from src1 where key < 10 union all select * from src2 where key > 100) s -insert overwrite table src_multi1 select key, value where key < 150 order by key -insert overwrite table src_multi2 select key, value where key > 400 order by value; +from (select * from src1 where key < 10 union all select * from src2_n4 where key > 100) s +insert overwrite table src_multi1_n3 select key, value where key < 150 order by key +insert overwrite table src_multi2_n4 select key, value where key > 400 order by value; -from (select * from src1 where key < 10 union all select * from src2 where key > 100) s -insert overwrite table src_multi1 select key, value where key < 150 order by key -insert overwrite table src_multi2 select key, value where key > 400 order by value; +from (select * from src1 where key < 10 union all select * from src2_n4 where key > 100) s +insert overwrite table src_multi1_n3 select key, value where key < 150 order by key +insert overwrite table src_multi2_n4 select key, value where key > 400 order by value; -select * from src_multi1; -select * from src_multi2; +select * from src_multi1_n3; +select * from src_multi2_n4; diff --git a/ql/src/test/queries/clientpositive/multigroupby_singlemr.q b/ql/src/test/queries/clientpositive/multigroupby_singlemr.q index a4e86ade47..a5f6fea093 100644 --- a/ql/src/test/queries/clientpositive/multigroupby_singlemr.q +++ b/ql/src/test/queries/clientpositive/multigroupby_singlemr.q @@ -1,33 +1,33 @@ -CREATE TABLE TBL(C1 INT, C2 INT, C3 INT, C4 INT); +CREATE TABLE TBL_n0(C1 INT, C2 INT, C3 INT, C4 INT); -CREATE TABLE DEST1(d1 INT, d2 INT) STORED AS TEXTFILE; -CREATE TABLE DEST2(d1 INT, d2 INT, d3 INT) STORED AS TEXTFILE; -CREATE TABLE DEST3(d1 INT, d2 INT, d3 INT, d4 INT) STORED AS TEXTFILE; +CREATE TABLE DEST1_n116(d1 INT, d2 INT) STORED AS TEXTFILE; +CREATE TABLE DEST2_n30(d1 INT, d2 INT, d3 INT) STORED AS TEXTFILE; +CREATE TABLE DEST3_n4(d1 INT, d2 INT, d3 INT, d4 INT) STORED AS TEXTFILE; CREATE TABLE DEST4(d1 INT, d2 INT, d3 INT, d4 INT) STORED AS TEXTFILE; EXPLAIN -FROM TBL -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2; +FROM TBL_n0 +INSERT OVERWRITE TABLE DEST1_n116 SELECT TBL_n0.C1, COUNT(TBL_n0.C2) GROUP BY TBL_n0.C1 +INSERT OVERWRITE TABLE DEST2_n30 SELECT TBL_n0.C1, TBL_n0.C2, COUNT(TBL_n0.C3) GROUP BY TBL_n0.C1, TBL_n0.C2; EXPLAIN -FROM TBL -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C2, TBL.C1; +FROM TBL_n0 +INSERT OVERWRITE TABLE DEST1_n116 SELECT TBL_n0.C1, COUNT(TBL_n0.C2) GROUP BY TBL_n0.C1 +INSERT OVERWRITE TABLE DEST2_n30 SELECT TBL_n0.C1, TBL_n0.C2, COUNT(TBL_n0.C3) GROUP BY TBL_n0.C2, TBL_n0.C1; EXPLAIN -FROM TBL -INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP 
BY TBL.C1, TBL.C2; +FROM TBL_n0 +INSERT OVERWRITE TABLE DEST3_n4 SELECT TBL_n0.C1, TBL_n0.C2, TBL_n0.C3, COUNT(TBL_n0.C4) GROUP BY TBL_n0.C1, TBL_n0.C2, TBL_n0.C3 +INSERT OVERWRITE TABLE DEST2_n30 SELECT TBL_n0.C1, TBL_n0.C2, COUNT(TBL_n0.C3) GROUP BY TBL_n0.C1, TBL_n0.C2; EXPLAIN -FROM TBL -INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3 -INSERT OVERWRITE TABLE DEST4 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C3, TBL.C2; +FROM TBL_n0 +INSERT OVERWRITE TABLE DEST3_n4 SELECT TBL_n0.C1, TBL_n0.C2, TBL_n0.C3, COUNT(TBL_n0.C4) GROUP BY TBL_n0.C1, TBL_n0.C2, TBL_n0.C3 +INSERT OVERWRITE TABLE DEST4 SELECT TBL_n0.C1, TBL_n0.C2, TBL_n0.C3, COUNT(TBL_n0.C4) GROUP BY TBL_n0.C1, TBL_n0.C3, TBL_n0.C2; EXPLAIN -FROM TBL -INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1; +FROM TBL_n0 +INSERT OVERWRITE TABLE DEST3_n4 SELECT TBL_n0.C1, TBL_n0.C2, TBL_n0.C3, COUNT(TBL_n0.C4) GROUP BY TBL_n0.C1, TBL_n0.C2, TBL_n0.C3 +INSERT OVERWRITE TABLE DEST2_n30 SELECT TBL_n0.C1, TBL_n0.C2, COUNT(TBL_n0.C3) GROUP BY TBL_n0.C1, TBL_n0.C2 +INSERT OVERWRITE TABLE DEST1_n116 SELECT TBL_n0.C1, COUNT(TBL_n0.C2) GROUP BY TBL_n0.C1; diff --git a/ql/src/test/queries/clientpositive/named_column_join.q b/ql/src/test/queries/clientpositive/named_column_join.q index 6de9325fba..7a0cc58ca5 100644 --- a/ql/src/test/queries/clientpositive/named_column_join.q +++ b/ql/src/test/queries/clientpositive/named_column_join.q @@ -1,52 +1,52 @@ set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -create table t (a int, b int); +create table t_n8 (a int, b int); -insert into t values (1,2),(2,1),(3,4),(4,3),(3,3),(null,null),(null,1),(2,null); +insert into t_n8 values (1,2),(2,1),(3,4),(4,3),(3,3),(null,null),(null,1),(2,null); -explain select * from t t1 join t t2 using (a); +explain select * from t_n8 t1 join t_n8 t2 using (a); -select * from t t1 join t t2 using (a); +select * from t_n8 t1 join t_n8 t2 using (a); -select * from t t1 join t t2 using (a,b); +select * from t_n8 t1 join t_n8 t2 using (a,b); -select t1.a,t2.b,t1.b,t2.a from t t1 join t t2 using (a); +select t1.a,t2.b,t1.b,t2.a from t_n8 t1 join t_n8 t2 using (a); -select * from t t1 left outer join t t2 using (a,b); +select * from t_n8 t1 left outer join t_n8 t2 using (a,b); -select t1.a,t1.b from t t1 right outer join t t2 on (t1.a=t2.a and t1.b=t2.b); +select t1.a,t1.b from t_n8 t1 right outer join t_n8 t2 on (t1.a=t2.a and t1.b=t2.b); -select * from t t1 right outer join t t2 using (a,b); +select * from t_n8 t1 right outer join t_n8 t2 using (a,b); -select * from t t1 inner join t t2 using (a,b); +select * from t_n8 t1 inner join t_n8 t2 using (a,b); -select * from t t1 left outer join t t2 using (b); +select * from t_n8 t1 left outer join t_n8 t2 using (b); -select * from t t1 right outer join t t2 using (b); +select * from t_n8 t1 right outer join t_n8 t2 using (b); -select * from t t1 inner join t t2 using (b); +select * from t_n8 t1 inner join t_n8 t2 using (b); -drop view v; +drop view v_n4; -create view v as select * from t t1 join t t2 using (a,b); +create view v_n4 as select * from t_n8 t1 join t_n8 t2 using (a,b); -desc formatted v; +desc formatted v_n4; -select * from v; +select * from v_n4; -drop view v; +drop view v_n4; -create view v as select * from t t1 right 
outer join t t2 using (b,a); +create view v_n4 as select * from t_n8 t1 right outer join t_n8 t2 using (b,a); -desc formatted v; +desc formatted v_n4; -select * from v; +select * from v_n4; -select * from (select t1.b b from t t1 inner join t t2 using (b)) t3 join t t4 using(b); +select * from (select t1.b b from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(b); -select * from (select t2.a a from t t1 inner join t t2 using (b)) t3 join t t4 using(a); +select * from (select t2.a a from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(a); -create table tt as select * from (select t2.a a from t t1 inner join t t2 using (b)) t3 join t t4 using(a); +create table tt_n0 as select * from (select t2.a a from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(a); -desc formatted tt; +desc formatted tt_n0; diff --git a/ql/src/test/queries/clientpositive/nested_column_pruning.q b/ql/src/test/queries/clientpositive/nested_column_pruning.q index ab5e16b4b4..5b41f18e97 100644 --- a/ql/src/test/queries/clientpositive/nested_column_pruning.q +++ b/ql/src/test/queries/clientpositive/nested_column_pruning.q @@ -7,12 +7,12 @@ set hive.exec.dynamic.partition.mode = nonstrict; set hive.strict.checks.cartesian.product=false; -- First, create source tables -DROP TABLE IF EXISTS dummy; -CREATE TABLE dummy (i int); -INSERT INTO TABLE dummy VALUES (42); +DROP TABLE IF EXISTS dummy_n5; +CREATE TABLE dummy_n5 (i int); +INSERT INTO TABLE dummy_n5 VALUES (42); -DROP TABLE IF EXISTS nested_tbl_1; -CREATE TABLE nested_tbl_1 ( +DROP TABLE IF EXISTS nested_tbl_1_n1; +CREATE TABLE nested_tbl_1_n1 ( a int, s1 struct<f1: boolean, f2: string, f3: struct<f4: int, f5: double>, f6: int>, s2 struct<f7: string, f8: struct<f9: boolean, f10: array<int>, f11: map<string, boolean>>>, @@ -22,7 +22,7 @@ CREATE TABLE nested_tbl_1 ( s6 map<string, struct<f20: array<struct<f21: struct<f22: int>>>>> ) STORED AS PARQUET; -INSERT INTO TABLE nested_tbl_1 SELECT +INSERT INTO TABLE nested_tbl_1_n1 SELECT 1, named_struct('f1', false, 'f2', 'foo', 'f3', named_struct('f4', 4, 'f5', cast(5.0 as double)), 'f6', 4), named_struct('f7', 'f7', 'f8', named_struct('f9', true, 'f10', array(10, 11), 'f11', map('key1', true, 'key2', false))), named_struct('f12', array(named_struct('f13', 'foo', 'f14', 14), named_struct('f13', 'bar', 'f14', 28))), @@ -30,12 +30,12 @@ INSERT INTO TABLE nested_tbl_1 SELECT named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))), map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))), 'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2))))) -FROM dummy; +FROM dummy_n5; -DROP TABLE IF EXISTS nested_tbl_2; -CREATE TABLE nested_tbl_2 LIKE nested_tbl_1; +DROP TABLE IF EXISTS nested_tbl_2_n1; +CREATE TABLE nested_tbl_2_n1 LIKE nested_tbl_1_n1; -INSERT INTO TABLE nested_tbl_2 SELECT +INSERT INTO TABLE nested_tbl_2_n1 SELECT 2, named_struct('f1', true, 'f2', 'bar', 'f3', named_struct('f4', 4, 'f5', cast(6.5 as double)), 'f6', 4), named_struct('f7', 'f72', 'f8', named_struct('f9', false, 'f10', array(20, 22), 'f11', map('key3', true, 'key4', false))), named_struct('f12', array(named_struct('f13', 'bar', 'f14', 28), named_struct('f13', 'foo', 'f14', 56))), @@ -43,175 +43,175 @@ INSERT INTO TABLE nested_tbl_2 SELECT named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))), map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))), 'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4))))) -FROM dummy; +FROM
dummy_n5; -- Testing only select statements -EXPLAIN SELECT a FROM nested_tbl_1; -SELECT a FROM nested_tbl_1; +EXPLAIN SELECT a FROM nested_tbl_1_n1; +SELECT a FROM nested_tbl_1_n1; -EXPLAIN SELECT s1.f1 FROM nested_tbl_1; -SELECT s1.f1 FROM nested_tbl_1; +EXPLAIN SELECT s1.f1 FROM nested_tbl_1_n1; +SELECT s1.f1 FROM nested_tbl_1_n1; -EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1; -SELECT s1.f1, s1.f2 FROM nested_tbl_1; +EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1_n1; +SELECT s1.f1, s1.f2 FROM nested_tbl_1_n1; -- In this case 's1.f3' and 's1.f3.f4' should be merged -EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1; -SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1; +EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n1; +SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n1; -- Testing select array and index shifting -EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1; -SELECT s1.f3.f5 FROM nested_tbl_1; +EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n1; +SELECT s1.f3.f5 FROM nested_tbl_1_n1; -- Testing select from multiple structs -EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1; -SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1; +EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n1; +SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n1; -- Testing select with filter -EXPLAIN SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE; -SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE; +EXPLAIN SELECT s1.f2 FROM nested_tbl_1_n1 WHERE s1.f1 = FALSE; +SELECT s1.f2 FROM nested_tbl_1_n1 WHERE s1.f1 = FALSE; -EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4; -SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4; +EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n1 WHERE s1.f3.f4 = 4; +SELECT s1.f3.f5 FROM nested_tbl_1_n1 WHERE s1.f3.f4 = 4; -EXPLAIN SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE; -SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE; +EXPLAIN SELECT s2.f8 FROM nested_tbl_1_n1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE; +SELECT s2.f8 FROM nested_tbl_1_n1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE; -- Testing lateral view -EXPLAIN SELECT col1, col2 FROM nested_tbl_1 +EXPLAIN SELECT col1, col2 FROM nested_tbl_1_n1 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2; -SELECT col1, col2 FROM nested_tbl_1 +SELECT col1, col2 FROM nested_tbl_1_n1 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2; -- Testing UDFs -EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1; -SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1; +EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n1; +SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n1; -- Testing aggregations -EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5; -SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5; +EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3.f5; +SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3.f5; -EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3; -SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3; +EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3; +SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3; -EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3; -SELECT s1.f3, count(s1.f3.f4) FROM 
nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3; +EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 ORDER BY s1.f3; +SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 ORDER BY s1.f3; -- Testing joins EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE; SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE; EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE; SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE; EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE; SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE; EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f1 <> t2.s2.f8.f9; SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f1 <> t2.s2.f8.f9; EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9; SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9; -- Testing insert with aliases -DROP TABLE IF EXISTS nested_tbl_3; -CREATE TABLE nested_tbl_3 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS PARQUET; +DROP TABLE IF EXISTS nested_tbl_3_n1; +CREATE TABLE nested_tbl_3_n1 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS PARQUET; -INSERT OVERWRITE TABLE nested_tbl_3 PARTITION(f3) +INSERT OVERWRITE TABLE nested_tbl_3_n1 PARTITION(f3) SELECT s1.f1 AS f1, S1.f2 AS f2, s1.f6 AS f3 -FROM nested_tbl_1; +FROM nested_tbl_1_n1; -SELECT * FROM nested_tbl_3; +SELECT * FROM nested_tbl_3_n1; -- Testing select struct field from elements in array or map EXPLAIN SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s3.f12[0].f14; SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s3.f12[0].f14; EXPLAIN SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s4['key1'].f15; SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s4['key1'].f15; EXPLAIN SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16[0].f18.f19; SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16[0].f18.f19; EXPLAIN SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16.f18.f19; SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16.f18.f19; EXPLAIN SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 
-FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20[0].f21.f22; SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20[0].f21.f22; EXPLAIN SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20.f21.f22; SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20.f21.f22; diff --git a/ql/src/test/queries/clientpositive/newline.q b/ql/src/test/queries/clientpositive/newline.q index 584ff9805c..f886a3e410 100644 --- a/ql/src/test/queries/clientpositive/newline.q +++ b/ql/src/test/queries/clientpositive/newline.q @@ -5,56 +5,56 @@ set hive.transform.escape.input=true; -- SORT_QUERY_RESULTS -create table tmp_tmp(key string, value string) stored as rcfile; -insert overwrite table tmp_tmp +create table tmp_tmp_n0(key string, value string) stored as rcfile; +insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python newline.py' AS key, value FROM src limit 6; -select * from tmp_tmp; +select * from tmp_tmp_n0; -drop table tmp_tmp; +drop table tmp_tmp_n0; add file ../../data/scripts/escapednewline.py; add file ../../data/scripts/escapedtab.py; add file ../../data/scripts/doubleescapedtab.py; add file ../../data/scripts/escapedcarriagereturn.py; -create table tmp_tmp(key string, value string) stored as rcfile; -insert overwrite table tmp_tmp +create table tmp_tmp_n0(key string, value string) stored as rcfile; +insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapednewline.py' AS key, value FROM src limit 5; -select * from tmp_tmp; +select * from tmp_tmp_n0; SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp; +'cat' AS (key, value) FROM tmp_tmp_n0; -insert overwrite table tmp_tmp +insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapedcarriagereturn.py' AS key, value FROM src limit 5; -select * from tmp_tmp; +select * from tmp_tmp_n0; SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp; +'cat' AS (key, value) FROM tmp_tmp_n0; -insert overwrite table tmp_tmp +insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapedtab.py' AS key, value FROM src limit 5; -select * from tmp_tmp; +select * from tmp_tmp_n0; SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp; +'cat' AS (key, value) FROM tmp_tmp_n0; -insert overwrite table tmp_tmp +insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python doubleescapedtab.py' AS key, value FROM src limit 5; -select * from tmp_tmp; +select * from tmp_tmp_n0; SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp; +'cat' AS (key, value) FROM tmp_tmp_n0; SELECT key FROM (SELECT TRANSFORM ('a\tb', 'c') USING 'cat' AS (key, value) FROM src limit 1)a ORDER BY key ASC; diff --git a/ql/src/test/queries/clientpositive/notable_alias1.q b/ql/src/test/queries/clientpositive/notable_alias1.q index 846660b961..68b8982164 100644 --- a/ql/src/test/queries/clientpositive/notable_alias1.q +++ b/ql/src/test/queries/clientpositive/notable_alias1.q @@ -1,12 +1,12 @@ --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE; +CREATE TABLE dest1_n4(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key; +INSERT OVERWRITE TABLE dest1_n4 SELECT '1234', key, count(1) WHERE src.key < 100 group by key; FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key; +INSERT OVERWRITE TABLE dest1_n4 SELECT '1234', key, count(1) WHERE src.key < 100 group by key; -SELECT dest1.* FROM dest1; +SELECT dest1_n4.* FROM dest1_n4; diff --git a/ql/src/test/queries/clientpositive/notable_alias2.q b/ql/src/test/queries/clientpositive/notable_alias2.q index be858074ac..c1338241a7 100644 --- a/ql/src/test/queries/clientpositive/notable_alias2.q +++ b/ql/src/test/queries/clientpositive/notable_alias2.q @@ -1,12 +1,12 @@ --! qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE; +CREATE TABLE dest1_n50(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key; +INSERT OVERWRITE TABLE dest1_n50 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key; FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key; +INSERT OVERWRITE TABLE dest1_n50 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key; -SELECT dest1.* FROM dest1; +SELECT dest1_n50.* FROM dest1_n50; diff --git a/ql/src/test/queries/clientpositive/notable_alias3.q b/ql/src/test/queries/clientpositive/notable_alias3.q index 9cb76053df..414ba24702 100644 --- a/ql/src/test/queries/clientpositive/notable_alias3.q +++ b/ql/src/test/queries/clientpositive/notable_alias3.q @@ -1,5 +1,5 @@ --! 
qt:dataset:src -CREATE TABLE dest1(c string, key INT, value DOUBLE) STORED AS TEXTFILE; +CREATE TABLE dest1_n102(c string, key INT, value DOUBLE) STORED AS TEXTFILE; FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key; \ No newline at end of file +INSERT OVERWRITE TABLE dest1_n102 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/nullMap.q b/ql/src/test/queries/clientpositive/nullMap.q index f272bb9a76..28d9e2d6ae 100644 --- a/ql/src/test/queries/clientpositive/nullMap.q +++ b/ql/src/test/queries/clientpositive/nullMap.q @@ -1,6 +1,6 @@ SET hive.vectorized.execution.enabled=false; -create table map_txt ( +create table map_txt_n0 ( id int, content map<int, string> ) @@ -9,8 +9,8 @@ null defined as '\\N' stored as textfile ; -LOAD DATA LOCAL INPATH '../../data/files/mapNull.txt' INTO TABLE map_txt; +LOAD DATA LOCAL INPATH '../../data/files/mapNull.txt' INTO TABLE map_txt_n0; -select * from map_txt; +select * from map_txt_n0; -select id, map_keys(content) from map_txt; +select id, map_keys(content) from map_txt_n0; diff --git a/ql/src/test/queries/clientpositive/null_column.q b/ql/src/test/queries/clientpositive/null_column.q index 4275ce5537..5c0873b981 100644 --- a/ql/src/test/queries/clientpositive/null_column.q +++ b/ql/src/test/queries/clientpositive/null_column.q @@ -8,9 +8,9 @@ load data local inpath '../../data/files/test.dat' overwrite into table temp_nul select null, null from temp_null; -create table tt(a int, b string); -insert overwrite table tt select null, null from temp_null; -select * from tt; +create table tt_n1(a int, b string); +insert overwrite table tt_n1 select null, null from temp_null; +select * from tt_n1; create table tt_b(a int, b string) row format serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"; insert overwrite table tt_b select null, null from temp_null; diff --git a/ql/src/test/queries/clientpositive/nullability_transitive_inference.q b/ql/src/test/queries/clientpositive/nullability_transitive_inference.q index ff0ce3ad05..ebde989c7b 100644 --- a/ql/src/test/queries/clientpositive/nullability_transitive_inference.q +++ b/ql/src/test/queries/clientpositive/nullability_transitive_inference.q @@ -6,35 +6,35 @@ set hive.strict.checks.cartesian.product=false; set hive.stats.fetch.column.stats=true; set hive.materializedview.rewriting=true; -create table emps ( +create table emps_n6 ( empid int, deptno int, name varchar(256), salary float, commission int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +insert into emps_n6 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250); -analyze table emps compute statistics for columns; +analyze table emps_n6 compute statistics for columns; -create table depts ( +create table depts_n5 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true'); -insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); -analyze table depts compute statistics for columns; +insert into depts_n5 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20); +analyze table depts_n5 compute statistics for columns; -alter table emps add constraint pk1 primary key (empid) disable novalidate rely; -alter table
-alter table depts add constraint pk2 primary key (deptno) disable novalidate rely;
+alter table emps_n6 add constraint pk1 primary key (empid) disable novalidate rely;
+alter table depts_n5 add constraint pk2 primary key (deptno) disable novalidate rely;
-alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely;
+alter table emps_n6 add constraint fk1 foreign key (deptno) references depts_n5(deptno) disable novalidate rely;
explain
-select empid from emps
-join depts using (deptno) where depts.deptno >= 20
-group by empid, depts.deptno;
+select empid from emps_n6
+join depts_n5 using (deptno) where depts_n5.deptno >= 20
+group by empid, depts_n5.deptno;
-select empid from emps
-join depts using (deptno) where depts.deptno >= 20
-group by empid, depts.deptno;
+select empid from emps_n6
+join depts_n5 using (deptno) where depts_n5.deptno >= 20
+group by empid, depts_n5.deptno;
diff --git a/ql/src/test/queries/clientpositive/nullformatCTAS.q b/ql/src/test/queries/clientpositive/nullformatCTAS.q
index d077981d02..093742fc4a 100644
--- a/ql/src/test/queries/clientpositive/nullformatCTAS.q
+++ b/ql/src/test/queries/clientpositive/nullformatCTAS.q
@@ -1,15 +1,15 @@
-- base table with null data
-DROP TABLE IF EXISTS base_tab;
-CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab;
-DESCRIBE EXTENDED base_tab;
+DROP TABLE IF EXISTS base_tab_n2;
+CREATE TABLE base_tab_n2(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab_n2;
+DESCRIBE EXTENDED base_tab_n2;
-- table with non-default null format
DROP TABLE IF EXISTS null_tab3;
EXPLAIN CREATE TABLE null_tab3
ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull'
- AS SELECT a, b FROM base_tab;
+ AS SELECT a, b FROM base_tab_n2;
CREATE TABLE null_tab3
ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull'
- AS SELECT a, b FROM base_tab;
+ AS SELECT a, b FROM base_tab_n2;
DESCRIBE EXTENDED null_tab3;
SHOW CREATE TABLE null_tab3;
@@ -21,4 +21,4 @@ SELECT * FROM null_tab3;
DROP TABLE null_tab3;
-DROP TABLE base_tab;
+DROP TABLE base_tab_n2;
diff --git a/ql/src/test/queries/clientpositive/nullformatdir.q b/ql/src/test/queries/clientpositive/nullformatdir.q
index d29863839f..60637abf86 100644
--- a/ql/src/test/queries/clientpositive/nullformatdir.q
+++ b/ql/src/test/queries/clientpositive/nullformatdir.q
@@ -1,13 +1,13 @@
-- base table with null data
-DROP TABLE IF EXISTS base_tab;
-CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab;
-DESCRIBE EXTENDED base_tab;
+DROP TABLE IF EXISTS base_tab_n1;
+CREATE TABLE base_tab_n1(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab_n1;
+DESCRIBE EXTENDED base_tab_n1;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/hive_test/nullformat/tmp;
dfs -rmr ${system:test.tmp.dir}/hive_test/nullformat/*;
INSERT OVERWRITE LOCAL DIRECTORY '${system:test.tmp.dir}/hive_test/nullformat'
- ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab;
+ ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab_n1;
dfs -cat ${system:test.tmp.dir}/hive_test/nullformat/000000_0;
-- load the exported data back into a table with same null format and verify null values
@@ -18,4 +18,4 @@ SELECT * FROM null_tab2;
dfs -rmr ${system:test.tmp.dir}/hive_test/nullformat;
-DROP TABLE base_tab;
+DROP TABLE base_tab_n1;
diff --git a/ql/src/test/queries/clientpositive/nullgroup3.q b/ql/src/test/queries/clientpositive/nullgroup3.q
index 282f6c3b7d..af6689e8c0 100644
--- a/ql/src/test/queries/clientpositive/nullgroup3.q
+++ b/ql/src/test/queries/clientpositive/nullgroup3.q
@@ -1,29 +1,29 @@
set hive.mapred.mode=nonstrict;
-CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08');
+CREATE TABLE tstparttbl_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-08');
explain
-select count(1) from tstparttbl;
-select count(1) from tstparttbl;
+select count(1) from tstparttbl_n0;
+select count(1) from tstparttbl_n0;
-CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08');
+CREATE TABLE tstparttbl2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-08');
explain
-select count(1) from tstparttbl2;
-select count(1) from tstparttbl2;
-DROP TABLE tstparttbl;
-CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08');
+select count(1) from tstparttbl2_n0;
+select count(1) from tstparttbl2_n0;
+DROP TABLE tstparttbl_n0;
+CREATE TABLE tstparttbl_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-08');
explain
-select count(1) from tstparttbl;
-select count(1) from tstparttbl;
+select count(1) from tstparttbl_n0;
+select count(1) from tstparttbl_n0;
-DROP TABLE tstparttbl2;
-CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08');
+DROP TABLE tstparttbl2_n0;
+CREATE TABLE tstparttbl2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-08');
explain
-select count(1) from tstparttbl2;
-select count(1) from tstparttbl2;
+select count(1) from tstparttbl2_n0;
+select count(1) from tstparttbl2_n0;
diff --git a/ql/src/test/queries/clientpositive/optimize_filter_literal.q b/ql/src/test/queries/clientpositive/optimize_filter_literal.q
index 28bed095fa..b0ce4ae31e 100644
--- a/ql/src/test/queries/clientpositive/optimize_filter_literal.q
+++ b/ql/src/test/queries/clientpositive/optimize_filter_literal.q
@@ -12,38 +12,38 @@ set hive.vectorized.execution.enabled=true;
-- SORT_QUERY_RESULTS
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n21(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n13 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE;
+CREATE TABLE srcbucket_mapjoin_part_n22 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n21 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n21 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08');
set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n13 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n22;
-analyze table tab_part partition (ds='2008-04-08') compute statistics for columns;
+analyze table tab_part_n13 partition (ds='2008-04-08') compute statistics for columns;
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n14(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE;
+insert overwrite table tab_n14 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n21;
-analyze table tab partition (ds='2008-04-08') compute statistics for columns;
+analyze table tab_n14 partition (ds='2008-04-08') compute statistics for columns;
set hive.join.emit.interval=2;
set mapred.reduce.tasks=3;
select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n14 where tab_n14.key = 0)a
full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key;
+(select * from tab_part_n13 where tab_part_n13.key = 98)b join tab_part_n13 c on a.key = b.key and b.key = c.key;
diff --git a/ql/src/test/queries/clientpositive/optimize_join_ptp.q b/ql/src/test/queries/clientpositive/optimize_join_ptp.q
index 5807ec350d..81d4f5e44a 100644
--- a/ql/src/test/queries/clientpositive/optimize_join_ptp.q
+++ b/ql/src/test/queries/clientpositive/optimize_join_ptp.q
@@ -1,16 +1,16 @@
set hive.mapred.mode=nonstrict;
set hive.explain.user=false;
-create table t1 (v string, k int);
-insert into t1 values ('people', 10), ('strangers', 20), ('parents', 30);
+create table t1_n97 (v string, k int);
+insert into t1_n97 values ('people', 10), ('strangers', 20), ('parents', 30);
-create table t2 (v string, k double);
-insert into t2 values ('people', 10), ('strangers', 20), ('parents', 30);
+create table t2_n60 (v string, k double);
+insert into t2_n60 values ('people', 10), ('strangers', 20), ('parents', 30);
-- should not produce exceptions
explain
-select * from t1 where t1.k in (select t2.k from t2 where t2.v='people') and t1.k<15;
+select * from t1_n97 where t1_n97.k in (select t2_n60.k from t2_n60 where t2_n60.v='people') and t1_n97.k<15;
-select * from t1 where t1.k in (select t2.k from t2 where t2.v='people') and t1.k<15;
+select * from t1_n97 where t1_n97.k in (select t2_n60.k from t2_n60 where t2_n60.v='people') and t1_n97.k<15;
diff --git a/ql/src/test/queries/clientpositive/orc_analyze.q b/ql/src/test/queries/clientpositive/orc_analyze.q
index 2683fdc924..aea09d4767 100644
--- a/ql/src/test/queries/clientpositive/orc_analyze.q
+++ b/ql/src/test/queries/clientpositive/orc_analyze.q
@@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict;
set hive.exec.submitviachild=false;
set hive.exec.submit.local.task.via.child=false;
-CREATE TABLE orc_create_people_staging (
+CREATE TABLE orc_create_people_staging_n0 (
id int,
first_name string,
last_name string,
@@ -12,14 +12,14 @@ CREATE TABLE orc_create_people_staging (
start_date timestamp,
state string);
-LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt' OVERWRITE INTO TABLE orc_create_people_staging;
+LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt' OVERWRITE INTO TABLE orc_create_people_staging_n0;
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.stats.autogather=false;
-- non-partitioned table
-- partial scan gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
id int,
first_name string,
last_name string,
@@ -29,19 +29,19 @@ CREATE TABLE orc_create_people (
state string)
STORED AS orc;
-INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
set hive.stats.autogather = true;
-analyze table orc_create_people compute statistics;
-desc formatted orc_create_people;
+analyze table orc_create_people_n0 compute statistics;
+desc formatted orc_create_people_n0;
-analyze table orc_create_people compute statistics noscan;
-desc formatted orc_create_people;
+analyze table orc_create_people_n0 compute statistics noscan;
+desc formatted orc_create_people_n0;
-drop table orc_create_people;
+drop table orc_create_people_n0;
-- auto stats gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
id int,
first_name string,
last_name string,
@@ -51,16 +51,16 @@ CREATE TABLE orc_create_people (
state string)
STORED AS orc;
-INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
-desc formatted orc_create_people;
+desc formatted orc_create_people_n0;
-drop table orc_create_people;
+drop table orc_create_people_n0;
set hive.stats.autogather=false;
-- partitioned table
-- partial scan gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
id int,
first_name string,
last_name string,
@@ -70,22 +70,22 @@ CREATE TABLE orc_create_people (
PARTITIONED BY (state string)
STORED AS orc;
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
- SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+ SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
set hive.stats.autogather = true;
-analyze table orc_create_people partition(state) compute statistics;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+analyze table orc_create_people_n0 partition(state) compute statistics;
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
-analyze table orc_create_people partition(state) compute statistics noscan;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+analyze table orc_create_people_n0 partition(state) compute statistics noscan;
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
-drop table orc_create_people;
+drop table orc_create_people_n0;
-- auto stats gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
id int,
first_name string,
last_name string,
@@ -95,18 +95,18 @@ CREATE TABLE orc_create_people (
PARTITIONED BY (state string)
STORED AS orc;
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
- SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+ SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
-drop table orc_create_people;
+drop table orc_create_people_n0;
set hive.stats.autogather=false;
-- partitioned and bucketed table
-- partial scan gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
id int,
first_name string,
last_name string,
@@ -119,22 +119,22 @@ sorted by (last_name)
into 4 buckets
STORED AS orc;
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
- SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+ SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
set hive.stats.autogather = true;
-analyze table orc_create_people partition(state) compute statistics;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+analyze table orc_create_people_n0 partition(state) compute statistics;
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
-analyze table orc_create_people partition(state) compute statistics noscan;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+analyze table orc_create_people_n0 partition(state) compute statistics noscan;
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
-drop table orc_create_people;
+drop table orc_create_people_n0;
-- auto stats gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
id int,
first_name string,
last_name string,
@@ -147,19 +147,19 @@ sorted by (last_name)
into 4 buckets
STORED AS orc;
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
- SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+ SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
-drop table orc_create_people;
+drop table orc_create_people_n0;
set hive.stats.autogather=false;
-- create table with partitions containing text and ORC files.
-- ORC files implements StatsProvidingRecordReader but text files does not.
-- So the partition containing text file should not have statistics.
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
id int,
first_name string,
last_name string,
@@ -169,14 +169,14 @@ CREATE TABLE orc_create_people (
PARTITIONED BY (state string)
STORED AS orc;
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
- SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+ SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
set hive.stats.autogather = true;
-analyze table orc_create_people partition(state) compute statistics;
-desc formatted orc_create_people partition(state="Ca");
+analyze table orc_create_people_n0 partition(state) compute statistics;
+desc formatted orc_create_people_n0 partition(state="Ca");
-analyze table orc_create_people partition(state) compute statistics noscan;
-desc formatted orc_create_people partition(state="Ca");
+analyze table orc_create_people_n0 partition(state) compute statistics noscan;
+desc formatted orc_create_people_n0 partition(state="Ca");
-drop table orc_create_people;
+drop table orc_create_people_n0;
diff --git a/ql/src/test/queries/clientpositive/orc_create.q b/ql/src/test/queries/clientpositive/orc_create.q
index 930a7cb820..6d410099ea 100644
--- a/ql/src/test/queries/clientpositive/orc_create.q
+++ b/ql/src/test/queries/clientpositive/orc_create.q
@@ -6,13 +6,13 @@ set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
DROP TABLE orc_create;
-DROP TABLE orc_create_complex;
-DROP TABLE orc_create_staging;
+DROP TABLE orc_create_complex_n1;
+DROP TABLE orc_create_staging_n1;
DROP TABLE orc_create_people_staging;
DROP TABLE orc_create_people;
DROP TABLE if exists orc_create_cprl;
-CREATE TABLE orc_create_staging (
+CREATE TABLE orc_create_staging_n1 (
str STRING,
mp MAP<STRING,STRING>,
lst ARRAY<STRING>,
@@ -22,7 +22,7 @@ CREATE TABLE orc_create_staging (
COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':';
-DESCRIBE FORMATTED orc_create_staging;
+DESCRIBE FORMATTED orc_create_staging_n1;
CREATE TABLE orc_create (key INT, value STRING)
PARTITIONED BY (ds string)
@@ -52,26 +52,26 @@ set hive.default.fileformat=TextFile;
DESCRIBE FORMATTED orc_create;
-CREATE TABLE orc_create_complex (
+CREATE TABLE orc_create_complex_n1 (
str STRING,
mp MAP<STRING,STRING>,
lst ARRAY<STRING>,
strct STRUCT<A:STRING,B:STRING>
) STORED AS ORC;
-DESCRIBE FORMATTED orc_create_complex;
+DESCRIBE FORMATTED orc_create_complex_n1;
-LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging;
+LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n1;
-SELECT * from orc_create_staging;
+SELECT * from orc_create_staging_n1;
-INSERT OVERWRITE TABLE orc_create_complex SELECT * FROM orc_create_staging;
+INSERT OVERWRITE TABLE orc_create_complex_n1 SELECT * FROM orc_create_staging_n1;
-SELECT * from orc_create_complex;
-SELECT str from orc_create_complex;
-SELECT mp from orc_create_complex;
-SELECT lst from orc_create_complex;
-SELECT strct from orc_create_complex;
+SELECT * from orc_create_complex_n1;
+SELECT str from orc_create_complex_n1;
+SELECT mp from orc_create_complex_n1;
+SELECT lst from orc_create_complex_n1;
+SELECT strct from orc_create_complex_n1;
CREATE TABLE orc_create_people_staging (
id int,
@@ -135,8 +135,8 @@ SELECT 1 from src limit 1;
SELECT * from orc_create_cprl;
DROP TABLE orc_create;
-DROP TABLE orc_create_complex;
-DROP TABLE orc_create_staging;
+DROP TABLE orc_create_complex_n1;
+DROP TABLE orc_create_staging_n1;
DROP TABLE orc_create_people_staging;
DROP TABLE orc_create_people;
DROP TABLE orc_create_cprl;
diff --git a/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q b/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
index 1848500126..1429a6d51f 100644
--- a/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
+++ b/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
@@ -7,19 +7,19 @@ set hive.exec.orc.dictionary.key.size.threshold=-1;
-- Tests that the data can be read back correctly when a string column is stored
-- without dictionary encoding
-CREATE TABLE test_orc (key STRING)
+CREATE TABLE test_orc_n5 (key STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat';
-- should be single split
-INSERT OVERWRITE TABLE test_orc SELECT key FROM src TABLESAMPLE (10 ROWS);
+INSERT OVERWRITE TABLE test_orc_n5 SELECT key FROM src TABLESAMPLE (10 ROWS);
-- Test reading the column back
-SELECT * FROM test_orc;
+SELECT * FROM test_orc_n5;
-ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.stripe.size' = '1');
+ALTER TABLE test_orc_n5 SET SERDEPROPERTIES ('orc.stripe.size' = '1');
CREATE TABLE src_thousand(key STRING) STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH '../../data/files/kv1kv2.cogroup.txt'
@@ -35,7 +35,7 @@ set hive.exec.orc.dictionary.key.size.threshold=0.5;
-- dictionary encoded. The final stripe will have 630 out of 1000 and be
-- direct encoded.
-INSERT OVERWRITE TABLE test_orc
+INSERT OVERWRITE TABLE test_orc_n5
SELECT key FROM (
SELECT CONCAT("a", key) AS key FROM src_thousand
UNION ALL
@@ -60,4 +60,4 @@ UNION ALL
SELECT CONCAT("k", key) AS key FROM src_thousand
) a ORDER BY key LIMIT 11000;
-SELECT SUM(HASH(key)) FROM test_orc;
+SELECT SUM(HASH(key)) FROM test_orc_n5;
diff --git a/ql/src/test/queries/clientpositive/orc_diff_part_cols.q b/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
index 3a2cbd45eb..ea9623eb31 100644
--- a/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
+++ b/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
@@ -6,7 +6,7 @@ set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
-CREATE TABLE test_orc (key STRING)
+CREATE TABLE test_orc_n0 (key STRING)
PARTITIONED BY (part STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
@@ -18,10 +18,10 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-- to another partition
-- This can produce unexpected results with CombineHiveInputFormat
-INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows);
+INSERT OVERWRITE TABLE test_orc_n0 PARTITION (part = '1') SELECT key FROM src tablesample (5 rows);
-ALTER TABLE test_orc ADD COLUMNS (cnt INT);
+ALTER TABLE test_orc_n0 ADD COLUMNS (cnt INT);
-INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5;
+INSERT OVERWRITE TABLE test_orc_n0 PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5;
-SELECT * FROM test_orc;
+SELECT * FROM test_orc_n0;
diff --git a/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q b/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
index 41db00edf3..d7fdbc82e9 100644
--- a/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
+++ b/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
@@ -7,11 +7,11 @@ set hive.vectorized.execution.enabled=false;
-- SORT_QUERY_RESULTS
-CREATE TABLE test_orc (key STRING)
+CREATE TABLE test_orc_n4 (key STRING)
STORED AS ORC;
-INSERT OVERWRITE TABLE test_orc SELECT key FROM src LIMIT 5;
+INSERT OVERWRITE TABLE test_orc_n4 SELECT key FROM src LIMIT 5;
-ALTER TABLE test_orc ADD COLUMNS (value STRING);
+ALTER TABLE test_orc_n4 ADD COLUMNS (value STRING);
-SELECT * FROM test_orc;
+SELECT * FROM test_orc_n4;
diff --git a/ql/src/test/queries/clientpositive/orc_empty_files.q b/ql/src/test/queries/clientpositive/orc_empty_files.q
index 1ae6e681f8..f46d7b5265 100644
--- a/ql/src/test/queries/clientpositive/orc_empty_files.q
+++ b/ql/src/test/queries/clientpositive/orc_empty_files.q
@@ -1,7 +1,7 @@
--! qt:dataset:src
set hive.vectorized.execution.enabled=false;
-CREATE TABLE test_orc (key STRING, cnt INT)
+CREATE TABLE test_orc_n2 (key STRING, cnt INT)
CLUSTERED BY (key) INTO 3 BUCKETS
ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
@@ -16,6 +16,6 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-- containing data and a file containing data followed by an empty file.
-- This can produce unexpected results with CombineHiveInputFormat
-INSERT OVERWRITE TABLE test_orc SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one;
+INSERT OVERWRITE TABLE test_orc_n2 SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one;
-SELECT count(*) FROM test_orc;
+SELECT count(*) FROM test_orc_n2;
diff --git a/ql/src/test/queries/clientpositive/orc_empty_strings.q b/ql/src/test/queries/clientpositive/orc_empty_strings.q
index 53902f0165..10650bfcae 100644
--- a/ql/src/test/queries/clientpositive/orc_empty_strings.q
+++ b/ql/src/test/queries/clientpositive/orc_empty_strings.q
@@ -4,19 +4,19 @@ set hive.vectorized.execution.enabled=false;
-- SORT_QUERY_RESULTS
-CREATE TABLE test_orc (key STRING)
+CREATE TABLE test_orc_n3 (key STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat';
-INSERT OVERWRITE TABLE test_orc SELECT '' FROM src tablesample (10 rows);
+INSERT OVERWRITE TABLE test_orc_n3 SELECT '' FROM src tablesample (10 rows);
-- Test reading a column which is just empty strings
-SELECT * FROM test_orc;
+SELECT * FROM test_orc_n3;
-INSERT OVERWRITE TABLE test_orc SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows);
+INSERT OVERWRITE TABLE test_orc_n3 SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows);
-- Test reading a column which has some empty strings
-SELECT * FROM test_orc;
+SELECT * FROM test_orc_n3;
diff --git a/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q b/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
index 9c330485e7..ba6dc057ba 100644
--- a/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
+++ b/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
@@ -11,9 +11,9 @@ ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.row.index.stride' = '1000');
-- this produces the effect that the number of non-null rows between the last and second
-- to last index stride are the same (there's only two index strides)
-CREATE TABLE src_null(a STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null;
+CREATE TABLE src_null_n0(a STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null_n0;
-INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null;
+INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null_n0;
SELECT * FROM test_orc LIMIT 5;
diff --git a/ql/src/test/queries/clientpositive/orc_file_dump.q b/ql/src/test/queries/clientpositive/orc_file_dump.q
index 754e121e86..0aec810fc6 100644
--- a/ql/src/test/queries/clientpositive/orc_file_dump.q
+++ b/ql/src/test/queries/clientpositive/orc_file_dump.q
@@ -1,7 +1,7 @@
set hive.vectorized.execution.enabled=false;
set hive.mapred.mode=nonstrict;
-CREATE TABLE staging(t tinyint,
+CREATE TABLE staging_n4(t tinyint,
si smallint,
i int,
b bigint,
@@ -15,9 +15,9 @@ CREATE TABLE staging(t tinyint,
ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n4;
-CREATE TABLE orc_ppd(t tinyint,
+CREATE TABLE orc_ppd_n0(t tinyint,
si smallint,
i int,
b bigint,
@@ -30,17 +30,17 @@ CREATE TABLE orc_ppd(t tinyint,
bin binary) STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd select * from staging;
+insert overwrite table orc_ppd_n0 select * from staging_n4;
SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
-select * from orc_ppd limit 1;
+select * from orc_ppd_n0 limit 1;
-alter table orc_ppd set tblproperties("orc.bloom.filter.fpp"="0.01");
+alter table orc_ppd_n0 set tblproperties("orc.bloom.filter.fpp"="0.01");
-insert overwrite table orc_ppd select * from staging;
+insert overwrite table orc_ppd_n0 select * from staging_n4;
-select * from orc_ppd limit 1;
+select * from orc_ppd_n0 limit 1;
CREATE TABLE orc_ppd_part(t tinyint,
si smallint,
@@ -55,6 +55,6 @@ CREATE TABLE orc_ppd_part(t tinyint,
bin binary) PARTITIONED BY (ds string, hr int) STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_part partition(ds = "2015", hr = 10) select * from staging;
+insert overwrite table orc_ppd_part partition(ds = "2015", hr = 10) select * from staging_n4;
select * from orc_ppd_part limit 1;
diff --git a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
index 93cd1bde0c..3f34500f5f 100644
--- a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
+++ b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
@@ -4,7 +4,7 @@ set hive.vectorized.execution.enabled=false;
set hive.mapred.mode=nonstrict;
set hive.metastore.disallow.incompatible.col.type.changes=false;
-create table if not exists alltypes (
+create table if not exists alltypes_n0 (
bo boolean,
ti tinyint,
si smallint,
@@ -25,7 +25,7 @@ create table if not exists alltypes (
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-create table if not exists alltypes_orc (
+create table if not exists alltypes_orc_n0 (
bo boolean,
ti tinyint,
si smallint,
@@ -44,26 +44,26 @@ create table if not exists alltypes_orc (
st struct
) stored as orc;
-load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes;
+load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n0;
-insert overwrite table alltypes_orc select * from alltypes;
+insert overwrite table alltypes_orc_n0 select * from alltypes_n0;
-select * from alltypes_orc;
+select * from alltypes_orc_n0;
SET hive.exec.schema.evolution=true;
-alter table alltypes_orc change si si int;
-select * from alltypes_orc;
+alter table alltypes_orc_n0 change si si int;
+select * from alltypes_orc_n0;
-alter table alltypes_orc change si si bigint;
-alter table alltypes_orc change i i bigint;
-select * from alltypes_orc;
+alter table alltypes_orc_n0 change si si bigint;
+alter table alltypes_orc_n0 change i i bigint;
+select * from alltypes_orc_n0;
set hive.vectorized.execution.enabled=true;
set hive.fetch.task.conversion=none;
-explain select ti, si, i, bi from alltypes_orc;
-select ti, si, i, bi from alltypes_orc;
+explain select ti, si, i, bi from alltypes_orc_n0;
+select ti, si, i, bi from alltypes_orc_n0;
SET hive.exec.schema.evolution=false;
diff --git a/ql/src/test/queries/clientpositive/orc_llap_counters.q b/ql/src/test/queries/clientpositive/orc_llap_counters.q
index 1136b559fa..9f8e3bb181 100644
--- a/ql/src/test/queries/clientpositive/orc_llap_counters.q
+++ b/ql/src/test/queries/clientpositive/orc_llap_counters.q
@@ -8,7 +8,7 @@ SET hive.llap.io.enabled=true;
SET hive.map.aggr=false;
-- disabling map side aggregation as that can lead to different intermediate record counts
-CREATE TABLE staging(t tinyint,
+CREATE TABLE staging_n6(t tinyint,
si smallint,
i int,
b bigint,
@@ -22,10 +22,10 @@ CREATE TABLE staging(t tinyint,
ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
-LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n6;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging_n6;
-CREATE TABLE orc_ppd_staging(t tinyint,
+CREATE TABLE orc_ppd_staging_n0(t tinyint,
si smallint,
i int,
b bigint,
@@ -40,14 +40,14 @@ CREATE TABLE orc_ppd_staging(t tinyint,
bin binary) STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging order by t, s;
+insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n6 order by t, s;
-- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
-- which makes it hard to test bloom filters
-insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging limit 1;
-insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging limit 1;
+insert into orc_ppd_staging_n0 select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging_n6 limit 1;
+insert into orc_ppd_staging_n0 select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging_n6 limit 1;
-CREATE TABLE orc_ppd(t tinyint,
+CREATE TABLE orc_ppd_n1(t tinyint,
si smallint,
i int,
b bigint,
@@ -62,9 +62,9 @@ CREATE TABLE orc_ppd(t tinyint,
bin binary) STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by t, s;
+insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n0 order by t, s;
-describe formatted orc_ppd;
+describe formatted orc_ppd_n1;
SET hive.fetch.task.conversion=none;
SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
@@ -75,50 +75,50 @@ SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrint
-- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
-- INPUT_RECORDS: 2100 (all row groups)
-select count(*) from orc_ppd;
+select count(*) from orc_ppd_n1;
-- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n1 where t > 127;
-- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
-select count(*) from orc_ppd where t <=> 50;
-select count(*) from orc_ppd where t <=> 100;
+select count(*) from orc_ppd_n1 where t = 55;
+select count(*) from orc_ppd_n1 where t <=> 50;
+select count(*) from orc_ppd_n1 where t <=> 100;
-- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = "54";
+select count(*) from orc_ppd_n1 where t = "54";
-- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = -10.0;
+select count(*) from orc_ppd_n1 where t = -10.0;
-- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = cast(53 as float);
-select count(*) from orc_ppd where t = cast(53 as double);
+select count(*) from orc_ppd_n1 where t = cast(53 as float);
+select count(*) from orc_ppd_n1 where t = cast(53 as double);
-- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t < 100;
+select count(*) from orc_ppd_n1 where t < 100;
-- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t < 100 and t > 98;
+select count(*) from orc_ppd_n1 where t < 100 and t > 98;
-- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t <= 100;
+select count(*) from orc_ppd_n1 where t <= 100;
-- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t is null;
+select count(*) from orc_ppd_n1 where t is null;
-- INPUT_RECORDS: 1100 (2 row groups)
-select count(*) from orc_ppd where t in (5, 120);
+select count(*) from orc_ppd_n1 where t in (5, 120);
-- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t between 60 and 80;
+select count(*) from orc_ppd_n1 where t between 60 and 80;
-- bloom filter tests
-- INPUT_RECORDS: 0
-select count(*) from orc_ppd where t = -100;
-select count(*) from orc_ppd where t <=> -100;
-select count(*) from orc_ppd where t = 125;
-select count(*) from orc_ppd where t IN (-100, 125, 200);
+select count(*) from orc_ppd_n1 where t = -100;
+select count(*) from orc_ppd_n1 where t <=> -100;
+select count(*) from orc_ppd_n1 where t = 125;
+select count(*) from orc_ppd_n1 where t IN (-100, 125, 200);
-- Row group statistics for column s:
-- Entry 0: count: 1000 hasNull: false min: max: zach young sum: 12907 positions: 0,0,0
@@ -126,59 +126,59 @@ select count(*) from orc_ppd where t IN (-100, 125, 200);
-- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373
-- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where s > "zzz";
+select count(*) from orc_ppd_n1 where s > "zzz";
-- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where s = "zach young";
-select count(*) from orc_ppd where s <=> "zach zipper";
-select count(*) from orc_ppd where s <=> "";
+select count(*) from orc_ppd_n1 where s = "zach young";
+select count(*) from orc_ppd_n1 where s <=> "zach zipper";
+select count(*) from orc_ppd_n1 where s <=> "";
-- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s is null;
+select count(*) from orc_ppd_n1 where s is null;
-- INPUT_RECORDS: 2100
-select count(*) from orc_ppd where s is not null;
+select count(*) from orc_ppd_n1 where s is not null;
-- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s = cast("zach young" as char(50));
+select count(*) from orc_ppd_n1 where s = cast("zach young" as char(50));
-- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where s = cast("zach young" as char(10));
-select count(*) from orc_ppd where s = cast("zach young" as varchar(10));
-select count(*) from orc_ppd where s = cast("zach young" as varchar(50));
+select count(*) from orc_ppd_n1 where s = cast("zach young" as char(10));
+select count(*) from orc_ppd_n1 where s = cast("zach young" as varchar(10));
+select count(*) from orc_ppd_n1 where s = cast("zach young" as varchar(50));
-- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s < "b";
+select count(*) from orc_ppd_n1 where s < "b";
-- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s > "alice" and s < "bob";
+select count(*) from orc_ppd_n1 where s > "alice" and s < "bob";
-- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s in ("alice allen", "");
+select count(*) from orc_ppd_n1 where s in ("alice allen", "");
-- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s between "" and "alice allen";
+select count(*) from orc_ppd_n1 where s between "" and "alice allen";
-- INPUT_RECORDS: 100 (1 row group)
-select count(*) from orc_ppd where s between "zz" and "zzz";
+select count(*) from orc_ppd_n1 where s between "zz" and "zzz";
-- INPUT_RECORDS: 1100 (2 row groups)
-select count(*) from orc_ppd where s between "zach zipper" and "zzz";
+select count(*) from orc_ppd_n1 where s between "zach zipper" and "zzz";
-- bloom filter tests
-- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s = "hello world";
-select count(*) from orc_ppd where s <=> "apache hive";
-select count(*) from orc_ppd where s IN ("a", "z");
+select count(*) from orc_ppd_n1 where s = "hello world";
+select count(*) from orc_ppd_n1 where s <=> "apache hive";
+select count(*) from orc_ppd_n1 where s IN ("a", "z");
-- INPUT_RECORDS: 100
-select count(*) from orc_ppd where s = "sarah ovid";
+select count(*) from orc_ppd_n1 where s = "sarah ovid";
-- INPUT_RECORDS: 1100
-select count(*) from orc_ppd where s = "wendy king";
+select count(*) from orc_ppd_n1 where s = "wendy king";
-- INPUT_RECORDS: 1000
-select count(*) from orc_ppd where s = "wendy king" and t < 0;
+select count(*) from orc_ppd_n1 where s = "wendy king" and t < 0;
-- INPUT_RECORDS: 100
-select count(*) from orc_ppd where s = "wendy king" and t > 100;
+select count(*) from orc_ppd_n1 where s = "wendy king" and t > 100;
diff --git a/ql/src/test/queries/clientpositive/orc_llap_nonvector.q b/ql/src/test/queries/clientpositive/orc_llap_nonvector.q
index 6fd676dc3a..4dfb259005 100644
--- a/ql/src/test/queries/clientpositive/orc_llap_nonvector.q
+++ b/ql/src/test/queries/clientpositive/orc_llap_nonvector.q
@@ -12,12 +12,12 @@ SET hive.optimize.index.filter=true;
set hive.auto.convert.join=false;
set hive.fetch.task.conversion=none;
-DROP TABLE orc_create_staging;
+DROP TABLE orc_create_staging_n3;
DROP TABLE orc_create_complex;
DROP TABLE orc_llap_nonvector;
-CREATE TABLE orc_create_staging (
+CREATE TABLE orc_create_staging_n3 (
str STRING,
mp MAP<STRING,STRING>,
lst ARRAY<STRING>,
@@ -26,7 +26,7 @@ CREATE TABLE orc_create_staging (
FIELDS TERMINATED BY '|'
COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':';
-LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging;
+LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n3;
create table orc_llap_nonvector stored as orc as select *, rand(1234) rdm from alltypesorc order by rdm;
@@ -41,5 +41,5 @@ explain select cint, cstring1 from orc_llap_nonvector limit 1025;
select cint, cstring1 from orc_llap_nonvector limit 1025;
-DROP TABLE orc_create_staging;
+DROP TABLE orc_create_staging_n3;
DROP TABLE orc_llap_nonvector;
diff --git a/ql/src/test/queries/clientpositive/orc_merge1.q b/ql/src/test/queries/clientpositive/orc_merge1.q
index a4f3861002..41e604faca 100644
--- a/ql/src/test/queries/clientpositive/orc_merge1.q
+++ b/ql/src/test/queries/clientpositive/orc_merge1.q
@@ -20,28 +20,28 @@ set hive.merge.sparkfiles=false;
-- SORT_QUERY_RESULTS
-DROP TABLE orcfile_merge1;
-DROP TABLE orcfile_merge1b;
-DROP TABLE orcfile_merge1c;
+DROP TABLE orcfile_merge1_n1;
+DROP TABLE orcfile_merge1b_n1;
+DROP TABLE orcfile_merge1c_n1;
-CREATE TABLE orcfile_merge1 (key INT, value STRING)
+CREATE TABLE orcfile_merge1_n1 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
-CREATE TABLE orcfile_merge1b (key INT, value STRING)
+CREATE TABLE orcfile_merge1b_n1 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
-CREATE TABLE orcfile_merge1c (key INT, value STRING)
+CREATE TABLE orcfile_merge1c_n1 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
-- merge disabled
EXPLAIN
- INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ INSERT OVERWRITE TABLE orcfile_merge1_n1 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
-INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1_n1 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n1/ds=1/part=0/;
set hive.merge.tezfiles=true;
set hive.merge.mapfiles=true;
@@ -49,50 +49,50 @@ set hive.merge.mapredfiles=true;
set hive.merge.sparkfiles=true;
-- auto-merge slow way
EXPLAIN
- INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
-INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b_n1/ds=1/part=0/;
set hive.merge.orcfile.stripe.level=true;
-- auto-merge fast way
EXPLAIN
- INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
-INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c_n1/ds=1/part=0/;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-- Verify
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
- FROM orcfile_merge1 WHERE ds='1'
+ FROM orcfile_merge1_n1 WHERE ds='1'
) t;
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
- FROM orcfile_merge1b WHERE ds='1'
+ FROM orcfile_merge1b_n1 WHERE ds='1'
) t;
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
- FROM orcfile_merge1c WHERE ds='1'
+ FROM orcfile_merge1c_n1 WHERE ds='1'
) t;
-select count(*) from orcfile_merge1;
-select count(*) from orcfile_merge1b;
-select count(*) from orcfile_merge1c;
+select count(*) from orcfile_merge1_n1;
+select count(*) from orcfile_merge1b_n1;
+select count(*) from orcfile_merge1c_n1;
-DROP TABLE orcfile_merge1;
-DROP TABLE orcfile_merge1b;
-DROP TABLE orcfile_merge1c;
+DROP TABLE orcfile_merge1_n1;
+DROP TABLE orcfile_merge1b_n1;
+DROP TABLE orcfile_merge1c_n1;
diff --git a/ql/src/test/queries/clientpositive/orc_merge11.q b/ql/src/test/queries/clientpositive/orc_merge11.q
index 746ba21d7f..d5add84183 100644
--- a/ql/src/test/queries/clientpositive/orc_merge11.q
+++ b/ql/src/test/queries/clientpositive/orc_merge11.q
@@ -1,19 +1,19 @@
set hive.vectorized.execution.enabled=false;
-DROP TABLE orcfile_merge1;
-DROP TABLE orc_split_elim;
+DROP TABLE orcfile_merge1_n2;
+DROP TABLE orc_split_elim_n0;
-create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0;
-create table orcfile_merge1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096");
+create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096");
-insert overwrite table orcfile_merge1 select * from orc_split_elim;
-insert into table orcfile_merge1 select * from orc_split_elim;
+insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0;
+insert into table orcfile_merge1_n2 select * from orc_split_elim_n0;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n2/;
set hive.merge.tezfiles=true;
set hive.merge.mapfiles=true;
@@ -25,25 +25,25 @@ set tez.grouping.split-count=1;
set hive.exec.orc.default.buffer.size=120;
SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
-select * from orcfile_merge1 limit 1;
+select * from orcfile_merge1_n2 limit 1;
SET hive.exec.post.hooks=;
-- concatenate
-ALTER TABLE orcfile_merge1 CONCATENATE;
+ALTER TABLE orcfile_merge1_n2 CONCATENATE;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n2/;
-select count(*) from orc_split_elim;
+select count(*) from orc_split_elim_n0;
-- will have double the number of rows
-select count(*) from orcfile_merge1;
+select count(*) from orcfile_merge1_n2;
SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
-select * from orcfile_merge1 limit 1;
+select * from orcfile_merge1_n2 limit 1;
SET hive.exec.post.hooks=;
SET mapreduce.job.reduces=2;
-INSERT OVERWRITE DIRECTORY 'output' stored as orcfile select * from orc_split_elim;
+INSERT OVERWRITE DIRECTORY 'output' stored as orcfile select * from orc_split_elim_n0;
-DROP TABLE orc_split_elim;
-DROP TABLE orcfile_merge1;
+DROP TABLE orc_split_elim_n0;
+DROP TABLE orcfile_merge1_n2;
diff --git a/ql/src/test/queries/clientpositive/orc_merge2.q b/ql/src/test/queries/clientpositive/orc_merge2.q
index e6fdf39105..2e50a3557d 100644
--- a/ql/src/test/queries/clientpositive/orc_merge2.q
+++ b/ql/src/test/queries/clientpositive/orc_merge2.q
@@ -8,27 +8,27 @@ set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.merge.sparkfiles=true;
-DROP TABLE orcfile_merge2a;
+DROP TABLE orcfile_merge2a_n0;
-CREATE TABLE orcfile_merge2a (key INT, value STRING)
+CREATE TABLE orcfile_merge2a_n0 (key INT, value STRING)
PARTITIONED BY (one string, two string, three string)
STORED AS ORC;
-EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a_n0 PARTITION (one='1', two, three)
SELECT key, value, PMOD(HASH(key), 10) as two,
PMOD(HASH(value), 10) as three
FROM src;
-INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+INSERT OVERWRITE TABLE orcfile_merge2a_n0 PARTITION (one='1', two, three)
SELECT key, value, PMOD(HASH(key), 10) as two,
PMOD(HASH(value), 10) as three
FROM src;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge2a/one=1/two=0/three=2/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge2a_n0/one=1/two=0/three=2/;
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
- FROM orcfile_merge2a
+ FROM orcfile_merge2a_n0
) t;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -39,5 +39,5 @@ SELECT SUM(HASH(c)) FROM (
FROM src
) t;
-DROP TABLE orcfile_merge2a;
+DROP TABLE orcfile_merge2a_n0;
diff --git a/ql/src/test/queries/clientpositive/orc_merge3.q b/ql/src/test/queries/clientpositive/orc_merge3.q
index 730e4a30f6..8b7954564f 100644
--- a/ql/src/test/queries/clientpositive/orc_merge3.q
+++ b/ql/src/test/queries/clientpositive/orc_merge3.q
@@ -6,38 +6,38 @@ set hive.explain.user=false;
set hive.merge.orcfile.stripe.level=true;
set hive.merge.sparkfiles=true;
-DROP TABLE orcfile_merge3a;
-DROP TABLE orcfile_merge3b;
+DROP TABLE orcfile_merge3a_n0;
+DROP TABLE orcfile_merge3b_n0;
-CREATE TABLE orcfile_merge3a (key int, value string)
+CREATE TABLE orcfile_merge3a_n0 (key int, value string)
PARTITIONED BY (ds string) STORED AS TEXTFILE;
-CREATE TABLE orcfile_merge3b (key int, value string) STORED AS ORC;
+CREATE TABLE orcfile_merge3b_n0 (key int, value string) STORED AS ORC;
-INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+INSERT OVERWRITE TABLE orcfile_merge3a_n0 PARTITION (ds='1')
SELECT * FROM src;
-INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+INSERT OVERWRITE TABLE orcfile_merge3a_n0 PARTITION (ds='2')
SELECT * FROM src;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
- SELECT key, value FROM orcfile_merge3a;
+EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b_n0
+ SELECT key, value FROM orcfile_merge3a_n0;
-INSERT OVERWRITE TABLE orcfile_merge3b
- SELECT key, value FROM orcfile_merge3a;
+INSERT OVERWRITE TABLE orcfile_merge3b_n0
+ SELECT key, value FROM orcfile_merge3a_n0;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3b/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3b_n0/;
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
- FROM orcfile_merge3a
+ FROM orcfile_merge3a_n0
) t;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
- FROM orcfile_merge3b
+ FROM orcfile_merge3b_n0
) t;
-DROP TABLE orcfile_merge3a;
-DROP TABLE orcfile_merge3b;
+DROP TABLE orcfile_merge3a_n0;
+DROP TABLE orcfile_merge3b_n0;
diff --git a/ql/src/test/queries/clientpositive/orc_merge5.q b/ql/src/test/queries/clientpositive/orc_merge5.q
index 810f1de23d..190c6e07c5 100644
--- a/ql/src/test/queries/clientpositive/orc_merge5.q
+++ b/ql/src/test/queries/clientpositive/orc_merge5.q
@@ -3,10 +3,10 @@ set hive.explain.user=false;
-- SORT_QUERY_RESULTS
-create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n5;
SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SET mapred.min.split.size=1000;
@@ -22,13 +22,13 @@ set tez.grouping.max-size=50000;
set hive.merge.sparkfiles=false;
-- 3 mappers
-explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
+insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
-- 3 files total
-analyze table orc_merge5b compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
-select * from orc_merge5b;
+analyze table orc_merge5b_n0 compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b_n0/;
+select * from orc_merge5b_n0;
set hive.merge.orcfile.stripe.level=true;
set hive.merge.tezfiles=true;
@@ -37,13 +37,13 @@ set hive.merge.mapredfiles=true;
set hive.merge.sparkfiles=true;
-- 3 mappers
-explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
+insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
-- 1 file after merging
-analyze table orc_merge5b compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
-select * from orc_merge5b;
+analyze table orc_merge5b_n0 compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b_n0/;
+select * from orc_merge5b_n0;
set hive.merge.orcfile.stripe.level=false;
set hive.merge.tezfiles=false;
@@ -51,17 +51,17 @@ set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
set hive.merge.sparkfiles=false;
-insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-analyze table orc_merge5b compute statistics noscan;
${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/; -select * from orc_merge5b; +analyze table orc_merge5b_n0 compute statistics noscan; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b_n0/; +select * from orc_merge5b_n0; diff --git a/ql/src/test/queries/clientpositive/orc_merge6.q b/ql/src/test/queries/clientpositive/orc_merge6.q index af80f47e66..fabe656759 100644 --- a/ql/src/test/queries/clientpositive/orc_merge6.q +++ b/ql/src/test/queries/clientpositive/orc_merge6.q @@ -4,10 +4,10 @@ set hive.explain.user=false; -- SORT_QUERY_RESULTS -- orc file merge tests for static partitions -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; -create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc; +create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; +create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc; -load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5; +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n4; SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; SET mapred.min.split.size=1000; @@ -23,17 +23,17 @@ set tez.grouping.max-size=50000; set hive.merge.sparkfiles=false; -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +explain insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13; +insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13; +insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13; -- 3 files total -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan; -analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/; -show partitions orc_merge5a; -select * from orc_merge5a; +analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan; +analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2000/hour=24/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2001/hour=24/; +show partitions orc_merge5a_n1; +select * from orc_merge5a_n1; set hive.merge.orcfile.stripe.level=true; set hive.merge.tezfiles=true; @@ -42,17 +42,17 @@ set hive.merge.mapredfiles=true; set hive.merge.sparkfiles=true; -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts 
from orc_merge5 where userid<=13; -insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +explain insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13; +insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13; +insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13; -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan; -analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/; -show partitions orc_merge5a; -select * from orc_merge5a; +analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan; +analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2000/hour=24/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2001/hour=24/; +show partitions orc_merge5a_n1; +select * from orc_merge5a_n1; set hive.merge.orcfile.stripe.level=false; set hive.merge.tezfiles=false; @@ -60,25 +60,25 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; set hive.merge.sparkfiles=false; -insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan; -analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/; -show partitions orc_merge5a; -select * from orc_merge5a; +insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13; +insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13; +analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan; +analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2000/hour=24/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2001/hour=24/; +show partitions orc_merge5a_n1; +select * from orc_merge5a_n1; set hive.merge.orcfile.stripe.level=true; -explain alter table orc_merge5a partition(year="2000",hour=24) concatenate; -alter table orc_merge5a partition(year="2000",hour=24) concatenate; -alter table orc_merge5a partition(year="2001",hour=24) concatenate; +explain alter table orc_merge5a_n1 partition(year="2000",hour=24) concatenate; +alter table orc_merge5a_n1 
partition(year="2000",hour=24) concatenate; +alter table orc_merge5a_n1 partition(year="2001",hour=24) concatenate; -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan; -analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/; -show partitions orc_merge5a; -select * from orc_merge5a; +analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan; +analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2000/hour=24/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2001/hour=24/; +show partitions orc_merge5a_n1; +select * from orc_merge5a_n1; diff --git a/ql/src/test/queries/clientpositive/orc_merge7.q b/ql/src/test/queries/clientpositive/orc_merge7.q index 2acff2c715..260de83e6e 100644 --- a/ql/src/test/queries/clientpositive/orc_merge7.q +++ b/ql/src/test/queries/clientpositive/orc_merge7.q @@ -5,10 +5,10 @@ set hive.explain.user=false; -- orc merge file tests for dynamic partition case -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; -create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc; +create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; +create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc; -load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5; +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n2; SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; SET mapred.min.split.size=1000; @@ -27,17 +27,17 @@ set hive.optimize.sort.dynamic.partition=false; set hive.merge.sparkfiles=false; -- 3 mappers -explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; -insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; -insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; +explain insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2; +insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2; +insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2; -- 3 files total -analyze table orc_merge5a partition(st=80.0) compute statistics noscan; -analyze table orc_merge5a partition(st=0.8) compute statistics noscan; +analyze table orc_merge5a_n0 partition(st=80.0) compute statistics noscan; +analyze table orc_merge5a_n0 partition(st=0.8) compute statistics noscan; dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; -show partitions orc_merge5a; -select * from orc_merge5a where userid<=13; +show partitions orc_merge5a_n0; +select * from 
orc_merge5a_n0 where userid<=13; set hive.merge.orcfile.stripe.level=true; set hive.merge.tezfiles=true; @@ -46,17 +46,17 @@ set hive.merge.mapredfiles=true; set hive.merge.sparkfiles=true; -- 3 mappers -explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; -insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; -insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; +explain insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2; +insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2; +insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2; -- 1 file after merging -analyze table orc_merge5a partition(st=80.0) compute statistics noscan; -analyze table orc_merge5a partition(st=0.8) compute statistics noscan; +analyze table orc_merge5a_n0 partition(st=80.0) compute statistics noscan; +analyze table orc_merge5a_n0 partition(st=0.8) compute statistics noscan; dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; -show partitions orc_merge5a; -select * from orc_merge5a where userid<=13; +show partitions orc_merge5a_n0; +select * from orc_merge5a_n0 where userid<=13; set hive.merge.orcfile.stripe.level=false; set hive.merge.tezfiles=false; @@ -64,25 +64,25 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; set hive.merge.sparkfiles=false; -insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; -insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; -analyze table orc_merge5a partition(st=80.0) compute statistics noscan; -analyze table orc_merge5a partition(st=0.8) compute statistics noscan; +insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2; +insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2; +analyze table orc_merge5a_n0 partition(st=80.0) compute statistics noscan; +analyze table orc_merge5a_n0 partition(st=0.8) compute statistics noscan; dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; -show partitions orc_merge5a; -select * from orc_merge5a where userid<=13; +show partitions orc_merge5a_n0; +select * from orc_merge5a_n0 where userid<=13; set hive.merge.orcfile.stripe.level=true; -explain alter table orc_merge5a partition(st=80.0) concatenate; -alter table orc_merge5a partition(st=80.0) concatenate; -alter table orc_merge5a partition(st=0.8) concatenate; +explain alter table orc_merge5a_n0 partition(st=80.0) concatenate; +alter table orc_merge5a_n0 partition(st=80.0) concatenate; +alter table orc_merge5a_n0 partition(st=0.8) concatenate; -- 1 file after merging -analyze table orc_merge5a partition(st=80.0) compute statistics noscan; -analyze table orc_merge5a partition(st=0.8) compute statistics noscan; +analyze table orc_merge5a_n0 partition(st=80.0) compute statistics noscan; +analyze table orc_merge5a_n0 partition(st=0.8) compute statistics 
noscan; dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/; dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/; -show partitions orc_merge5a; -select * from orc_merge5a where userid<=13; +show partitions orc_merge5a_n0; +select * from orc_merge5a_n0 where userid<=13; diff --git a/ql/src/test/queries/clientpositive/orc_merge8.q b/ql/src/test/queries/clientpositive/orc_merge8.q index d24b2e65be..b6a4260b04 100644 --- a/ql/src/test/queries/clientpositive/orc_merge8.q +++ b/ql/src/test/queries/clientpositive/orc_merge8.q @@ -1,6 +1,6 @@ set hive.vectorized.execution.enabled=false; -create table if not exists alltypes ( +create table if not exists alltypes_n1 ( bo boolean, ti tinyint, si smallint, @@ -21,10 +21,10 @@ create table if not exists alltypes ( collection items terminated by ',' map keys terminated by ':' stored as textfile; -create table alltypes_orc like alltypes; -alter table alltypes_orc set fileformat orc; +create table alltypes_orc_n1 like alltypes_n1; +alter table alltypes_orc_n1 set fileformat orc; -load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes; +load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n1; SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; SET hive.optimize.index.filter=true; @@ -34,10 +34,10 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; set hive.merge.sparkfiles=false; -insert overwrite table alltypes_orc select * from alltypes; -insert into table alltypes_orc select * from alltypes; +insert overwrite table alltypes_orc_n1 select * from alltypes_n1; +insert into table alltypes_orc_n1 select * from alltypes_n1; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/alltypes_orc/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/alltypes_orc_n1/; set hive.merge.orcfile.stripe.level=true; set hive.merge.tezfiles=true; @@ -45,6 +45,6 @@ set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; set hive.merge.sparkfiles=true; -alter table alltypes_orc concatenate; +alter table alltypes_orc_n1 concatenate; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/alltypes_orc/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/alltypes_orc_n1/; diff --git a/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q b/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q index 9cfff44741..5d1e8c8aab 100644 --- a/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q +++ b/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q @@ -22,28 +22,28 @@ set hive.metastore.warehouse.dir=pfile://${system:test.tmp.dir}/orc_merge_diff_f -- SORT_QUERY_RESULTS -DROP TABLE orcfile_merge1; -DROP TABLE orcfile_merge1b; -DROP TABLE orcfile_merge1c; +DROP TABLE orcfile_merge1_n0; +DROP TABLE orcfile_merge1b_n0; +DROP TABLE orcfile_merge1c_n0; -CREATE TABLE orcfile_merge1 (key INT, value STRING) +CREATE TABLE orcfile_merge1_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC; -CREATE TABLE orcfile_merge1b (key INT, value STRING) +CREATE TABLE orcfile_merge1b_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC; -CREATE TABLE orcfile_merge1c (key INT, value STRING) +CREATE TABLE orcfile_merge1c_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC; -- merge disabled EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -INSERT 
OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) +INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n0/ds=1/part=0/; set hive.merge.tezfiles=true; set hive.merge.mapfiles=true; @@ -51,50 +51,50 @@ set hive.merge.mapredfiles=true; set hive.merge.sparkfiles=true; -- auto-merge slow way EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) +INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b/ds=1/part=0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b_n0/ds=1/part=0/; set hive.merge.orcfile.stripe.level=true; -- auto-merge fast way EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) +INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c/ds=1/part=0/; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c_n0/ds=1/part=0/; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- Verify SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1 WHERE ds='1' + FROM orcfile_merge1_n0 WHERE ds='1' ) t; SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1b WHERE ds='1' + FROM orcfile_merge1b_n0 WHERE ds='1' ) t; SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1c WHERE ds='1' + FROM orcfile_merge1c_n0 WHERE ds='1' ) t; -select count(*) from orcfile_merge1; -select count(*) from orcfile_merge1b; -select count(*) from orcfile_merge1c; +select count(*) from orcfile_merge1_n0; +select count(*) from orcfile_merge1b_n0; +select count(*) from orcfile_merge1c_n0; -DROP TABLE orcfile_merge1; -DROP TABLE orcfile_merge1b; -DROP TABLE orcfile_merge1c; +DROP TABLE orcfile_merge1_n0; +DROP TABLE orcfile_merge1b_n0; +DROP TABLE orcfile_merge1c_n0; diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q index 60458d0a93..aba4617ca6 100644 --- a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q +++ b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q @@ -3,10 +3,10 @@ set hive.explain.user=false; -- SORT_QUERY_RESULTS -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; +create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; -load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5; +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n3; SET 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.merge.orcfile.stripe.level=false; @@ -15,15 +15,15 @@ set hive.merge.mapredfiles=false; set hive.merge.sparkfiles=false; -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13; set hive.exec.orc.write.format=0.12; -insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13; set hive.exec.orc.write.format=0.11; -insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13; -- 5 files total analyze table orc_merge5b compute statistics noscan; diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q b/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q index 2396194daa..17ccb00533 100644 --- a/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q +++ b/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q @@ -2,7 +2,7 @@ SET hive.vectorized.execution.enabled=false; set hive.metastore.disallow.incompatible.col.type.changes=false; -CREATE TABLE orc_create_staging ( +CREATE TABLE orc_create_staging_n2 ( str STRING, mp MAP<STRING,STRING>, lst ARRAY<STRING>, @@ -12,9 +12,9 @@ CREATE TABLE orc_create_staging ( COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'; -LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging; +LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n2; -CREATE TABLE orc_create_complex ( +CREATE TABLE orc_create_complex_n2 ( str STRING, mp MAP<STRING,STRING>, lst ARRAY<STRING>, @@ -22,28 +22,28 @@ CREATE TABLE orc_create_complex ( val INT ) STORED AS ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="1000", "orc.compress.size"="10000"); -INSERT OVERWRITE TABLE orc_create_complex SELECT str,mp,lst,strct,0 FROM orc_create_staging; -INSERT INTO TABLE orc_create_complex SELECT str,mp,lst,strct,0 FROM orc_create_staging; +INSERT OVERWRITE TABLE orc_create_complex_n2 SELECT str,mp,lst,strct,0 FROM orc_create_staging_n2; +INSERT INTO TABLE orc_create_complex_n2 SELECT str,mp,lst,strct,0 FROM orc_create_staging_n2; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex/; -select sum(hash(*)) from orc_create_complex;
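-- Note (illustrative sketch, not part of the original test): the two INSERTs above leave two
-- ORC files behind, and because both were written with the same schema, stripe-level
-- CONCATENATE can stitch them together without re-decoding rows. The check pattern is:
--   select sum(hash(*)) from orc_create_complex_n2;  -- checksum before the merge
--   ALTER TABLE orc_create_complex_n2 CONCATENATE;   -- stripe-level merge
--   select sum(hash(*)) from orc_create_complex_n2;  -- same checksum, fewer files in dfs -ls
+dfs -ls 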
${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex_n2/; +select sum(hash(*)) from orc_create_complex_n2; -- will be merged as the schema is the same -ALTER TABLE orc_create_complex CONCATENATE; +ALTER TABLE orc_create_complex_n2 CONCATENATE; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex/; -select sum(hash(*)) from orc_create_complex; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex_n2/; +select sum(hash(*)) from orc_create_complex_n2; -ALTER TABLE orc_create_complex +ALTER TABLE orc_create_complex_n2 CHANGE COLUMN strct strct STRUCT<A:STRING,B:STRING,C:STRING>; -INSERT INTO TABLE orc_create_complex SELECT str,mp,lst,NAMED_STRUCT('A',strct.A,'B',strct.B,'C','c'),0 FROM orc_create_staging; +INSERT INTO TABLE orc_create_complex_n2 SELECT str,mp,lst,NAMED_STRUCT('A',strct.A,'B',strct.B,'C','c'),0 FROM orc_create_staging_n2; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex/; -select sum(hash(*)) from orc_create_complex; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex_n2/; +select sum(hash(*)) from orc_create_complex_n2; -- schema is different for both files, will not be merged -ALTER TABLE orc_create_complex CONCATENATE; +ALTER TABLE orc_create_complex_n2 CONCATENATE; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex/; -select sum(hash(*)) from orc_create_complex; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex_n2/; +select sum(hash(*)) from orc_create_complex_n2; diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q b/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q index 139098cc90..04bb978d85 100644 --- a/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q +++ b/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q @@ -2,8 +2,8 @@ set hive.vectorized.execution.enabled=false; -DROP TABLE part_orc; -CREATE TABLE part_orc( +DROP TABLE part_orc_n0; +CREATE TABLE part_orc_n0( p_partkey int, p_name string, p_mfgr string, @@ -17,18 +17,18 @@ CREATE TABLE part_orc( STORED AS ORC; -- writer version for this file is HIVE_13083 -LOAD DATA LOCAL INPATH '../../data/files/part.orc' OVERWRITE INTO TABLE part_orc; +LOAD DATA LOCAL INPATH '../../data/files/part.orc' OVERWRITE INTO TABLE part_orc_n0; -create table part_orc_staging as select * from part_orc; +create table part_orc_staging as select * from part_orc_n0; -- will be written with current writer version -insert into table part_orc select * from part_orc_staging; +insert into table part_orc_n0 select * from part_orc_staging; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/part_orc/; -select sum(hash(*)) from part_orc; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/part_orc_n0/; +select sum(hash(*)) from part_orc_n0; -- will not be merged as writer version is not matching -ALTER TABLE part_orc CONCATENATE; +ALTER TABLE part_orc_n0 CONCATENATE; -dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/part_orc/; -select sum(hash(*)) from part_orc; +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/part_orc_n0/; +select sum(hash(*)) from part_orc_n0;
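-- Note (illustrative, not part of the original test): CONCATENATE also compares the writer
-- version recorded in each ORC file footer, so the HIVE_13083-era file loaded above and the
-- file just written by the current writer are deliberately left unmerged. One way to see the
-- recorded version, assuming a single bucket file named 000000_0, is the ORC dump tool:
--   hive --orcfiledump ${hiveconf:hive.metastore.warehouse.dir}/part_orc_n0/000000_0
diff --git a/ql/src/test/queries/clientpositive/orc_min_max.q b/ql/src/test/queries/clientpositive/orc_min_max.q index 58a35d0350..18a21b0520 100644 --- a/ql/src/test/queries/clientpositive/orc_min_max.q +++ b/ql/src/test/queries/clientpositive/orc_min_max.q @@ -1,6 +1,6 @@ set hive.vectorized.execution.enabled=false; -create table if not exists alltypes ( +create table if not exists alltypes_n2 ( bo boolean, ti tinyint, 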
si smallint, @@ -21,14 +21,14 @@ create table if not exists alltypes ( collection items terminated by ',' map keys terminated by ':' stored as textfile; -create table alltypes_orc like alltypes; -alter table alltypes_orc set fileformat orc; +create table alltypes_orc_n3 like alltypes_n2; +alter table alltypes_orc_n3 set fileformat orc; -load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes; +load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n2; -insert overwrite table alltypes_orc select * from alltypes; +insert overwrite table alltypes_orc_n3 select * from alltypes_n2; -select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes; +select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_n2; -select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc; +select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc_n3; diff --git a/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q b/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q index 700fdd4e7e..91dc873a1b 100644 --- a/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q +++ b/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q @@ -4,12 +4,12 @@ set hive.exec.dynamic.partition.mode = nonstrict; set hive.strict.checks.cartesian.product=false; -- First, create source tables -DROP TABLE IF EXISTS dummy; -CREATE TABLE dummy (i int); -INSERT INTO TABLE dummy VALUES (42); +DROP TABLE IF EXISTS dummy_n4; +CREATE TABLE dummy_n4 (i int); +INSERT INTO TABLE dummy_n4 VALUES (42); -DROP TABLE IF EXISTS nested_tbl_1; -CREATE TABLE nested_tbl_1 ( +DROP TABLE IF EXISTS nested_tbl_1_n0; +CREATE TABLE nested_tbl_1_n0 ( a int, s1 struct<f1: boolean, f2: string, f3: struct<f4: int, f5: double>, f6: int>, s2 struct<f7: string, f8: struct<f9: boolean, f10: array<int>, f11: map<string, boolean>>>, @@ -19,7 +19,7 @@ CREATE TABLE nested_tbl_1 ( s6 map<string, struct<f20: array<struct<f21: struct<f22: int>>>>> ) STORED AS ORC; -INSERT INTO TABLE nested_tbl_1 SELECT +INSERT INTO TABLE nested_tbl_1_n0 SELECT 1, named_struct('f1', false, 'f2', 'foo', 'f3', named_struct('f4', 4, 'f5', cast(5.0 as double)), 'f6', 4), named_struct('f7', 'f7', 'f8', named_struct('f9', true, 'f10', array(10, 11), 'f11', map('key1', true, 'key2', false))), named_struct('f12', array(named_struct('f13', 'foo', 'f14', 14), named_struct('f13', 'bar', 'f14', 28))), @@ -27,12 +27,12 @@ INSERT INTO TABLE nested_tbl_1 SELECT named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))), map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))), 'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2))))) -FROM dummy; +FROM dummy_n4;
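-- Note (illustrative, not part of the original test): nested column pruning is what the
-- EXPLAIN statements below exercise; for a query touching a single leaf field, such as
--   SELECT s1.f3.f4 FROM nested_tbl_1_n0;
-- the TableScan in the plan should list only the s1.f3.f4 path rather than all of s1,
-- which is how the expected .out files confirm that the ORC reader can skip the
-- unreferenced struct members.
-DROP TABLE IF EXISTS nested_tbl_2; -CREATE TABLE nested_tbl_2 LIKE nested_tbl_1; +DROP TABLE IF EXISTS nested_tbl_2_n0; +CREATE TABLE nested_tbl_2_n0 LIKE 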
nested_tbl_1_n0; -INSERT INTO TABLE nested_tbl_2 SELECT +INSERT INTO TABLE nested_tbl_2_n0 SELECT 2, named_struct('f1', true, 'f2', 'bar', 'f3', named_struct('f4', 4, 'f5', cast(6.5 as double)), 'f6', 4), named_struct('f7', 'f72', 'f8', named_struct('f9', false, 'f10', array(20, 22), 'f11', map('key3', true, 'key4', false))), named_struct('f12', array(named_struct('f13', 'bar', 'f14', 28), named_struct('f13', 'foo', 'f14', 56))), @@ -40,175 +40,175 @@ INSERT INTO TABLE nested_tbl_2 SELECT named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))), map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))), 'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4))))) -FROM dummy; +FROM dummy_n4; -- Testing only select statements -EXPLAIN SELECT a FROM nested_tbl_1; -SELECT a FROM nested_tbl_1; +EXPLAIN SELECT a FROM nested_tbl_1_n0; +SELECT a FROM nested_tbl_1_n0; -EXPLAIN SELECT s1.f1 FROM nested_tbl_1; -SELECT s1.f1 FROM nested_tbl_1; +EXPLAIN SELECT s1.f1 FROM nested_tbl_1_n0; +SELECT s1.f1 FROM nested_tbl_1_n0; -EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1; -SELECT s1.f1, s1.f2 FROM nested_tbl_1; +EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1_n0; +SELECT s1.f1, s1.f2 FROM nested_tbl_1_n0; -- In this case 's1.f3' and 's1.f3.f4' should be merged -EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1; -SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1; +EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n0; +SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n0; -- Testing select array and index shifting -EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1; -SELECT s1.f3.f5 FROM nested_tbl_1; +EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n0; +SELECT s1.f3.f5 FROM nested_tbl_1_n0; -- Testing select from multiple structs -EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1; -SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1; +EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n0; +SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n0; -- Testing select with filter -EXPLAIN SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE; -SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE; +EXPLAIN SELECT s1.f2 FROM nested_tbl_1_n0 WHERE s1.f1 = FALSE; +SELECT s1.f2 FROM nested_tbl_1_n0 WHERE s1.f1 = FALSE; -EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4; -SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4; +EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n0 WHERE s1.f3.f4 = 4; +SELECT s1.f3.f5 FROM nested_tbl_1_n0 WHERE s1.f3.f4 = 4; -EXPLAIN SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE; -SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE; +EXPLAIN SELECT s2.f8 FROM nested_tbl_1_n0 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE; +SELECT s2.f8 FROM nested_tbl_1_n0 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE; -- Testing lateral view -EXPLAIN SELECT col1, col2 FROM nested_tbl_1 +EXPLAIN SELECT col1, col2 FROM nested_tbl_1_n0 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2; -SELECT col1, col2 FROM nested_tbl_1 +SELECT col1, col2 FROM nested_tbl_1_n0 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2; -- Testing UDFs -EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1; -SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1; +EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) 
FROM nested_tbl_1_n0; +SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n0; -- Testing aggregations -EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5; -SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5; +EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3.f5; +SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3.f5; -EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3; -SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3; +EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3; +SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3; -EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3; -SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3; +EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 ORDER BY s1.f3; +SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 ORDER BY s1.f3; -- Testing joins EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_2_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE; SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_2_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE; EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE; SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE; EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE; SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE; EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f1 <> t2.s2.f8.f9; SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f1 <> t2.s2.f8.f9; EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9; SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9; -- Testing insert with aliases -DROP TABLE IF EXISTS nested_tbl_3; -CREATE TABLE nested_tbl_3 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS ORC; +DROP TABLE IF EXISTS nested_tbl_3_n0; +CREATE TABLE nested_tbl_3_n0 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS ORC; -INSERT OVERWRITE TABLE nested_tbl_3 PARTITION(f3) +INSERT OVERWRITE TABLE nested_tbl_3_n0 PARTITION(f3) SELECT s1.f1 AS f1, S1.f2 AS f2, s1.f6 AS f3 -FROM nested_tbl_1; +FROM nested_tbl_1_n0; -SELECT * FROM nested_tbl_3; +SELECT * FROM nested_tbl_3_n0; -- Testing select struct field from elements in array or map EXPLAIN SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY 
s3.f12[0].f14; SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s3.f12[0].f14; EXPLAIN SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s4['key1'].f15; SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s4['key1'].f15; EXPLAIN SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16[0].f18.f19; SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16[0].f18.f19; EXPLAIN SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16.f18.f19; SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16.f18.f19; EXPLAIN SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20[0].f21.f22; SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20[0].f21.f22; EXPLAIN SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20.f21.f22; SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20.f21.f22; diff --git a/ql/src/test/queries/clientpositive/orc_ppd_basic.q b/ql/src/test/queries/clientpositive/orc_ppd_basic.q index fb2efcee2f..f0b0b96928 100644 --- a/ql/src/test/queries/clientpositive/orc_ppd_basic.q +++ b/ql/src/test/queries/clientpositive/orc_ppd_basic.q @@ -8,7 +8,7 @@ SET hive.cbo.enable=false; SET hive.map.aggr=false; -- disabling map side aggregation as that can lead to different intermediate record counts -CREATE TABLE staging(t tinyint, +CREATE TABLE staging_n7(t tinyint, si smallint, i int, b bigint, @@ -22,10 +22,10 @@ CREATE TABLE staging(t tinyint, ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging; -LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging; +LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n7; +LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging_n7; -CREATE TABLE orc_ppd_staging(t tinyint, +CREATE TABLE orc_ppd_staging_n1(t tinyint, si smallint, i int, b bigint, @@ -40,14 +40,14 @@ CREATE TABLE orc_ppd_staging(t tinyint, bin binary) STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*"); -insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging order by t, s; +insert overwrite table orc_ppd_staging_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n7 order by t, s; -- just to introduce a gap in min/max range for bloom filters. 
The dataset has contiguous values -- which makes it hard to test bloom filters -insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging limit 1; -insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging limit 1; +insert into orc_ppd_staging_n1 select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging_n7 limit 1; +insert into orc_ppd_staging_n1 select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging_n7 limit 1; -CREATE TABLE orc_ppd(t tinyint, +CREATE TABLE orc_ppd_n2(t tinyint, si smallint, i int, b bigint, @@ -62,7 +62,7 @@ CREATE TABLE orc_ppd(t tinyint, bin binary) STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*"); -insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by t, s; +insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n1 order by t, s; SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter; @@ -72,50 +72,50 @@ SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrint -- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19 -- INPUT_RECORDS: 2100 (all row groups) -select count(*) from orc_ppd; +select count(*) from orc_ppd_n2; -- INPUT_RECORDS: 0 (no row groups) -select count(*) from orc_ppd where t > 127; +select count(*) from orc_ppd_n2 where t > 127; -- INPUT_RECORDS: 1000 (1 row group) -select count(*) from orc_ppd where t = 55; -select count(*) from orc_ppd where t <=> 50; -select count(*) from orc_ppd where t <=> 100; +select count(*) from orc_ppd_n2 where t = 55; +select count(*) from orc_ppd_n2 where t <=> 50; +select count(*) from orc_ppd_n2 where t <=> 100; -- INPUT_RECORDS: 2000 (2 row groups) -select count(*) from orc_ppd where t = "54"; +select count(*) from orc_ppd_n2 where t = "54"; -- INPUT_RECORDS: 1000 (1 row group) -select count(*) from orc_ppd where t = -10.0; +select count(*) from orc_ppd_n2 where t = -10.0; -- INPUT_RECORDS: 1000 (1 row group) -select count(*) from orc_ppd where t = cast(53 as float); -select count(*) from orc_ppd where t = cast(53 as double); +select count(*) from orc_ppd_n2 where t = cast(53 as float); +select count(*) from orc_ppd_n2 where t = cast(53 as double); -- INPUT_RECORDS: 2000 (2 row groups) -select count(*) from orc_ppd where t < 100; +select count(*) from orc_ppd_n2 where t < 100; -- INPUT_RECORDS: 1000 (1 row group) -select count(*) from orc_ppd where t < 100 and t > 98; +select count(*) from orc_ppd_n2 where t < 100 and t > 98; -- INPUT_RECORDS: 2000 (2 row groups) -select count(*) from orc_ppd where t <= 100; +select count(*) from orc_ppd_n2 where t <= 100; -- INPUT_RECORDS: 1000 (1 row group) -select count(*) from orc_ppd where t is null; +select count(*) from orc_ppd_n2 where t is null; -- INPUT_RECORDS: 1100 (2 row groups) -select count(*) from orc_ppd where t in (5, 120); +select count(*) from orc_ppd_n2 where t in (5, 120); -- INPUT_RECORDS: 1000 (1 row group) -select count(*) from orc_ppd where t between 60 and 80; +select count(*) from orc_ppd_n2 where t between 60 and 80; -- bloom filter tests -- INPUT_RECORDS: 0 -select count(*) from 
orc_ppd where t = -100; -select count(*) from orc_ppd where t <=> -100; -select count(*) from orc_ppd where t = 125; -select count(*) from orc_ppd where t IN (-100, 125, 200); +select count(*) from orc_ppd_n2 where t = -100; +select count(*) from orc_ppd_n2 where t <=> -100; +select count(*) from orc_ppd_n2 where t = 125; +select count(*) from orc_ppd_n2 where t IN (-100, 125, 200); -- Row group statistics for column s: -- Entry 0: count: 1000 hasNull: false min: max: zach young sum: 12907 positions: 0,0,0 @@ -123,79 +123,79 @@ select count(*) from orc_ppd where t IN (-100, 125, 200); -- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373 -- INPUT_RECORDS: 0 (no row groups) -select count(*) from orc_ppd where s > "zzz"; +select count(*) from orc_ppd_n2 where s > "zzz"; -- INPUT_RECORDS: 1000 (1 row group) -select count(*) from orc_ppd where s = "zach young"; -select count(*) from orc_ppd where s <=> "zach zipper"; -select count(*) from orc_ppd where s <=> ""; +select count(*) from orc_ppd_n2 where s = "zach young"; +select count(*) from orc_ppd_n2 where s <=> "zach zipper"; +select count(*) from orc_ppd_n2 where s <=> ""; -- INPUT_RECORDS: 0 -select count(*) from orc_ppd where s is null; +select count(*) from orc_ppd_n2 where s is null; -- INPUT_RECORDS: 2100 -select count(*) from orc_ppd where s is not null; +select count(*) from orc_ppd_n2 where s is not null; -- INPUT_RECORDS: 0 -select count(*) from orc_ppd where s = cast("zach young" as char(50)); +select count(*) from orc_ppd_n2 where s = cast("zach young" as char(50)); -- INPUT_RECORDS: 1000 (1 row group) -select count(*) from orc_ppd where s = cast("zach young" as char(10)); -select count(*) from orc_ppd where s = cast("zach young" as varchar(10)); -select count(*) from orc_ppd where s = cast("zach young" as varchar(50)); +select count(*) from orc_ppd_n2 where s = cast("zach young" as char(10)); +select count(*) from orc_ppd_n2 where s = cast("zach young" as varchar(10)); +select count(*) from orc_ppd_n2 where s = cast("zach young" as varchar(50)); -- INPUT_RECORDS: 2000 (2 row groups) -select count(*) from orc_ppd where s < "b"; +select count(*) from orc_ppd_n2 where s < "b"; -- INPUT_RECORDS: 2000 (2 row groups) -select count(*) from orc_ppd where s > "alice" and s < "bob"; +select count(*) from orc_ppd_n2 where s > "alice" and s < "bob"; -- INPUT_RECORDS: 2000 (2 row groups) -select count(*) from orc_ppd where s in ("alice allen", ""); +select count(*) from orc_ppd_n2 where s in ("alice allen", ""); -- INPUT_RECORDS: 2000 (2 row groups) -select count(*) from orc_ppd where s between "" and "alice allen"; +select count(*) from orc_ppd_n2 where s between "" and "alice allen"; -- INPUT_RECORDS: 100 (1 row group) -select count(*) from orc_ppd where s between "zz" and "zzz"; +select count(*) from orc_ppd_n2 where s between "zz" and "zzz"; -- INPUT_RECORDS: 1100 (2 row groups) -select count(*) from orc_ppd where s between "zach zipper" and "zzz"; +select count(*) from orc_ppd_n2 where s between "zach zipper" and "zzz"; -- bloom filter tests -- INPUT_RECORDS: 0 -select count(*) from orc_ppd where s = "hello world"; -select count(*) from orc_ppd where s <=> "apache hive"; -select count(*) from orc_ppd where s IN ("a", "z"); +select count(*) from orc_ppd_n2 where s = "hello world"; +select count(*) from orc_ppd_n2 where s <=> "apache hive"; +select count(*) from orc_ppd_n2 where s IN ("a", "z"); -- INPUT_RECORDS: 100 -select count(*) from orc_ppd where s = "sarah ovid"; +select count(*) from 
orc_ppd_n2 where s = "sarah ovid"; -- INPUT_RECORDS: 1100 -select count(*) from orc_ppd where s = "wendy king"; +select count(*) from orc_ppd_n2 where s = "wendy king"; -- INPUT_RECORDS: 1000 -select count(*) from orc_ppd where s = "wendy king" and t < 0; +select count(*) from orc_ppd_n2 where s = "wendy king" and t < 0; -- INPUT_RECORDS: 100 -select count(*) from orc_ppd where s = "wendy king" and t > 100; +select count(*) from orc_ppd_n2 where s = "wendy king" and t > 100; set hive.cbo.enable=false; set hive.optimize.index.filter=false; -- when cbo is disabled constant gets converted to HiveDecimal -- 74.72f + 0.0 = 74.72000122070312 -select count(*) from orc_ppd where f=74.72; +select count(*) from orc_ppd_n2 where f=74.72; set hive.optimize.index.filter=true; -select count(*) from orc_ppd where f=74.72; +select count(*) from orc_ppd_n2 where f=74.72; set hive.cbo.enable=true; set hive.optimize.index.filter=false; -select count(*) from orc_ppd where f=74.72; +select count(*) from orc_ppd_n2 where f=74.72; set hive.optimize.index.filter=true; -select count(*) from orc_ppd where f=74.72; +select count(*) from orc_ppd_n2 where f=74.72; -- 42.47f + 0.0 == 42.470001220703125 -create temporary table orc_ppd_1 stored as orc as select * from orc_ppd_staging where d = 42.47; +create temporary table orc_ppd_1 stored as orc as select * from orc_ppd_staging_n1 where d = 42.47; set hive.cbo.enable=false; set hive.optimize.index.filter=false; @@ -256,7 +256,7 @@ drop table if exists tmp_orcppd; create temporary table tmp_orcppd stored as orc as select ts, cast(ts as date) - from staging ; + from staging_n7 ; insert into table tmp_orcppd values(null, null); diff --git a/ql/src/test/queries/clientpositive/orc_ppd_boolean.q b/ql/src/test/queries/clientpositive/orc_ppd_boolean.q index 7cec204cb0..4d4f56ddb0 100644 --- a/ql/src/test/queries/clientpositive/orc_ppd_boolean.q +++ b/ql/src/test/queries/clientpositive/orc_ppd_boolean.q @@ -6,33 +6,33 @@ SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; SET mapred.min.split.size=1000; SET mapred.max.split.size=5000; -create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as orc tblproperties("orc.stripe.size"="16777216"); +create table newtypesorc_n0(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as orc tblproperties("orc.stripe.size"="16777216"); -insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl; +insert overwrite table newtypesorc_n0 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl; set hive.optimize.index.filter=false; -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select sum(hash(*)) from newtypesorc where b=true; +select sum(hash(*)) from newtypesorc_n0 where b=true; set hive.optimize.index.filter=true; -select sum(hash(*)) from newtypesorc where b=false; +select sum(hash(*)) from newtypesorc_n0 where b=false; set hive.optimize.index.filter=false; -select sum(hash(*)) from newtypesorc where b!=true; +select sum(hash(*)) from newtypesorc_n0 where b!=true; set hive.optimize.index.filter=true; -select sum(hash(*)) from newtypesorc where b!=false; +select sum(hash(*)) from newtypesorc_n0 where b!=false; 
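-- Note (a general gloss, not original test content): the queries here toggle
-- hive.optimize.index.filter between runs; with it false the predicate is applied only after
-- rows are read, while with it true the predicate is pushed into the ORC reader so whole row
-- groups can be skipped using their min/max statistics. The sum(hash(*)) results must match
-- either way; only the amount of data read should differ.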
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where b 0;
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_2a.q b/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_2a.q
index 0f48730d87..cacbff3e8e 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_2a.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_2a.q
@@ -9,7 +9,7 @@ SET hive.map.aggr=false;
-create table unique_1(
+create table unique_1_n2(
 i int,
 d string,
 s string)
@@ -17,30 +17,30 @@ row format delimited
 fields terminated by '|'
 stored as textfile;
-load data local inpath '../../data/files/unique_1.txt' into table unique_1;
+load data local inpath '../../data/files/unique_1.txt' into table unique_1_n2;
-create table test1 stored as orc as select * from unique_1 order by d;
+create table test1_n13 stored as orc as select * from unique_1_n2 order by d;
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
-alter table test1 change column d d double;
+alter table test1_n13 change column d d double;
 set hive.optimize.ppd=false;
 set hive.optimize.index.filter=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-select s from test1 where d = -4996703.42;
+select s from test1_n13 where d = -4996703.42;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select s from test1 where d = -4996703.42;
+select s from test1_n13 where d = -4996703.42;
 set hive.optimize.ppd=true;
 set hive.optimize.index.filter=true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-select s from test1 where d = -4996703.42;
+select s from test1_n13 where d = -4996703.42;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select s from test1 where d = -4996703.42;
+select s from test1_n13 where d = -4996703.42;
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_2b.q b/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_2b.q
index b64471e68e..7fd8dee75b 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_2b.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_2b.q
@@ -8,7 +8,7 @@ SET hive.map.aggr=false;
 -- disabling map side aggregation as that can lead to different intermediate record counts
-create table unique_1(
+create table unique_1_n1(
 i int,
 d string,
 s string)
@@ -16,9 +16,9 @@ row format delimited
 fields terminated by '|'
 stored as textfile;
-load data local inpath '../../data/files/unique_1.txt' into table unique_1;
+load data local inpath '../../data/files/unique_1.txt' into table unique_1_n1;
-create table unique_2(
+create table unique_2_n0(
 i int,
 d string,
 s string)
@@ -26,40 +26,40 @@ row format delimited
 fields terminated by '|'
 stored as textfile;
-load data local inpath '../../data/files/unique_2.txt' into table unique_2;
+load data local inpath '../../data/files/unique_2.txt' into table unique_2_n0;
-create table test_two_files(
+create table test_two_files_n0(
 i int,
 d string,
 s string)
 stored as orc;
-insert into table test_two_files select * from unique_1 where cast(d as double) <= 0 order by cast(d as double);
-insert into table test_two_files select * from unique_2 where cast(d as double) > 0 order by cast(d as double);
+insert into table test_two_files_n0 select * from unique_1_n1 where cast(d as double) <= 0 order by cast(d as double);
+insert into table test_two_files_n0 select * from unique_2_n0 where cast(d as double) > 0 order by cast(d as double);
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
-alter table test_two_files change column d d double;
+alter table test_two_files_n0 change column d d double;
 set hive.optimize.ppd=false;
 set hive.optimize.index.filter=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-select s from test_two_files where d = -4996703.42;
+select s from test_two_files_n0 where d = -4996703.42;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select s from test_two_files where d = -4996703.42;
+select s from test_two_files_n0 where d = -4996703.42;
 set hive.optimize.ppd=true;
 set hive.optimize.index.filter=true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-select s from test_two_files where d = -4996703.42;
+select s from test_two_files_n0 where d = -4996703.42;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select s from test_two_files where d = -4996703.42;
+select s from test_two_files_n0 where d = -4996703.42;
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q b/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q
index e4a9268ba6..4235c2c211 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q
@@ -5,7 +5,7 @@ SET hive.cbo.enable=false;
 SET hive.map.aggr=false;
 -- disabling map side aggregation as that can lead to different intermediate record counts
-CREATE TABLE staging(t tinyint,
+CREATE TABLE staging_n8(t tinyint,
 si smallint,
 i int,
 b bigint,
@@ -19,10 +19,10 @@ CREATE TABLE staging(t tinyint,
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
-LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n8;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging_n8;
-CREATE TABLE orc_ppd_staging(t tinyint,
+CREATE TABLE orc_ppd_staging_n2(t tinyint,
 si smallint,
 i int,
 b bigint,
@@ -37,14 +37,14 @@ CREATE TABLE orc_ppd_staging(t tinyint,
 bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging order by t, s;
+insert overwrite table orc_ppd_staging_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n8 order by t, s;
 -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
 -- which makes it hard to test bloom filters
-insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging limit 1;
-insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging limit 1;
+insert into orc_ppd_staging_n2 select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging_n8 limit 1;
+insert into orc_ppd_staging_n2 select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging_n8 limit 1;
-CREATE TABLE orc_ppd(t tinyint,
+CREATE TABLE orc_ppd_n3(t tinyint,
 si smallint,
 i int,
 b bigint,
@@ -59,7 +59,7 @@ CREATE TABLE orc_ppd(t tinyint,
 bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by t, s;
+insert overwrite table orc_ppd_n3 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n2 order by t, s;
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
 SET hive.optimize.index.filter=false;
@@ -70,177 +70,177 @@ SET hive.optimize.index.filter=false;
 -- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n3 where t > 127;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n3 where t > 127;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd_n3 where t = 55;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd_n3 where t = 55;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = 54;
+select count(*) from orc_ppd_n3 where t = 54;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = 54;
+select count(*) from orc_ppd_n3 where t = 54;
-alter table orc_ppd change column t t smallint;
+alter table orc_ppd_n3 change column t t smallint;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n3 where t > 127;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n3 where t > 127;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd_n3 where t = 55;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd_n3 where t = 55;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = 54;
+select count(*) from orc_ppd_n3 where t = 54;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = 54;
+select count(*) from orc_ppd_n3 where t = 54;
-alter table orc_ppd change column t t int;
+alter table orc_ppd_n3 change column t t int;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n3 where t > 127;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n3 where t > 127;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd_n3 where t = 55;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd_n3 where t = 55;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = 54;
+select count(*) from orc_ppd_n3 where t = 54;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = 54;
+select count(*) from orc_ppd_n3 where t = 54;
-alter table orc_ppd change column t t bigint;
+alter table orc_ppd_n3 change column t t bigint;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n3 where t > 127;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n3 where t > 127;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd_n3 where t = 55;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd_n3 where t = 55;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = 54;
+select count(*) from orc_ppd_n3 where t = 54;
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = 54;
+select count(*) from orc_ppd_n3 where t = 54;
-alter table orc_ppd change column t t string;
+alter table orc_ppd_n3 change column t t string;
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > '127';
+select count(*) from orc_ppd_n3 where t > '127';
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > '127';
+select count(*) from orc_ppd_n3 where t > '127';
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = '55';
+select count(*) from orc_ppd_n3 where t = '55';
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = '55';
+select count(*) from orc_ppd_n3 where t = '55';
 SET hive.optimize.index.filter=false;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = '54';
+select count(*) from orc_ppd_n3 where t = '54';
 SET hive.optimize.index.filter=true;
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = '54';
+select count(*) from orc_ppd_n3 where t = '54';
 SET hive.optimize.index.filter=false;
 -- float tests
-select count(*) from orc_ppd where f = 74.72;
+select count(*) from orc_ppd_n3 where f = 74.72;
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where f = 74.72;
+select count(*) from orc_ppd_n3 where f = 74.72;
-alter table orc_ppd change column f f double;
+alter table orc_ppd_n3 change column f f double;
 SET hive.optimize.index.filter=false;
-select count(*) from orc_ppd where f = 74.72;
+select count(*) from orc_ppd_n3 where f = 74.72;
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where f = 74.72;
+select count(*) from orc_ppd_n3 where f = 74.72;
-alter table orc_ppd change column f f string;
+alter table orc_ppd_n3 change column f f string;
 SET hive.optimize.index.filter=false;
-select count(*) from orc_ppd where f = '74.72';
+select count(*) from orc_ppd_n3 where f = '74.72';
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where f = '74.72';
+select count(*) from orc_ppd_n3 where f = '74.72';
 SET hive.optimize.index.filter=false;
 -- string tests
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
-alter table orc_ppd change column s s char(50);
+alter table orc_ppd_n3 change column s s char(50);
 SET hive.optimize.index.filter=false;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
-alter table orc_ppd change column s s varchar(50);
+alter table orc_ppd_n3 change column s s varchar(50);
 SET hive.optimize.index.filter=false;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
-alter table orc_ppd change column s s char(50);
+alter table orc_ppd_n3 change column s s char(50);
 SET hive.optimize.index.filter=false;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
-alter table orc_ppd change column s s string;
+alter table orc_ppd_n3 change column s s string;
 SET hive.optimize.index.filter=false;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where s = 'bob davidson';
+select count(*) from orc_ppd_n3 where s = 'bob davidson';
-alter table orc_ppd add columns (boo boolean);
+alter table orc_ppd_n3 add columns (boo boolean);
 SET hive.optimize.index.filter=false;
 -- ppd on newly added column
-select count(*) from orc_ppd where si = 442;
-select count(*) from orc_ppd where si = 442 or boo is not null or boo = false;
+select count(*) from orc_ppd_n3 where si = 442;
+select count(*) from orc_ppd_n3 where si = 442 or boo is not null or boo = false;
 SET hive.optimize.index.filter=true;
-select count(*) from orc_ppd where si = 442;
-select count(*) from orc_ppd where si = 442 or boo is not null or boo = false;
+select count(*) from orc_ppd_n3 where si = 442;
+select count(*) from orc_ppd_n3 where si = 442 or boo is not null or boo = false;
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_str_conversion.q b/ql/src/test/queries/clientpositive/orc_ppd_str_conversion.q
index bba6c57510..61f4cfaae9 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_str_conversion.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_str_conversion.q
@@ -1,18 +1,18 @@
 set hive.vectorized.execution.enabled=false;
 set hive.cbo.enable=false;
-create table orc_test( col1 varchar(15), col2 char(10)) stored as orc;
+create table orc_test_n0( col1 varchar(15), col2 char(10)) stored as orc;
 create table text_test( col1 varchar(15), col2 char(10));
-insert into orc_test values ('val1', '1');
-insert overwrite table text_test select * from orc_test;
+insert into orc_test_n0 values ('val1', '1');
+insert overwrite table text_test select * from orc_test_n0;
 explain select * from text_test where col2='1';
 select * from text_test where col2='1';
 set hive.optimize.index.filter=false;
-select * from orc_test where col2='1';
+select * from orc_test_n0 where col2='1';
 set hive.optimize.index.filter=true;
-select * from orc_test where col2='1';
+select * from orc_test_n0 where col2='1';
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_timestamp.q b/ql/src/test/queries/clientpositive/orc_ppd_timestamp.q
index bc0965d28e..9719270edb 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_timestamp.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_timestamp.q
@@ -6,96 +6,96 @@ SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
-create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as orc tblproperties("orc.stripe.size"="16777216");
+create table newtypesorc_n2(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as orc tblproperties("orc.stripe.size"="16777216");
-insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl;
+insert overwrite table newtypesorc_n2 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl;
 -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01';
+select sum(hash(*)) from newtypesorc_n2 where cast(ts as string)='2011-01-01 01:01:01';
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01';
+select sum(hash(*)) from newtypesorc_n2 where cast(ts as string)='2011-01-01 01:01:01';
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp);
+select sum(hash(*)) from newtypesorc_n2 where ts=cast('2011-01-01 01:01:01' as timestamp);
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp);
+select sum(hash(*)) from newtypesorc_n2 where ts=cast('2011-01-01 01:01:01' as timestamp);
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20));
+select sum(hash(*)) from newtypesorc_n2 where ts=cast('2011-01-01 01:01:01' as varchar(20));
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20));
+select sum(hash(*)) from newtypesorc_n2 where ts=cast('2011-01-01 01:01:01' as varchar(20));
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp);
+select sum(hash(*)) from newtypesorc_n2 where ts!=cast('2011-01-01 01:01:01' as timestamp);
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp);
+select sum(hash(*)) from newtypesorc_n2 where ts!=cast('2011-01-01 01:01:01' as timestamp);
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where ts
-CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO;
-LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table;
+CREATE TABLE avro_table_n0 (avreau_col_1 map<string,string>) STORED AS AVRO;
+LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table_n0;
-CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table;
+CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table_n0;
 SELECT * FROM parquet_table;
-DROP TABLE avro_table;
+DROP TABLE avro_table_n0;
 DROP TABLE parquet_table;
diff --git a/ql/src/test/queries/clientpositive/parquet_nested_complex.q b/ql/src/test/queries/clientpositive/parquet_nested_complex.q
index 717e16f991..9688e169bc 100644
--- a/ql/src/test/queries/clientpositive/parquet_nested_complex.q
+++ b/ql/src/test/queries/clientpositive/parquet_nested_complex.q
@@ -2,9 +2,9 @@ set hive.vectorized.execution.enabled=false;
 set hive.test.vectorized.execution.enabled.override=none;
--- start with the original nestedcomplex test
+-- start with the original nestedcomplex_n0 test
-create table nestedcomplex (
+create table nestedcomplex_n0 (
 simple_int int,
 max_nested_array array>>>>>>>>>>>>>>>>>>>>>>,
 max_nested_map array>>>>>>>>>>>>>>>>>>>>>,
@@ -18,16 +18,16 @@ WITH SERDEPROPERTIES (
 )
 ;
-describe nestedcomplex;
-describe extended nestedcomplex;
+describe nestedcomplex_n0;
+describe extended nestedcomplex_n0;
-load data local inpath '../../data/files/nested_complex.txt' overwrite into table nestedcomplex;
+load data local inpath '../../data/files/nested_complex.txt' overwrite into table nestedcomplex_n0;
 -- and load the table into Parquet
-CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM nestedcomplex;
+CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM nestedcomplex_n0;
 SELECT * FROM parquet_nested_complex SORT BY simple_int;
-DROP TABLE nestedcomplex;
+DROP TABLE nestedcomplex_n0;
 DROP TABLE parquet_nested_complex;
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_char.q b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
index 31ee693df1..386fb2589f 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_char.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
@@ -7,74 +7,74 @@ SET hive.optimize.ppd=true;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
+create table newtypestbl_n3(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n3 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 set hive.optimize.index.filter=false;
 -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select * from newtypestbl where c="apple";
+select * from newtypestbl_n3 where c="apple";
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c="apple";
+select * from newtypestbl_n3 where c="apple";
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c!="apple";
+select * from newtypestbl_n3 where c!="apple";
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c!="apple";
+select * from newtypestbl_n3 where c!="apple";
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c<"hello";
+select * from newtypestbl_n3 where c<"hello";
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c<"hello";
+select * from newtypestbl_n3 where c<"hello";
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c<="hello" sort by c;
+select * from newtypestbl_n3 where c<="hello" sort by c;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c<="hello" sort by c;
+select * from newtypestbl_n3 where c<="hello" sort by c;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c="apple ";
+select * from newtypestbl_n3 where c="apple ";
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c="apple ";
+select * from newtypestbl_n3 where c="apple ";
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c in ("apple", "carrot");
+select * from newtypestbl_n3 where c in ("apple", "carrot");
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c in ("apple", "carrot");
+select * from newtypestbl_n3 where c in ("apple", "carrot");
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c in ("apple", "hello") sort by c;
+select * from newtypestbl_n3 where c in ("apple", "hello") sort by c;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c in ("apple", "hello") sort by c;
+select * from newtypestbl_n3 where c in ("apple", "hello") sort by c;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c in ("carrot");
+select * from newtypestbl_n3 where c in ("carrot");
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c in ("carrot");
+select * from newtypestbl_n3 where c in ("carrot");
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c between "apple" and "carrot";
+select * from newtypestbl_n3 where c between "apple" and "carrot";
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c between "apple" and "carrot";
+select * from newtypestbl_n3 where c between "apple" and "carrot";
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c between "apple" and "zombie" sort by c;
+select * from newtypestbl_n3 where c between "apple" and "zombie" sort by c;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c between "apple" and "zombie" sort by c;
+select * from newtypestbl_n3 where c between "apple" and "zombie" sort by c;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c between "carrot" and "carrot1";
+select * from newtypestbl_n3 where c between "carrot" and "carrot1";
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c between "carrot" and "carrot1";
\ No newline at end of file
+select * from newtypestbl_n3 where c between "carrot" and "carrot1";
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_date.q b/ql/src/test/queries/clientpositive/parquet_ppd_date.q
index ebc9f410f5..82085beea3 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_date.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_date.q
@@ -7,99 +7,99 @@ SET hive.optimize.ppd=true;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
+create table newtypestbl_n2(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n2 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 -- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select * from newtypestbl where da='1970-02-20';
+select * from newtypestbl_n2 where da='1970-02-20';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da='1970-02-20';
+select * from newtypestbl_n2 where da='1970-02-20';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da= date '1970-02-20';
+select * from newtypestbl_n2 where da= date '1970-02-20';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da=cast('1970-02-20' as date);
+select * from newtypestbl_n2 where da=cast('1970-02-20' as date);
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da=cast('1970-02-20' as date);
+select * from newtypestbl_n2 where da=cast('1970-02-20' as date);
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da=cast('1970-02-20' as varchar(20));
+select * from newtypestbl_n2 where da=cast('1970-02-20' as varchar(20));
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da=cast('1970-02-20' as varchar(20));
+select * from newtypestbl_n2 where da=cast('1970-02-20' as varchar(20));
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da!='1970-02-20';
+select * from newtypestbl_n2 where da!='1970-02-20';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da!='1970-02-20';
+select * from newtypestbl_n2 where da!='1970-02-20';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<'1970-02-27';
+select * from newtypestbl_n2 where da<'1970-02-27';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<'1970-02-27';
+select * from newtypestbl_n2 where da<'1970-02-27';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<'1970-02-29' sort by c;
+select * from newtypestbl_n2 where da<'1970-02-29' sort by c;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<'1970-02-29' sort by c;
+select * from newtypestbl_n2 where da<'1970-02-29' sort by c;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<'1970-02-15';
+select * from newtypestbl_n2 where da<'1970-02-15';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<'1970-02-15';
+select * from newtypestbl_n2 where da<'1970-02-15';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<='1970-02-20';
+select * from newtypestbl_n2 where da<='1970-02-20';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<='1970-02-20';
+select * from newtypestbl_n2 where da<='1970-02-20';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da<='1970-02-27' sort by c;
+select * from newtypestbl_n2 where da<='1970-02-27' sort by c;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da<='1970-02-27' sort by c;
+select * from newtypestbl_n2 where da<='1970-02-27' sort by c;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
+select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
+select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c;
+select * from newtypestbl_n2 where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c;
+select * from newtypestbl_n2 where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
+select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
+select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da between '1970-02-19' and '1970-02-22';
+select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-22';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da between '1970-02-19' and '1970-02-22';
+select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-22';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c;
+select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-28' sort by c;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c;
+select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-28' sort by c;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where da between '1970-02-18' and '1970-02-19';
+select * from newtypestbl_n2 where da between '1970-02-18' and '1970-02-19';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where da between '1970-02-18' and '1970-02-19';
+select * from newtypestbl_n2 where da between '1970-02-18' and '1970-02-19';
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q b/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
index 747c911630..e8e118d541 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_decimal.q
@@ -8,162 +8,162 @@ SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 set hive.llap.cache.allow.synthetic.fileid=true;
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
+create table newtypestbl_n5(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
-insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n5 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
 -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select * from newtypestbl where d=0.22;
+select * from newtypestbl_n5 where d=0.22;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d=0.22;
+select * from newtypestbl_n5 where d=0.22;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d='0.22';
+select * from newtypestbl_n5 where d='0.22';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d='0.22';
+select * from newtypestbl_n5 where d='0.22';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d=cast('0.22' as float);
+select * from newtypestbl_n5 where d=cast('0.22' as float);
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d=cast('0.22' as float);
+select * from newtypestbl_n5 where d=cast('0.22' as float);
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d!=0.22;
+select * from newtypestbl_n5 where d!=0.22;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d!=0.22;
+select * from newtypestbl_n5 where d!=0.22;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d!='0.22';
+select * from newtypestbl_n5 where d!='0.22';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d!='0.22';
+select * from newtypestbl_n5 where d!='0.22';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d!=cast('0.22' as float);
+select * from newtypestbl_n5 where d!=cast('0.22' as float);
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d!=cast('0.22' as float);
+select * from newtypestbl_n5 where d!=cast('0.22' as float);
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<11.22;
+select * from newtypestbl_n5 where d<11.22;
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<11.22;
+select * from newtypestbl_n5 where d<11.22;
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d<'11.22';
+select * from newtypestbl_n5 where d<'11.22';
 set hive.optimize.index.filter=true;
-select * from newtypestbl where d<'11.22';
+select * from newtypestbl_n5 where d<'11.22';
 set hive.optimize.index.filter=false;
-select * from newtypestbl where d); --- Creates a table from just a portion of the file schema, including struct elements (test lower/upper case as well) -CREATE TABLE test (Name string, address struct) STORED AS PARQUET; +-- Creates a table from just a portion of the file schema, including struct elements (test_n6 lower/upper case as well) +CREATE TABLE test_n6 (Name string, address struct) STORED AS PARQUET; -LOAD DATA LOCAL INPATH '../../data/files/HiveGroup.parquet' OVERWRITE INTO TABLE test; -SELECT * FROM test; +LOAD DATA LOCAL INPATH '../../data/files/HiveGroup.parquet' OVERWRITE INTO TABLE test_n6; +SELECT * FROM test_n6; -DROP TABLE test; \ No newline at end of file +DROP TABLE test_n6; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q b/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q index e17d48b0af..0fded10ef3 100644 --- a/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q +++ b/ql/src/test/queries/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q @@ -1,8 +1,8 @@ set hive.vectorized.execution.enabled=false; set hive.mapred.mode=nonstrict; -DROP TABLE parquet_types_staging; -DROP TABLE parquet_types; +DROP TABLE parquet_types_staging_n2; +DROP TABLE parquet_types_n1; set hive.vectorized.execution.enabled=true; set hive.vectorized.execution.reduce.enabled=true; @@ -10,7 +10,7 @@ set hive.vectorized.use.row.serde.deserialize=true; set hive.vectorized.use.vector.serde.deserialize=true; set hive.vectorized.execution.reduce.groupby.enabled = true; -CREATE TABLE parquet_types_staging ( +CREATE TABLE parquet_types_staging_n2 ( cint int, ctinyint tinyint, csmallint smallint, @@ -30,7 +30,7 @@ FIELDS TERMINATED BY '|' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'; -CREATE TABLE parquet_types ( +CREATE TABLE parquet_types_n1 ( cint int, ctinyint tinyint, csmallint smallint, @@ -48,13 +48,13 @@ CREATE TABLE parquet_types ( ) STORED AS PARQUET; LOAD DATA LOCAL INPATH '../../data/files/parquet_non_dictionary_types.txt' OVERWRITE INTO TABLE -parquet_types_staging; +parquet_types_staging_n2; -SELECT * FROM parquet_types_staging; +SELECT * FROM parquet_types_staging_n2; -INSERT OVERWRITE TABLE parquet_types +INSERT OVERWRITE TABLE parquet_types_n1 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, -unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging; +unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging_n2; -- test types in group by @@ -64,7 +64,7 @@ EXPLAIN SELECT ctinyint, COUNT(cstring1), ROUND(AVG(cfloat), 5), ROUND(STDDEV_POP(cdouble),5) -FROM parquet_types +FROM parquet_types_n1 GROUP BY ctinyint ORDER BY ctinyint ; @@ -75,22 +75,22 @@ SELECT ctinyint, COUNT(cstring1), ROUND(AVG(cfloat), 5), ROUND(STDDEV_POP(cdouble),5) -FROM parquet_types +FROM parquet_types_n1 GROUP BY ctinyint ORDER BY ctinyint ; -EXPLAIN SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat; -SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat; +EXPLAIN SELECT cfloat, count(*) FROM parquet_types_n1 GROUP BY cfloat ORDER BY cfloat; +SELECT cfloat, count(*) FROM parquet_types_n1 GROUP BY cfloat ORDER BY cfloat; -EXPLAIN SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar; -SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar; +EXPLAIN SELECT cchar, count(*) FROM parquet_types_n1 GROUP BY cchar ORDER BY 
cchar; +SELECT cchar, count(*) FROM parquet_types_n1 GROUP BY cchar ORDER BY cchar; -EXPLAIN SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar; -SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar; +EXPLAIN SELECT cvarchar, count(*) FROM parquet_types_n1 GROUP BY cvarchar ORDER BY cvarchar; +SELECT cvarchar, count(*) FROM parquet_types_n1 GROUP BY cvarchar ORDER BY cvarchar; -EXPLAIN SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1; -SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1; +EXPLAIN SELECT cstring1, count(*) FROM parquet_types_n1 GROUP BY cstring1 ORDER BY cstring1; +SELECT cstring1, count(*) FROM parquet_types_n1 GROUP BY cstring1 ORDER BY cstring1; -EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary; -SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary; \ No newline at end of file +EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types_n1 GROUP BY cbinary; +SELECT hex(cbinary), count(*) FROM parquet_types_n1 GROUP BY cbinary; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/parquet_types_vectorization.q b/ql/src/test/queries/clientpositive/parquet_types_vectorization.q index 1f353aed0e..67a27e8efb 100644 --- a/ql/src/test/queries/clientpositive/parquet_types_vectorization.q +++ b/ql/src/test/queries/clientpositive/parquet_types_vectorization.q @@ -1,6 +1,6 @@ set hive.mapred.mode=nonstrict; -DROP TABLE parquet_types_staging; -DROP TABLE parquet_types; +DROP TABLE parquet_types_staging_n1; +DROP TABLE parquet_types_n0; set hive.vectorized.execution.enabled=true; set hive.vectorized.execution.reduce.enabled=true; @@ -9,7 +9,7 @@ set hive.vectorized.use.vector.serde.deserialize=true; set hive.vectorized.execution.reduce.groupby.enabled = true; set hive.llap.cache.allow.synthetic.fileid=true; -CREATE TABLE parquet_types_staging ( +CREATE TABLE parquet_types_staging_n1 ( cint int, ctinyint tinyint, csmallint smallint, @@ -29,7 +29,7 @@ FIELDS TERMINATED BY '|' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'; -CREATE TABLE parquet_types ( +CREATE TABLE parquet_types_n0 ( cint int, ctinyint tinyint, csmallint smallint, @@ -46,13 +46,13 @@ CREATE TABLE parquet_types ( d date ) STORED AS PARQUET; -LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging; +LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging_n1; -SELECT * FROM parquet_types_staging; +SELECT * FROM parquet_types_staging_n1; -INSERT OVERWRITE TABLE parquet_types +INSERT OVERWRITE TABLE parquet_types_n0 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, -unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging; +unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging_n1; -- test types in group by @@ -62,7 +62,7 @@ EXPLAIN SELECT ctinyint, COUNT(cstring1), ROUND(AVG(cfloat), 5), ROUND(STDDEV_POP(cdouble),5) -FROM parquet_types +FROM parquet_types_n0 GROUP BY ctinyint ORDER BY ctinyint ; @@ -73,25 +73,25 @@ SELECT ctinyint, COUNT(cstring1), ROUND(AVG(cfloat), 5), ROUND(STDDEV_POP(cdouble),5) -FROM parquet_types +FROM parquet_types_n0 GROUP BY ctinyint ORDER BY ctinyint ; -EXPLAIN SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat; -SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat; +EXPLAIN SELECT cfloat, count(*) FROM parquet_types_n0 GROUP BY cfloat 
ORDER BY cfloat; +SELECT cfloat, count(*) FROM parquet_types_n0 GROUP BY cfloat ORDER BY cfloat; -EXPLAIN SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar; -SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar; +EXPLAIN SELECT cchar, count(*) FROM parquet_types_n0 GROUP BY cchar ORDER BY cchar; +SELECT cchar, count(*) FROM parquet_types_n0 GROUP BY cchar ORDER BY cchar; -EXPLAIN SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar; -SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar; +EXPLAIN SELECT cvarchar, count(*) FROM parquet_types_n0 GROUP BY cvarchar ORDER BY cvarchar; +SELECT cvarchar, count(*) FROM parquet_types_n0 GROUP BY cvarchar ORDER BY cvarchar; -EXPLAIN SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1; -SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1; +EXPLAIN SELECT cstring1, count(*) FROM parquet_types_n0 GROUP BY cstring1 ORDER BY cstring1; +SELECT cstring1, count(*) FROM parquet_types_n0 GROUP BY cstring1 ORDER BY cstring1; -EXPLAIN SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t; -SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t; +EXPLAIN SELECT t, count(*) FROM parquet_types_n0 GROUP BY t ORDER BY t; +SELECT t, count(*) FROM parquet_types_n0 GROUP BY t ORDER BY t; -EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary; -SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary; \ No newline at end of file +EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types_n0 GROUP BY cbinary; +SELECT hex(cbinary), count(*) FROM parquet_types_n0 GROUP BY cbinary; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q b/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q index d5b2e3c8da..c36cfcbded 100644 --- a/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q +++ b/ql/src/test/queries/clientpositive/parquet_vectorization_part_project.q @@ -4,9 +4,9 @@ set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -CREATE TABLE alltypesparquet_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS PARQUET; -insert overwrite table alltypesparquet_part partition (ds='2011') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100; -insert overwrite table alltypesparquet_part partition (ds='2012') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100; +CREATE TABLE alltypesparquet_part_n0(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS PARQUET; +insert overwrite table alltypesparquet_part_n0 partition (ds='2011') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100; +insert overwrite table alltypesparquet_part_n0 partition (ds='2012') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100; -explain vectorization select (cdouble+2) c1 from alltypesparquet_part order by c1 limit 10; -select (cdouble+2) c1 from alltypesparquet_part order by c1 limit 10; +explain 
vectorization select (cdouble+2) c1 from alltypesparquet_part_n0 order by c1 limit 10; +select (cdouble+2) c1 from alltypesparquet_part_n0 order by c1 limit 10; diff --git a/ql/src/test/queries/clientpositive/partInit.q b/ql/src/test/queries/clientpositive/partInit.q index 62299b2232..e2703ebb21 100644 --- a/ql/src/test/queries/clientpositive/partInit.q +++ b/ql/src/test/queries/clientpositive/partInit.q @@ -1,13 +1,13 @@ set hive.mapred.mode=nonstrict; -CREATE TABLE empty (c INT) PARTITIONED BY (p INT); -SELECT MAX(c) FROM empty; -SELECT MAX(p) FROM empty; +CREATE TABLE empty_n1 (c INT) PARTITIONED BY (p INT); +SELECT MAX(c) FROM empty_n1; +SELECT MAX(p) FROM empty_n1; -ALTER TABLE empty ADD PARTITION (p=1); +ALTER TABLE empty_n1 ADD PARTITION (p=1); set hive.optimize.metadataonly=true; -SELECT MAX(p) FROM empty; +SELECT MAX(p) FROM empty_n1; set hive.optimize.metadataonly=false; -SELECT MAX(p) FROM empty; +SELECT MAX(p) FROM empty_n1; diff --git a/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q b/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q index 3ee1b4ac80..907fa0219b 100644 --- a/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q +++ b/ql/src/test/queries/clientpositive/part_inherit_tbl_props.q @@ -1,8 +1,8 @@ set hive.metastore.partition.inherit.table.properties=a,b; -- The property needs to be unset at the end of the test till HIVE-3109/HIVE-3112 is fixed -create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval'); -alter table mytbl add partition (c2 = 'v1'); -describe formatted mytbl partition (c2='v1'); +create table mytbl_n0 (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval'); +alter table mytbl_n0 add partition (c2 = 'v1'); +describe formatted mytbl_n0 partition (c2='v1'); set hive.metastore.partition.inherit.table.properties=; diff --git a/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q b/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q index f3f0335c88..d54c7e6b4e 100644 --- a/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q +++ b/ql/src/test/queries/clientpositive/part_inherit_tbl_props_empty.q @@ -1,4 +1,4 @@ set hive.metastore.partition.inherit.table.properties=""; -create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval'); -alter table mytbl add partition (c2 = 'v1'); -describe formatted mytbl partition (c2='v1'); +create table mytbl_n2 (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval'); +alter table mytbl_n2 add partition (c2 = 'v1'); +describe formatted mytbl_n2 partition (c2='v1'); diff --git a/ql/src/test/queries/clientpositive/partcols1.q b/ql/src/test/queries/clientpositive/partcols1.q index 7f47005a91..4be9576e77 100644 --- a/ql/src/test/queries/clientpositive/partcols1.q +++ b/ql/src/test/queries/clientpositive/partcols1.q @@ -1,11 +1,11 @@ --! 
qt:dataset:src -create table test1(col1 string) partitioned by (partitionId int); -insert overwrite table test1 partition (partitionId=1) +create table test1_n15(col1 string) partitioned by (partitionId int); +insert overwrite table test1_n15 partition (partitionId=1) select key from src tablesample (10 rows); FROM ( - FROM test1 + FROM test1_n15 SELECT partitionId, 111 as col2, 222 as col3, 333 as col4 WHERE partitionId = 1 DISTRIBUTE BY partitionId diff --git a/ql/src/test/queries/clientpositive/partial_column_stats.q b/ql/src/test/queries/clientpositive/partial_column_stats.q index 8ff65acf09..f66525a8bb 100644 --- a/ql/src/test/queries/clientpositive/partial_column_stats.q +++ b/ql/src/test/queries/clientpositive/partial_column_stats.q @@ -1,9 +1,9 @@ set hive.mapred.mode=nonstrict; -create table t1 (key int, data struct, value string); +create table t1_n53 (key int, data struct, value string); -explain analyze table t1 compute statistics for columns; +explain analyze table t1_n53 compute statistics for columns; -analyze table t1 compute statistics for columns; +analyze table t1_n53 compute statistics for columns; -desc formatted t1 value; +desc formatted t1_n53 value; diff --git a/ql/src/test/queries/clientpositive/partition_condition_remover.q b/ql/src/test/queries/clientpositive/partition_condition_remover.q index f417eb7be5..95fb09f6cd 100644 --- a/ql/src/test/queries/clientpositive/partition_condition_remover.q +++ b/ql/src/test/queries/clientpositive/partition_condition_remover.q @@ -1,14 +1,14 @@ --! qt:dataset:alltypesorc -drop table foo; +drop table foo_n5; -create table foo (i int) partitioned by (s string); +create table foo_n5 (i int) partitioned by (s string); -insert overwrite table foo partition(s='foo') select cint from alltypesorc limit 10; -insert overwrite table foo partition(s='bar') select cint from alltypesorc limit 10; +insert overwrite table foo_n5 partition(s='foo_n5') select cint from alltypesorc limit 10; +insert overwrite table foo_n5 partition(s='bar') select cint from alltypesorc limit 10; -explain select * from foo where s not in ('bar'); -select * from foo where s not in ('bar'); +explain select * from foo_n5 where s not in ('bar'); +select * from foo_n5 where s not in ('bar'); -drop table foo; \ No newline at end of file +drop table foo_n5; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/partition_decode_name.q b/ql/src/test/queries/clientpositive/partition_decode_name.q index 7d851d9dcc..3d72cbe836 100644 --- a/ql/src/test/queries/clientpositive/partition_decode_name.q +++ b/ql/src/test/queries/clientpositive/partition_decode_name.q @@ -1,22 +1,22 @@ --! 
qt:dataset:src -create table sc as select * +create table sc_n0 as select * from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows) union all select '2011-01-11', '2011-01-11+15:18:26' from src tablesample (1 rows) union all select '2011-01-11', '2011-01-11+16:18:26' from src tablesample (1 rows) ) s; -create table sc_part (key string) partitioned by (ts string) stored as rcfile; +create table sc_part_n0 (key string) partitioned by (ts string) stored as rcfile; set hive.exec.dynamic.partition=true; set hive.exec.dynamic.partition.mode=nonstrict; set hive.decode.partition.name=false; -insert overwrite table sc_part partition(ts) select * from sc; -show partitions sc_part; -select count(*) from sc_part where ts is not null; +insert overwrite table sc_part_n0 partition(ts) select * from sc_n0; +show partitions sc_part_n0; +select count(*) from sc_part_n0 where ts is not null; set hive.decode.partition.name=true; -insert overwrite table sc_part partition(ts) select * from sc; -show partitions sc_part; -select count(*) from sc_part where ts is not null; +insert overwrite table sc_part_n0 partition(ts) select * from sc_n0; +show partitions sc_part_n0; +select count(*) from sc_part_n0 where ts is not null; diff --git a/ql/src/test/queries/clientpositive/partition_shared_scan.q b/ql/src/test/queries/clientpositive/partition_shared_scan.q index ccb8e74260..55aff6a0ae 100644 --- a/ql/src/test/queries/clientpositive/partition_shared_scan.q +++ b/ql/src/test/queries/clientpositive/partition_shared_scan.q @@ -2,24 +2,24 @@ --! qt:dataset:alltypesorc set hive.merge.nway.joins=false; -drop table foo; +drop table foo_n1; -create table foo (i int) partitioned by (s string); -insert overwrite table foo partition(s='foo') select cint from alltypesorc limit 10; -insert overwrite table foo partition(s='bar') select cint from alltypesorc limit 10; +create table foo_n1 (i int) partitioned by (s string); +insert overwrite table foo_n1 partition(s='foo_n1') select cint from alltypesorc limit 10; +insert overwrite table foo_n1 partition(s='bar') select cint from alltypesorc limit 10; explain select * -from foo f1 +from foo_n1 f1 join part p1 on (p1.p_partkey = f1.i) -join foo f2 on (f1.i = f2.i) -where f1.s='foo' and f2.s='bar'; +join foo_n1 f2 on (f1.i = f2.i) +where f1.s='foo_n1' and f2.s='bar'; explain select * -from foo f1 +from foo_n1 f1 join part p1 on (p1.p_partkey = f1.i) -join foo f2 on (f1.i = f2.i) -where f1.s='foo' and f2.s='foo'; +join foo_n1 f2 on (f1.i = f2.i) +where f1.s='foo_n1' and f2.s='foo_n1'; -drop table foo; +drop table foo_n1; diff --git a/ql/src/test/queries/clientpositive/partition_type_check.q b/ql/src/test/queries/clientpositive/partition_type_check.q index 081d0a3290..fcb5caa592 100644 --- a/ql/src/test/queries/clientpositive/partition_type_check.q +++ b/ql/src/test/queries/clientpositive/partition_type_check.q @@ -3,24 +3,24 @@ set hive.mapred.mode=nonstrict; set hive.typecheck.on.insert = true; -- begin part(string, string) pass(string, int) -CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day=2); +CREATE TABLE tab1_n3 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day=2); -select * from tab1; -drop table tab1; +select * from tab1_n3; +drop table tab1_n3; -- begin part(string, 
int) pass(string, string)
-CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile;
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2');
+CREATE TABLE tab1_n3 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day='2');
-select * from tab1;
-drop table tab1;
+select * from tab1_n3;
+drop table tab1_n3;
 -- begin part(string, date) pass(string, date)
-create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile;
-alter table tab1 add partition (month='June', day='2008-01-01');
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2008-01-01');
+create table tab1_n3 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile;
+alter table tab1_n3 add partition (month='June', day='2008-01-01');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day='2008-01-01');
-select id1, id2, day from tab1 where day='2008-01-01';
-drop table tab1;
+select id1, id2, day from tab1_n3 where day='2008-01-01';
+drop table tab1_n3;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
index 3b547c3182..1796a00733 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
@@ -2,33 +2,33 @@ set hive.mapred.mode=nonstrict;
-create table partition_test_partitioned(key string, value string) partitioned by (dt string);
+create table partition_test_partitioned_n1(key string, value string) partitioned by (dt string);
-insert overwrite table partition_test_partitioned partition(dt=100) select * from src1;
-show table extended like partition_test_partitioned;
-show table extended like partition_test_partitioned partition(dt=100);
-select key from partition_test_partitioned where dt=100;
-select key from partition_test_partitioned;
+insert overwrite table partition_test_partitioned_n1 partition(dt=100) select * from src1;
+show table extended like partition_test_partitioned_n1;
+show table extended like partition_test_partitioned_n1 partition(dt=100);
+select key from partition_test_partitioned_n1 where dt=100;
+select key from partition_test_partitioned_n1;
-alter table partition_test_partitioned set fileformat rcfile;
-insert overwrite table partition_test_partitioned partition(dt=101) select * from src1;
-show table extended like partition_test_partitioned;
-show table extended like partition_test_partitioned partition(dt=100);
-show table extended like partition_test_partitioned partition(dt=101);
-select key from partition_test_partitioned where dt=100;
-select key from partition_test_partitioned where dt=101;
-select key from partition_test_partitioned;
+alter table partition_test_partitioned_n1 set fileformat rcfile;
+insert overwrite table partition_test_partitioned_n1 partition(dt=101) select * from src1;
+show table extended like partition_test_partitioned_n1;
+show table extended like partition_test_partitioned_n1 partition(dt=100);
+show table extended like partition_test_partitioned_n1 partition(dt=101);
+select key from partition_test_partitioned_n1 where dt=100;
+select key from partition_test_partitioned_n1 where dt=101;
+select key from partition_test_partitioned_n1;
-alter table partition_test_partitioned set fileformat Sequencefile;
-insert overwrite table partition_test_partitioned partition(dt=102) select * from src1;
-show table extended like partition_test_partitioned;
-show table extended like partition_test_partitioned partition(dt=100);
-show table extended like partition_test_partitioned partition(dt=101);
-show table extended like partition_test_partitioned partition(dt=102);
-select key from partition_test_partitioned where dt=100;
-select key from partition_test_partitioned where dt=101;
-select key from partition_test_partitioned where dt=102;
-select key from partition_test_partitioned;
+alter table partition_test_partitioned_n1 set fileformat Sequencefile;
+insert overwrite table partition_test_partitioned_n1 partition(dt=102) select * from src1;
+show table extended like partition_test_partitioned_n1;
+show table extended like partition_test_partitioned_n1 partition(dt=100);
+show table extended like partition_test_partitioned_n1 partition(dt=101);
+show table extended like partition_test_partitioned_n1 partition(dt=102);
+select key from partition_test_partitioned_n1 where dt=100;
+select key from partition_test_partitioned_n1 where dt=101;
+select key from partition_test_partitioned_n1 where dt=102;
+select key from partition_test_partitioned_n1;
-select key from partition_test_partitioned where dt >=100 and dt <= 102;
+select key from partition_test_partitioned_n1 where dt >=100 and dt <= 102;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
index 2ff680eae3..2394acb58a 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat11.q
@@ -2,19 +2,19 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- This tests that the schema can be changed for binary serde data
-create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile;
-alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238;
+create table partition_test_partitioned_n4(key string, value string) partitioned by (dt string) stored as rcfile;
+alter table partition_test_partitioned_n4 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+insert overwrite table partition_test_partitioned_n4 partition(dt='1') select * from src where key = 238;
-select * from partition_test_partitioned where dt is not null;
-select key+key, value from partition_test_partitioned where dt is not null;
+select * from partition_test_partitioned_n4 where dt is not null;
+select key+key, value from partition_test_partitioned_n4 where dt is not null;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table partition_test_partitioned change key key int;
+alter table partition_test_partitioned_n4 change key key int;
 reset hive.metastore.disallow.incompatible.col.type.changes;
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n4 where dt is not null;
+select * from partition_test_partitioned_n4 where dt is not null;
-alter table partition_test_partitioned add columns (value2 string);
+alter table partition_test_partitioned_n4 add columns (value2 string);
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n4 where dt is not null;
+select * from partition_test_partitioned_n4 where dt is not null;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
index 0eb6d18fd0..c9379f4e38 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat12.q
@@ -2,26 +2,26 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- This tests that the schema can be changed for binary serde data
-create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile;
-alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238;
+create table partition_test_partitioned_n9(key string, value string) partitioned by (dt string) stored as rcfile;
+alter table partition_test_partitioned_n9 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+insert overwrite table partition_test_partitioned_n9 partition(dt='1') select * from src where key = 238;
-select * from partition_test_partitioned where dt is not null;
-select key+key, value from partition_test_partitioned where dt is not null;
+select * from partition_test_partitioned_n9 where dt is not null;
+select key+key, value from partition_test_partitioned_n9 where dt is not null;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table partition_test_partitioned change key key int;
+alter table partition_test_partitioned_n9 change key key int;
 reset hive.metastore.disallow.incompatible.col.type.changes;
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n9 where dt is not null;
+select * from partition_test_partitioned_n9 where dt is not null;
-insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97;
+insert overwrite table partition_test_partitioned_n9 partition(dt='2') select * from src where key = 97;
-alter table partition_test_partitioned add columns (value2 string);
+alter table partition_test_partitioned_n9 add columns (value2 string);
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n9 where dt is not null;
+select * from partition_test_partitioned_n9 where dt is not null;
-insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200;
+insert overwrite table partition_test_partitioned_n9 partition(dt='3') select key, value, value from src where key = 200;
-select key+key, value, value2 from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value, value2 from partition_test_partitioned_n9 where dt is not null;
+select * from partition_test_partitioned_n9 where dt is not null;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
index f495d72ba7..0d8cfbb108 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat13.q
@@ -3,19 +3,19 @@ set hive.mapred.mode=nonstrict;
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- This tests that the schema can be changed for partitioned tables for binary serde data for joins
-create table T1(key string, value string) partitioned by (dt string) stored as rcfile;
-alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97;
+create table T1_n16(key string, value string) partitioned by (dt string) stored as rcfile;
+alter table T1_n16 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+insert overwrite table T1_n16 partition (dt='1') select * from src where key = 238 or key = 97;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table T1 change key key int;
+alter table T1_n16 change key key int;
-insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97;
+insert overwrite table T1_n16 partition (dt='2') select * from src where key = 238 or key = 97;
-alter table T1 change key key string;
+alter table T1_n16 change key key string;
-create table T2(key string, value string) partitioned by (dt string) stored as rcfile;
-insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97;
+create table T2_n10(key string, value string) partitioned by (dt string) stored as rcfile;
+insert overwrite table T2_n10 partition (dt='1') select * from src where key = 238 or key = 97;
-select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
-select count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
+select /* + MAPJOIN(a) */ count(*) FROM T1_n16 a JOIN T2_n10 b ON a.key = b.key;
+select count(*) FROM T1_n16 a JOIN T2_n10 b ON a.key = b.key;
 reset hive.metastore.disallow.incompatible.col.type.changes;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
index c27e45b05a..808798337c 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
@@ -2,56 +2,56 @@ set hive.mapred.mode=nonstrict;
 set hive.exec.reducers.max = 1;
-CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string)
+CREATE TABLE tbl1_n8(key int, value string) PARTITIONED by (ds string)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile;
-CREATE TABLE tbl2(key int, value string) PARTITIONED by (ds string)
+CREATE TABLE tbl2_n7(key int, value string) PARTITIONED by (ds string)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile;
-alter table tbl1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-alter table tbl2 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+alter table tbl1_n8 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+alter table tbl2_n7 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-insert overwrite table tbl1 partition (ds='1') select * from src where key < 10;
-insert overwrite table tbl2 partition (ds='1') select * from src where key < 10;
+insert overwrite table tbl1_n8 partition (ds='1') select * from src where key < 10;
+insert overwrite table tbl2_n7 partition (ds='1') select * from src where key < 10;
-alter table tbl1 change key key int;
-insert overwrite table tbl1 partition (ds='2') select * from src where key < 10;
+alter table tbl1_n8 change key key int;
+insert overwrite table tbl1_n8 partition (ds='2') select * from src where key < 10;
-alter table tbl1 change key key string;
+alter table tbl1_n8 change key key string;
--- The subquery itself is being map-joined. Multiple partitions of tbl1 with different schemas are being read for tbl2
+-- The subquery itself is being map-joined. Multiple partitions of tbl1_n8 with different schemas are being read for tbl2_n7
 select /*+mapjoin(subq1)*/ count(*) from
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+  (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1
   join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2
   on subq1.key = subq2.key;
 set hive.optimize.bucketmapjoin = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should
--- be converted to a bucketized mapside join. Multiple partitions of tbl1 with different schemas are being read for each
--- bucket of tbl2
+-- be converted to a bucketized mapside join. Multiple partitions of tbl1_n8 with different schemas are being read for each
+-- bucket of tbl2_n7
 select /*+mapjoin(subq1)*/ count(*) from
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+  (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1
   join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2
   on subq1.key = subq2.key;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should
--- be converted to a sort-merge join. Multiple partitions of tbl1 with different schemas are being read for a
--- given file of tbl2
+-- be converted to a sort-merge join. Multiple partitions of tbl1_n8 with different schemas are being read for a
+-- given file of tbl2_n7
 select /*+mapjoin(subq1)*/ count(*) from
-  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+  (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1
  join
-  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2
  on subq1.key = subq2.key;
 -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side
--- join should be performed. Multiple partitions of tbl1 with different schemas are being read for tbl2
+-- join should be performed. Multiple partitions of tbl1_n8 with different schemas are being read for tbl2_n7
 select /*+mapjoin(subq1)*/ count(*) from
-  (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1
+  (select a.key+1 as key, concat(a.value, a.value) as value from tbl1_n8 a) subq1
  join
-  (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  (select a.key+1 as key, concat(a.value, a.value) as value from tbl2_n7 a) subq2
  on subq1.key = subq2.key;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
index ad2f068263..a652ca3fff 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat15.q
@@ -2,26 +2,26 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- This tests that the schema can be changed for binary serde data
-create table partition_test_partitioned(key string, value string)
+create table partition_test_partitioned_n6(key string, value string)
 partitioned by (dt string) stored as rcfile;
-insert overwrite table partition_test_partitioned partition(dt='1')
+insert overwrite table partition_test_partitioned_n6 partition(dt='1')
 select * from src where key = 238;
-select * from partition_test_partitioned where dt is not null;
-select key+key, value from partition_test_partitioned where dt is not null;
+select * from partition_test_partitioned_n6 where dt is not null;
+select key+key, value from partition_test_partitioned_n6 where dt is not null;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table partition_test_partitioned change key key int;
+alter table partition_test_partitioned_n6 change key key int;
 reset hive.metastore.disallow.incompatible.col.type.changes;
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n6 where dt is not null;
+select * from partition_test_partitioned_n6 where dt is not null;
-alter table partition_test_partitioned add columns (value2 string);
+alter table partition_test_partitioned_n6 add columns (value2 string);
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n6 where dt is not null;
+select * from partition_test_partitioned_n6 where dt is not null;
-insert overwrite table partition_test_partitioned partition(dt='2')
+insert overwrite table partition_test_partitioned_n6 partition(dt='2')
 select key, value, value from src where key = 86;
-select key+key, value, value2, dt from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value, value2, dt from partition_test_partitioned_n6 where dt is not null;
+select * from partition_test_partitioned_n6 where dt is not null;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
index a97619fba4..703b21469e 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat16.q
@@ -2,26 +2,26 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- This tests that the schema can be changed for binary serde data
-create table partition_test_partitioned(key string, value string)
+create table partition_test_partitioned_n10(key string, value string)
 partitioned by (dt string) stored as textfile;
-insert overwrite table partition_test_partitioned partition(dt='1')
+insert overwrite table partition_test_partitioned_n10 partition(dt='1')
 select * from src where key = 238;
-select * from partition_test_partitioned where dt is not null;
-select key+key, value from partition_test_partitioned where dt is not null;
+select * from partition_test_partitioned_n10 where dt is not null;
+select key+key, value from partition_test_partitioned_n10 where dt is not null;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
-alter table partition_test_partitioned change key key int;
+alter table partition_test_partitioned_n10 change key key int;
 reset hive.metastore.disallow.incompatible.col.type.changes;
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n10 where dt is not null;
+select * from partition_test_partitioned_n10 where dt is not null;
-alter table partition_test_partitioned add columns (value2 string);
+alter table partition_test_partitioned_n10 add columns (value2 string);
-select key+key, value from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value from partition_test_partitioned_n10 where dt is not null;
+select * from partition_test_partitioned_n10 where dt is not null;
-insert overwrite table partition_test_partitioned partition(dt='2')
+insert overwrite table partition_test_partitioned_n10 partition(dt='2')
 select key, value, value from src where key = 86;
-select key+key, value, value2, dt from partition_test_partitioned where dt is not null;
-select * from partition_test_partitioned where dt is not null;
+select key+key, value, value2, dt from partition_test_partitioned_n10 where dt is not null;
+select * from partition_test_partitioned_n10 where dt is not null;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q
index f9f97119b3..5999fc146a 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q
@@ -1,19 +1,19 @@
 --! qt:dataset:src1
-create table partition_test_partitioned(key string, value string) partitioned by (dt string);
+create table partition_test_partitioned_n8(key string, value string) partitioned by (dt string);
-alter table partition_test_partitioned set fileformat rcfile;
-insert overwrite table partition_test_partitioned partition(dt=101) select * from src1;
-show table extended like partition_test_partitioned partition(dt=101);
+alter table partition_test_partitioned_n8 set fileformat rcfile;
+insert overwrite table partition_test_partitioned_n8 partition(dt=101) select * from src1;
+show table extended like partition_test_partitioned_n8 partition(dt=101);
-alter table partition_test_partitioned set fileformat Sequencefile;
-insert overwrite table partition_test_partitioned partition(dt=102) select * from src1;
-show table extended like partition_test_partitioned partition(dt=102);
-select key from partition_test_partitioned where dt=102;
+alter table partition_test_partitioned_n8 set fileformat Sequencefile;
+insert overwrite table partition_test_partitioned_n8 partition(dt=102) select * from src1;
+show table extended like partition_test_partitioned_n8 partition(dt=102);
+select key from partition_test_partitioned_n8 where dt=102;
-insert overwrite table partition_test_partitioned partition(dt=101) select * from src1;
-show table extended like partition_test_partitioned partition(dt=101);
-select key from partition_test_partitioned where dt=101;
+insert overwrite table partition_test_partitioned_n8 partition(dt=101) select * from src1;
+show table extended like partition_test_partitioned_n8 partition(dt=101);
+select key from partition_test_partitioned_n8 where dt=101;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q
index 14ed9460cf..f64b54eed5 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q
@@ -1,9 +1,9 @@
 --! qt:dataset:src1
-create table partition_test_partitioned(key string, value string) partitioned by (dt string);
-alter table partition_test_partitioned set fileformat sequencefile;
-insert overwrite table partition_test_partitioned partition(dt='1') select * from src1;
-alter table partition_test_partitioned partition (dt='1') set fileformat sequencefile;
+create table partition_test_partitioned_n5(key string, value string) partitioned by (dt string);
+alter table partition_test_partitioned_n5 set fileformat sequencefile;
+insert overwrite table partition_test_partitioned_n5 partition(dt='1') select * from src1;
+alter table partition_test_partitioned_n5 partition (dt='1') set fileformat sequencefile;
-alter table partition_test_partitioned add partition (dt='2');
-alter table partition_test_partitioned drop partition (dt='2');
+alter table partition_test_partitioned_n5 add partition (dt='2');
+alter table partition_test_partitioned_n5 drop partition (dt='2');
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q
index f41c88e114..51b09fedad 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q
@@ -1,15 +1,15 @@
 --! qt:dataset:src1
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-create table partition_test_partitioned(key string, value string) partitioned by (dt string);
+create table partition_test_partitioned_n3(key string, value string) partitioned by (dt string);
-alter table partition_test_partitioned set fileformat rcfile;
-insert overwrite table partition_test_partitioned partition(dt=101) select * from src1;
-alter table partition_test_partitioned set fileformat Sequencefile;
-insert overwrite table partition_test_partitioned partition(dt=102) select * from src1;
+alter table partition_test_partitioned_n3 set fileformat rcfile;
+insert overwrite table partition_test_partitioned_n3 partition(dt=101) select * from src1;
+alter table partition_test_partitioned_n3 set fileformat Sequencefile;
+insert overwrite table partition_test_partitioned_n3 partition(dt=102) select * from src1;
-select dt, count(1) from partition_test_partitioned where dt is not null group by dt;
+select dt, count(1) from partition_test_partitioned_n3 where dt is not null group by dt;
-insert overwrite table partition_test_partitioned partition(dt=103) select * from src1;
+insert overwrite table partition_test_partitioned_n3 partition(dt=103) select * from src1;
-select dt, count(1) from partition_test_partitioned where dt is not null group by dt;
+select dt, count(1) from partition_test_partitioned_n3 where dt is not null group by dt;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat6.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat6.q
index 73f2266ee3..2f097fba4d 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat6.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat6.q
@@ -1,20 +1,20 @@
 --! qt:dataset:src1
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-create table partition_test_partitioned(key string, value string) partitioned by (dt string);
+create table partition_test_partitioned_n2(key string, value string) partitioned by (dt string);
-alter table partition_test_partitioned set fileformat rcfile;
-insert overwrite table partition_test_partitioned partition(dt=101) select * from src1;
-alter table partition_test_partitioned set fileformat Sequencefile;
+alter table partition_test_partitioned_n2 set fileformat rcfile;
+insert overwrite table partition_test_partitioned_n2 partition(dt=101) select * from src1;
+alter table partition_test_partitioned_n2 set fileformat Sequencefile;
-insert overwrite table partition_test_partitioned partition(dt=102) select * from src1;
+insert overwrite table partition_test_partitioned_n2 partition(dt=102) select * from src1;
 select count(1) from
-(select key, value from partition_test_partitioned where dt=101 and key < 100
+(select key, value from partition_test_partitioned_n2 where dt=101 and key < 100
 union all
-select key, value from partition_test_partitioned where dt=101 and key < 20)s;
+select key, value from partition_test_partitioned_n2 where dt=101 and key < 20)s;
 select count(1) from
-(select key, value from partition_test_partitioned where dt=101 and key < 100
+(select key, value from partition_test_partitioned_n2 where dt=101 and key < 100
 union all
-select key, value from partition_test_partitioned where dt=102 and key < 20)s;
+select key, value from partition_test_partitioned_n2 where dt=102 and key < 20)s;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat7.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat7.q
index e695df76ac..9731d6a509 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat7.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat7.q
@@ -1,13 +1,13 @@
 --! qt:dataset:src1
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-create table partition_test_partitioned(key string, value string) partitioned by (dt string);
+create table partition_test_partitioned_n11(key string, value string) partitioned by (dt string);
-alter table partition_test_partitioned set fileformat rcfile;
-insert overwrite table partition_test_partitioned partition(dt=101) select * from src1;
+alter table partition_test_partitioned_n11 set fileformat rcfile;
+insert overwrite table partition_test_partitioned_n11 partition(dt=101) select * from src1;
-select count(1) from partition_test_partitioned a join partition_test_partitioned b on a.key = b.key
+select count(1) from partition_test_partitioned_n11 a join partition_test_partitioned_n11 b on a.key = b.key
 where a.dt = '101' and b.dt = '101';
-select count(1) from partition_test_partitioned a join partition_test_partitioned b on a.key = b.key
+select count(1) from partition_test_partitioned_n11 a join partition_test_partitioned_n11 b on a.key = b.key
 where a.dt = '101' and b.dt = '101' and a.key < 100;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat8.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat8.q
index 30eea0a677..c58cce9476 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat8.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat8.q
@@ -3,12 +3,12 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- This tests that a query can span multiple partitions which can not only have different file formats, but
 -- also different serdes
-create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile;
-insert overwrite table partition_test_partitioned partition(dt='1') select * from src;
-alter table partition_test_partitioned set fileformat sequencefile;
-insert overwrite table partition_test_partitioned partition(dt='2') select * from src;
-alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
-insert overwrite table partition_test_partitioned partition(dt='3') select * from src;
+create table partition_test_partitioned_n0(key string, value string) partitioned by (dt string) stored as rcfile;
+insert overwrite table partition_test_partitioned_n0 partition(dt='1') select * from src;
+alter table partition_test_partitioned_n0 set fileformat sequencefile;
+insert overwrite table partition_test_partitioned_n0 partition(dt='2') select * from src;
+alter table partition_test_partitioned_n0 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
+insert overwrite table partition_test_partitioned_n0 partition(dt='3') select * from src;
-select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20;
-select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20;
+select * from partition_test_partitioned_n0 where dt is not null order by key, value, dt limit 20;
+select key+key as key, value, dt from partition_test_partitioned_n0 where dt is not null order by key, value, dt limit 20;
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat9.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat9.q
index 05fc20fa04..664fe37153 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat9.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat9.q
@@ -3,11 +3,11 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- This tests that a query can span multiple partitions which can not only have different file formats, but
 -- also different serdes
-create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile;
-insert overwrite table partition_test_partitioned partition(dt='1') select * from src;
-alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-insert overwrite table partition_test_partitioned partition(dt='2') select * from src;
+create table partition_test_partitioned_n7(key string, value string) partitioned by (dt string) stored as rcfile;
+insert overwrite table partition_test_partitioned_n7 partition(dt='1') select * from src;
+alter table partition_test_partitioned_n7 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+insert overwrite table partition_test_partitioned_n7 partition(dt='2') select * from src;
-select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20;
-select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20;
+select * from partition_test_partitioned_n7 where dt is not null order by key, value, dt limit 20;
+select key+key as key, value, dt from partition_test_partitioned_n7 where dt is not null order by key, value, dt limit 20;
diff --git a/ql/src/test/queries/clientpositive/partitions_json.q b/ql/src/test/queries/clientpositive/partitions_json.q
index 825e3f6004..7a4a1aafbb 100644
--- a/ql/src/test/queries/clientpositive/partitions_json.q
+++ b/ql/src/test/queries/clientpositive/partitions_json.q
@@ -1,21 +1,21 @@
 set hive.ddl.output.format=json;
-CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING);
-SHOW PARTITIONS add_part_test;
+CREATE TABLE add_part_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING);
+SHOW PARTITIONS add_part_test_n0;
-ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01');
-SHOW PARTITIONS add_part_test;
+ALTER TABLE add_part_test_n0 ADD PARTITION (ds='2010-01-01');
+SHOW PARTITIONS add_part_test_n0;
-ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01');
-SHOW PARTITIONS add_part_test;
+ALTER TABLE add_part_test_n0 ADD IF NOT EXISTS PARTITION (ds='2010-01-01');
+SHOW PARTITIONS add_part_test_n0;
-ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02');
-SHOW PARTITIONS add_part_test;
+ALTER TABLE add_part_test_n0 ADD IF NOT EXISTS PARTITION (ds='2010-01-02');
+SHOW PARTITIONS add_part_test_n0;
-SHOW TABLE EXTENDED LIKE add_part_test PARTITION (ds='2010-01-02');
+SHOW TABLE EXTENDED LIKE add_part_test_n0 PARTITION (ds='2010-01-02');
-ALTER TABLE add_part_test DROP PARTITION (ds='2010-01-02');
+ALTER TABLE add_part_test_n0 DROP PARTITION (ds='2010-01-02');
-DROP TABLE add_part_test;
+DROP TABLE add_part_test_n0;
 set hive.ddl.output.format=text;
diff --git a/ql/src/test/queries/clientpositive/pointlookup2.q b/ql/src/test/queries/clientpositive/pointlookup2.q
index 8ec07a0971..fe19381368 100644
--- a/ql/src/test/queries/clientpositive/pointlookup2.q
+++ b/ql/src/test/queries/clientpositive/pointlookup2.q
@@ -1,72 +1,72 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
-drop table pcr_t1;
-drop table pcr_t2;
+drop table pcr_t1_n2;
+drop table pcr_t2_n0;
 drop table pcr_t3;
-create table pcr_t1 (key int, value string) partitioned by (ds string);
-insert overwrite table pcr_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key;
-insert overwrite table pcr_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key;
-insert overwrite table pcr_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key;
+create table pcr_t1_n2 (key int, value string) partitioned by (ds string);
+insert overwrite table pcr_t1_n2 partition (ds='2000-04-08') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1_n2 partition (ds='2000-04-09') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1_n2 partition (ds='2000-04-10') select * from src where key < 20 order by key;
-create table pcr_t2 (ds string, key int, value string);
-from pcr_t1
-insert overwrite table pcr_t2 select ds, key, value where ds='2000-04-08';
-from pcr_t1
-insert overwrite table pcr_t2 select ds, key, value where ds='2000-04-08' and key=2;
+create table pcr_t2_n0 (ds string, key int, value string);
+from pcr_t1_n2
+insert overwrite table pcr_t2_n0 select ds, key, value where ds='2000-04-08';
+from pcr_t1_n2
+insert overwrite table pcr_t2_n0 select ds, key, value where ds='2000-04-08' and key=2;
 explain extended
 select key, value, ds
-from pcr_t1
+from pcr_t1_n2
 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2)
 order by key, value, ds;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08'
 order by t1.key;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-09'
 order by t1.key;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2)
 order by t1.key, t1.value, t2.ds;
 select key, value, ds
-from pcr_t1
+from pcr_t1_n2
 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2)
 order by key, value, ds;
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08'
 order by t1.key;
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds;
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds;
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2)
 order by t1.key, t1.value, t2.ds;
@@ -75,59 +75,59 @@ set hive.optimize.partition.columns.separate=true;
 explain extended
 select key, value, ds
-from pcr_t1
+from pcr_t1_n2
 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2)
 order by key, value, ds;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08'
 order by t1.key;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-09'
 order by t1.key;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2)
 order by t1.key, t1.value, t2.ds;
 select key, value, ds
-from pcr_t1
+from pcr_t1_n2
 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2)
 order by key, value, ds;
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08'
 order by t1.key;
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds;
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds;
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2)
 order by t1.key, t1.value, t2.ds;
-drop table pcr_t1;
-drop table pcr_t2;
+drop table pcr_t1_n2;
+drop table pcr_t2_n0;
 drop table pcr_t3;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/pointlookup3.q b/ql/src/test/queries/clientpositive/pointlookup3.q
index facfec038a..f98feeb164 100644
--- a/ql/src/test/queries/clientpositive/pointlookup3.q
+++ b/ql/src/test/queries/clientpositive/pointlookup3.q
@@ -1,64 +1,64 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
-drop table pcr_t1;
+drop table pcr_t1_n1;
-create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string);
-insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key;
-insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key;
-insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;
+create table pcr_t1_n1 (key int, value string) partitioned by (ds1 string, ds2 string);
+insert overwrite table pcr_t1_n1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1_n1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1_n1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;
 explain extended
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n1
 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2)
 order by key, value, ds1, ds2;
 explain extended
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n1
 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2)
 order by key, value, ds1, ds2;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08'
 order by t2.key, t2.value, t1.ds1;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09'
 order by t2.key, t2.value, t1.ds1;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds1;
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n1
 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2)
 order by key, value, ds1, ds2;
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n1
 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2)
 order by key, value, ds1, ds2;
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08'
 order by t2.key, t2.value, t1.ds1;
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09'
 order by t2.key, t2.value, t1.ds1;
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds1;
@@ -67,57 +67,57 @@ set hive.optimize.partition.columns.separate=true;
 explain extended
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n1
 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2)
 order by key, value, ds1, ds2;
 explain extended
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n1
 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2)
 order by key, value, ds1, ds2;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08'
 order by t2.key, t2.value, t1.ds1;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09'
 order by t2.key, t2.value, t1.ds1;
 explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds1;
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n1
 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2)
 order by key, value, ds1, ds2;
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n1
 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2)
 order by key, value, ds1, ds2;
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08'
 order by t2.key, t2.value, t1.ds1;
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09'
 order by t2.key, t2.value, t1.ds1;
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n1 t1 join pcr_t1_n1 t2
 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds1;
-drop table pcr_t1;
+drop table pcr_t1_n1;
diff --git a/ql/src/test/queries/clientpositive/pointlookup4.q b/ql/src/test/queries/clientpositive/pointlookup4.q
index 5eaa58e176..98f7cf0073 100644
--- a/ql/src/test/queries/clientpositive/pointlookup4.q
+++ b/ql/src/test/queries/clientpositive/pointlookup4.q
@@ -1,23 +1,23 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
-drop table pcr_t1;
+drop table pcr_t1_n0;
-create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string);
-insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key;
-insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key;
-insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;
+create table pcr_t1_n0 (key int, value string) partitioned by (ds1 string, ds2 string);
+insert overwrite table pcr_t1_n0 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1_n0 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key;
+insert overwrite table pcr_t1_n0 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key;
 set hive.optimize.point.lookup=false;
 set hive.optimize.partition.columns.separate=false;
 explain extended
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n0
 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
 order by key, value, ds1, ds2;
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n0
 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
 order by key, value, ds1, ds2;
@@ -27,13 +27,13 @@ set hive.optimize.partition.columns.separate=true;
 explain extended
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n0
 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
 order by key, value, ds1, ds2;
 select key, value, ds1, ds2
-from pcr_t1
+from pcr_t1_n0
 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2)
 order by key, value, ds1, ds2;
-drop table pcr_t1;
+drop table pcr_t1_n0;
diff --git a/ql/src/test/queries/clientpositive/ppd_join5.q b/ql/src/test/queries/clientpositive/ppd_join5.q
index b13e35975b..3f302cbbea 100644
--- a/ql/src/test/queries/clientpositive/ppd_join5.q
+++ b/ql/src/test/queries/clientpositive/ppd_join5.q
@@ -1,26 +1,26 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
-create table t1 (id1 string, id2 string);
-create table t2 (id string, d int);
+create table t1_n79 (id1 string, id2 string);
+create table t2_n48 (id string, d int);
 from src tablesample (1 rows)
-  insert into table t1 select 'a','a'
-  insert into table t2 select 'a',2;
+  insert into table t1_n79 select 'a','a'
+  insert into table t2_n48 select 'a',2;
 explain
 select a.*,b.d d1,c.d d2 from
-  t1 a join t2 b on (a.id1 = b.id)
-  join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1;
+  t1_n79 a join t2_n48 b on (a.id1 = b.id)
+  join t2_n48 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1;
 explain
 select * from (
 select a.*,b.d d1,c.d d2 from
-  t1 a join t2 b on (a.id1 = b.id)
-  join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1
+  t1_n79 a join t2_n48 b on (a.id1 = b.id)
+  join t2_n48 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1
 ) z where d1 > 1 or d2 > 1;
 select * from (
 select a.*,b.d d1,c.d d2 from
-  t1 a join t2 b on (a.id1 = b.id)
-  join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1
+  t1_n79 a join t2_n48 b on (a.id1 = b.id)
+  join t2_n48 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1
 ) z where d1 > 1 or d2 > 1;
diff --git a/ql/src/test/queries/clientpositive/ppd_outer_join5.q b/ql/src/test/queries/clientpositive/ppd_outer_join5.q
index cdd7db550d..848bfef70a 100644
--- a/ql/src/test/queries/clientpositive/ppd_outer_join5.q
+++ b/ql/src/test/queries/clientpositive/ppd_outer_join5.q
@@ -2,16 +2,16 @@ set hive.mapred.mode=nonstrict;
 set hive.optimize.ppd=true;
 set hive.ppd.remove.duplicatefilters=true;
-create table t1 (id int, key string, value string);
-create table t2 (id int, key string, value string);
-create table t3 (id int, key string, value string);
-create table t4 (id int, key string, value string);
+create table t1_n98 (id int, key string, value string);
+create table t2_n61 (id int, key string, value string);
+create table t3_n23 (id int, key string, value string);
+create table t4_n12 (id int, key string, value string);
-explain select * from t1 full outer join t2 on t1.id=t2.id join t3 on t2.id=t3.id where t3.id=20;
-explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t2.id=t3.id) where t2.id=20;
-explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t1.id=t3.id) where t2.id=20;
+explain select * from t1_n98 full outer join t2_n61 on t1_n98.id=t2_n61.id join t3_n23 on t2_n61.id=t3_n23.id where t3_n23.id=20;
+explain select * from t1_n98 join t2_n61 on (t1_n98.id=t2_n61.id) left outer join t3_n23 on (t2_n61.id=t3_n23.id) where t2_n61.id=20;
+explain select * from t1_n98 join t2_n61 on (t1_n98.id=t2_n61.id) left outer join t3_n23 on (t1_n98.id=t3_n23.id) where t2_n61.id=20;
-drop table t1;
-drop table t2;
-drop table t3;
-drop table t4;
\ No newline at end of file
+drop table t1_n98;
+drop table t2_n61;
+drop table t3_n23;
+drop table t4_n12;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/ppd_repeated_alias.q b/ql/src/test/queries/clientpositive/ppd_repeated_alias.q
index cb9039eead..b94572ae09 100644
--- a/ql/src/test/queries/clientpositive/ppd_repeated_alias.q
+++ b/ql/src/test/queries/clientpositive/ppd_repeated_alias.q
@@ -1,13 +1,13 @@
 set hive.mapred.mode=nonstrict;
-drop table pokes;
-drop table pokes2;
-create table pokes (foo int, bar int, blah int);
-create table pokes2 (foo int, bar int, blah int);
+drop table pokes_n0;
+drop table pokes2_n0;
+create table pokes_n0 (foo int, bar int, blah int);
+create table pokes2_n0 (foo int, bar int, blah int);
 -- Q1: predicate should not be pushed on the right side of a left outer join
 explain SELECT a.foo as foo1, b.foo as foo2, b.bar
-FROM pokes a LEFT OUTER JOIN pokes2 b
+FROM pokes_n0 a LEFT OUTER JOIN pokes2_n0 b
 ON a.foo=b.foo
 WHERE b.bar=3;
@@ -15,7 +15,7 @@ WHERE b.bar=3;
 explain SELECT * FROM
 (SELECT a.foo as foo1, b.foo as foo2, b.bar
-  FROM pokes a LEFT OUTER JOIN pokes2 b
+  FROM pokes_n0 a LEFT OUTER JOIN pokes2_n0 b
   ON a.foo=b.foo) a
 WHERE a.bar=3;
@@ -23,12 +23,12 @@ WHERE a.bar=3;
 explain SELECT * FROM
 (SELECT a.foo as foo1, b.foo as foo2, a.bar
-  FROM pokes a JOIN pokes2 b
+  FROM pokes_n0 a JOIN pokes2_n0 b
   ON a.foo=b.foo) a
 WHERE a.bar=3;
 -- Q4: here, the filter c.bar should be created under the first join but above the second
-explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes c left outer join pokes b on c.foo=b.foo) c left outer join pokes d where d.foo=1 and c.bar=2;
+explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes_n0 c left outer join pokes_n0 b on c.foo=b.foo) c left outer join pokes_n0 d where d.foo=1 and c.bar=2;
-drop table pokes;
-drop table pokes2;
+drop table pokes_n0;
+drop table pokes2_n0;
diff --git a/ql/src/test/queries/clientpositive/ppd_union_view.q b/ql/src/test/queries/clientpositive/ppd_union_view.q
index 8302868881..f6f9d1a1df 100644
--- a/ql/src/test/queries/clientpositive/ppd_union_view.q
+++ b/ql/src/test/queries/clientpositive/ppd_union_view.q
@@ -4,12 +4,12 @@ set hive.mapred.mode=nonstrict;
 drop view v;
-create table t1_new (key string, value string) partitioned by (ds string);
+create table t1_new_n0 (key string, value string) partitioned by (ds string);
-insert overwrite table t1_new partition (ds = '2011-10-15')
+insert overwrite table t1_new_n0 partition (ds = '2011-10-15')
 select 'key1', 'value1' from src tablesample (1 rows);
-insert overwrite table t1_new partition (ds = '2011-10-16')
+insert overwrite table t1_new_n0 partition (ds = '2011-10-16')
 select 'key2', 'value2' from src tablesample (1 rows);
 create table t1_old (keymap string, value string) partitioned by (ds string);
@@ -29,10 +29,10 @@ insert overwrite table t1_mapping partition (ds = '2011-10-14')
 select 'key4', 'keymap4' from src tablesample (1 rows);
-create view t1 partitioned on (ds) as
+create view t1_n113 partitioned on (ds) as
 select * from
 (
-select key, value, ds from t1_new
+select key, value, ds from t1_new_n0
 union all
 select key, value, t1_old.ds from t1_old
 join t1_mapping
 on t1_old.keymap = t1_mapping.keymap and
@@ -40,14 +40,14 @@ on t1_old.keymap = t1_mapping.keymap and
 ) subq;
 explain extended
-select * from t1 where ds = '2011-10-13';
+select * from t1_n113 where ds = '2011-10-13';
-select * from t1 where ds = '2011-10-13';
+select * from t1_n113 where ds = '2011-10-13';
-select * from t1 where ds = '2011-10-14';
+select * from t1_n113 where ds = '2011-10-14';
 explain extended
-select * from t1 where ds = '2011-10-15';
+select * from t1_n113 where ds = '2011-10-15';
-select * from t1 where ds = '2011-10-15';
-select * from t1 where ds = '2011-10-16';
\ No newline at end of file
+select * from t1_n113 where ds = '2011-10-15';
+select * from t1_n113 where ds = '2011-10-16';
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/ppr_pushdown.q b/ql/src/test/queries/clientpositive/ppr_pushdown.q
index c721744acb..242c4b05f4 100644
--- a/ql/src/test/queries/clientpositive/ppr_pushdown.q
+++ b/ql/src/test/queries/clientpositive/ppr_pushdown.q
@@ -2,44 +2,44 @@ set hive.mapred.mode=nonstrict;
 set hive.fetch.task.conversion=more;
-create table ppr_test (key string) partitioned by (ds string);
-
-alter table ppr_test add partition (ds = '1234');
-alter table ppr_test add partition (ds = '1224');
-alter table ppr_test add partition (ds = '1214');
-alter table ppr_test add partition (ds = '12+4');
-alter table ppr_test add partition (ds = '12.4');
-alter table ppr_test add partition (ds = '12:4');
-alter table ppr_test add partition (ds = '12%4');
-alter table ppr_test add partition (ds = '12*4');
-
-insert overwrite table ppr_test partition(ds = '1234') select * from (select '1234' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
-insert overwrite table ppr_test partition(ds = '1224') select * from (select '1224' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
-insert overwrite table ppr_test partition(ds = '1214') select * from (select '1214' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
-insert overwrite table ppr_test partition(ds = '12+4') select * from (select '12+4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
-insert overwrite table ppr_test partition(ds = '12.4') select * from (select '12.4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
-insert overwrite table ppr_test partition(ds = '12:4') select * from (select '12:4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
-insert overwrite table ppr_test partition(ds = '12%4') select * from (select '12%4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
-insert overwrite table ppr_test partition(ds = '12*4') select * from (select '12*4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
-
-
-select * from ppr_test where ds = '1234' order by key;
-select * from ppr_test where ds = '1224' order by key;
-select * from ppr_test where ds = '1214' order by key;
-select * from ppr_test where ds = '12.4' order by key;
-select * from ppr_test where ds = '12+4' order by key;
-select * from ppr_test where ds = '12:4' order by key;
-select * from ppr_test where ds = '12%4' order by key;
-select * from ppr_test where ds = '12*4' order by key;
-select * from ppr_test where ds = '12.*4' order by key;
-
-select * from ppr_test where ds = '1234' and key = '1234';
-select * from ppr_test where ds = '1224' and key = '1224';
-select * from ppr_test where ds = '1214' and key = '1214';
-select * from ppr_test where ds = '12.4' and key = '12.4';
-select * from ppr_test where ds = '12+4' and key = '12+4';
-select * from ppr_test where ds = '12:4' and key = '12:4';
-select * from ppr_test where ds = '12%4' and key = '12%4';
-select * from ppr_test where ds = '12*4' and key = '12*4';
+create table ppr_test_n0 (key string) partitioned by (ds string);
+
+alter table ppr_test_n0 add partition (ds = '1234');
+alter table ppr_test_n0 add partition (ds = '1224');
+alter table ppr_test_n0 add partition (ds = '1214');
+alter table ppr_test_n0 add partition (ds = '12+4');
+alter table ppr_test_n0 add partition (ds = '12.4');
+alter table ppr_test_n0 add partition (ds = '12:4');
+alter table ppr_test_n0 add partition (ds = '12%4');
+alter table ppr_test_n0 add partition (ds = '12*4');
+
+insert overwrite table ppr_test_n0 partition(ds = '1234') select * from (select '1234' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test_n0 partition(ds = '1224') select * from (select '1224' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test_n0 partition(ds = '1214') select * from (select '1214' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test_n0 partition(ds = '12+4') select * from (select '12+4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test_n0 partition(ds = '12.4') select * from (select '12.4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test_n0 partition(ds = '12:4') select * from (select '12:4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test_n0 partition(ds = '12%4') select * from (select '12%4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test_n0 partition(ds = '12*4') select * from (select '12*4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+
+
+select * from ppr_test_n0 where ds = '1234' order by key;
+select * from ppr_test_n0 where ds = '1224' order by key;
+select * from ppr_test_n0 where ds = '1214' order by key;
+select * from ppr_test_n0 where ds = '12.4' order by key;
+select * from ppr_test_n0 where ds = '12+4' order by key;
+select * from ppr_test_n0 where ds = '12:4' order by key;
+select * from ppr_test_n0 where ds = '12%4' order by key;
+select * from ppr_test_n0 where ds = '12*4' order by key;
+select * from ppr_test_n0 where ds = '12.*4' order by key;
+
+select * from ppr_test_n0 where ds = '1234' and key = '1234';
+select * from ppr_test_n0 where ds = '1224' and key = '1224';
+select * from ppr_test_n0 where ds = '1214' and key = '1214';
+select * from ppr_test_n0 where ds = '12.4' and key = '12.4';
+select * from ppr_test_n0 where ds = '12+4' and key = '12+4';
+select * from ppr_test_n0 where ds = '12:4' and key = '12:4';
+select * from ppr_test_n0 where ds = '12%4' and key = '12%4';
+select * from ppr_test_n0 where ds = '12*4' and key = '12*4';
diff --git a/ql/src/test/queries/clientpositive/primitive_types.q b/ql/src/test/queries/clientpositive/primitive_types.q
index c3fe4646dd..bfe07fe76f 100644
--- a/ql/src/test/queries/clientpositive/primitive_types.q
+++ b/ql/src/test/queries/clientpositive/primitive_types.q
@@ -1,4 +1,4 @@
-create table t (
+create table t_n9 (
 dp double precision,
 d double,
 f float,
@@ -9,5 +9,5 @@ create table t (
 de dec,
 dc dec(3,2)
 );
-describe t;
+describe t_n9;
diff --git a/ql/src/test/queries/clientpositive/ptf.q b/ql/src/test/queries/clientpositive/ptf.q
index ae65d223ab..c48ddb2865 100644
--- a/ql/src/test/queries/clientpositive/ptf.q
+++ b/ql/src/test/queries/clientpositive/ptf.q
@@ -285,7 +285,7 @@ order by p_name);
 -- 16. testViewAsTableInputToPTF
testViewAsTableInputToPTF -create view IF NOT EXISTS mfgr_price_view as +create view IF NOT EXISTS mfgr_price_view_n5 as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part @@ -294,20 +294,20 @@ group by p_mfgr, p_brand; explain select p_mfgr, p_brand, s, round(sum(s) over w1,2) as s1 -from noop(on mfgr_price_view +from noop(on mfgr_price_view_n5 partition by p_mfgr order by p_mfgr) window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row); select p_mfgr, p_brand, s, round(sum(s) over w1,2) as s1 -from noop(on mfgr_price_view +from noop(on mfgr_price_view_n5 partition by p_mfgr order by p_mfgr) window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row); -- 17. testMultipleInserts2SWQsWithPTF -CREATE TABLE part_4( +CREATE TABLE part_4_n2( p_mfgr STRING, p_name STRING, p_size INT, @@ -315,7 +315,7 @@ r INT, dr INT, s DOUBLE); -CREATE TABLE part_5( +CREATE TABLE part_5_n2( p_mfgr STRING, p_name STRING, p_size INT, @@ -329,11 +329,11 @@ explain from noop(on part partition by p_mfgr order by p_name) -INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, +INSERT OVERWRITE TABLE part_4_n2 select p_mfgr, p_name, p_size, rank() over (distribute by p_mfgr sort by p_name) as r, dense_rank() over (distribute by p_mfgr sort by p_name) as dr, round(sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row),2) as s -INSERT OVERWRITE TABLE part_5 select p_mfgr,p_name, p_size, +INSERT OVERWRITE TABLE part_5_n2 select p_mfgr,p_name, p_size, round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as r, dense_rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as dr, @@ -344,11 +344,11 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi from noop(on part partition by p_mfgr order by p_name) -INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, +INSERT OVERWRITE TABLE part_4_n2 select p_mfgr, p_name, p_size, rank() over (distribute by p_mfgr sort by p_name) as r, dense_rank() over (distribute by p_mfgr sort by p_name) as dr, round(sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row),2) as s -INSERT OVERWRITE TABLE part_5 select p_mfgr,p_name, p_size, +INSERT OVERWRITE TABLE part_5_n2 select p_mfgr,p_name, p_size, round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as r, dense_rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as dr, @@ -356,9 +356,9 @@ cume_dist() over (distribute by p_mfgr sort by p_mfgr, p_name) as cud, first_value(p_size, true) over w1 as fv1 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following); -select * from part_4; +select * from part_4_n2; -select * from part_5; +select * from part_5_n2; -- 18. 
testMulti2OperatorsFunctionChainWithMap explain diff --git a/ql/src/test/queries/clientpositive/ptf_matchpath.q b/ql/src/test/queries/clientpositive/ptf_matchpath.q index f769759988..fa68ca5f80 100644 --- a/ql/src/test/queries/clientpositive/ptf_matchpath.q +++ b/ql/src/test/queries/clientpositive/ptf_matchpath.q @@ -1,9 +1,9 @@ set hive.vectorized.execution.enabled=false; set hive.explain.user=false; -DROP TABLE flights_tiny; +DROP TABLE flights_tiny_n0; -create table flights_tiny ( +create table flights_tiny_n0 ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, YEAR int, @@ -13,7 +13,7 @@ ARR_DELAY float, FL_NUM string ); -LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny; +LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny_n0; -- SORT_QUERY_RESULTS @@ -21,7 +21,7 @@ LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 distribute by fl_num sort by year, month, day_of_month arg1('LATE.LATE+'), @@ -31,7 +31,7 @@ from matchpath(on select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 distribute by fl_num sort by year, month, day_of_month arg1('LATE.LATE+'), @@ -43,7 +43,7 @@ from matchpath(on explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -53,7 +53,7 @@ where fl_num = 1142; select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -65,7 +65,7 @@ where fl_num = 1142; explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - (select * from flights_tiny where fl_num = -1142) flights_tiny + (select * from flights_tiny_n0 where fl_num = -1142) flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -75,7 +75,7 @@ from matchpath(on select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - (select * from flights_tiny where fl_num = -1142) flights_tiny + (select * from flights_tiny_n0 where fl_num = -1142) flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), diff --git a/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q b/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q index 36da2892cd..e3996e3ecb 100644 --- a/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q +++ b/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q @@ -4,8 +4,8 @@ set hive.stats.dbclass=fs; set hive.stats.autogather=true; set hive.cbo.enable=false; -DROP TABLE IF EXISTS lineitem_ix; -CREATE TABLE lineitem_ix (L_ORDERKEY INT, +DROP TABLE IF EXISTS lineitem_ix_n1; +CREATE TABLE lineitem_ix_n1 (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, @@ -24,28 +24,28 @@ CREATE TABLE lineitem_ix (L_ORDERKEY INT, ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'; -LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix; +LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix_n1; -CREATE INDEX 
lineitem_ix_lshipdate_idx ON TABLE lineitem_ix(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)"); -ALTER INDEX lineitem_ix_lshipdate_idx ON lineitem_ix REBUILD; +CREATE INDEX lineitem_ix_lshipdate_idx ON TABLE lineitem_ix_n1(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)"); +ALTER INDEX lineitem_ix_lshipdate_idx ON lineitem_ix_n1 REBUILD; explain select l_shipdate, count(l_shipdate) -from lineitem_ix +from lineitem_ix_n1 group by l_shipdate; select l_shipdate, count(l_shipdate) -from lineitem_ix +from lineitem_ix_n1 group by l_shipdate order by l_shipdate; set hive.optimize.index.groupby=true; explain select l_shipdate, count(l_shipdate) -from lineitem_ix +from lineitem_ix_n1 group by l_shipdate; select l_shipdate, count(l_shipdate) -from lineitem_ix +from lineitem_ix_n1 group by l_shipdate order by l_shipdate; @@ -55,14 +55,14 @@ set hive.optimize.index.groupby=false; explain select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem_ix +from lineitem_ix_n1 group by year(l_shipdate), month(l_shipdate) order by year, month; select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem_ix +from lineitem_ix_n1 group by year(l_shipdate), month(l_shipdate) order by year, month; @@ -71,14 +71,14 @@ set hive.optimize.index.groupby=true; explain select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem_ix +from lineitem_ix_n1 group by year(l_shipdate), month(l_shipdate) order by year, month; select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments -from lineitem_ix +from lineitem_ix_n1 group by year(l_shipdate), month(l_shipdate) order by year, month; @@ -89,89 +89,89 @@ lastyear.monthly_shipments as monthly_shipments_delta from (select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments - from lineitem_ix + from lineitem_ix_n1 where year(l_shipdate) = 1997 group by year(l_shipdate), month(l_shipdate) ) lastyear join (select year(l_shipdate) as year, month(l_shipdate) as month, count(l_shipdate) as monthly_shipments - from lineitem_ix + from lineitem_ix_n1 where year(l_shipdate) = 1998 group by year(l_shipdate), month(l_shipdate) ) thisyear on lastyear.month = thisyear.month; explain select l_shipdate, cnt -from (select l_shipdate, count(l_shipdate) as cnt from lineitem_ix group by l_shipdate +from (select l_shipdate, count(l_shipdate) as cnt from lineitem_ix_n1 group by l_shipdate union all select l_shipdate, l_orderkey as cnt -from lineitem_ix) dummy; +from lineitem_ix_n1) dummy; -CREATE TABLE tbl(key int, value int); -CREATE INDEX tbl_key_idx ON TABLE tbl(key) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)"); -ALTER INDEX tbl_key_idx ON tbl REBUILD; +CREATE TABLE tbl_n2(key int, value int); +CREATE INDEX tbl_key_idx ON TABLE tbl_n2(key) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)"); +ALTER INDEX tbl_key_idx ON tbl_n2 REBUILD; -EXPLAIN select key, count(key) from tbl where key = 1 group by key; -EXPLAIN select key, count(key) from tbl group by key; +EXPLAIN select key, count(key) from tbl_n2 where key = 1 group by key; 
+EXPLAIN select key, count(key) from tbl_n2 group by key; -EXPLAIN select count(1) from tbl; -EXPLAIN select count(key) from tbl; +EXPLAIN select count(1) from tbl_n2; +EXPLAIN select count(key) from tbl_n2; -EXPLAIN select key FROM tbl GROUP BY key; -EXPLAIN select key FROM tbl GROUP BY value, key; -EXPLAIN select key FROM tbl WHERE key = 3 GROUP BY key; -EXPLAIN select key FROM tbl WHERE value = 2 GROUP BY key; -EXPLAIN select key FROM tbl GROUP BY key, substr(key,2,3); +EXPLAIN select key FROM tbl_n2 GROUP BY key; +EXPLAIN select key FROM tbl_n2 GROUP BY value, key; +EXPLAIN select key FROM tbl_n2 WHERE key = 3 GROUP BY key; +EXPLAIN select key FROM tbl_n2 WHERE value = 2 GROUP BY key; +EXPLAIN select key FROM tbl_n2 GROUP BY key, substr(key,2,3); -EXPLAIN select key, value FROM tbl GROUP BY value, key; -EXPLAIN select key, value FROM tbl WHERE value = 1 GROUP BY key, value; +EXPLAIN select key, value FROM tbl_n2 GROUP BY value, key; +EXPLAIN select key, value FROM tbl_n2 WHERE value = 1 GROUP BY key, value; -EXPLAIN select DISTINCT key FROM tbl; -EXPLAIN select DISTINCT key FROM tbl; -EXPLAIN select DISTINCT key FROM tbl; -EXPLAIN select DISTINCT key, value FROM tbl; -EXPLAIN select DISTINCT key, value FROM tbl WHERE value = 2; -EXPLAIN select DISTINCT key, value FROM tbl WHERE value = 2 AND key = 3; -EXPLAIN select DISTINCT key, value FROM tbl WHERE value = key; -EXPLAIN select DISTINCT key, substr(value,2,3) FROM tbl WHERE value = key; -EXPLAIN select DISTINCT key, substr(value,2,3) FROM tbl; +EXPLAIN select DISTINCT key FROM tbl_n2; +EXPLAIN select DISTINCT key FROM tbl_n2; +EXPLAIN select DISTINCT key FROM tbl_n2; +EXPLAIN select DISTINCT key, value FROM tbl_n2; +EXPLAIN select DISTINCT key, value FROM tbl_n2 WHERE value = 2; +EXPLAIN select DISTINCT key, value FROM tbl_n2 WHERE value = 2 AND key = 3; +EXPLAIN select DISTINCT key, value FROM tbl_n2 WHERE value = key; +EXPLAIN select DISTINCT key, substr(value,2,3) FROM tbl_n2 WHERE value = key; +EXPLAIN select DISTINCT key, substr(value,2,3) FROM tbl_n2; -EXPLAIN select * FROM (select DISTINCT key, value FROM tbl) v1 WHERE v1.value = 2; +EXPLAIN select * FROM (select DISTINCT key, value FROM tbl_n2) v1 WHERE v1.value = 2; -DROP TABLE tbl; +DROP TABLE tbl_n2; -CREATE TABLE tblpart (key int, value string) PARTITIONED BY (ds string, hr int); -INSERT OVERWRITE TABLE tblpart PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11; -INSERT OVERWRITE TABLE tblpart PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12; -INSERT OVERWRITE TABLE tblpart PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11; -INSERT OVERWRITE TABLE tblpart PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12; +CREATE TABLE tblpart_n0 (key int, value string) PARTITIONED BY (ds string, hr int); +INSERT OVERWRITE TABLE tblpart_n0 PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11; +INSERT OVERWRITE TABLE tblpart_n0 PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12; +INSERT OVERWRITE TABLE tblpart_n0 PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11; +INSERT OVERWRITE TABLE tblpart_n0 PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12; -CREATE INDEX tbl_part_index ON TABLE 
tblpart(key) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)"); +CREATE INDEX tbl_part_index ON TABLE tblpart_n0(key) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)"); -ALTER INDEX tbl_part_index ON tblpart PARTITION (ds='2008-04-08', hr=11) REBUILD; -EXPLAIN SELECT key, count(key) FROM tblpart WHERE ds='2008-04-09' AND hr=12 AND key < 10 GROUP BY key; +ALTER INDEX tbl_part_index ON tblpart_n0 PARTITION (ds='2008-04-08', hr=11) REBUILD; +EXPLAIN SELECT key, count(key) FROM tblpart_n0 WHERE ds='2008-04-09' AND hr=12 AND key < 10 GROUP BY key; -ALTER INDEX tbl_part_index ON tblpart PARTITION (ds='2008-04-08', hr=12) REBUILD; -ALTER INDEX tbl_part_index ON tblpart PARTITION (ds='2008-04-09', hr=11) REBUILD; -ALTER INDEX tbl_part_index ON tblpart PARTITION (ds='2008-04-09', hr=12) REBUILD; -EXPLAIN SELECT key, count(key) FROM tblpart WHERE ds='2008-04-09' AND hr=12 AND key < 10 GROUP BY key; +ALTER INDEX tbl_part_index ON tblpart_n0 PARTITION (ds='2008-04-08', hr=12) REBUILD; +ALTER INDEX tbl_part_index ON tblpart_n0 PARTITION (ds='2008-04-09', hr=11) REBUILD; +ALTER INDEX tbl_part_index ON tblpart_n0 PARTITION (ds='2008-04-09', hr=12) REBUILD; +EXPLAIN SELECT key, count(key) FROM tblpart_n0 WHERE ds='2008-04-09' AND hr=12 AND key < 10 GROUP BY key; -DROP INDEX tbl_part_index on tblpart; -DROP TABLE tblpart; +DROP INDEX tbl_part_index on tblpart_n0; +DROP TABLE tblpart_n0; -CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'; -LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl; +CREATE TABLE tbl_n2(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'; +LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl_n2; -CREATE INDEX tbl_key_idx ON TABLE tbl(key) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)"); -ALTER INDEX tbl_key_idx ON tbl REBUILD; +CREATE INDEX tbl_key_idx ON TABLE tbl_n2(key) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)"); +ALTER INDEX tbl_key_idx ON tbl_n2 REBUILD; set hive.optimize.index.groupby=false; -explain select key, count(key) from tbl group by key order by key; -select key, count(key) from tbl group by key order by key; +explain select key, count(key) from tbl_n2 group by key order by key; +select key, count(key) from tbl_n2 group by key order by key; set hive.optimize.index.groupby=true; -explain select key, count(key) from tbl group by key order by key; -select key, count(key) from tbl group by key order by key; -DROP TABLE tbl; +explain select key, count(key) from tbl_n2 group by key order by key; +select key, count(key) from tbl_n2 group by key order by key; +DROP TABLE tbl_n2; reset hive.cbo.enable; diff --git a/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx_cbo_2.q b/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx_cbo_2.q index 14197f41cf..cd425489c7 100644 --- a/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx_cbo_2.q +++ b/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx_cbo_2.q @@ -5,12 +5,12 @@ set hive.stats.autogather=true; set hive.cbo.enable=true; set hive.optimize.index.groupby=true; -DROP TABLE IF EXISTS lineitem_ix; -DROP INDEX IF EXISTS lineitem_ix_L_ORDERKEY_idx on lineitem_ix; -DROP INDEX IF EXISTS lineitem_ix_L_PARTKEY_idx on lineitem_ix; +DROP TABLE IF 
EXISTS lineitem_ix_n0; +DROP INDEX IF EXISTS lineitem_ix_L_ORDERKEY_idx on lineitem_ix_n0; +DROP INDEX IF EXISTS lineitem_ix_L_PARTKEY_idx on lineitem_ix_n0; -CREATE TABLE lineitem_ix (L_ORDERKEY INT, +CREATE TABLE lineitem_ix_n0 (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, @@ -29,178 +29,178 @@ CREATE TABLE lineitem_ix (L_ORDERKEY INT, ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'; -LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix; +LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix_n0; -CREATE INDEX lineitem_ix_L_ORDERKEY_idx ON TABLE lineitem_ix(L_ORDERKEY) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(L_ORDERKEY)"); -ALTER INDEX lineitem_ix_L_ORDERKEY_idx ON lineitem_ix REBUILD; +CREATE INDEX lineitem_ix_L_ORDERKEY_idx ON TABLE lineitem_ix_n0(L_ORDERKEY) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(L_ORDERKEY)"); +ALTER INDEX lineitem_ix_L_ORDERKEY_idx ON lineitem_ix_n0 REBUILD; -CREATE INDEX lineitem_ix_L_PARTKEY_idx ON TABLE lineitem_ix(L_PARTKEY) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(L_PARTKEY)"); -ALTER INDEX lineitem_ix_L_PARTKEY_idx ON lineitem_ix REBUILD; +CREATE INDEX lineitem_ix_L_PARTKEY_idx ON TABLE lineitem_ix_n0(L_PARTKEY) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(L_PARTKEY)"); +ALTER INDEX lineitem_ix_L_PARTKEY_idx ON lineitem_ix_n0 REBUILD; explain select count(1) -from lineitem_ix; +from lineitem_ix_n0; select count(1) -from lineitem_ix; +from lineitem_ix_n0; explain select count(L_ORDERKEY) -from lineitem_ix; +from lineitem_ix_n0; select count(L_ORDERKEY) -from lineitem_ix; +from lineitem_ix_n0; explain select L_ORDERKEY+L_PARTKEY as keysum, count(L_ORDERKEY), count(L_PARTKEY) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY, L_PARTKEY; select L_ORDERKEY+L_PARTKEY as keysum, count(L_ORDERKEY), count(L_PARTKEY) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY, L_PARTKEY; explain select L_ORDERKEY, count(L_ORDERKEY) -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY = 7 group by L_ORDERKEY; select L_ORDERKEY, count(L_ORDERKEY) -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY = 7 group by L_ORDERKEY; explain select L_ORDERKEY, count(1) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; select L_ORDERKEY, count(1) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; explain select count(L_ORDERKEY+1) -from lineitem_ix; +from lineitem_ix_n0; select count(L_ORDERKEY+1) -from lineitem_ix; +from lineitem_ix_n0; explain select L_ORDERKEY, count(L_ORDERKEY+1) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; select L_ORDERKEY, count(L_ORDERKEY+1) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; explain select L_ORDERKEY, count(L_ORDERKEY+1+L_ORDERKEY+2) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; select L_ORDERKEY, count(L_ORDERKEY+1+L_ORDERKEY+2) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; explain select L_ORDERKEY, count(1+L_ORDERKEY+2) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; select L_ORDERKEY, count(1+L_ORDERKEY+2) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; explain select L_ORDERKEY as a, count(1) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 
group by L_ORDERKEY; select L_ORDERKEY as a, count(1) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY; explain select L_ORDERKEY, count(keysum), sum(keysum) from -(select L_ORDERKEY, L_ORDERKEY+L_PARTKEY as keysum from lineitem_ix) tabA +(select L_ORDERKEY, L_ORDERKEY+L_PARTKEY as keysum from lineitem_ix_n0) tabA group by L_ORDERKEY; select L_ORDERKEY, count(keysum), sum(keysum) from -(select L_ORDERKEY, L_ORDERKEY+L_PARTKEY as keysum from lineitem_ix) tabA +(select L_ORDERKEY, L_ORDERKEY+L_PARTKEY as keysum from lineitem_ix_n0) tabA group by L_ORDERKEY; explain select L_ORDERKEY, count(L_ORDERKEY), sum(L_ORDERKEY) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; select L_ORDERKEY, count(L_ORDERKEY), sum(L_ORDERKEY) -from lineitem_ix +from lineitem_ix_n0 group by L_ORDERKEY; explain select colA, count(colA) -from (select L_ORDERKEY as colA from lineitem_ix) tabA +from (select L_ORDERKEY as colA from lineitem_ix_n0) tabA group by colA; select colA, count(colA) -from (select L_ORDERKEY as colA from lineitem_ix) tabA +from (select L_ORDERKEY as colA from lineitem_ix_n0) tabA group by colA; explain select keysum, count(keysum) from -(select L_ORDERKEY+L_PARTKEY as keysum from lineitem_ix) tabA +(select L_ORDERKEY+L_PARTKEY as keysum from lineitem_ix_n0) tabA group by keysum; select keysum, count(keysum) from -(select L_ORDERKEY+L_PARTKEY as keysum from lineitem_ix) tabA +(select L_ORDERKEY+L_PARTKEY as keysum from lineitem_ix_n0) tabA group by keysum; explain select keysum, count(keysum) from -(select L_ORDERKEY+1 as keysum from lineitem_ix) tabA +(select L_ORDERKEY+1 as keysum from lineitem_ix_n0) tabA group by keysum; select keysum, count(keysum) from -(select L_ORDERKEY+1 as keysum from lineitem_ix) tabA +(select L_ORDERKEY+1 as keysum from lineitem_ix_n0) tabA group by keysum; explain select keysum, count(1) from -(select L_ORDERKEY+1 as keysum from lineitem_ix) tabA +(select L_ORDERKEY+1 as keysum from lineitem_ix_n0) tabA group by keysum; select keysum, count(1) from -(select L_ORDERKEY+1 as keysum from lineitem_ix) tabA +(select L_ORDERKEY+1 as keysum from lineitem_ix_n0) tabA group by keysum; explain select keysum, count(keysum) from -(select L_ORDERKEY+1 as keysum from lineitem_ix where L_ORDERKEY = 7) tabA +(select L_ORDERKEY+1 as keysum from lineitem_ix_n0 where L_ORDERKEY = 7) tabA group by keysum; select keysum, count(keysum) from -(select L_ORDERKEY+1 as keysum from lineitem_ix where L_ORDERKEY = 7) tabA +(select L_ORDERKEY+1 as keysum from lineitem_ix_n0 where L_ORDERKEY = 7) tabA group by keysum; @@ -209,7 +209,7 @@ select ckeysum, count(ckeysum) from (select keysum, count(keysum) as ckeysum from - (select L_ORDERKEY+1 as keysum from lineitem_ix where L_ORDERKEY = 7) tabA + (select L_ORDERKEY+1 as keysum from lineitem_ix_n0 where L_ORDERKEY = 7) tabA group by keysum) tabB group by ckeysum; @@ -217,7 +217,7 @@ select ckeysum, count(ckeysum) from (select keysum, count(keysum) as ckeysum from - (select L_ORDERKEY+1 as keysum from lineitem_ix where L_ORDERKEY = 7) tabA + (select L_ORDERKEY+1 as keysum from lineitem_ix_n0 where L_ORDERKEY = 7) tabA group by keysum) tabB group by ckeysum; @@ -225,7 +225,7 @@ explain select keysum, count(keysum) as ckeysum from (select L_ORDERKEY, count(L_ORDERKEY) as keysum -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY)tabA group by keysum; @@ -233,7 +233,7 @@ group by keysum; select keysum, count(keysum) as ckeysum from (select L_ORDERKEY, count(L_ORDERKEY) as keysum 
-from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY)tabA group by keysum; @@ -247,7 +247,7 @@ explain select tabA.a, tabA.b, tabB.a, tabB.b from (select L_ORDERKEY as a, count(L_ORDERKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY) tabA join @@ -260,7 +260,7 @@ on (tabA.b=tabB.b); select tabA.a, tabA.b, tabB.a, tabB.b from (select L_ORDERKEY as a, count(L_ORDERKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY) tabA join @@ -275,7 +275,7 @@ explain select tabA.a, tabA.b, tabB.a, tabB.b from (select L_ORDERKEY as a, count(L_ORDERKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY) tabA join @@ -288,7 +288,7 @@ on (tabA.b=tabB.b and tabB.a < '2'); select tabA.a, tabA.b, tabB.a, tabB.b from (select L_ORDERKEY as a, count(L_ORDERKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY) tabA join @@ -299,19 +299,19 @@ group by key on (tabA.b=tabB.b and tabB.a < '2'); EXPLAIN -select L_ORDERKEY FROM lineitem_ix GROUP BY L_ORDERKEY, L_ORDERKEY+1; +select L_ORDERKEY FROM lineitem_ix_n0 GROUP BY L_ORDERKEY, L_ORDERKEY+1; -select L_ORDERKEY FROM lineitem_ix GROUP BY L_ORDERKEY, L_ORDERKEY+1; +select L_ORDERKEY FROM lineitem_ix_n0 GROUP BY L_ORDERKEY, L_ORDERKEY+1; EXPLAIN -select L_ORDERKEY, L_ORDERKEY+1, count(L_ORDERKEY) FROM lineitem_ix GROUP BY L_ORDERKEY, L_ORDERKEY+1; +select L_ORDERKEY, L_ORDERKEY+1, count(L_ORDERKEY) FROM lineitem_ix_n0 GROUP BY L_ORDERKEY, L_ORDERKEY+1; -select L_ORDERKEY, L_ORDERKEY+1, count(L_ORDERKEY) FROM lineitem_ix GROUP BY L_ORDERKEY, L_ORDERKEY+1; +select L_ORDERKEY, L_ORDERKEY+1, count(L_ORDERKEY) FROM lineitem_ix_n0 GROUP BY L_ORDERKEY, L_ORDERKEY+1; EXPLAIN -select L_ORDERKEY+2, count(L_ORDERKEY) FROM lineitem_ix GROUP BY L_ORDERKEY+2; +select L_ORDERKEY+2, count(L_ORDERKEY) FROM lineitem_ix_n0 GROUP BY L_ORDERKEY+2; -select L_ORDERKEY+2, count(L_ORDERKEY) FROM lineitem_ix GROUP BY L_ORDERKEY+2; +select L_ORDERKEY+2, count(L_ORDERKEY) FROM lineitem_ix_n0 GROUP BY L_ORDERKEY+2; --with cbo on, the following query can use idx @@ -320,12 +320,12 @@ select b, count(b) as ckeysum from ( select L_ORDERKEY as a, count(L_ORDERKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY union all select L_PARTKEY as a, count(L_PARTKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_PARTKEY < 10 group by L_PARTKEY ) tabA @@ -335,12 +335,12 @@ select b, count(b) as ckeysum from ( select L_ORDERKEY as a, count(L_ORDERKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY union all select L_PARTKEY as a, count(L_PARTKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_PARTKEY < 10 group by L_PARTKEY ) tabA @@ -353,12 +353,12 @@ select a, count(a) as ckeysum from ( select L_ORDERKEY as a, count(L_ORDERKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY union all select L_PARTKEY as a, count(L_PARTKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_PARTKEY < 10 group by L_PARTKEY ) tabA @@ -368,12 +368,12 @@ select a, count(a) as ckeysum from ( select L_ORDERKEY as a, count(L_ORDERKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_ORDERKEY < 7 group by L_ORDERKEY union all select L_PARTKEY as a, count(L_PARTKEY) as b -from lineitem_ix +from lineitem_ix_n0 where L_PARTKEY < 10 group by L_PARTKEY ) tabA @@ -383,12 +383,12 @@ explain select a, count(a) from ( select case L_ORDERKEY 
when null then 1 else 1 END as a -from lineitem_ix)tab +from lineitem_ix_n0)tab group by a; select a, count(a) from ( select case L_ORDERKEY when null then 1 else 1 END as a -from lineitem_ix)tab +from lineitem_ix_n0)tab group by a; diff --git a/ql/src/test/queries/clientpositive/quote1.q b/ql/src/test/queries/clientpositive/quote1.q index 3f38597aab..c82422ab33 100644 --- a/ql/src/test/queries/clientpositive/quote1.q +++ b/ql/src/test/queries/clientpositive/quote1.q @@ -1,15 +1,15 @@ --! qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(`location` INT, `type` STRING) PARTITIONED BY(`table` STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n103(`location` INT, `type` STRING) PARTITIONED BY(`table` STRING) STORED AS TEXTFILE; EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300; +INSERT OVERWRITE TABLE dest1_n103 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300; EXPLAIN -SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1 `int` WHERE `int`.`table` = '2008-04-08'; +SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1_n103 `int` WHERE `int`.`table` = '2008-04-08'; FROM src -INSERT OVERWRITE TABLE dest1 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300; +INSERT OVERWRITE TABLE dest1_n103 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300; -SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1 `int` WHERE `int`.`table` = '2008-04-08'; +SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1_n103 `int` WHERE `int`.`table` = '2008-04-08'; diff --git a/ql/src/test/queries/clientpositive/quotedid_basic.q b/ql/src/test/queries/clientpositive/quotedid_basic.q index 8d6136b233..cb718f091c 100644 --- a/ql/src/test/queries/clientpositive/quotedid_basic.q +++ b/ql/src/test/queries/clientpositive/quotedid_basic.q @@ -4,18 +4,18 @@ set hive.mapred.mode=nonstrict; set hive.support.quoted.identifiers=column; -- basic -create table t1(`x+1` string, `y&y` string, `!@#$%^&*()_q` string); -describe t1; -select `x+1`, `y&y`, `!@#$%^&*()_q` from t1; -explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1; -explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1'; -explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1'; +create table t1_n7(`x+1` string, `y&y` string, `!@#$%^&*()_q` string); +describe t1_n7; +select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n7; +explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n7; +explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n7 where `!@#$%^&*()_q` = '1'; +explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n7 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1'; explain select `x+1`, `y&y`, `!@#$%^&*()_q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) -from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1'; +from t1_n7 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1'; -- case insensitive explain select `X+1`, `Y&y`, `!@#$%^&*()_Q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) -from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, 
`!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1'; +from t1_n7 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, `!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1'; -- escaped back ticks diff --git a/ql/src/test/queries/clientpositive/quotedid_skew.q b/ql/src/test/queries/clientpositive/quotedid_skew.q index 94a2f922a2..34b8638b3f 100644 --- a/ql/src/test/queries/clientpositive/quotedid_skew.q +++ b/ql/src/test/queries/clientpositive/quotedid_skew.q @@ -4,22 +4,22 @@ set hive.support.quoted.identifiers=column; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(`!@#$%^&*()_q` string, `y&y` string) +CREATE TABLE T1_n46(`!@#$%^&*()_q` string, `y&y` string) SKEWED BY (`!@#$%^&*()_q`) ON ((2)) STORED AS TEXTFILE ; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n46; -CREATE TABLE T2(`!@#$%^&*()_q` string, `y&y` string) +CREATE TABLE T2_n28(`!@#$%^&*()_q` string, `y&y` string) SKEWED BY (`!@#$%^&*()_q`) ON ((2)) STORED AS TEXTFILE ; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T2_n28; -- a simple join query with skew on both the tables on the join key -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a. `!@#$%^&*()_q` = b. `!@#$%^&*()_q` +SELECT a.*, b.* FROM T1_n46 a JOIN T2_n28 b ON a. `!@#$%^&*()_q` = b. `!@#$%^&*()_q` ; diff --git a/ql/src/test/queries/clientpositive/quotedid_stats.q b/ql/src/test/queries/clientpositive/quotedid_stats.q index 3f7e05c556..aebfeae7f4 100644 --- a/ql/src/test/queries/clientpositive/quotedid_stats.q +++ b/ql/src/test/queries/clientpositive/quotedid_stats.q @@ -3,9 +3,9 @@ set hive.mapred.mode=nonstrict; set hive.support.quoted.identifiers=column; -- escaped back ticks -create table t4(`x+1``` string, `y&y` string); -describe formatted t4; +create table t4_n9(`x+1``` string, `y&y` string); +describe formatted t4_n9; -analyze table t4 compute statistics for columns; +analyze table t4_n9 compute statistics for columns; -describe formatted t4; +describe formatted t4_n9; diff --git a/ql/src/test/queries/clientpositive/rand_partitionpruner2.q b/ql/src/test/queries/clientpositive/rand_partitionpruner2.q index 05da3d4268..ab4c89198e 100644 --- a/ql/src/test/queries/clientpositive/rand_partitionpruner2.q +++ b/ql/src/test/queries/clientpositive/rand_partitionpruner2.q @@ -1,15 +1,15 @@ --! 
qt:dataset:srcpart -- scanning partitioned data -create table tmptable(key string, value string, hr string, ds string); +create table tmptable_n1(key string, value string, hr string, ds string); explain extended -insert overwrite table tmptable +insert overwrite table tmptable_n1 select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08'; -insert overwrite table tmptable +insert overwrite table tmptable_n1 select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08'; -select * from tmptable x sort by x.key,x.value,x.ds,x.hr; +select * from tmptable_n1 x sort by x.key,x.value,x.ds,x.hr; diff --git a/ql/src/test/queries/clientpositive/rcfile_merge3.q b/ql/src/test/queries/clientpositive/rcfile_merge3.q index fa30ee75be..277d32ee41 100644 --- a/ql/src/test/queries/clientpositive/rcfile_merge3.q +++ b/ql/src/test/queries/clientpositive/rcfile_merge3.q @@ -4,31 +4,31 @@ set hive.merge.rcfile.block.level=true; set mapred.max.split.size=100; set mapred.min.split.size=1; -DROP TABLE rcfile_merge3a; -DROP TABLE rcfile_merge3b; +DROP TABLE rcfile_merge3a_n0; +DROP TABLE rcfile_merge3b_n0; -CREATE TABLE rcfile_merge3a (key int, value string) +CREATE TABLE rcfile_merge3a_n0 (key int, value string) PARTITIONED BY (ds string) STORED AS TEXTFILE; -CREATE TABLE rcfile_merge3b (key int, value string) STORED AS RCFILE; +CREATE TABLE rcfile_merge3b_n0 (key int, value string) STORED AS RCFILE; -INSERT OVERWRITE TABLE rcfile_merge3a PARTITION (ds='1') +INSERT OVERWRITE TABLE rcfile_merge3a_n0 PARTITION (ds='1') SELECT * FROM src; -INSERT OVERWRITE TABLE rcfile_merge3a PARTITION (ds='2') +INSERT OVERWRITE TABLE rcfile_merge3a_n0 PARTITION (ds='2') SELECT * FROM src; -EXPLAIN INSERT OVERWRITE TABLE rcfile_merge3b - SELECT key, value FROM rcfile_merge3a; -INSERT OVERWRITE TABLE rcfile_merge3b - SELECT key, value FROM rcfile_merge3a; +EXPLAIN INSERT OVERWRITE TABLE rcfile_merge3b_n0 + SELECT key, value FROM rcfile_merge3a_n0; +INSERT OVERWRITE TABLE rcfile_merge3b_n0 + SELECT key, value FROM rcfile_merge3a_n0; SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c) - FROM rcfile_merge3a + FROM rcfile_merge3a_n0 ) t; SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c) - FROM rcfile_merge3b + FROM rcfile_merge3b_n0 ) t; -DROP TABLE rcfile_merge3a; -DROP TABLE rcfile_merge3b; +DROP TABLE rcfile_merge3a_n0; +DROP TABLE rcfile_merge3b_n0; diff --git a/ql/src/test/queries/clientpositive/rcfile_toleratecorruptions.q b/ql/src/test/queries/clientpositive/rcfile_toleratecorruptions.q index 1344a60f34..f2dcf71f0e 100644 --- a/ql/src/test/queries/clientpositive/rcfile_toleratecorruptions.q +++ b/ql/src/test/queries/clientpositive/rcfile_toleratecorruptions.q @@ -1,10 +1,10 @@ --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE test_src(key int, value string) stored as RCFILE; +CREATE TABLE test_src_n3(key int, value string) stored as RCFILE; set hive.io.rcfile.record.interval=5; set hive.io.rcfile.record.buffer.size=100; set hive.exec.compress.output=true; -INSERT OVERWRITE table test_src SELECT * FROM src; +INSERT OVERWRITE table test_src_n3 SELECT * FROM src; set hive.io.rcfile.tolerate.corruptions=true; -SELECT key, value FROM test_src order by key; +SELECT key, value FROM test_src_n3 order by key; diff --git a/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q b/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q index f6b067bb17..52d9a0a22b 100644 --- a/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q +++ b/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q @@ -2,205 +2,205 @@ SET hive.vectorized.execution.enabled=true; SET hive.fetch.task.conversion=none; -create table testbasicint (uint_32_col int) stored as parquet; -load data local inpath '../../data/files/test_uint.parquet' into table testbasicint; -select * from testbasicint; -drop table testbasicint; +create table testbasicint_n0 (uint_32_col int) stored as parquet; +load data local inpath '../../data/files/test_uint.parquet' into table testbasicint_n0; +select * from testbasicint_n0; +drop table testbasicint_n0; -create table testbigintinv +create table testbigintinv_n0 (col_INT32_UINT_8 bigint, col_INT32_UINT_16 bigint, col_INT32_UINT_32 bigint, col_INT64_UINT_64 bigint) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testbigintinv; -select * from testbigintinv; -drop table testbigintinv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testbigintinv_n0; +select * from testbigintinv_n0; +drop table testbigintinv_n0; -create table testintinv +create table testintinv_n0 (col_INT32_UINT_8 int, col_INT32_UINT_16 int, col_INT32_UINT_32 int, col_INT64_UINT_64 int) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testintinv; -select * from testintinv; -drop table testintinv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testintinv_n0; +select * from testintinv_n0; +drop table testintinv_n0; -create table testsmallintinv +create table testsmallintinv_n0 (col_INT32_UINT_8 smallint, col_INT32_UINT_16 smallint, col_INT32_UINT_32 smallint, col_INT64_UINT_64 smallint) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testsmallintinv; -select * from testsmallintinv; -drop table testsmallintinv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testsmallintinv_n0; +select * from testsmallintinv_n0; +drop table testsmallintinv_n0; -create table testtinyintinv +create table testtinyintinv_n0 (col_INT32_UINT_8 tinyint, col_INT32_UINT_16 tinyint, col_INT32_UINT_32 tinyint, col_INT64_UINT_64 tinyint) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testtinyintinv; -select * from testtinyintinv; -drop table testtinyintinv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testtinyintinv_n0; +select * from testtinyintinv_n0; +drop table testtinyintinv_n0; -create table testfloatinv +create table testfloatinv_n0 (col_INT32_UINT_8 float, 
col_INT32_UINT_16 float, col_INT32_UINT_32 float, col_INT64_UINT_64 float) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testfloatinv; -select * from testfloatinv; -drop table testfloatinv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testfloatinv_n0; +select * from testfloatinv_n0; +drop table testfloatinv_n0; -create table testdoubleinv +create table testdoubleinv_n0 (col_INT32_UINT_8 double, col_INT32_UINT_16 double, col_INT32_UINT_32 double, col_INT64_UINT_64 double) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdoubleinv; -select * from testdoubleinv; -drop table testdoubleinv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdoubleinv_n0; +select * from testdoubleinv_n0; +drop table testdoubleinv_n0; -create table testdecimal22_2inv +create table testdecimal22_2inv_n0 (col_INT32_UINT_8 decimal(22,2), col_INT32_UINT_16 decimal(22,2), col_INT32_UINT_32 decimal(22,2), col_INT64_UINT_64 decimal(22,2)) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal22_2inv; -select * from testdecimal22_2inv; -drop table testdecimal22_2inv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal22_2inv_n0; +select * from testdecimal22_2inv_n0; +drop table testdecimal22_2inv_n0; -create table testdecimal13_2inv +create table testdecimal13_2inv_n0 (col_INT32_UINT_8 decimal(13,2), col_INT32_UINT_16 decimal(13,2), col_INT32_UINT_32 decimal(13,2), col_INT64_UINT_64 decimal(13,2)) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal13_2inv; -select * from testdecimal13_2inv; -drop table testdecimal13_2inv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal13_2inv_n0; +select * from testdecimal13_2inv_n0; +drop table testdecimal13_2inv_n0; -create table testdecimal8_2inv +create table testdecimal8_2inv_n0 (col_INT32_UINT_8 decimal(8,2), col_INT32_UINT_16 decimal(8,2), col_INT32_UINT_32 decimal(8,2), col_INT64_UINT_64 decimal(8,2)) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal8_2inv; -select * from testdecimal8_2inv; -drop table testdecimal8_2inv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal8_2inv_n0; +select * from testdecimal8_2inv_n0; +drop table testdecimal8_2inv_n0; -create table testdecimal6_2inv +create table testdecimal6_2inv_n0 (col_INT32_UINT_8 decimal(6,2), col_INT32_UINT_16 decimal(6,2), col_INT32_UINT_32 decimal(6,2), col_INT64_UINT_64 decimal(6,2)) stored as parquet; -load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal6_2inv; -select * from testdecimal6_2inv; -drop table testdecimal6_2inv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal6_2inv_n0; +select * from testdecimal6_2inv_n0; +drop table testdecimal6_2inv_n0; -create table testdecimal3_2inv +create table testdecimal3_2inv_n0 (col_INT32_UINT_8 decimal(3,2), col_INT32_UINT_16 decimal(3,2), col_INT32_UINT_32 decimal(3,2), col_INT64_UINT_64 decimal(3,2)) stored as parquet; -load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table testdecimal3_2inv; -select * from testdecimal3_2inv; -drop table testdecimal3_2inv; +load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal3_2inv_n0; +select * from testdecimal3_2inv_n0; +drop table testdecimal3_2inv_n0; -create table testbigintvalid +create table testbigintvalid_n0 (col_INT32_UINT_8 bigint, col_INT32_UINT_16 bigint, col_INT32_UINT_32 bigint, col_INT64_UINT_64 bigint) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testbigintvalid; -select * from testbigintvalid; -drop table testbigintvalid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testbigintvalid_n0; +select * from testbigintvalid_n0; +drop table testbigintvalid_n0; -create table testintvalid +create table testintvalid_n0 (col_INT32_UINT_8 int, col_INT32_UINT_16 int, col_INT32_UINT_32 int, col_INT64_UINT_64 int) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testintvalid; -select * from testintvalid; -drop table testintvalid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testintvalid_n0; +select * from testintvalid_n0; +drop table testintvalid_n0; -create table testsmallintvalid +create table testsmallintvalid_n0 (col_INT32_UINT_8 smallint, col_INT32_UINT_16 smallint, col_INT32_UINT_32 smallint, col_INT64_UINT_64 smallint) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testsmallintvalid; -select * from testsmallintvalid; -drop table testsmallintvalid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testsmallintvalid_n0; +select * from testsmallintvalid_n0; +drop table testsmallintvalid_n0; -create table testtinyintvalid +create table testtinyintvalid_n0 (col_INT32_UINT_8 tinyint, col_INT32_UINT_16 tinyint, col_INT32_UINT_32 tinyint, col_INT64_UINT_64 tinyint) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testtinyintvalid; -select * from testtinyintvalid; -drop table testtinyintvalid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testtinyintvalid_n0; +select * from testtinyintvalid_n0; +drop table testtinyintvalid_n0; -create table testfloatvalid +create table testfloatvalid_n0 (col_INT32_UINT_8 float, col_INT32_UINT_16 float, col_INT32_UINT_32 float, col_INT64_UINT_64 float) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testfloatvalid; -select * from testfloatvalid; -drop table testfloatvalid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testfloatvalid_n0; +select * from testfloatvalid_n0; +drop table testfloatvalid_n0; -create table testdoublevalid +create table testdoublevalid_n0 (col_INT32_UINT_8 double, col_INT32_UINT_16 double, col_INT32_UINT_32 double, col_INT64_UINT_64 double) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdoublevalid; -select * from testdoublevalid; -drop table testdoublevalid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdoublevalid_n0; +select * from testdoublevalid_n0; +drop table testdoublevalid_n0; -create table testdecimal22_2valid +create table testdecimal22_2valid_n0 (col_INT32_UINT_8 
decimal(22,2), col_INT32_UINT_16 decimal(22,2), col_INT32_UINT_32 decimal(22,2), col_INT64_UINT_64 decimal(22,2)) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal22_2valid; -select * from testdecimal22_2valid; -drop table testdecimal22_2valid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal22_2valid_n0; +select * from testdecimal22_2valid_n0; +drop table testdecimal22_2valid_n0; -create table testdecimal13_2valid +create table testdecimal13_2valid_n0 (col_INT32_UINT_8 decimal(13,2), col_INT32_UINT_16 decimal(13,2), col_INT32_UINT_32 decimal(13,2), col_INT64_UINT_64 decimal(13,2)) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal13_2valid; -select * from testdecimal13_2valid; -drop table testdecimal13_2valid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal13_2valid_n0; +select * from testdecimal13_2valid_n0; +drop table testdecimal13_2valid_n0; -create table testdecimal8_2valid +create table testdecimal8_2valid_n0 (col_INT32_UINT_8 decimal(8,2), col_INT32_UINT_16 decimal(8,2), col_INT32_UINT_32 decimal(8,2), col_INT64_UINT_64 decimal(8,2)) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal8_2valid; -select * from testdecimal8_2valid; -drop table testdecimal8_2valid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal8_2valid_n0; +select * from testdecimal8_2valid_n0; +drop table testdecimal8_2valid_n0; -create table testdecimal6_2valid +create table testdecimal6_2valid_n0 (col_INT32_UINT_8 decimal(6,2), col_INT32_UINT_16 decimal(6,2), col_INT32_UINT_32 decimal(6,2), col_INT64_UINT_64 decimal(6,2)) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal6_2valid; -select * from testdecimal6_2valid; -drop table testdecimal6_2valid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal6_2valid_n0; +select * from testdecimal6_2valid_n0; +drop table testdecimal6_2valid_n0; -create table testdecimal3_2valid +create table testdecimal3_2valid_n0 (col_INT32_UINT_8 decimal(3,2), col_INT32_UINT_16 decimal(3,2), col_INT32_UINT_32 decimal(3,2), col_INT64_UINT_64 decimal(3,2)) stored as parquet; -load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal3_2valid; -select * from testdecimal3_2valid; -drop table testdecimal3_2valid; +load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal3_2valid_n0; +select * from testdecimal3_2valid_n0; +drop table testdecimal3_2valid_n0; diff --git a/ql/src/test/queries/clientpositive/recursive_dir.q b/ql/src/test/queries/clientpositive/recursive_dir.q index eea365dc6b..2b25f60d33 100644 --- a/ql/src/test/queries/clientpositive/recursive_dir.q +++ b/ql/src/test/queries/clientpositive/recursive_dir.q @@ -1,20 +1,20 @@ --! 
qt:dataset:src
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING);
-CREATE TABLE fact_tz(x int) PARTITIONED BY (ds STRING, hr STRING)
+CREATE TABLE fact_daily_n1(x int) PARTITIONED BY (ds STRING);
+CREATE TABLE fact_tz_n0(x int) PARTITIONED BY (ds STRING, hr STRING)
 LOCATION 'pfile:${system:test.tmp.dir}/fact_tz';
-INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1')
+INSERT OVERWRITE TABLE fact_tz_n0 PARTITION (ds='1', hr='1')
 SELECT key+11 FROM src WHERE key=484;
-ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE');
-ALTER TABLE fact_daily ADD PARTITION (ds='1')
+ALTER TABLE fact_daily_n1 SET TBLPROPERTIES('EXTERNAL'='TRUE');
+ALTER TABLE fact_daily_n1 ADD PARTITION (ds='1')
 LOCATION 'pfile:${system:test.tmp.dir}/fact_tz/ds=1';
 set mapred.input.dir.recursive=true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT * FROM fact_daily WHERE ds='1';
+SELECT * FROM fact_daily_n1 WHERE ds='1';
-SELECT count(1) FROM fact_daily WHERE ds='1';
+SELECT count(1) FROM fact_daily_n1 WHERE ds='1';
diff --git a/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q b/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q
index e28ed9fc53..d6c4a89600 100644
--- a/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q
+++ b/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q
@@ -1,8 +1,8 @@
-create table t1( key_int1 int, key_int2 int, key_string1 string, key_string2 string);
+create table t1_n32( key_int1 int, key_int2 int, key_string1 string, key_string2 string);
 set hive.optimize.reducededuplication=false;
 set hive.map.aggr=false;
-select Q1.key_int1, sum(Q1.key_int1) from (select * from t1 cluster by key_int1) Q1 group by Q1.key_int1;
+select Q1.key_int1, sum(Q1.key_int1) from (select * from t1_n32 cluster by key_int1) Q1 group by Q1.key_int1;
-drop table t1;
+drop table t1_n32;
diff --git a/ql/src/test/queries/clientpositive/remove_exprs_stats.q b/ql/src/test/queries/clientpositive/remove_exprs_stats.q
index 741d455c33..3d73fc37b7 100644
--- a/ql/src/test/queries/clientpositive/remove_exprs_stats.q
+++ b/ql/src/test/queries/clientpositive/remove_exprs_stats.q
@@ -2,74 +2,74 @@ set hive.optimize.filter.stats.reduction=true;
 set hive.mapred.mode=nonstrict;
 set hive.stats.fetch.column.stats=true;
-create table if not exists loc_staging (
+create table if not exists loc_staging_n0 (
 state string,
 locid int,
 zip bigint,
 year int
 ) row format delimited fields terminated by '|' stored as textfile;
-create table loc_orc like loc_staging;
-alter table loc_orc set fileformat orc;
+create table loc_orc_n0 like loc_staging_n0;
+alter table loc_orc_n0 set fileformat orc;
-load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n0;
-insert overwrite table loc_orc select * from loc_staging;
+insert overwrite table loc_orc_n0 select * from loc_staging_n0;
-analyze table loc_orc compute statistics for columns state,locid,zip,year;
+analyze table loc_orc_n0 compute statistics for columns state,locid,zip,year;
 -- always true
-explain select * from loc_orc where locid < 30;
+explain select * from loc_orc_n0 where locid < 30;
 -- always false
-explain select * from loc_orc where locid > 30;
+explain select * from loc_orc_n0 where locid > 30;
 -- always true
-explain select * from loc_orc where locid <= 30;
+explain select * from loc_orc_n0 where locid <= 30;
 -- always false
-explain select * from loc_orc where locid >= 30;
+explain select * from loc_orc_n0 where locid >= 30;
 -- nothing to do
-explain select * from loc_orc where locid < 6;
+explain select * from loc_orc_n0 where locid < 6;
 -- always false
-explain select * from loc_orc where locid > 6;
+explain select * from loc_orc_n0 where locid > 6;
 -- always true
-explain select * from loc_orc where locid <= 6;
+explain select * from loc_orc_n0 where locid <= 6;
 -- nothing to do
-explain select * from loc_orc where locid >= 6;
+explain select * from loc_orc_n0 where locid >= 6;
 -- always false
-explain select * from loc_orc where locid < 1;
+explain select * from loc_orc_n0 where locid < 1;
 -- nothing to do
-explain select * from loc_orc where locid > 1;
+explain select * from loc_orc_n0 where locid > 1;
 -- nothing to do
-explain select * from loc_orc where locid <= 1;
+explain select * from loc_orc_n0 where locid <= 1;
 -- always true
-explain select * from loc_orc where locid >= 1;
+explain select * from loc_orc_n0 where locid >= 1;
 -- 5 should stay
-explain select * from loc_orc where locid IN (-4,5,30,40);
+explain select * from loc_orc_n0 where locid IN (-4,5,30,40);
 -- nothing to do
-explain select * from loc_orc where locid IN (5,2,3);
+explain select * from loc_orc_n0 where locid IN (5,2,3);
 -- 1 and 6 should be left
-explain select * from loc_orc where locid IN (1,6,9);
+explain select * from loc_orc_n0 where locid IN (1,6,9);
 -- always false
-explain select * from loc_orc where locid IN (40,30);
+explain select * from loc_orc_n0 where locid IN (40,30);
-create table t ( s string);
-insert into t values (null),(null);
-analyze table t compute statistics for columns s;
+create table t_n7 ( s string);
+insert into t_n7 values (null),(null);
+analyze table t_n7 compute statistics for columns s;
 -- true
-explain select * from t where s is null;
-explain select * from loc_orc where locid is not null;
+explain select * from t_n7 where s is null;
+explain select * from loc_orc_n0 where locid is not null;
 -- false
-explain select * from t where s is not null;
-explain select * from loc_orc where locid is null;
+explain select * from t_n7 where s is not null;
+explain select * from loc_orc_n0 where locid is null;
-insert into t values ('val1');
-analyze table t compute statistics for columns s;
+insert into t_n7 values ('val1');
+analyze table t_n7 compute statistics for columns s;
 -- untouched
-explain select * from t where s is not null;
-explain select * from t where s is null;
+explain select * from t_n7 where s is not null;
+explain select * from t_n7 where s is null;
diff --git a/ql/src/test/queries/clientpositive/rename_partition_location.q b/ql/src/test/queries/clientpositive/rename_partition_location.q
index 09114d6705..fb93be1e3d 100644
--- a/ql/src/test/queries/clientpositive/rename_partition_location.q
+++ b/ql/src/test/queries/clientpositive/rename_partition_location.q
@@ -3,19 +3,19 @@
 -- This test verifies that if the tables location changes, renaming a partition will not change
 -- the partition location accordingly
-CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE rename_partition_table_n0 (key STRING, value STRING) PARTITIONED BY (part STRING)
 STORED AS RCFILE
 LOCATION 'pfile:${system:test.tmp.dir}/rename_partition_table';
-INSERT OVERWRITE TABLE rename_partition_table PARTITION (part = '1') SELECT * FROM src;
+INSERT OVERWRITE TABLE rename_partition_table_n0 PARTITION (part = '1') SELECT * FROM src;
-ALTER TABLE rename_partition_table SET LOCATION 'file:${system:test.tmp.dir}/rename_partition_table';
+ALTER TABLE rename_partition_table_n0 SET LOCATION 'file:${system:test.tmp.dir}/rename_partition_table';
-ALTER TABLE rename_partition_table PARTITION (part = '1') RENAME TO PARTITION (part = '2');
+ALTER TABLE rename_partition_table_n0 PARTITION (part = '1') RENAME TO PARTITION (part = '2');
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyPartitionIsNotSubdirectoryOfTableHook;
-SELECT count(*) FROM rename_partition_table where part = '2';
+SELECT count(*) FROM rename_partition_table_n0 where part = '2';
 SET hive.exec.post.hooks=;
@@ -32,5 +32,5 @@ SELECT count(*) FROM rename_partition_table_2 where part = '2';
 SET hive.exec.post.hooks=;
-DROP TABLE rename_partition_table;
+DROP TABLE rename_partition_table_n0;
 DROP TABLE rename_partition_table_2;
diff --git a/ql/src/test/queries/clientpositive/repair.q b/ql/src/test/queries/clientpositive/repair.q
index 80760307b3..d48417f914 100644
--- a/ql/src/test/queries/clientpositive/repair.q
+++ b/ql/src/test/queries/clientpositive/repair.q
@@ -1,17 +1,17 @@
-DROP TABLE IF EXISTS repairtable;
+DROP TABLE IF EXISTS repairtable_n4;
-CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
+CREATE TABLE repairtable_n4(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
-MSCK TABLE repairtable;
+MSCK TABLE repairtable_n4;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=a/p2=a;
-dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=b/p2=a;
-dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=b/p2=a/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n4/p1=a/p2=a;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n4/p1=b/p2=a;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n4/p1=b/p2=a/datafile;
-MSCK TABLE default.repairtable;
+MSCK TABLE default.repairtable_n4;
-MSCK REPAIR TABLE default.repairtable;
+MSCK REPAIR TABLE default.repairtable_n4;
-MSCK TABLE repairtable;
+MSCK TABLE repairtable_n4;
-DROP TABLE default.repairtable;
+DROP TABLE default.repairtable_n4;
diff --git a/ql/src/test/queries/clientpositive/results_cache_invalidation.q b/ql/src/test/queries/clientpositive/results_cache_invalidation.q
index 0ef5c66594..ecb5011692 100644
--- a/ql/src/test/queries/clientpositive/results_cache_invalidation.q
+++ b/ql/src/test/queries/clientpositive/results_cache_invalidation.q
@@ -3,11 +3,11 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-create table tab1 (key string, value string) stored as orc tblproperties ('transactional'='true');
-create table tab2 (key string, value string) stored as orc tblproperties ('transactional'='true');
+create table tab1_n6 (key string, value string) stored as orc tblproperties ('transactional'='true');
+create table tab2_n5 (key string, value string) stored as orc tblproperties ('transactional'='true');
-insert into tab1 select * from default.src;
-insert into tab2 select * from default.src;
+insert into tab1_n6 select * from default.src;
+insert into tab2_n5 select * from default.src;
 set hive.query.results.cache.enabled=true;
@@ -16,75 +16,75 @@ set test.comment;
 -- Q1
 explain
-select count(*) from tab1 a where key >= 0;
-select count(*) from tab1 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
 -- Q2
 explain
-select max(key) from tab2;
-select max(key) from tab2;
+select max(key) from tab2_n5;
+select max(key) from tab2_n5;
 -- Q3
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
 set test.comment="Q1 should now be able to use cache";
 set test.comment;
 explain
-select count(*) from tab1 a where key >= 0;
-select count(*) from tab1 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
 set test.comment="Q2 should now be able to use cache";
 set test.comment;
 explain
-select max(key) from tab2;
-select max(key) from tab2;
+select max(key) from tab2_n5;
+select max(key) from tab2_n5;
 set test.comment="Q3 should now be able to use cache";
 set test.comment;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
--- Update tab1 which should invalidate Q1 and Q3.
-insert into tab1 values ('88', 'val_88');
+-- Update tab1_n6 which should invalidate Q1 and Q3.
+insert into tab1_n6 values ('88', 'val_88');
 set test.comment="Q1 should not use cache";
 set test.comment;
 explain
-select count(*) from tab1 a where key >= 0;
-select count(*) from tab1 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
-set test.comment="Q2 should still use cache since tab2 not updated";
+set test.comment="Q2 should still use cache since tab2_n5 not updated";
 set test.comment;
 explain
-select max(key) from tab2;
-select max(key) from tab2;
+select max(key) from tab2_n5;
+select max(key) from tab2_n5;
 set test.comment="Q3 should not use cache";
 set test.comment;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
--- Update tab2 which should invalidate Q2 and Q3.
-insert into tab2 values ('88', 'val_88');
+-- Update tab2_n5 which should invalidate Q2 and Q3.
+insert into tab2_n5 values ('88', 'val_88');
 set test.comment="Q1 should use cache";
 set test.comment;
 explain
-select count(*) from tab1 a where key >= 0;
-select count(*) from tab1 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
 set test.comment="Q2 should not use cache";
 set test.comment;
 explain
-select max(key) from tab2;
-select max(key) from tab2;
+select max(key) from tab2_n5;
+select max(key) from tab2_n5;
 set test.comment="Q3 should not use cache";
 set test.comment;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
diff --git a/ql/src/test/queries/clientpositive/results_cache_transactional.q b/ql/src/test/queries/clientpositive/results_cache_transactional.q
index 13fb8483dc..89a6a55b62 100644
--- a/ql/src/test/queries/clientpositive/results_cache_transactional.q
+++ b/ql/src/test/queries/clientpositive/results_cache_transactional.q
@@ -3,34 +3,34 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-create table tab1 (key string, value string) stored as orc tblproperties ('transactional'='true');
-create table tab2 (key string, value string) stored as orc tblproperties ('transactional'='true');
+create table tab1_n1 (key string, value string) stored as orc tblproperties ('transactional'='true');
+create table tab2_n1 (key string, value string) stored as orc tblproperties ('transactional'='true');
-insert into tab1 select * from default.src;
-insert into tab2 select * from default.src;
+insert into tab1_n1 select * from default.src;
+insert into tab2_n1 select * from default.src;
 set hive.query.results.cache.enabled=true;
 set hive.query.results.cache.nontransactional.tables.enabled=false;
 explain
-select max(key) from tab1;
-select max(key) from tab1;
+select max(key) from tab1_n1;
+select max(key) from tab1_n1;
 set test.comment="Query on transactional table should use cache";
 set test.comment;
 explain
-select max(key) from tab1;
-select max(key) from tab1;
+select max(key) from tab1_n1;
+select max(key) from tab1_n1;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n1 join tab2_n1 on (tab1_n1.key = tab2_n1.key);
+select count(*) from tab1_n1 join tab2_n1 on (tab1_n1.key = tab2_n1.key);
 set test.comment="Join on transactional tables, should use cache";
 set test.comment;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n1 join tab2_n1 on (tab1_n1.key = tab2_n1.key);
+select count(*) from tab1_n1 join tab2_n1 on (tab1_n1.key = tab2_n1.key);
 -- Non-transactional tables
@@ -46,12 +46,12 @@ select max(key) from src;
 select max(key) from src;
 explain
-select count(*) from tab1 join src on (tab1.key = src.key);
-select count(*) from tab1 join src on (tab1.key = src.key);
+select count(*) from tab1_n1 join src on (tab1_n1.key = src.key);
+select count(*) from tab1_n1 join src on (tab1_n1.key = src.key);
 set test.comment="Join uses non-transactional table, should not use cache";
 set test.comment;
 explain
-select count(*) from tab1 join src on (tab1.key = src.key);
-select count(*) from tab1 join src on (tab1.key = src.key);
+select count(*) from tab1_n1 join src on (tab1_n1.key = src.key);
+select count(*) from tab1_n1 join src on (tab1_n1.key = src.key);
diff --git a/ql/src/test/queries/clientpositive/results_cache_with_masking.q b/ql/src/test/queries/clientpositive/results_cache_with_masking.q
index db27525aa0..d078092507 100644
--- a/ql/src/test/queries/clientpositive/results_cache_with_masking.q
+++ b/ql/src/test/queries/clientpositive/results_cache_with_masking.q
@@ -6,14 +6,14 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho
 set hive.query.results.cache.enabled=true;
 set hive.query.results.cache.nontransactional.tables.enabled=true;
-create table masking_test as select cast(key as int) as key, value from src;
+create table masking_test_n7 as select cast(key as int) as key, value from src;
 explain
-select key, count(*) from masking_test group by key;
-select key, count(*) from masking_test group by key;
+select key, count(*) from masking_test_n7 group by key;
+select key, count(*) from masking_test_n7 group by key;
 -- This time we should use the cache
 explain
-select key, count(*) from masking_test group by key;
-select key, count(*) from masking_test group by key;
+select key, count(*) from masking_test_n7 group by key;
+select key, count(*) from masking_test_n7 group by key;
diff --git a/ql/src/test/queries/clientpositive/retry_failure.q b/ql/src/test/queries/clientpositive/retry_failure.q
index 0d5ce4f07d..ad12ecd81a 100644
--- a/ql/src/test/queries/clientpositive/retry_failure.q
+++ b/ql/src/test/queries/clientpositive/retry_failure.q
@@ -1,8 +1,8 @@
 --! qt:dataset:src
 SET hive.vectorized.execution.enabled=false;
-create table tx(a int,f string);
-insert into tx values (1,'non_existent_file');
+create table tx_n1(a int,f string);
+insert into tx_n1 values (1,'non_existent_file');
 set zzz=1;
 set reexec.overlay.zzz=2;
@@ -10,4 +10,4 @@ set reexec.overlay.zzz=2;
 set hive.query.reexecution.enabled=true;
 set hive.query.reexecution.strategies=overlay;
-select assert_true(${hiveconf:zzz} > a) from tx group by a;
+select assert_true(${hiveconf:zzz} > a) from tx_n1 group by a;
diff --git a/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q b/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q
index e1c70ed765..08fd4a2551 100644
--- a/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q
+++ b/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q
@@ -1,7 +1,7 @@
 SET hive.vectorized.execution.enabled=false;
-create table tx(a int,u int);
-insert into tx values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(10,10);
+create table tx_n2(a int,u int);
+insert into tx_n2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(10,10);
 create table px(a int,p int);
 insert into px values (2,2),(3,3),(5,5),(7,7),(11,11);
@@ -12,19 +12,19 @@ set hive.query.reexecution.enabled=true;
 set hive.query.reexecution.strategies=overlay,reoptimize;
 explain REOPTIMIZATION
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n2 join px on (u=p) where u<10 and p>2;
 set hive.auto.convert.join=false;
 explain analyze
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n2 join px on (u=p) where u<10 and p>2;
 set hive.auto.convert.join=true;
 explain analyze
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n2 join px on (u=p) where u<10 and p>2;
 set zzz=1;
 set reexec.overlay.zzz=2000;
 explain
-select assert_true_oom(${hiveconf:zzz} > sum(u*p)) from tx join px on (tx.a=px.a) where u<10 and p>2;
-select assert_true_oom(${hiveconf:zzz} > sum(u*p)) from tx join px on (tx.a=px.a) where u<10 and p>2;
+select assert_true_oom(${hiveconf:zzz} > sum(u*p)) from tx_n2 join px on (tx_n2.a=px.a) where u<10 and p>2;
+select assert_true_oom(${hiveconf:zzz} > sum(u*p)) from tx_n2 join px on (tx_n2.a=px.a) where u<10 and p>2;
diff --git a/ql/src/test/queries/clientpositive/root_dir_external_table.q b/ql/src/test/queries/clientpositive/root_dir_external_table.q
index a08d640fe5..7763ce2432 100644
--- a/ql/src/test/queries/clientpositive/root_dir_external_table.q
+++ b/ql/src/test/queries/clientpositive/root_dir_external_table.q
@@ -7,7 +7,7 @@ insert overwrite directory "hdfs:///tmp/test_root_dir_external_table" select key
 dfs -cp /tmp/test_root_dir_external_table/000000_0 /000000_0;
 dfs -rmr hdfs:///tmp/test_root_dir_external_table;
-create external table roottable (key string) row format delimited fields terminated by '\\t' stored as textfile location 'hdfs:///';
-select count(*) from roottable;
+create external table roottable_n0 (key string) row format delimited fields terminated by '\\t' stored as textfile location 'hdfs:///';
+select count(*) from roottable_n0;
 dfs -rmr /000000_0;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q b/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q
index 7ebe1270a9..ac93eedd24 100644
--- a/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q
+++ b/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q
@@ -10,19 +10,19 @@ set hive.auto.convert.join.noconditionaltask.size=50;
 -- This is mainly intended for spark, to test runtime skew join together with map join
-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n94(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n94;
 EXPLAIN
 SELECT COUNT(*) FROM
 (SELECT src1.key,src1.value FROM src src1 JOIN src src2 ON src1.key=src2.key) a
 JOIN
- (SELECT src.key,src.value FROM src JOIN T1 ON src.key=T1.key) b
+ (SELECT src.key,src.value FROM src JOIN T1_n94 ON src.key=T1_n94.key) b
 ON a.key=b.key;
 SELECT COUNT(*) FROM
 (SELECT src1.key,src1.value FROM src src1 JOIN src src2 ON src1.key=src2.key) a
 JOIN
- (SELECT src.key,src.value FROM src JOIN T1 ON src.key=T1.key) b
+ (SELECT src.key,src.value FROM src JOIN T1_n94 ON src.key=T1_n94.key) b
 ON a.key=b.key;
diff --git a/ql/src/test/queries/clientpositive/runtime_stats_hs2.q b/ql/src/test/queries/clientpositive/runtime_stats_hs2.q
index 34a8dd3f2f..1a02eac474 100644
--- a/ql/src/test/queries/clientpositive/runtime_stats_hs2.q
+++ b/ql/src/test/queries/clientpositive/runtime_stats_hs2.q
@@ -1,9 +1,9 @@
-create table tx(a int,u int);
-insert into tx values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(10,10);
+create table tx_n3(a int,u int);
+insert into tx_n3 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(10,10);
-create table px(a int,p int);
-insert into px values (2,2),(3,3),(5,5),(7,7),(11,11);
+create table px_n0(a int,p int);
+insert into px_n0 values (2,2),(3,3),(5,5),(7,7),(11,11);
 set hive.explain.user=true;
 set hive.query.reexecution.enabled=true;
@@ -13,10 +13,10 @@ set hive.query.reexecution.stats.persist.scope=hiveserver;
 -- join output estimate is underestimated: 1 row
 explain
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n3 join px_n0 on (u=p) where u<10 and p>2;
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n3 join px_n0 on (u=p) where u<10 and p>2;
 -- join output estimate is 3 rows ; all the operators stats are "runtime"
 explain
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n3 join px_n0 on (u=p) where u<10 and p>2;
diff --git a/ql/src/test/queries/clientpositive/sample1.q b/ql/src/test/queries/clientpositive/sample1.q
index 16c5bcddf2..c7dcbbaa65 100644
--- a/ql/src/test/queries/clientpositive/sample1.q
+++ b/ql/src/test/queries/clientpositive/sample1.q
@@ -1,17 +1,17 @@
 --! qt:dataset:srcpart
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n89(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE;
 -- no input pruning, no sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n89 SELECT s.*
 FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
 WHERE s.ds='2008-04-08' and s.hr='11';
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n89 SELECT s.*
 FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
 WHERE s.ds='2008-04-08' and s.hr='11';
-SELECT dest1.* FROM dest1;
+SELECT dest1_n89.* FROM dest1_n89;
 select count(1) from srcbucket;
diff --git a/ql/src/test/queries/clientpositive/sample2.q b/ql/src/test/queries/clientpositive/sample2.q
index b9edb7dc06..3cd5521978 100644
--- a/ql/src/test/queries/clientpositive/sample2.q
+++ b/ql/src/test/queries/clientpositive/sample2.q
@@ -1,14 +1,14 @@
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n29(key INT, value STRING) STORED AS TEXTFILE;
 -- input pruning, no sample filter
 -- default table sample columns
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n29 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s;
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n29 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s;
-SELECT dest1.* FROM dest1
+SELECT dest1_n29.* FROM dest1_n29
 order by key, value;
diff --git a/ql/src/test/queries/clientpositive/sample4.q b/ql/src/test/queries/clientpositive/sample4.q
index 49d7418feb..756d05ca3a 100644
--- a/ql/src/test/queries/clientpositive/sample4.q
+++ b/ql/src/test/queries/clientpositive/sample4.q
@@ -1,14 +1,14 @@
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n118(key INT, value STRING) STORED AS TEXTFILE;
 -- bucket column is the same as table sample
 -- No need for sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n118 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s;
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n118 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s;
-SELECT dest1.* FROM dest1
+SELECT dest1_n118.* FROM dest1_n118
 order by key, value;
diff --git a/ql/src/test/queries/clientpositive/sample5.q b/ql/src/test/queries/clientpositive/sample5.q
index 5cb6ffdc3e..a659f2b5d5 100644
--- a/ql/src/test/queries/clientpositive/sample5.q
+++ b/ql/src/test/queries/clientpositive/sample5.q
@@ -1,16 +1,16 @@
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n69(key INT, value STRING) STORED AS TEXTFILE;
 -- SORT_QUERY_RESULTS
 -- no input pruning, sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.* -- here's another test
+INSERT OVERWRITE TABLE dest1_n69 SELECT s.* -- here's another test
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s;
-INSERT OVERWRITE TABLE dest1 SELECT s.* -- here's another test
+INSERT OVERWRITE TABLE dest1_n69 SELECT s.* -- here's another test
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s;
-SELECT dest1.* FROM dest1 SORT BY key, value;
+SELECT dest1_n69.* FROM dest1_n69 SORT BY key, value;
diff --git a/ql/src/test/queries/clientpositive/sample6.q b/ql/src/test/queries/clientpositive/sample6.q
index 6972897102..292338cb0b 100644
--- a/ql/src/test/queries/clientpositive/sample6.q
+++ b/ql/src/test/queries/clientpositive/sample6.q
@@ -1,17 +1,17 @@
 --! qt:dataset:srcbucket2
 --! qt:dataset:srcbucket
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n27(key INT, value STRING) STORED AS TEXTFILE;
 -- both input pruning and sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n27 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s;
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n27 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s;
-SELECT dest1.* FROM dest1
+SELECT dest1_n27.* FROM dest1_n27
 order by key, value;
 EXPLAIN EXTENDED SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 4 OUT OF 4 on key) s
diff --git a/ql/src/test/queries/clientpositive/sample7.q b/ql/src/test/queries/clientpositive/sample7.q
index e8f533678f..dbbd26292c 100644
--- a/ql/src/test/queries/clientpositive/sample7.q
+++ b/ql/src/test/queries/clientpositive/sample7.q
@@ -1,15 +1,15 @@
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n160(key INT, value STRING) STORED AS TEXTFILE;
 -- both input pruning and sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n160 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100;
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n160 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100;
-SELECT dest1.* FROM dest1
+SELECT dest1_n160.* FROM dest1_n160
 order by key, value;
diff --git a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
index 772b8a8fa3..803ca91d8e 100644
--- a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
+++ b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
@@ -18,25 +18,25 @@ set hive.merge.smallfiles.avgsize=1;
 -- fixed in MAPREDUCE-2046 which is included in 0.22.
 -- create file inputs
-create table sih_i_part (key int, value string) partitioned by (p string);
-insert overwrite table sih_i_part partition (p='1') select key, value from src;
-insert overwrite table sih_i_part partition (p='2') select key+10000, value from src;
-insert overwrite table sih_i_part partition (p='3') select key+20000, value from src;
-create table sih_src as select key, value from sih_i_part order by key, value;
-create table sih_src2 as select key, value from sih_src order by key, value;
+create table sih_i_part_n0 (key int, value string) partitioned by (p string);
+insert overwrite table sih_i_part_n0 partition (p='1') select key, value from src;
+insert overwrite table sih_i_part_n0 partition (p='2') select key+10000, value from src;
+insert overwrite table sih_i_part_n0 partition (p='3') select key+20000, value from src;
+create table sih_src_n0 as select key, value from sih_i_part_n0 order by key, value;
+create table sih_src2_n0 as select key, value from sih_src_n0 order by key, value;
 set hive.exec.post.hooks = org.apache.hadoop.hive.ql.hooks.VerifyIsLocalModeHook ;
 set mapred.job.tracker=localhost:58;
 set hive.exec.mode.local.auto.input.files.max=1;
 -- Sample split, running locally limited by num tasks
-select count(1) from sih_src tablesample(1 percent);
+select count(1) from sih_src_n0 tablesample(1 percent);
 -- sample two tables
-select count(1) from sih_src tablesample(1 percent)a join sih_src2 tablesample(1 percent)b on a.key = b.key;
+select count(1) from sih_src_n0 tablesample(1 percent)a join sih_src2_n0 tablesample(1 percent)b on a.key = b.key;
 set hive.exec.mode.local.auto.inputbytes.max=1000;
 set hive.exec.mode.local.auto.input.files.max=4;
 -- sample split, running locally limited by max bytes
-select count(1) from sih_src tablesample(1 percent);
+select count(1) from sih_src_n0 tablesample(1 percent);
diff --git a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
index cb80ef4e11..1675263b84 100644
--- a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
+++ b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
@@ -14,12 +14,12 @@ set hive.compute.query.using.stats=true;
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 -- create file inputs
-create table sih_i_part (key int, value string) partitioned by (p string);
-insert overwrite table sih_i_part partition (p='1') select key, value from src;
-insert overwrite table sih_i_part partition (p='2') select key+10000, value from src;
-insert overwrite table sih_i_part partition (p='3') select key+20000, value from src;
-create table sih_src as select key, value from sih_i_part order by key, value;
-create table sih_src2 as select key, value from sih_src order by key, value;
+create table sih_i_part_n1 (key int, value string) partitioned by (p string);
+insert overwrite table sih_i_part_n1 partition (p='1') select key, value from src;
+insert overwrite table sih_i_part_n1 partition (p='2') select key+10000, value from src;
+insert overwrite table sih_i_part_n1 partition (p='3') select key+20000, value from src;
+create table sih_src_n1 as select key, value from sih_i_part_n1 order by key, value;
+create table sih_src2_n1 as select key, value from sih_src_n1 order by key, value;
 set hive.exec.post.hooks = org.apache.hadoop.hive.ql.hooks.VerifyIsLocalModeHook;
 set mapreduce.framework.name=yarn;
@@ -34,16 +34,16 @@ set hive.sample.seednumber=7;
 -- sample split, running locally limited by num tasks
-desc formatted sih_src;
+desc formatted sih_src_n1;
-explain select count(1) from sih_src;
+explain select count(1) from sih_src_n1;
-select count(1) from sih_src;
+select count(1) from sih_src_n1;
-explain select count(1) from sih_src tablesample(1 percent);
+explain select count(1) from sih_src_n1 tablesample(1 percent);
-select count(1) from sih_src tablesample(1 percent);
+select count(1) from sih_src_n1 tablesample(1 percent);
-explain select count(1) from sih_src tablesample(10 rows);
+explain select count(1) from sih_src_n1 tablesample(10 rows);
-select count(1) from sih_src tablesample(10 rows);
+select count(1) from sih_src_n1 tablesample(10 rows);
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
index df526964c3..723112332a 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
@@ -24,9 +24,9 @@ set hive.llap.io.enabled=false;
 -- Instead just one explain vectorization only detail
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n29(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n29;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -35,48 +35,48 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_permute_select_n9(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_permute_select_n9 partition(part=1) VALUES (1, 1111, 'new');
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n9 add columns(c int);
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333);
+insert into table part_add_int_permute_select_n9 partition(part=1) VALUES (2, 2222, 'new', 3333);
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n9;
+select insert_num,part,a,b,c from part_add_int_permute_select_n9;
+select insert_num,part,c from part_add_int_permute_select_n9;
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n9;
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_string_permute_select_n9(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_string_permute_select_n9 partition(part=1) VALUES (1, 1111, 'new');
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n9 add columns(c int, d string);
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
+insert into table part_add_int_string_permute_select_n9 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
 explain vectorization only detail
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n9;
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_string_permute_select;
-select insert_num,part,a,b,c from part_add_int_string_permute_select;
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,d from part_add_int_string_permute_select;
-select insert_num,part,c from part_add_int_string_permute_select;
-select insert_num,part,d from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n9;
+select insert_num,part,a,b,c from part_add_int_string_permute_select_n9;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n9;
+select insert_num,part,a,c,d from part_add_int_string_permute_select_n9;
+select insert_num,part,a,d from part_add_int_string_permute_select_n9;
+select insert_num,part,c from part_add_int_string_permute_select_n9;
+select insert_num,part,d from part_add_int_string_permute_select_n9;
-drop table part_add_int_string_permute_select;
+drop table part_add_int_string_permute_select_n9;
@@ -87,18 +87,18 @@ drop table part_add_int_string_permute_select;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
 --
-CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_change_string_group_double_n9(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table part_change_string_group_double_n9 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n29;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table part_change_string_group_double_n9 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
+insert into table part_change_string_group_double_n9 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n29 WHERE insert_num = 111;
-select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
+select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n9;
-drop table part_change_string_group_double;
+drop table part_change_string_group_double_n9;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -107,18 +107,18 @@ drop table part_change_string_group_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
 --
-CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_change_date_group_string_group_date_timestamp_n9(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table part_change_date_group_string_group_date_timestamp_n9 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n29;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table part_change_date_group_string_group_date_timestamp_n9 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table part_change_date_group_string_group_date_timestamp_n9 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n9;
-drop table part_change_date_group_string_group_date_timestamp;
+drop table part_change_date_group_string_group_date_timestamp_n9;
@@ -134,36 +134,36 @@ drop table part_change_date_group_string_group_date_timestamp;
 -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n9(insert_num int,
             c1 tinyint, c2 smallint, c3 int, c4 bigint,
            c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint,
            c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint,
            b STRING) PARTITIONED BY(part INT);
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n9 partition(part=1) SELECT insert_num,
            tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1,
            tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1,
-           'original' FROM schema_evolution_data;
+           'original' FROM schema_evolution_data_n29;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n9;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_multi_ints_string_group_n9 replace columns (insert_num int,
            c1 STRING, c2 STRING, c3 STRING, c4 STRING,
           c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
           c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
           b STRING) ;
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n9 partition(part=1) VALUES (111,
           'filler', 'filler', 'filler', 'filler',
          'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
          'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
          'new');
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n9;
-drop table part_change_numeric_group_string_group_multi_ints_string_group;
+drop table part_change_numeric_group_string_group_multi_ints_string_group_n9;
@@ -174,36 +174,36 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group;
 -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n9(insert_num int,
             c1 decimal(38,18), c2 float, c3 double,
           c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double,
           c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double,
          b STRING) PARTITIONED BY(part INT);
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_floating_string_group_n9 partition(part=1) SELECT insert_num,
           decimal1, float1, double1,
          decimal1, float1, double1, decimal1, float1, double1,
          decimal1, float1, double1, decimal1, float1, double1,
-         'original' FROM schema_evolution_data;
+         'original' FROM schema_evolution_data_n29;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n9;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_floating_string_group_n9 replace columns (insert_num int,
           c1 STRING, c2 STRING, c3 STRING,
          c4 CHAR(50), c5 CHAR(50), c6 CHAR(50),
          c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
         c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50),
         c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
         b STRING);
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_floating_string_group_n9 partition(part=1) VALUES (111,
          'filler', 'filler', 'filler',
          'filler', 'filler', 'filler',
          'filler', 'filler', 'filler',
         'filler', 'filler', 'filler',
         'filler', 'filler', 'filler',
         'new');
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n9;
-drop table part_change_numeric_group_string_group_floating_string_group;
+drop table part_change_numeric_group_string_group_floating_string_group_n9;
@@ -215,34 +215,34 @@ drop table part_change_numeric_group_string_group_floating_string_group;
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
 -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
 --
-CREATE TABLE part_change_string_group_string_group_string(insert_num int,
+CREATE TABLE part_change_string_group_string_group_string_n9(insert_num int,
            c1 string, c2 string, c3 string, c4 string,
           c5 CHAR(50), c6 CHAR(50), c7 CHAR(50),
          c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num,
+insert into table part_change_string_group_string_group_string_n9 partition(part=1) SELECT insert_num,
           string2, string2, string2, string2,
          string2, string2, string2,
         string2, string2, string2,
-        'original' FROM schema_evolution_data;
+        'original' FROM schema_evolution_data_n29;
-select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n9;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_string_group_string replace columns (insert_num int,
+alter table part_change_string_group_string_group_string_n9 replace columns (insert_num int,
           c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
          c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
         c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ;
-insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111,
+insert into table part_change_string_group_string_group_string_n9 partition(part=1) VALUES (111,
           'filler', 'filler', 'filler', 'filler',
          'filler', 'filler', 'filler',
         'filler', 'filler', 'filler',
        'new');
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n9;
-drop table part_change_string_group_string_group_string;
+drop table part_change_string_group_string_group_string_n9;
 ------------------------------------------------------------------------------------------
@@ -256,40 +256,40 @@ drop table part_change_string_group_string_group_string;
 -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
 -- BIGINT, (DECIMAL, FLOAT, DOUBLE)
 --
-CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9(insert_num int,
             c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
            c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
           c12 int, c13 int, c14 int, c15 int,
          c16 bigint, c17 bigint, c18 bigint,
          b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 partition(part=1) SELECT insert_num,
           tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
          smallint1, smallint1, smallint1, smallint1, smallint1,
         int1, int1, int1, int1,
        bigint1, bigint1, bigint1,
-       'original' FROM schema_evolution_data;
+       'original' FROM schema_evolution_data_n29;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 replace columns (insert_num int,
           c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
          c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
         c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
        c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
        b STRING) ;
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 partition(part=1) VALUES (111,
           7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
          80000, 90000000, 1234.5678, 9876.543, 789.321,
         90000000, 1234.5678, 9876.543, 789.321,
        1234.5678, 9876.543, 789.321,
        'new');
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9;
-drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9;
@@ -298,23 +298,23 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 -- DECIMAL, (FLOAT, DOUBLE) and
 -- FLOAT, (DOUBLE)
 --
-CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n9(insert_num int,
            c1 decimal(38,18), c2 decimal(38,18),
           c3 float,
           b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n9 partition(part=1) SELECT insert_num,
           decimal1, decimal1,
          float1,
-         'original' FROM schema_evolution_data;
+         'original' FROM schema_evolution_data_n29;
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n9;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n9 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n9 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n9; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n9; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q index 3d13f7690c..7366326922 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q @@ -25,9 +25,9 @@ set hive.llap.io.encode.enabled=true; -- Instead just one explain vectorization only detail -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n22(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n22; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -36,48 +36,48 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. 
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE part_add_int_permute_select_n5(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_permute_select_n5 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... -alter table part_add_int_permute_select add columns(c int); +alter table part_add_int_permute_select_n5 add columns(c int); -insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +insert into table part_add_int_permute_select_n5 partition(part=1) VALUES (2, 2222, 'new', 3333); -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_permute_select; -select insert_num,part,a,b,c from part_add_int_permute_select; -select insert_num,part,c from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n5; +select insert_num,part,a,b,c from part_add_int_permute_select_n5; +select insert_num,part,c from part_add_int_permute_select_n5; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n5; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE part_add_int_string_permute_select_n5(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n5 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... 
-alter table part_add_int_string_permute_select add columns(c int, d string); +alter table part_add_int_string_permute_select_n5 add columns(c int, d string); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +insert into table part_add_int_string_permute_select_n5 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); explain vectorization only detail -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n5; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_string_permute_select; -select insert_num,part,a,b,c from part_add_int_string_permute_select; -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; -select insert_num,part,a,c,d from part_add_int_string_permute_select; -select insert_num,part,a,d from part_add_int_string_permute_select; -select insert_num,part,c from part_add_int_string_permute_select; -select insert_num,part,d from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n5; +select insert_num,part,a,b,c from part_add_int_string_permute_select_n5; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n5; +select insert_num,part,a,c,d from part_add_int_string_permute_select_n5; +select insert_num,part,a,d from part_add_int_string_permute_select_n5; +select insert_num,part,c from part_add_int_string_permute_select_n5; +select insert_num,part,d from part_add_int_string_permute_select_n5; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n5; @@ -88,18 +88,18 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE part_change_string_group_double_n5(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n5 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n22; -- Table-Non-Cascade CHANGE COLUMNS ... 
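The CHANGE COLUMNS subsections below lean on ORC schema evolution: REPLACE COLUMNS updates only the metastore schema, existing data files keep their original physical types, and conversion happens at read time. For the string group going to DOUBLE, values that parse as numbers convert, and unparseable text is expected to surface as NULL. Roughly, with a hypothetical table:

create table str_to_double_demo(c1 string) stored as orc;
insert into table str_to_double_demo values ('789.321'), ('not-a-number');
alter table str_to_double_demo replace columns (c1 double);
-- the stored bytes are still strings; the scan converts them:
select c1 from str_to_double_demo;   -- 789.321 and NULL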
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table part_change_string_group_double_n5 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +insert into table part_change_string_group_double_n5 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n22 WHERE insert_num = 111; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n5; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n5; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -108,18 +108,18 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n5(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n5 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n22; -- Table-Non-Cascade CHANGE COLUMNS ... 
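For the DATE/TIMESTAMP-to-string-group conversions, each value is read back in its canonical text rendering, and the CHAR(15)/VARCHAR(15) targets additionally truncate to the declared length (the "trunc" cases in the subsection titles). A sketch with a hypothetical table:

create table date_to_string_demo(c1 date, c2 timestamp) stored as orc;
insert into table date_to_string_demo values
  (cast('2009-12-31' as date), cast('2009-12-31 04:17:29' as timestamp));
alter table date_to_string_demo replace columns (c1 varchar(15), c2 varchar(15));
-- the date fits in 15 characters; the 19-character timestamp text is cut short:
select c1, c2 from date_to_string_demo;   -- '2009-12-31', '2009-12-31 04:1'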
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table part_change_date_group_string_group_date_timestamp_n5 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table part_change_date_group_string_group_date_timestamp_n5 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n5; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n5; @@ -135,36 +135,36 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n5(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n5 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n22; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
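The multi-int-to-string-group block checks the same read-time conversion for the integer types, including the hazard the CHAR(5)/VARCHAR(5) columns exist to expose: numerals longer than the target length are silently truncated. For example (hypothetical table):

create table int_to_char_demo(c1 bigint) stored as orc;
insert into table int_to_char_demo values (90000000);
alter table int_to_char_demo replace columns (c1 char(5));
-- only the first five characters of '90000000' survive:
select c1 from int_to_char_demo;   -- '90000'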
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_multi_ints_string_group_n5 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n5 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n5; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n5; @@ -175,36 +175,36 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n5(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n5 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n22; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n5 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n5 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n5; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n5; @@ -216,34 +216,34 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n5(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n5 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n22; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
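Within the string group itself, the conversions are about length semantics rather than parsing: CHAR(n) pads shorter values with trailing spaces and truncates longer ones, VARCHAR(n) truncates without padding, and STRING is unbounded. A sketch (hypothetical table):

create table str_to_str_demo(c1 string, c2 string) stored as orc;
insert into table str_to_str_demo values ('new fourteen', 'new fourteen');
alter table str_to_str_demo replace columns (c1 char(9), c2 varchar(50));
-- c1 is cut to nine characters; c2 fits its new bound unchanged:
select c1, c2 from str_to_str_demo;   -- 'new fourt', 'new fourteen'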
-alter table part_change_string_group_string_group_string replace columns (insert_num int, +alter table part_change_string_group_string_group_string_n5 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, +insert into table part_change_string_group_string_group_string_n5 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n5; -drop table part_change_string_group_string_group_string; +drop table part_change_string_group_string_group_string_n5; ------------------------------------------------------------------------------------------ @@ -257,40 +257,40 @@ drop table part_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 partition(part=1) SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n22; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
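The "lower to higher" naming marks the lossless direction of numeric evolution: every tinyint value is representable as smallint/int/bigint, and the integer types widen into decimal(38,18), float, and double, so pre-existing rows read back unchanged. Minimal sketch (hypothetical table):

create table widen_demo(c1 tinyint) stored as orc;
insert into table widen_demo values (100);
alter table widen_demo replace columns (c1 bigint);
select c1 from widen_demo;   -- 100, promoted without loss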
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 partition(part=1) VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, 1234.5678, 9876.543, 789.321, 'new'); -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5; -drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5; @@ -299,23 +299,23 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n5(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n5 partition(part=1) SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n22; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
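The decimal-to-float subsection is the one "higher" direction that can still lose information: decimal(38,18) carries far more significant digits than a float (roughly seven), so values may round on the way out. For instance (hypothetical table):

create table dec_to_float_demo(c1 decimal(38,18)) stored as orc;
insert into table dec_to_float_demo values (1234.5678);
alter table dec_to_float_demo replace columns (c1 float);
-- the float result may round in the last digit (e.g. 1234.5677 rather than 1234.5678):
select c1 from dec_to_float_demo;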
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n5 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n5 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n5; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n5; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q index 70f8cc9b7e..b540b6e635 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q @@ -25,112 +25,112 @@ set hive.llap.io.enabled=false; -- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences... -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n34(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n34; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n10(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n10; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns --- -CREATE TABLE partitioned_update_1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_update_1_n1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_update_1_n1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n34; -- Table-Non-Cascade ADD COLUMNS ... -alter table partitioned_update_1 add columns(c int, d string); +alter table partitioned_update_1_n1 add columns(c int, d string); -insert into table partitioned_update_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_update_1_n1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num <=110; -insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_update_1_n1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num > 110; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n1; -- UPDATE New Columns -update partitioned_update_1 set c=99; +update partitioned_update_1_n1 set c=99; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n1; -alter table partitioned_update_1 partition(part=1) compact 'major'; -alter table partitioned_update_1 partition(part=2) compact 'major'; +alter table partitioned_update_1_n1 partition(part=1) compact 'major'; +alter table partitioned_update_1_n1 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n1; -DROP TABLE partitioned_update_1; +DROP TABLE partitioned_update_1_n1; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column --- -CREATE TABLE partitioned_delete_1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_delete_1_n1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_delete_1_n1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n34; -- Table-Non-Cascade ADD COLUMNS ... 
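A word on the mechanics these update/delete variations exercise: on an ACID table, UPDATE is physically a delete of the old row version plus an insert of the new one, written to delta and delete_delta directories; ALTER TABLE ... COMPACT 'major' merely queues a major compaction, and a background worker later folds base and deltas into a fresh base. That is why each scenario repeats the same SELECT after compaction: the logical contents must not change. The shape of the pattern (hypothetical table, assuming the ACID settings at the top of these files):

create table acid_update_demo(k int, v int) clustered by (k) into 2 buckets stored as orc tblproperties ('transactional'='true');
insert into table acid_update_demo values (1, 10);
update acid_update_demo set v = 99 where k = 1;   -- writes delta + delete_delta files
alter table acid_update_demo compact 'major';     -- request only; a worker performs the merge
select k, v from acid_update_demo;                -- (1, 99) before and after compaction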
-alter table partitioned_delete_1 add columns(c int, d string); +alter table partitioned_delete_1_n1 add columns(c int, d string); -insert into table partitioned_delete_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_delete_1_n1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num <=110; -insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_delete_1_n1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num > 110; -select part,a,b,c,d from partitioned_delete_1; +select part,a,b,c,d from partitioned_delete_1_n1; -- DELETE where old column -delete from partitioned_delete_1 where insert_num = 102 or insert_num = 104 or insert_num = 106; +delete from partitioned_delete_1_n1 where insert_num = 102 or insert_num = 104 or insert_num = 106; -select insert_num,part,a,b,c,d from partitioned_delete_1; +select insert_num,part,a,b,c,d from partitioned_delete_1_n1; -alter table partitioned_delete_1 partition(part=1) compact 'major'; -alter table partitioned_delete_1 partition(part=2) compact 'major'; +alter table partitioned_delete_1_n1 partition(part=1) compact 'major'; +alter table partitioned_delete_1_n1 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_delete_1; +select insert_num,part,a,b,c,d from partitioned_delete_1_n1; -DROP TABLE partitioned_delete_1; +DROP TABLE partitioned_delete_1_n1; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column --- -CREATE TABLE partitioned_delete_2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_delete_2_n1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_delete_2_n1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n34; -- Table-Non-Cascade ADD COLUMNS ... 
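DELETE follows the same write path but records only tombstones: each affected row is marked by its ROW__ID in a delete_delta, and readers merge the deltas to hide it. The "where old column" / "where new column" split confirms the predicate may reference columns from either side of the schema change. Sketch (hypothetical table, same ACID assumptions as above):

create table acid_delete_demo(k int, v int) clustered by (k) into 2 buckets stored as orc tblproperties ('transactional'='true');
insert into table acid_delete_demo values (1, 10), (2, 20);
delete from acid_delete_demo where k = 1;   -- tombstones the row via its ROW__ID
select k, v from acid_delete_demo;          -- only (2, 20) remains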
-alter table partitioned_delete_2 add columns(c int, d string); +alter table partitioned_delete_2_n1 add columns(c int, d string); -insert into table partitioned_delete_2 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_delete_2_n1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num <=110; -insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_delete_2_n1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num > 110; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n1; -- DELETE where new column -delete from partitioned_delete_2 where insert_num = 108 or insert_num > 113; +delete from partitioned_delete_2_n1 where insert_num = 108 or insert_num > 113; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n1; -alter table partitioned_delete_2 partition(part=1) compact 'major'; -alter table partitioned_delete_2 partition(part=2) compact 'major'; +alter table partitioned_delete_2_n1 partition(part=1) compact 'major'; +alter table partitioned_delete_2_n1 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n1; -DROP TABLE partitioned_delete_2; +DROP TABLE partitioned_delete_2_n1; --the following tests were moved from system tests -drop table if exists missing_ddl_2; -create table missing_ddl_2(name string, age int); -insert overwrite table missing_ddl_2 select value, key from srcbucket; -alter table missing_ddl_2 add columns (gps double); +drop table if exists missing_ddl_2_n0; +create table missing_ddl_2_n0(name string, age int); +insert overwrite table missing_ddl_2_n0 select value, key from srcbucket; +alter table missing_ddl_2_n0 add columns (gps double); set hive.exec.dynamic.partition.mode=nonstrict; set hive.optimize.sort.dynamic.partition=true; -DROP TABLE IF EXISTS all100kjson_textfile_orc; -CREATE TABLE all100kjson_textfile_orc ( +DROP TABLE IF EXISTS all100kjson_textfile_orc_n0; +CREATE TABLE all100kjson_textfile_orc_n0 ( si smallint, i int, b bigint, @@ -144,20 +144,20 @@ CREATE TABLE all100kjson_textfile_orc ( WITH SERDEPROPERTIES ('timestamp.formats'='yyyy-MM-dd\'T\'HH:mm:ss') STORED AS TEXTFILE; -INSERT INTO TABLE all100kjson_textfile_orc PARTITION (t) SELECT csmallint, cint, cbigint, cfloat, cdouble, cstring1, cboolean1, ctimestamp1, ctinyint FROM alltypesorc WHERE ctinyint > 0; +INSERT INTO TABLE all100kjson_textfile_orc_n0 PARTITION (t) SELECT csmallint, cint, cbigint, cfloat, cdouble, cstring1, cboolean1, ctimestamp1, ctinyint FROM alltypesorc WHERE ctinyint > 0; -ALTER TABLE all100kjson_textfile_orc +ALTER TABLE all100kjson_textfile_orc_n0 SET FILEFORMAT INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'; -INSERT INTO TABLE all100kjson_textfile_orc PARTITION (t) SELECT csmallint, 
cint, cbigint, cfloat, cdouble, cstring1, cboolean1, ctimestamp1, ctinyint FROM alltypesorc WHERE ctinyint < 1 and ctinyint > -50 ; -- HIVE-11977: Hive should handle an external avro table with zero length files present -DROP TABLE IF EXISTS emptyavro; -CREATE TABLE emptyavro (i int) +DROP TABLE IF EXISTS emptyavro_n1; +CREATE TABLE emptyavro_n1 (i int) PARTITIONED BY (s string) STORED AS AVRO; -load data local inpath '../../data/files/empty1.txt' into table emptyavro PARTITION (s='something'); -SELECT COUNT(*) from emptyavro; \ No newline at end of file +load data local inpath '../../data/files/empty1.txt' into table emptyavro_n1 PARTITION (s='something'); +SELECT COUNT(*) from emptyavro_n1; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q index 0d528ea6d4..9dfdf97113 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q @@ -26,13 +26,13 @@ set hive.llap.io.encode.enabled=true; -- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences... -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n9(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n9; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n3(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n3; -- -- @@ -40,14 +40,14 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_ --- CREATE TABLE partitioned_update_1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n9; -- Table-Non-Cascade ADD COLUMNS ... alter table partitioned_update_1 add columns(c int, d string); -insert into table partitioned_update_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_update_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num <=110; -insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num > 110; select insert_num,part,a,b,c,d from partitioned_update_1; @@ -69,14 +69,14 @@ DROP TABLE partitioned_update_1; --- CREATE TABLE partitioned_delete_1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n9; -- Table-Non-Cascade ADD COLUMNS ... alter table partitioned_delete_1 add columns(c int, d string); -insert into table partitioned_delete_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_delete_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num <=110; -insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num > 110; select part,a,b,c,d from partitioned_delete_1; @@ -98,14 +98,14 @@ DROP TABLE partitioned_delete_1; --- CREATE TABLE partitioned_delete_2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n9; -- Table-Non-Cascade ADD COLUMNS ... 
alter table partitioned_delete_2 add columns(c int, d string); -insert into table partitioned_delete_2 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_delete_2 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num <=110; -insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num > 110; select insert_num,part,a,b,c,d from partitioned_delete_2; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q index 19e7bc5443..650d415818 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q @@ -22,9 +22,9 @@ set hive.llap.io.enabled=false; -- Instead just one explain vectorization only detail -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n18(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n18; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -33,48 +33,48 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table_add_int_permute_select_n6(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_permute_select_n6 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n18; -- Table-Non-Cascade ADD COLUMNS ... 
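As the header of each of these files notes, full EXPLAIN output is avoided on ACID tables because write ids perturb the statistics in the .q.out files; instead a single "explain vectorization only detail" is kept, which prints only the vectorization decisions (whether each operator vectorizes, and why) rather than the whole plan. Usage, against a hypothetical table:

explain vectorization only detail
select insert_num, a, b, c from some_acid_table;   -- hypothetical table name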
-alter table table_add_int_permute_select add columns(c int); +alter table table_add_int_permute_select_n6 add columns(c int); -insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000); +insert into table table_add_int_permute_select_n6 VALUES (111, 80000, 'new', 80000); explain vectorization only detail -select insert_num,a,b,c from table_add_int_permute_select; +select insert_num,a,b,c from table_add_int_permute_select_n6; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_permute_select; -select insert_num,a,b,c from table_add_int_permute_select; -select insert_num,c from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n6; +select insert_num,a,b,c from table_add_int_permute_select_n6; +select insert_num,c from table_add_int_permute_select_n6; -drop table table_add_int_permute_select; +drop table table_add_int_permute_select_n6; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table_add_int_string_permute_select_n6(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_string_permute_select_n6 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n18; -- Table-Non-Cascade ADD COLUMNS ... -alter table table_add_int_string_permute_select add columns(c int, d string); +alter table table_add_int_string_permute_select_n6 add columns(c int, d string); -insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler'); +insert into table table_add_int_string_permute_select_n6 VALUES (111, 80000, 'new', 80000, 'filler'); -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_string_permute_select; -select insert_num,a,b,c from table_add_int_string_permute_select; -select insert_num,a,b,c,d from table_add_int_string_permute_select; -select insert_num,a,c,d from table_add_int_string_permute_select; -select insert_num,a,d from table_add_int_string_permute_select; -select insert_num,c from table_add_int_string_permute_select; -select insert_num,d from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n6; +select insert_num,a,b,c from table_add_int_string_permute_select_n6; +select insert_num,a,b,c,d from table_add_int_string_permute_select_n6; +select insert_num,a,c,d from table_add_int_string_permute_select_n6; +select insert_num,a,d from table_add_int_string_permute_select_n6; +select insert_num,c from table_add_int_string_permute_select_n6; +select insert_num,d from table_add_int_string_permute_select_n6; -drop table table_add_int_string_permute_select; +drop table table_add_int_string_permute_select_n6; @@ -85,18 +85,18 @@ drop table table_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE 
table_change_string_group_double_n6(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table table_change_string_group_double_n6 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n18; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table table_change_string_group_double_n6 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new'); +insert into table table_change_string_group_double_n6 VALUES (111, 789.321, 789.321, 789.321, 'new'); -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n6; -drop table table_change_string_group_double; +drop table table_change_string_group_double_n6; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -105,18 +105,18 @@ drop table table_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table_change_date_group_string_group_date_group_n6(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table table_change_date_group_string_group_date_group_n6 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n18; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table table_change_date_group_string_group_date_group_n6 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table table_change_date_group_string_group_date_group_n6 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n6; -drop table table_change_date_group_string_group_date_group; +drop table table_change_date_group_string_group_date_group_n6; @@ -131,36 +131,36 @@ drop table table_change_date_group_string_group_date_group; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n6(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n6 SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n18; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_multi_ints_string_group_n6 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n6 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n6; -drop table table_change_numeric_group_string_group_multi_ints_string_group; +drop table table_change_numeric_group_string_group_multi_ints_string_group_n6; @@ -171,36 +171,36 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n6(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n6 SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n18; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_floating_string_group_n6 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_floating_string_group_n6 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n6; -drop table table_change_numeric_group_string_group_floating_string_group; +drop table table_change_numeric_group_string_group_floating_string_group_n6; ------------------------------------------------------------------------------------------ @@ -211,34 +211,34 @@ drop table table_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE table_change_string_group_string_group_string(insert_num int, +CREATE TABLE table_change_string_group_string_group_string_n6(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_string_group_string_group_string SELECT insert_num, +insert into table table_change_string_group_string_group_string_n6 SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n18; -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_string_group_string_group_string replace columns (insert_num int, +alter table table_change_string_group_string_group_string_n6 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table table_change_string_group_string_group_string VALUES (111, +insert into table table_change_string_group_string_group_string_n6 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n6; -drop table table_change_string_group_string_group_string; +drop table table_change_string_group_string_group_string_n6; @@ -253,40 +253,40 @@ drop table table_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6 SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n18; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6 replace columns (insert_num int,
           c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
           c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
           c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
           c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
           b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6 VALUES (111,
           7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
           80000, 90000000, 1234.5678, 9876.543, 789.321,
           90000000, 1234.5678, 9876.543, 789.321,
           1234.5678, 9876.543, 789.321,
           'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6;
-drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6;
@@ -295,23 +295,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 -- DECIMAL, (FLOAT, DOUBLE) and
 -- FLOAT, (DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n6(insert_num int,
           c1 decimal(38,18), c2 decimal(38,18),
           c3 float,
           b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n6 SELECT insert_num,
           decimal1, decimal1,
           float1,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n18;
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n6;
 -- Table-Non-Cascade CHANGE COLUMNS ...
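-- DECIMAL -> FLOAT/DOUBLE, by contrast, converts exact decimals to binary floating point, so
-- values may read back with rounding; a sketch on a hypothetical toy table:
create table t (insert_num int, c1 decimal(38,18)) stored as orc;
insert into t values (1, 1234.5678);
alter table t replace columns (insert_num int, c1 float);
select c1 from t;   -- expected to be approximately 1234.5678, subject to FLOAT precision
drop table t;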
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n6 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n6 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n6;
-drop table table_change_lower_to_higher_numeric_group_decimal_to_float;
+drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n6;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
index 71ab2e529a..5e617e37ad 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
@@ -23,9 +23,9 @@ set hive.llap.io.encode.enabled=true;
 -- Instead just one explain vectorization only detail
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n4(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n4;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -34,48 +34,48 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table_add_int_permute_select_n0(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n0 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n4;
 -- Table-Non-Cascade ADD COLUMNS ...
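-- The ADD COLUMNS tests below rely on NULL defaulting: rows written before the alter carry no
-- data for the new column, so reads are expected to return NULL for them. A sketch on a
-- hypothetical toy table:
create table t (insert_num int, a int) stored as orc;
insert into t values (1, 10);
alter table t add columns(c int);
select insert_num, a, c from t;   -- the pre-alter row is expected to come back as (1, 10, NULL)
drop table t;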
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n0 add columns(c int);
-insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
+insert into table table_add_int_permute_select_n0 VALUES (111, 80000, 'new', 80000);
 explain vectorization only detail
-select insert_num,a,b,c from table_add_int_permute_select;
+select insert_num,a,b,c from table_add_int_permute_select_n0;
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n0;
+select insert_num,a,b,c from table_add_int_permute_select_n0;
+select insert_num,c from table_add_int_permute_select_n0;
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n0;
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table_add_int_string_permute_select_n0(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n0 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n4;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n0 add columns(c int, d string);
-insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
+insert into table table_add_int_string_permute_select_n0 VALUES (111, 80000, 'new', 80000, 'filler');
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_string_permute_select;
-select insert_num,a,c,d from table_add_int_string_permute_select;
-select insert_num,a,d from table_add_int_string_permute_select;
-select insert_num,c from table_add_int_string_permute_select;
-select insert_num,d from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n0;
+select insert_num,a,b,c from table_add_int_string_permute_select_n0;
+select insert_num,a,b,c,d from table_add_int_string_permute_select_n0;
+select insert_num,a,c,d from table_add_int_string_permute_select_n0;
+select insert_num,a,d from table_add_int_string_permute_select_n0;
+select insert_num,c from table_add_int_string_permute_select_n0;
+select insert_num,d from table_add_int_string_permute_select_n0;
-drop table table_add_int_string_permute_select;
+drop table table_add_int_string_permute_select_n0;
@@ -86,18 +86,18 @@ drop table table_add_int_string_permute_select;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
 --
-CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table_change_string_group_double_n0(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table table_change_string_group_double_n0 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n4;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table table_change_string_group_double_n0 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
+insert into table table_change_string_group_double_n0 VALUES (111, 789.321, 789.321, 789.321, 'new');
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n0;
-drop table table_change_string_group_double;
+drop table table_change_string_group_double_n0;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -106,18 +106,18 @@ drop table table_change_string_group_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
 --
-CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table_change_date_group_string_group_date_group_n0(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table table_change_date_group_string_group_date_group_n0 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n4;
 -- Table-Non-Cascade CHANGE COLUMNS ...
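-- For the DATE/TIMESTAMP -> STRING_GROUP conversion below, values are expected to render in
-- Hive's canonical text forms ('yyyy-MM-dd' for DATE, 'yyyy-MM-dd HH:mm:ss[.fffffffff]' for
-- TIMESTAMP), with the CHAR(15)/VARCHAR(15) targets truncating the longer timestamp strings.
-- A sketch on a hypothetical toy table:
create table t (insert_num int, d date) stored as orc;
insert into t values (1, date '2009-12-25');
alter table t replace columns (insert_num int, d string);
select d from t;   -- expected: '2009-12-25'
drop table t;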
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table table_change_date_group_string_group_date_group_n0 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table table_change_date_group_string_group_date_group_n0 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n0;
-drop table table_change_date_group_string_group_date_group;
+drop table table_change_date_group_string_group_date_group_n0;
@@ -132,36 +132,36 @@ drop table table_change_date_group_string_group_date_group;
 -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n0(insert_num int,
           c1 tinyint, c2 smallint, c3 int, c4 bigint,
           c5 tinyint, c6 smallint, c7 int, c8 bigint,
           c9 tinyint, c10 smallint, c11 int, c12 bigint,
           c13 tinyint, c14 smallint, c15 int, c16 bigint,
           c17 tinyint, c18 smallint, c19 int, c20 bigint,
           b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n0 SELECT insert_num,
           tinyint1, smallint1, int1, bigint1,
           tinyint1, smallint1, int1, bigint1,
           tinyint1, smallint1, int1, bigint1,
           tinyint1, smallint1, int1, bigint1,
           tinyint1, smallint1, int1, bigint1,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n4;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n0;
 -- Table-Non-Cascade CHANGE COLUMNS ...
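-- The integer -> STRING_GROUP cases below format the number as text; with a short CHAR(5)/
-- VARCHAR(5) target, digits beyond the declared length are expected to be cut off ("trunc" in
-- the subsection titles). A sketch on a hypothetical toy table:
create table t (insert_num int, c1 bigint) stored as orc;
insert into t values (1, 1234567890);
alter table t replace columns (insert_num int, c1 varchar(5));
select c1 from t;   -- expected: '12345'
drop table t;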
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_multi_ints_string_group_n0 replace columns (insert_num int,
           c1 STRING, c2 STRING, c3 STRING, c4 STRING,
           c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50),
           c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
           c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50),
           c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
           b STRING) ;
-insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n0 VALUES (111,
           'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
           'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
           'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n0;
-drop table table_change_numeric_group_string_group_multi_ints_string_group;
+drop table table_change_numeric_group_string_group_multi_ints_string_group_n0;
@@ -172,36 +172,36 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group;
 -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n0(insert_num int,
           c1 decimal(38,18), c2 float, c3 double,
           c4 decimal(38,18), c5 float, c6 double,
           c7 decimal(38,18), c8 float, c9 double,
           c10 decimal(38,18), c11 float, c12 double,
           c13 decimal(38,18), c14 float, c15 double,
           b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_floating_string_group_n0 SELECT insert_num,
           decimal1, float1, double1,
           decimal1, float1, double1,
           decimal1, float1, double1,
           decimal1, float1, double1,
           decimal1, float1, double1,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n4;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n0;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_floating_string_group_n0 replace columns (insert_num int,
           c1 STRING, c2 STRING, c3 STRING,
           c4 CHAR(50), c5 CHAR(50), c6 CHAR(50),
           c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
           c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50),
           c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
           b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_floating_string_group_n0 VALUES (111,
           'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
           'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n0;
-drop table table_change_numeric_group_string_group_floating_string_group;
+drop table table_change_numeric_group_string_group_floating_string_group_n0;
 ------------------------------------------------------------------------------------------
@@ -212,34 +212,34 @@ drop table table_change_numeric_group_string_group_floating_string_group;
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
 -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
 --
-CREATE TABLE table_change_string_group_string_group_string(insert_num int,
+CREATE TABLE table_change_string_group_string_group_string_n0(insert_num int,
           c1 string, c2 string, c3 string, c4 string,
           c5 CHAR(50), c6 CHAR(50), c7 CHAR(50),
           c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_change_string_group_string_group_string SELECT insert_num,
+insert into table table_change_string_group_string_group_string_n0 SELECT insert_num,
           string2, string2, string2, string2,
           string2, string2, string2,
           string2, string2, string2,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n4;
-select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n0;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_string_group_string replace columns (insert_num int,
+alter table table_change_string_group_string_group_string_n0 replace columns (insert_num int,
           c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
           c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ;
-insert into table table_change_string_group_string_group_string VALUES (111,
+insert into table table_change_string_group_string_group_string_n0 VALUES (111,
           'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n0;
-drop table table_change_string_group_string_group_string;
+drop table table_change_string_group_string_group_string_n0;
@@ -254,40 +254,40 @@ drop table table_change_string_group_string_group_string;
 -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
 -- BIGINT, (DECIMAL, FLOAT, DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0(insert_num int,
           c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
           c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
           c12 int, c13 int, c14 int, c15 int,
           c16 bigint, c17 bigint, c18 bigint,
           b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0 SELECT insert_num,
           tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
           smallint1, smallint1, smallint1, smallint1, smallint1,
           int1, int1, int1, int1,
           bigint1, bigint1, bigint1,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n4;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0 replace columns (insert_num int,
           c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
           c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
           c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
           c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
           b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0 VALUES (111,
           7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
           80000, 90000000, 1234.5678, 9876.543, 789.321,
           90000000, 1234.5678, 9876.543, 789.321,
           1234.5678, 9876.543, 789.321,
           'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0;
-drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0;
@@ -296,23 +296,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 -- DECIMAL, (FLOAT, DOUBLE) and
 -- FLOAT, (DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n0(insert_num int,
           c1 decimal(38,18), c2 decimal(38,18),
           c3 float,
           b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n0 SELECT insert_num,
           decimal1, decimal1,
           float1,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n4;
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n0;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n0 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n0 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n0;
-drop table table_change_lower_to_higher_numeric_group_decimal_to_float;
+drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n0;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q
index 35c758a757..0165f10037 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q
@@ -21,92 +21,92 @@ set hive.llap.io.enabled=false;
 -- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n20(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n20;
-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_2_n5(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n5;
 --
 --
 -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
 ---
-CREATE TABLE table5(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table5_n2(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table5 SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table5_n2 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n20;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table5 add columns(c int, d string);
+alter table table5_n2 add columns(c int, d string);
-insert into table table5 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2;
+insert into table table5_n2 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n5;
-select a,b,c,d from table5;
+select a,b,c,d from table5_n2;
 -- UPDATE New Columns
-update table5 set c=99;
+update table5_n2 set c=99;
-select a,b,c,d from table5;
+select a,b,c,d from table5_n2;
-alter table table5 compact 'major';
+alter table table5_n2 compact 'major';
-select a,b,c,d from table5;
+select a,b,c,d from table5_n2;
-DROP TABLE table5;
+DROP TABLE table5_n2;
 --
 --
 -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
 ---
-CREATE TABLE table6(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table6_n1(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table6 SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table6_n1 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n20;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table6 add columns(c int, d string);
+alter table table6_n1 add columns(c int, d string);
-insert into table table6 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <= 110;
+insert into table table6_n1 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n5 WHERE insert_num <= 110;
-insert into table table6 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110;
+insert into table table6_n1 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n5 WHERE insert_num > 110;
-select a,b,c,d from table6;
+select a,b,c,d from table6_n1;
 -- DELETE where old column
-delete from table6 where insert_num = 102 or insert_num = 104 or insert_num = 106;
+delete from table6_n1 where insert_num = 102 or insert_num = 104 or insert_num = 106;
-select a,b,c,d from table6;
+select a,b,c,d from table6_n1;
-alter table table6 compact 'major';
+alter table table6_n1 compact 'major';
-select a,b,c,d from table6;
+select a,b,c,d from table6_n1;
-DROP TABLE table6;
+DROP TABLE table6_n1;
 --
 --
 -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column
 ---
-CREATE TABLE table7(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table7_n1(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table7 SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table7_n1 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n20;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table7 add columns(c int, d string);
+alter table table7_n1 add columns(c int, d string);
-insert into table table7 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <= 110;
+insert into table table7_n1 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n5 WHERE insert_num <= 110;
-insert into table table7 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110;
+insert into table table7_n1 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n5 WHERE insert_num > 110;
-select a,b,c,d from table7;
+select a,b,c,d from table7_n1;
 -- DELETE where new column
-delete from table7 where insert_num = 107 or insert_num >= 110;
+delete from table7_n1 where insert_num = 107 or insert_num >= 110;
-select a,b,c,d from table7;
+select a,b,c,d from table7_n1;
-alter table table7 compact 'major';
+alter table table7_n1 compact 'major';
-select a,b,c,d from table7;
+select a,b,c,d from table7_n1;
-DROP TABLE table7;
+DROP TABLE table7_n1;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q
index b72ded6bd0..0ed3754ae5 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q
@@ -22,92 +22,92 @@ set hive.llap.io.encode.enabled=true;
 -- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n12(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n12;
-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_2_n4(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n4;
 --
 --
 -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
 ---
-CREATE TABLE table5(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table5_n0(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table5 SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table5_n0 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n12;
 -- Table-Non-Cascade ADD COLUMNS ...
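-- The UPDATE/DELETE variations below exercise ACID row-level writes: each UPDATE or DELETE adds
-- delta (and delete_delta) files under the table directory, and ALTER TABLE ... COMPACT 'major'
-- queues a compaction that rewrites base plus deltas into a new base; reads before and after
-- compaction are expected to return the same rows. A condensed sketch (hypothetical table t):
create table t (a int, b string) clustered by (a) into 2 buckets stored as orc tblproperties ('transactional'='true');
insert into t values (1, 'x');
update t set b = 'y' where a = 1;   -- expected to write a delta/delete_delta pair
alter table t compact 'major';      -- requests compaction; a background worker rewrites the base
select a, b from t;                 -- expected: (1, 'y') both before and after compaction runs
drop table t;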
-alter table table5 add columns(c int, d string);
+alter table table5_n0 add columns(c int, d string);
-insert into table table5 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2;
+insert into table table5_n0 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n4;
-select a,b,c,d from table5;
+select a,b,c,d from table5_n0;
 -- UPDATE New Columns
-update table5 set c=99;
+update table5_n0 set c=99;
-select a,b,c,d from table5;
+select a,b,c,d from table5_n0;
-alter table table5 compact 'major';
+alter table table5_n0 compact 'major';
-select a,b,c,d from table5;
+select a,b,c,d from table5_n0;
-DROP TABLE table5;
+DROP TABLE table5_n0;
 --
 --
 -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
 ---
-CREATE TABLE table6(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table6_n0(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table6 SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table6_n0 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n12;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table6 add columns(c int, d string);
+alter table table6_n0 add columns(c int, d string);
-insert into table table6 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <= 110;
+insert into table table6_n0 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n4 WHERE insert_num <= 110;
-insert into table table6 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110;
+insert into table table6_n0 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n4 WHERE insert_num > 110;
-select a,b,c,d from table6;
+select a,b,c,d from table6_n0;
 -- DELETE where old column
-delete from table6 where insert_num = 102 or insert_num = 104 or insert_num = 106;
+delete from table6_n0 where insert_num = 102 or insert_num = 104 or insert_num = 106;
-select a,b,c,d from table6;
+select a,b,c,d from table6_n0;
-alter table table6 compact 'major';
+alter table table6_n0 compact 'major';
-select a,b,c,d from table6;
+select a,b,c,d from table6_n0;
-DROP TABLE table6;
+DROP TABLE table6_n0;
 --
 --
 -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column
 ---
-CREATE TABLE table7(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table7_n0(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table table7 SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table7_n0 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n12;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table7 add columns(c int, d string);
+alter table table7_n0 add columns(c int, d string);
-insert into table table7 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <= 110;
+insert into table table7_n0 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n4 WHERE insert_num <= 110;
-insert into table table7 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110;
+insert into table table7_n0 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n4 WHERE insert_num > 110;
-select a,b,c,d from table7;
+select a,b,c,d from table7_n0;
 -- DELETE where new column
-delete from table7 where insert_num = 107 or insert_num >= 110;
+delete from table7_n0 where insert_num = 107 or insert_num >= 110;
-select a,b,c,d from table7;
+select a,b,c,d from table7_n0;
-alter table table7 compact 'major';
+alter table table7_n0 compact 'major';
-select a,b,c,d from table7;
+select a,b,c,d from table7_n0;
-DROP TABLE table7;
+DROP TABLE table7_n0;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q
index d84f47674e..a32f5c88de 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q
@@ -24,9 +24,9 @@ set hive.llap.io.enabled=false;
 -- Instead explain vectorization only detail
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n14(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n14;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -35,51 +35,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_permute_select_n3(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_permute_select_n3 partition(part=1) VALUES (1, 1111, 'new');
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n3 add columns(c int);
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333);
+insert into table part_add_int_permute_select_n3 partition(part=1) VALUES (2, 2222, 'new', 3333);
 explain vectorization only detail
-select insert_num,part,a,b,c from part_add_int_permute_select;
+select insert_num,part,a,b,c from part_add_int_permute_select_n3;
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n3;
+select insert_num,part,a,b,c from part_add_int_permute_select_n3;
+select insert_num,part,c from part_add_int_permute_select_n3;
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n3;
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_string_permute_select_n3(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_string_permute_select_n3 partition(part=1) VALUES (1, 1111, 'new');
 -- Table-Non-Cascade ADD COLUMNS ...
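-- On partitioned tables, "Table-Non-Cascade" means the alter updates only the table-level
-- schema; existing partitions keep their stored column metadata and rows are reconciled at read
-- time (adding CASCADE would push the change into each partition's metadata as well). A sketch
-- on a hypothetical partitioned table:
create table p (insert_num int, a int) partitioned by (part int) stored as orc;
insert into p partition(part=1) values (1, 10);
alter table p add columns(c int);   -- table-level only (no CASCADE)
select insert_num, a, c from p;     -- rows in the old partition are expected to show c as NULL
drop table p;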
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n3 add columns(c int, d string);
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
+insert into table part_add_int_string_permute_select_n3 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
 explain vectorization only detail
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n3;
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_string_permute_select;
-select insert_num,part,a,b,c from part_add_int_string_permute_select;
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,d from part_add_int_string_permute_select;
-select insert_num,part,c from part_add_int_string_permute_select;
-select insert_num,part,d from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n3;
+select insert_num,part,a,b,c from part_add_int_string_permute_select_n3;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n3;
+select insert_num,part,a,c,d from part_add_int_string_permute_select_n3;
+select insert_num,part,a,d from part_add_int_string_permute_select_n3;
+select insert_num,part,c from part_add_int_string_permute_select_n3;
+select insert_num,part,d from part_add_int_string_permute_select_n3;
-drop table part_add_int_string_permute_select;
+drop table part_add_int_string_permute_select_n3;
@@ -90,21 +90,21 @@ drop table part_add_int_string_permute_select;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
 --
-CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_change_string_group_double_n3(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table part_change_string_group_double_n3 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n14;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table part_change_string_group_double_n3 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
+insert into table part_change_string_group_double_n3 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n14 WHERE insert_num = 111;
 explain vectorization only detail
-select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
+select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n3;
-select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
+select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n3;
-drop table part_change_string_group_double;
+drop table part_change_string_group_double_n3;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -113,21 +113,21 @@ drop table part_change_string_group_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
 --
-CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_change_date_group_string_group_date_timestamp_n3(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table part_change_date_group_string_group_date_timestamp_n3 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n14;
 -- Table-Non-Cascade CHANGE COLUMNS ...
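-- A note on the EXPLAIN variant used throughout this file: EXPLAIN VECTORIZATION ONLY DETAIL
-- prints just the vectorization information (which operators vectorize, expression details, and
-- reasons for any row-mode fallback) without the full operator-tree plan, for example:
explain vectorization only detail
select insert_num, part, c1, c2, b from part_change_date_group_string_group_date_timestamp_n3;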
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table part_change_date_group_string_group_date_timestamp_n3 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table part_change_date_group_string_group_date_timestamp_n3 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
 explain vectorization only detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n3;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n3;
-drop table part_change_date_group_string_group_date_timestamp;
+drop table part_change_date_group_string_group_date_timestamp_n3;
@@ -143,42 +143,42 @@ drop table part_change_date_group_string_group_date_timestamp;
 -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n3(insert_num int,
           c1 tinyint, c2 smallint, c3 int, c4 bigint,
           c5 tinyint, c6 smallint, c7 int, c8 bigint,
           c9 tinyint, c10 smallint, c11 int, c12 bigint,
           c13 tinyint, c14 smallint, c15 int, c16 bigint,
           c17 tinyint, c18 smallint, c19 int, c20 bigint,
           b STRING) PARTITIONED BY(part INT);
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n3 partition(part=1) SELECT insert_num,
           tinyint1, smallint1, int1, bigint1,
           tinyint1, smallint1, int1, bigint1,
           tinyint1, smallint1, int1, bigint1,
           tinyint1, smallint1, int1, bigint1,
           tinyint1, smallint1, int1, bigint1,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n14;
 explain vectorization only detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n3;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n3;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_multi_ints_string_group_n3 replace columns (insert_num int,
           c1 STRING, c2 STRING, c3 STRING, c4 STRING,
           c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50),
           c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
           c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50),
           c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
           b STRING) ;
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n3 partition(part=1) VALUES (111,
           'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
           'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
           'new');
 explain vectorization only detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n3;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n3;
-drop table part_change_numeric_group_string_group_multi_ints_string_group;
+drop table part_change_numeric_group_string_group_multi_ints_string_group_n3;
@@ -189,42 +189,42 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group;
 -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n3(insert_num int,
          c1 decimal(38,18), c2 float, c3 double,
          c4 decimal(38,18), c5 float, c6 double,
         c7 decimal(38,18), c8 float, c9 double,
         c10 decimal(38,18), c11 float, c12 double,
        c13 decimal(38,18), c14 float, c15 double,
        b STRING) PARTITIONED BY(part INT);
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_floating_string_group_n3 partition(part=1) SELECT insert_num,
           decimal1, float1, double1,
           decimal1, float1, double1,
          decimal1, float1, double1,
          decimal1, float1, double1,
          decimal1, float1, double1,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n14;
 explain vectorization only detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n3;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n3;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n3 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n3 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n3; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n3; @@ -236,40 +236,40 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n3(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n3 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n14; explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n3; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
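(Aside before the string-group REPLACE COLUMNS below.) The "CHAR trunc" and "VARCHAR trunc" cases these queries exercise check that, when the new type is narrower than the stored text, converted values are clipped to the declared length on read. A sketch under the same relaxed-type-change setting; `trunc_demo` is an invented name:

```sql
create table trunc_demo(s string) stored as orc;
insert into table trunc_demo values ('abcdefghijk');   -- 11 characters
alter table trunc_demo replace columns (s char(9));    -- narrower target type
select s from trunc_demo;                              -- 'abcdefghi'
```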
-alter table part_change_string_group_string_group_string replace columns (insert_num int, +alter table part_change_string_group_string_group_string_n3 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, +insert into table part_change_string_group_string_group_string_n3 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n3; -drop table part_change_string_group_string_group_string; +drop table part_change_string_group_string_group_string_n3; ------------------------------------------------------------------------------------------ @@ -283,34 +283,34 @@ drop table part_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3 partition(part=1) SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n14; explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3 partition(part=1) VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, @@ -318,11 +318,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3; -drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3; @@ -331,29 +331,29 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n3(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n3 partition(part=1) SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n14; explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n3; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
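(Aside on the lower-to-higher numeric sections.) Widening conversions are value-preserving, so old rows simply read back as the wider type. A minimal sketch with the invented name `widen_demo`:

```sql
create table widen_demo(c1 tinyint, c2 int) stored as orc;
insert into table widen_demo values (100, 90000000);
alter table widen_demo replace columns (c1 smallint, c2 bigint);
select c1, c2 from widen_demo;  -- same values, read back as the wider types
```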
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n3 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n3 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n3; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n3; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n3; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q index a632855b87..7e08e76300 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q @@ -25,9 +25,9 @@ set hive.llap.io.encode.enabled=true; -- Instead explain vectorization only detail -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n23(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n23; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -36,51 +36,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. 
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE part_add_int_permute_select_n6(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_permute_select_n6 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... -alter table part_add_int_permute_select add columns(c int); +alter table part_add_int_permute_select_n6 add columns(c int); -insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +insert into table part_add_int_permute_select_n6 partition(part=1) VALUES (2, 2222, 'new', 3333); explain vectorization only detail -select insert_num,part,a,b,c from part_add_int_permute_select; +select insert_num,part,a,b,c from part_add_int_permute_select_n6; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_permute_select; -select insert_num,part,a,b,c from part_add_int_permute_select; -select insert_num,part,c from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n6; +select insert_num,part,a,b,c from part_add_int_permute_select_n6; +select insert_num,part,c from part_add_int_permute_select_n6; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n6; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE part_add_int_string_permute_select_n6(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n6 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... 
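(Aside before the ADD COLUMNS statement that follows.) The permuted SELECTs in these subsections verify NULL defaulting: rows written before the ALTER have no data for the added column, so it must come back as NULL. In sketch form, with the invented name `add_demo`:

```sql
create table add_demo(insert_num int, a int, b string) stored as orc;
insert into table add_demo values (1, 1111, 'new');
alter table add_demo add columns(c int);
insert into table add_demo values (2, 2222, 'new', 3333);
select insert_num, a, b, c from add_demo;  -- the pre-ALTER row returns c = NULL
```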
-alter table part_add_int_string_permute_select add columns(c int, d string); +alter table part_add_int_string_permute_select_n6 add columns(c int, d string); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +insert into table part_add_int_string_permute_select_n6 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); explain vectorization only detail -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n6; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_string_permute_select; -select insert_num,part,a,b,c from part_add_int_string_permute_select; -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; -select insert_num,part,a,c,d from part_add_int_string_permute_select; -select insert_num,part,a,d from part_add_int_string_permute_select; -select insert_num,part,c from part_add_int_string_permute_select; -select insert_num,part,d from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n6; +select insert_num,part,a,b,c from part_add_int_string_permute_select_n6; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n6; +select insert_num,part,a,c,d from part_add_int_string_permute_select_n6; +select insert_num,part,a,d from part_add_int_string_permute_select_n6; +select insert_num,part,c from part_add_int_string_permute_select_n6; +select insert_num,part,d from part_add_int_string_permute_select_n6; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n6; @@ -91,21 +91,21 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE part_change_string_group_double_n6(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n6 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n23; -- Table-Non-Cascade CHANGE COLUMNS ... 
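(Aside before the STRING_GROUP -> DOUBLE change below.) After the type change, old string values are parsed as doubles at read time; a value that does not parse would come back as NULL. A hedged sketch, assuming the tests' usual `hive.metastore.disallow.incompatible.col.type.changes=false` setting and an invented table name:

```sql
create table str2dbl_demo(c1 string) stored as orc;
insert into table str2dbl_demo values ('1234.5'), ('not-a-number');
alter table str2dbl_demo replace columns (c1 double);
select c1 from str2dbl_demo;  -- 1234.5, and NULL for the unparseable value
```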
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table part_change_string_group_double_n6 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +insert into table part_change_string_group_double_n6 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n23 WHERE insert_num = 111; explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n6; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n6; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n6; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -114,21 +114,21 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n6(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n6 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n23; -- Table-Non-Cascade CHANGE COLUMNS ... 
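(A note on the `explain vectorization only detail` statements used throughout these files.) Unlike a full EXPLAIN, this form prints only the vectorization summary for each operator, so the golden-file output does not pick up ACID write ids or row-count statistics that vary between runs. The shape, in sketch form (the table name is a stand-in):

```sql
explain vectorization only detail
select insert_num, c1 from some_evolved_table;
```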
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table part_change_date_group_string_group_date_timestamp_n6 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table part_change_date_group_string_group_date_timestamp_n6 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n6; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n6; @@ -144,42 +144,42 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n6(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n6 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n23; explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
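(Aside before the multi-int REPLACE COLUMNS that follows.) It converts TINYINT through BIGINT columns to STRING/CHAR/VARCHAR, including deliberately narrow CHAR(5)/VARCHAR(5) targets, so wide integers are rendered as text and then clipped. A sketch with the invented name `int2str_demo`, again assuming incompatible type changes are permitted:

```sql
create table int2str_demo(c1 int) stored as orc;
insert into table int2str_demo values (90000000);
alter table int2str_demo replace columns (c1 char(5));
select c1 from int2str_demo;  -- '90000': rendered as text, then clipped to 5
```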
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_multi_ints_string_group_n6 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n6 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n6; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n6; @@ -190,42 +190,42 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n6(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n6 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n23; explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
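(Aside before the floating-group REPLACE COLUMNS below.) The same clipping applies to DECIMAL/FLOAT/DOUBLE sources: the value is formatted as text first, then truncated to the CHAR/VARCHAR length. A hedged sketch (invented name, exact formatting of the double is an assumption):

```sql
create table f2s_demo(c1 double) stored as orc;
insert into table f2s_demo values (9876.543);
alter table f2s_demo replace columns (c1 char(7));
select c1 from f2s_demo;  -- expected '9876.54': decimal text clipped to 7 chars
```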
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n6 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n6 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n6; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n6; @@ -237,40 +237,40 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n6(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n6 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n23; explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n6; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_string_group_string_group_string replace columns (insert_num int, +alter table part_change_string_group_string_group_string_n6 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, +insert into table part_change_string_group_string_group_string_n6 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n6; -drop table part_change_string_group_string_group_string; +drop table part_change_string_group_string_group_string_n6; ------------------------------------------------------------------------------------------ @@ -284,34 +284,34 @@ drop table part_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6 partition(part=1) SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n23; explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6 partition(part=1) VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, @@ -319,11 +319,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6; -drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n6; @@ -332,29 +332,29 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n6(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n6 partition(part=1) SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n23; explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n6; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n6 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n6 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); explain vectorization only detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n6; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n6; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n6; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q index 760deb4725..6cbb1c1dad 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q @@ -23,97 +23,97 @@ set hive.llap.io.enabled=false; -- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences... -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n38(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n38; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n12(insert_num int, boolean1 
boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n12; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns --- -CREATE TABLE partitioned_update_1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_update_1_n2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_update_1_n2 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n38; -- Table-Non-Cascade ADD COLUMNS ... -alter table partitioned_update_1 add columns(c int, d string); +alter table partitioned_update_1_n2 add columns(c int, d string); -insert into table partitioned_update_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_update_1_n2 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n12 WHERE insert_num <=110; -insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_update_1_n2 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n12 WHERE insert_num > 110; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n2; -- UPDATE New Columns -update partitioned_update_1 set c=99; +update partitioned_update_1_n2 set c=99; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n2; -alter table partitioned_update_1 partition(part=1) compact 'major'; -alter table partitioned_update_1 partition(part=2) compact 'major'; +alter table partitioned_update_1_n2 partition(part=1) compact 'major'; +alter table partitioned_update_1_n2 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n2; -DROP TABLE partitioned_update_1; +DROP TABLE partitioned_update_1_n2; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... 
DELETE where old column --- -CREATE TABLE partitioned_delete_1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_delete_1_n2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_delete_1_n2 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n38; -- Table-Non-Cascade ADD COLUMNS ... -alter table partitioned_delete_1 add columns(c int, d string); +alter table partitioned_delete_1_n2 add columns(c int, d string); -insert into table partitioned_delete_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_delete_1_n2 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n12 WHERE insert_num <=110; -insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_delete_1_n2 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n12 WHERE insert_num > 110; -select part,a,b,c,d from partitioned_delete_1; +select part,a,b,c,d from partitioned_delete_1_n2; -- DELETE where old column -delete from partitioned_delete_1 where insert_num = 102 or insert_num = 104 or insert_num = 106; +delete from partitioned_delete_1_n2 where insert_num = 102 or insert_num = 104 or insert_num = 106; -select insert_num,part,a,b,c,d from partitioned_delete_1; +select insert_num,part,a,b,c,d from partitioned_delete_1_n2; -alter table partitioned_delete_1 partition(part=1) compact 'major'; -alter table partitioned_delete_1 partition(part=2) compact 'major'; +alter table partitioned_delete_1_n2 partition(part=1) compact 'major'; +alter table partitioned_delete_1_n2 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_delete_1; +select insert_num,part,a,b,c,d from partitioned_delete_1_n2; -DROP TABLE partitioned_delete_1; +DROP TABLE partitioned_delete_1_n2; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column --- -CREATE TABLE partitioned_delete_2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_delete_2_n2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_delete_2_n2 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n38; -- Table-Non-Cascade ADD COLUMNS ... 
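(Aside on the DELETE/compaction choreography in these sections.) An ACID DELETE writes a delete_delta directory rather than rewriting files; the follow-up `compact 'major'` plus re-SELECT verifies that results survive the file-layout change. In sketch form, where `acid_demo` stands in for the `partitioned_delete_*` tables:

```sql
delete from acid_demo where insert_num = 102;            -- writes a delete_delta
alter table acid_demo partition(part=1) compact 'major';
show compactions;                                        -- the request is queued here
-- once the compactor runs, base + deltas are rewritten into a new base
-- directory, and the same SELECT must return identical rows
```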
-alter table partitioned_delete_2 add columns(c int, d string); +alter table partitioned_delete_2_n2 add columns(c int, d string); -insert into table partitioned_delete_2 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_delete_2_n2 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n12 WHERE insert_num <=110; -insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_delete_2_n2 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n12 WHERE insert_num > 110; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n2; -- DELETE where new column -delete from partitioned_delete_2 where insert_num = 108 or insert_num > 113; +delete from partitioned_delete_2_n2 where insert_num = 108 or insert_num > 113; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n2; -alter table partitioned_delete_2 partition(part=1) compact 'major'; -alter table partitioned_delete_2 partition(part=2) compact 'major'; +alter table partitioned_delete_2_n2 partition(part=1) compact 'major'; +alter table partitioned_delete_2_n2 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n2; -DROP TABLE partitioned_delete_2; +DROP TABLE partitioned_delete_2_n2; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q index 3ece4773d6..9c8bdda7ee 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q @@ -24,97 +24,97 @@ set hive.llap.io.encode.enabled=true; -- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences... 
-- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n28(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n28; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n8(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n8; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns --- -CREATE TABLE partitioned_update_1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_update_1_n0(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_update_1_n0 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n28; -- Table-Non-Cascade ADD COLUMNS ... 
-alter table partitioned_update_1 add columns(c int, d string); +alter table partitioned_update_1_n0 add columns(c int, d string); -insert into table partitioned_update_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_update_1_n0 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n8 WHERE insert_num <=110; -insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_update_1_n0 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n8 WHERE insert_num > 110; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n0; -- UPDATE New Columns -update partitioned_update_1 set c=99; +update partitioned_update_1_n0 set c=99; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n0; -alter table partitioned_update_1 partition(part=1) compact 'major'; -alter table partitioned_update_1 partition(part=2) compact 'major'; +alter table partitioned_update_1_n0 partition(part=1) compact 'major'; +alter table partitioned_update_1_n0 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_update_1; +select insert_num,part,a,b,c,d from partitioned_update_1_n0; -DROP TABLE partitioned_update_1; +DROP TABLE partitioned_update_1_n0; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column --- -CREATE TABLE partitioned_delete_1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_delete_1_n0(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_delete_1_n0 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n28; -- Table-Non-Cascade ADD COLUMNS ... 
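(Aside on the UPDATE sections.) An ACID UPDATE is physically a delete event plus an insert event, so the affected row's ROW__ID (a struct of write id, bucket id, and row id) changes. A sketch against the same stand-in table:

```sql
select ROW__ID, c from acid_demo where insert_num = 2;
update acid_demo set c = 99 where insert_num = 2;
select ROW__ID, c from acid_demo where insert_num = 2;  -- new write id in ROW__ID
```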
-alter table partitioned_delete_1 add columns(c int, d string); +alter table partitioned_delete_1_n0 add columns(c int, d string); -insert into table partitioned_delete_1 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_delete_1_n0 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n8 WHERE insert_num <=110; -insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_delete_1_n0 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n8 WHERE insert_num > 110; -select part,a,b,c,d from partitioned_delete_1; +select part,a,b,c,d from partitioned_delete_1_n0; -- DELETE where old column -delete from partitioned_delete_1 where insert_num = 102 or insert_num = 104 or insert_num = 106; +delete from partitioned_delete_1_n0 where insert_num = 102 or insert_num = 104 or insert_num = 106; -select insert_num,part,a,b,c,d from partitioned_delete_1; +select insert_num,part,a,b,c,d from partitioned_delete_1_n0; -alter table partitioned_delete_1 partition(part=1) compact 'major'; -alter table partitioned_delete_1 partition(part=2) compact 'major'; +alter table partitioned_delete_1_n0 partition(part=1) compact 'major'; +alter table partitioned_delete_1_n0 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_delete_1; +select insert_num,part,a,b,c,d from partitioned_delete_1_n0; -DROP TABLE partitioned_delete_1; +DROP TABLE partitioned_delete_1_n0; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column --- -CREATE TABLE partitioned_delete_2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE partitioned_delete_2_n0(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table partitioned_delete_2_n0 partition(part=1) SELECT insert_num, int1, 'original' FROM schema_evolution_data_n28; -- Table-Non-Cascade ADD COLUMNS ... 
-alter table partitioned_delete_2 add columns(c int, d string); +alter table partitioned_delete_2_n0 add columns(c int, d string); -insert into table partitioned_delete_2 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <=110; +insert into table partitioned_delete_2_n0 partition(part=2) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n8 WHERE insert_num <=110; -insert into table partitioned_delete_2 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table partitioned_delete_2_n0 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n8 WHERE insert_num > 110; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n0; -- DELETE where new column -delete from partitioned_delete_2 where insert_num = 108 or insert_num > 113; +delete from partitioned_delete_2_n0 where insert_num = 108 or insert_num > 113; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n0; -alter table partitioned_delete_2 partition(part=1) compact 'major'; -alter table partitioned_delete_2 partition(part=2) compact 'major'; +alter table partitioned_delete_2_n0 partition(part=1) compact 'major'; +alter table partitioned_delete_2_n0 partition(part=2) compact 'major'; -select insert_num,part,a,b,c,d from partitioned_delete_2; +select insert_num,part,a,b,c,d from partitioned_delete_2_n0; -DROP TABLE partitioned_delete_2; +DROP TABLE partitioned_delete_2_n0; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q index e26a329915..476318b2a1 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q @@ -22,9 +22,9 @@ set hive.llap.io.enabled=false; -- Instead explain vectorization only detail -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n1(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n1; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -35,7 +35,7 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data. -- CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data_n1; -- Table-Non-Cascade ADD COLUMNS ... alter table table_add_int_permute_select add columns(c int); @@ -58,7 +58,7 @@ drop table table_add_int_permute_select; -- CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data_n1; -- Table-Non-Cascade ADD COLUMNS ... alter table table_add_int_string_permute_select add columns(c int, d string); @@ -90,7 +90,7 @@ drop table table_add_int_string_permute_select; -- CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n1; -- Table-Non-Cascade CHANGE COLUMNS ... alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); @@ -113,7 +113,7 @@ drop table table_change_string_group_double; -- CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
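-- Converting the DATE/TIMESTAMP columns to the string group below rewrites no data;
-- pre-ALTER values are rendered through their canonical string form at read time, and
-- the CHAR(15)/VARCHAR(15) targets truncate anything longer. Hypothetical examples:
--   DATE 2013-03-01                    -> STRING '2013-03-01'
--   TIMESTAMP 2013-03-01 10:01:02.123  -> VARCHAR(15) '2013-03-01 10:0' (truncated)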
alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); @@ -150,7 +150,7 @@ insert into table table_change_numeric_group_string_group_multi_ints_string_grou tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n1; explain vectorization only detail select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; @@ -196,7 +196,7 @@ insert into table table_change_numeric_group_string_group_floating_string_group decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n1; explain vectorization only detail select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; @@ -241,7 +241,7 @@ insert into table table_change_string_group_string_group_string SELECT insert_nu string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n1; explain vectorization only detail select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; @@ -292,7 +292,7 @@ insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint S smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n1; explain vectorization only detail select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; @@ -336,7 +336,7 @@ CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_ insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n1; explain vectorization only detail select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q index 7ca54599fb..ebc9664d27 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q @@ -23,9 +23,9 @@ set hive.llap.io.encode.enabled=true; -- Instead explain vectorization only detail -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE 
schema_evolution_data_n39(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n39; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -34,51 +34,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table_add_int_permute_select_n12(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_permute_select_n12 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n39; -- Table-Non-Cascade ADD COLUMNS ... -alter table table_add_int_permute_select add columns(c int); +alter table table_add_int_permute_select_n12 add columns(c int); -insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000); +insert into table table_add_int_permute_select_n12 VALUES (111, 80000, 'new', 80000); explain vectorization only detail -select insert_num,a,b,c from table_add_int_permute_select; +select insert_num,a,b,c from table_add_int_permute_select_n12; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_permute_select; -select insert_num,a,b,c from table_add_int_permute_select; -select insert_num,c from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n12; +select insert_num,a,b,c from table_add_int_permute_select_n12; +select insert_num,c from table_add_int_permute_select_n12; -drop table table_add_int_permute_select; +drop table table_add_int_permute_select_n12; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table_add_int_string_permute_select_n12(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_string_permute_select_n12 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n39; -- Table-Non-Cascade ADD COLUMNS ... 
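-- The permutation SELECTs that follow exercise column projection after schema
-- evolution: each requested column must be mapped to a file column, or to a NULL
-- default when the file predates the ADD COLUMNS. For a row written before the
-- ALTER below, roughly:
--   select insert_num,a,b from ...;   -- all three columns come from the file
--   select insert_num,c,d from ...;   -- insert_num from the file; c and d are NULL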
-alter table table_add_int_string_permute_select add columns(c int, d string); +alter table table_add_int_string_permute_select_n12 add columns(c int, d string); -insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler'); +insert into table table_add_int_string_permute_select_n12 VALUES (111, 80000, 'new', 80000, 'filler'); explain vectorization only detail -select insert_num,a,b,c,d from table_add_int_string_permute_select; +select insert_num,a,b,c,d from table_add_int_string_permute_select_n12; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_string_permute_select; -select insert_num,a,b,c from table_add_int_string_permute_select; -select insert_num,a,b,c,d from table_add_int_string_permute_select; -select insert_num,a,c,d from table_add_int_string_permute_select; -select insert_num,a,d from table_add_int_string_permute_select; -select insert_num,c from table_add_int_string_permute_select; -select insert_num,d from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n12; +select insert_num,a,b,c from table_add_int_string_permute_select_n12; +select insert_num,a,b,c,d from table_add_int_string_permute_select_n12; +select insert_num,a,c,d from table_add_int_string_permute_select_n12; +select insert_num,a,d from table_add_int_string_permute_select_n12; +select insert_num,c from table_add_int_string_permute_select_n12; +select insert_num,d from table_add_int_string_permute_select_n12; -drop table table_add_int_string_permute_select; +drop table table_add_int_string_permute_select_n12; @@ -89,21 +89,21 @@ drop table table_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table_change_string_group_double_n12(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table table_change_string_group_double_n12 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n39; -- Table-Non-Cascade CHANGE COLUMNS ... 
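-- STRING -> DOUBLE below is a parse-on-read conversion: the rows were inserted from
-- double_str, so every value parses cleanly; a string that did not parse as a number
-- would be expected to read back as NULL rather than fail the query. Hypothetical:
--   '789.321'       -> DOUBLE 789.321
--   'not a number'  -> NULL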
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table table_change_string_group_double_n12 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new'); +insert into table table_change_string_group_double_n12 VALUES (111, 789.321, 789.321, 789.321, 'new'); explain vectorization only detail -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n12; -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n12; -drop table table_change_string_group_double; +drop table table_change_string_group_double_n12; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -112,21 +112,21 @@ drop table table_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table_change_date_group_string_group_date_group_n12(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table table_change_date_group_string_group_date_group_n12 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n39; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table table_change_date_group_string_group_date_group_n12 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table table_change_date_group_string_group_date_group_n12 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n12; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n12; -drop table table_change_date_group_string_group_date_group; +drop table table_change_date_group_string_group_date_group_n12; @@ -141,42 +141,42 @@ drop table table_change_date_group_string_group_date_group; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n12(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n12 SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n39; explain vectorization only detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n12; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n12; -- Table-Non-Cascade CHANGE COLUMNS ... 
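-- For the multi-int conversion below, integers land in the string group as their
-- decimal text, and the CHAR(5)/VARCHAR(5) targets keep only the first five
-- characters, e.g. (hypothetical) BIGINT 1234567890 -> VARCHAR(5) '12345'.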
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_multi_ints_string_group_n12 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n12 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n12; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n12; -drop table table_change_numeric_group_string_group_multi_ints_string_group; +drop table table_change_numeric_group_string_group_multi_ints_string_group_n12; @@ -187,42 +187,42 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n12(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n12 SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n39; explain vectorization only detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n12; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n12; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_floating_string_group_n12 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_floating_string_group_n12 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n12; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n12; -drop table table_change_numeric_group_string_group_floating_string_group; +drop table table_change_numeric_group_string_group_floating_string_group_n12; ------------------------------------------------------------------------------------------ @@ -233,40 +233,40 @@ drop table table_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE table_change_string_group_string_group_string(insert_num int, +CREATE TABLE table_change_string_group_string_group_string_n12(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_string_group_string_group_string SELECT insert_num, +insert into table table_change_string_group_string_group_string_n12 SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n39; explain vectorization only detail -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n12; -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n12; -- Table-Non-Cascade CHANGE COLUMNS ... 
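-- Within the string group the conversion below only changes length enforcement and
-- padding: values that land in CHAR(9)/VARCHAR(9) are truncated to nine characters,
-- and CHAR targets are blank-padded to their declared length.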
-alter table table_change_string_group_string_group_string replace columns (insert_num int, +alter table table_change_string_group_string_group_string_n12 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table table_change_string_group_string_group_string VALUES (111, +insert into table table_change_string_group_string_group_string_n12 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization only detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n12; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n12; -drop table table_change_string_group_string_group_string; +drop table table_change_string_group_string_group_string_n12; @@ -281,34 +281,34 @@ drop table table_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12 SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n39; explain vectorization only detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12; -- Table-Non-Cascade CHANGE COLUMNS ... 
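-- The widening below stays inside the numeric group (TINYINT -> SMALLINT -> INT ->
-- BIGINT, then on to DECIMAL/FLOAT/DOUBLE), which is value-preserving: pre-ALTER rows
-- are expected to read back with the same values in the wider type, e.g.
-- (hypothetical) TINYINT 100 -> BIGINT 100.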
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12 VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, @@ -316,11 +316,11 @@ insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint V 'new'); explain vectorization only detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12; -drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12; @@ -329,29 +329,29 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n12(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n12 SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n39; explain vectorization only detail -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n12; -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n12; -- Table-Non-Cascade CHANGE COLUMNS ... 
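-- DECIMAL(38,18) -> FLOAT/DOUBLE below is the one conversion in this group that can
-- lose precision: a value with more significant digits than the binary type holds is
-- rounded on read, e.g. (hypothetical) DECIMAL 1234.56789012345678 read as FLOAT
-- prints as roughly 1234.5679.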
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n12 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n12 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); explain vectorization only detail -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n12; -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n12; -drop table table_change_lower_to_higher_numeric_group_decimal_to_float; +drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n12; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q index 65e68a6726..22b84d048a 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q @@ -21,92 +21,92 @@ set hive.llap.io.enabled=false; -- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences... -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n21(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n21; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n6(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n6; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns --- -CREATE TABLE table5(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table5_n3(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table5 SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table5_n3 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n21; -- Table-Non-Cascade ADD COLUMNS ... -alter table table5 add columns(c int, d string); +alter table table5_n3 add columns(c int, d string); -insert into table table5 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2; +insert into table table5_n3 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n6; -select a,b,c,d from table5; +select a,b,c,d from table5_n3; -- UPDATE New Columns -update table5 set c=99; +update table5_n3 set c=99; -select a,b,c,d from table5; +select a,b,c,d from table5_n3; -alter table table5 compact 'major'; +alter table table5_n3 compact 'major'; -select a,b,c,d from table5; +select a,b,c,d from table5_n3; -DROP TABLE table5; +DROP TABLE table5_n3; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column --- -CREATE TABLE table6(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table6_n2(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table6 SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table6_n2 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n21; -- Table-Non-Cascade ADD COLUMNS ... 
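-- In the DELETE variations below, an ACID delete writes delete_delta files keyed by
-- ROW__ID and readers merge them at scan time; that is why the same SELECT is issued
-- before and after ALTER TABLE ... COMPACT 'major', which requests a compaction that
-- folds the base and delta files into a new base. The results must be identical
-- either way.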
-alter table table6 add columns(c int, d string); +alter table table6_n2 add columns(c int, d string); -insert into table table6 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <= 110; +insert into table table6_n2 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n6 WHERE insert_num <= 110; -insert into table table6 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table table6_n2 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n6 WHERE insert_num > 110; -select a,b,c,d from table6; +select a,b,c,d from table6_n2; -- DELETE where old column -delete from table6 where insert_num = 102 or insert_num = 104 or insert_num = 106; +delete from table6_n2 where insert_num = 102 or insert_num = 104 or insert_num = 106; -select a,b,c,d from table6; +select a,b,c,d from table6_n2; -alter table table6 compact 'major'; +alter table table6_n2 compact 'major'; -select a,b,c,d from table6; +select a,b,c,d from table6_n2; -DROP TABLE table6; +DROP TABLE table6_n2; -- -- -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column --- -CREATE TABLE table7(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); +CREATE TABLE table7_n2(insert_num int, a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true'); -insert into table table7 SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table7_n2 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n21; -- Table-Non-Cascade ADD COLUMNS ... -alter table table7 add columns(c int, d string); +alter table table7_n2 add columns(c int, d string); -insert into table table7 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num <= 110; +insert into table table7_n2 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n6 WHERE insert_num <= 110; -insert into table table7 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2 WHERE insert_num > 110; +insert into table table7_n2 SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n6 WHERE insert_num > 110; -select a,b,c,d from table7; +select a,b,c,d from table7_n2; -- DELETE where new column -delete from table7 where insert_num = 107 or insert_num >= 110; +delete from table7_n2 where insert_num = 107 or insert_num >= 110; -select a,b,c,d from table7; +select a,b,c,d from table7_n2; -alter table table7 compact 'major'; +alter table table7_n2 compact 'major'; -select a,b,c,d from table7; +select a,b,c,d from table7_n2; -DROP TABLE table7; +DROP TABLE table7_n2; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part.q index c15792e7b9..e2a4f30af3 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part.q @@ -18,9 +18,9 @@ set hive.llap.io.enabled=false; -- FILE VARIATION: ORC, Non-Vectorized, MapWork, Partitioned -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str 
string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n25(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n25; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -29,51 +29,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_permute_select_n7(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_permute_select_n7 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... -alter table part_add_int_permute_select add columns(c int); +alter table part_add_int_permute_select_n7 add columns(c int); -insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +insert into table part_add_int_permute_select_n7 partition(part=1) VALUES (2, 2222, 'new', 3333); explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n7; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_permute_select; -select insert_num,part,a,b,c from part_add_int_permute_select; -select insert_num,part,c from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n7; +select insert_num,part,a,b,c from part_add_int_permute_select_n7; +select insert_num,part,c from part_add_int_permute_select_n7; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n7; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_string_permute_select_n7(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n7 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... 
-alter table part_add_int_string_permute_select add columns(c int, d string); +alter table part_add_int_string_permute_select_n7 add columns(c int, d string); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +insert into table part_add_int_string_permute_select_n7 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n7; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_string_permute_select; -select insert_num,part,a,b,c from part_add_int_string_permute_select; -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; -select insert_num,part,a,c,d from part_add_int_string_permute_select; -select insert_num,part,a,d from part_add_int_string_permute_select; -select insert_num,part,c from part_add_int_string_permute_select; -select insert_num,part,d from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n7; +select insert_num,part,a,b,c from part_add_int_string_permute_select_n7; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n7; +select insert_num,part,a,c,d from part_add_int_string_permute_select_n7; +select insert_num,part,a,d from part_add_int_string_permute_select_n7; +select insert_num,part,c from part_add_int_string_permute_select_n7; +select insert_num,part,d from part_add_int_string_permute_select_n7; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n7; @@ -84,21 +84,21 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_string_group_double_n7(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n7 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n25; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table part_change_string_group_double_n7 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +insert into table part_change_string_group_double_n7 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n25 WHERE insert_num = 111; explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n7; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n7; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n7; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -107,21 +107,21 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n7(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n7 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n25; -- Table-Non-Cascade CHANGE COLUMNS ... 
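-- On these partitioned tables the non-CASCADE REPLACE COLUMNS changes only the
-- table-level schema; existing partitions such as part=1 keep the column types they
-- were created with, and the reader reconciles the partition schema with the table
-- schema. ALTER TABLE ... REPLACE COLUMNS ... CASCADE would update the partition
-- metadata as well (still without rewriting data files).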
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table part_change_date_group_string_group_date_timestamp_n7 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table part_change_date_group_string_group_date_timestamp_n7 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n7; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n7; @@ -137,39 +137,39 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n7(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n7 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n25; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_multi_ints_string_group_n7 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n7 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n7; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n7; @@ -180,39 +180,39 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n7(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n7 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n25; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n7 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n7 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n7; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n7; @@ -224,37 +224,37 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n7(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n7 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n25; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_string_group_string_group_string replace columns (insert_num int,
+alter table part_change_string_group_string_group_string_n7 replace columns (insert_num int,
           c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
           c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
           c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ;
-insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111,
+insert into table part_change_string_group_string_group_string_n7 partition(part=1) VALUES (111,
           'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
           'new');
 explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n7;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n7;
-drop table part_change_string_group_string_group_string;
+drop table part_change_string_group_string_group_string_n7;
 ------------------------------------------------------------------------------------------
@@ -268,31 +268,31 @@ drop table part_change_string_group_string_group_string;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for "LOWER" TYPE to "HIGHER" NUMERIC_GROUP:
 --            TINYINT, (SMALLINT, INT, BIGINT, DECIMAL, FLOAT, DOUBLE) and
 --            SMALLINT, (INT, BIGINT, DECIMAL, FLOAT, DOUBLE) and
 --            INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
 --            BIGINT, (DECIMAL, FLOAT, DOUBLE)
 --
-CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7(insert_num int,
             c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
             c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
             c12 int, c13 int, c14 int, c15 int,
             c16 bigint, c17 bigint, c18 bigint,
             b STRING) PARTITIONED BY(part INT);
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7 partition(part=1) SELECT insert_num,
             tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
             smallint1, smallint1, smallint1, smallint1, smallint1,
             int1, int1, int1, int1,
             bigint1, bigint1, bigint1,
-            'original' FROM schema_evolution_data;
+            'original' FROM schema_evolution_data_n25;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7;
 -- Table-Non-Cascade CHANGE COLUMNS ...
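-- NOTE (illustrative sketch, not part of this patch): the safe widening the next hunk
-- tests, on a hypothetical table t_widen. Every conversion goes from a "lower" to a
-- "higher" numeric type, so no value already on disk can overflow after the ALTER.
--   CREATE TABLE t_widen (insert_num int, c1 tinyint, c2 smallint, c3 int, b string) PARTITIONED BY (part int);
--   ALTER TABLE t_widen REPLACE COLUMNS (insert_num int, c1 smallint, c2 bigint, c3 decimal(38,18), b string);
--   SELECT insert_num, part, c1, c2, c3, b FROM t_widen; -- old values widen losslessly at read time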
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7 replace columns (insert_num int,
             c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
             c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
             c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
             c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
             b STRING) ;
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7 partition(part=1) VALUES (111,
             7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
             80000, 90000000, 1234.5678, 9876.543, 789.321,
             90000000, 1234.5678, 9876.543, 789.321,
@@ -300,11 +300,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa
             'new');
 explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7;
-drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7;
@@ -313,26 +313,26 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 --          DECIMAL, (FLOAT, DOUBLE) and
 --          FLOAT, (DOUBLE)
 --
-CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n7(insert_num int,
            c1 decimal(38,18), c2 decimal(38,18), c3 float,
            b STRING) PARTITIONED BY(part INT);
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n7 partition(part=1) SELECT insert_num,
            decimal1, decimal1, float1,
-           'original' FROM schema_evolution_data;
+           'original' FROM schema_evolution_data_n25;
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n7;
 -- Table-Non-Cascade CHANGE COLUMNS ...
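-- NOTE (illustrative sketch, not part of this patch): the floating-point counterpart that
-- the next hunk tests, on a hypothetical table t_dec_to_fp. DECIMAL to FLOAT/DOUBLE and
-- FLOAT to DOUBLE count as "lower to higher" here, though DECIMAL values can lose
-- precision in the conversion.
--   CREATE TABLE t_dec_to_fp (insert_num int, c1 decimal(38,18), c2 float, b string) PARTITIONED BY (part int);
--   ALTER TABLE t_dec_to_fp REPLACE COLUMNS (insert_num int, c1 double, c2 double, b string);
--   SELECT insert_num, part, c1, c2, b FROM t_dec_to_fp;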
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n7 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n7 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
 explain vectorization detail
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n7;
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n7;
-drop table part_change_lower_to_higher_numeric_group_decimal_to_float;
\ No newline at end of file
+drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n7;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_complex.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_complex.q
index 76e2f57ae9..c34a0627f5 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_complex.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_complex.q
@@ -21,103 +21,103 @@ set hive.llap.io.enabled=false;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT
 --
-CREATE TABLE part_change_various_various_struct1(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_various_various_struct1_n2(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT);
-CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_a_txt_n2(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n2;
-insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt;
+insert into table part_change_various_various_struct1_n2 partition(part=1) select * from complex_struct1_a_txt_n2;
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n2;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING);
+alter table part_change_various_various_struct1_n2 replace columns (insert_num int, s1 STRUCT, b STRING);
-CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_b_txt_n2(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n2;
-insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt;
+insert into table part_change_various_various_struct1_n2 partition(part=2) select * from complex_struct1_b_txt_n2;
-CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_c_txt_n2(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n2;
-insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt;
+insert into table part_change_various_various_struct1_n2 partition(part=1) select * from complex_struct1_c_txt_n2;
 explain vectorization detail
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n2;
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n2;
-drop table part_change_various_various_struct1;
+drop table part_change_various_various_struct1_n2;
 --
 -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT
 --
-CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_various_various_struct2_n2(insert_num int, b STRING) PARTITIONED BY(part INT);
-insert into table part_add_various_various_struct2 partition(part=1)
+insert into table part_add_various_various_struct2_n2 partition(part=1)
     values(1, 'original'), (2, 'original');
-select insert_num,part,b from part_add_various_various_struct2;
+select insert_num,part,b from part_add_various_various_struct2_n2;
 -- Table-Non-Cascade ADD COLUMN ...
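-- NOTE (illustrative sketch, not part of this patch): the ADD COLUMNS case the next hunk
-- tests, on a hypothetical table t_add_struct. Rows written before the column existed are
-- not rewritten; the new STRUCT column simply reads back as NULL for them.
--   CREATE TABLE t_add_struct (insert_num int, b string) PARTITIONED BY (part int);
--   INSERT INTO t_add_struct PARTITION (part=1) VALUES (1, 'original');
--   ALTER TABLE t_add_struct ADD COLUMNS (s2 struct<c1:int, c2:string>);
--   SELECT insert_num, b, s2 FROM t_add_struct; -- s2 is NULL for the pre-ALTER row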
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT);
+alter table part_add_various_various_struct2_n2 ADD columns (s2 STRUCT);
-CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_a_txt_n2(insert_num int, b STRING, s2 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n2;
-insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt;
+insert into table part_add_various_various_struct2_n2 partition(part=1) select * from complex_struct2_a_txt_n2;
-CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_b_txt_n2(insert_num int, b STRING, s2 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n2;
-insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt;
+insert into table part_add_various_various_struct2_n2 partition(part=2) select * from complex_struct2_b_txt_n2;
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n2;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT);
+alter table part_add_various_various_struct2_n2 REPLACE columns (insert_num int, b STRING, s2 STRUCT);
-CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_c_txt_n2(insert_num int, b STRING, s2 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n2;
-insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt;
+insert into table part_add_various_various_struct2_n2 partition(part=2) select * from complex_struct2_c_txt_n2;
-CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_d_txt_n2(insert_num int, b STRING, s2 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n2;
-insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt;
+insert into table part_add_various_various_struct2_n2 partition(part=1) select * from complex_struct2_d_txt_n2;
 explain vectorization detail
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n2;
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n2;
-drop table part_add_various_various_struct2;
+drop table part_add_various_various_struct2_n2;
@@ -125,40 +125,40 @@ drop table part_add_various_various_struct2;
 --
 -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns
 --
-CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT);
+CREATE TABLE part_add_to_various_various_struct4_n2(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT);
-CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_a_txt_n2(insert_num int, b STRING, s3 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n2;
-insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt;
+insert into table part_add_to_various_various_struct4_n2 partition(part=1) select * from complex_struct4_a_txt_n2;
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n2;
 -- Table-Non-Cascade CHANGE COLUMNS ...
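-- NOTE (illustrative sketch, not part of this patch): the struct-widening case the next
-- hunk tests, on a hypothetical table t_grow_struct. REPLACE COLUMNS swaps in a STRUCT
-- with one extra trailing field; rows written under the old struct definition read back
-- with that field as NULL.
--   CREATE TABLE t_grow_struct (insert_num int, b string, s3 struct<c1:int, c2:string>) PARTITIONED BY (part int);
--   ALTER TABLE t_grow_struct REPLACE COLUMNS (insert_num int, b string, s3 struct<c1:int, c2:string, c3:date>);
--   SELECT insert_num, b, s3 FROM t_grow_struct; -- pre-ALTER rows show s3.c3 = NULL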
-alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT);
+alter table part_add_to_various_various_struct4_n2 replace columns (insert_num int, b STRING, s3 STRUCT);
-CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_b_txt_n2(insert_num int, b STRING, s3 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n2;
-insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt;
+insert into table part_add_to_various_various_struct4_n2 partition(part=2) select * from complex_struct4_b_txt_n2;
-CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_c_txt_n2(insert_num int, b STRING, s3 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n2;
-insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt;
+insert into table part_add_to_various_various_struct4_n2 partition(part=1) select * from complex_struct4_c_txt_n2;
 explain vectorization detail
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n2;
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n2;
-drop table part_add_to_various_various_struct4;
+drop table part_add_to_various_various_struct4_n2;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_complex_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_complex_llap_io.q
index a2800b521e..9b97aa958d 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_complex_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_complex_llap_io.q
@@ -22,103 +22,103 @@ set hive.llap.io.encode.enabled=true;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT
 --
-CREATE TABLE part_change_various_various_struct1(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_various_various_struct1_n5(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT);
-CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_a_txt_n5(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n5;
-insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt;
+insert into table part_change_various_various_struct1_n5 partition(part=1) select * from complex_struct1_a_txt_n5;
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n5;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING);
+alter table part_change_various_various_struct1_n5 replace columns (insert_num int, s1 STRUCT, b STRING);
-CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_b_txt_n5(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n5;
-insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt;
+insert into table part_change_various_various_struct1_n5 partition(part=2) select * from complex_struct1_b_txt_n5;
-CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_c_txt_n5(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n5;
-insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt;
+insert into table part_change_various_various_struct1_n5 partition(part=1) select * from complex_struct1_c_txt_n5;
 explain vectorization detail
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n5;
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n5;
-drop table part_change_various_various_struct1;
+drop table part_change_various_various_struct1_n5;
 --
 -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT
 --
-CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_various_various_struct2_n5(insert_num int, b STRING) PARTITIONED BY(part INT);
-insert into table part_add_various_various_struct2 partition(part=1)
+insert into table part_add_various_various_struct2_n5 partition(part=1)
    values(1, 'original'), (2, 'original');
-select insert_num,part,b from part_add_various_various_struct2;
+select insert_num,part,b from part_add_various_various_struct2_n5;
 -- Table-Non-Cascade ADD COLUMN ...
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT);
+alter table part_add_various_various_struct2_n5 ADD columns (s2 STRUCT);
-CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_a_txt_n5(insert_num int, b STRING, s2 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n5;
-insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt;
+insert into table part_add_various_various_struct2_n5 partition(part=1) select * from complex_struct2_a_txt_n5;
-CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_b_txt_n5(insert_num int, b STRING, s2 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n5;
-insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt;
+insert into table part_add_various_various_struct2_n5 partition(part=2) select * from complex_struct2_b_txt_n5;
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n5;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT);
+alter table part_add_various_various_struct2_n5 REPLACE columns (insert_num int, b STRING, s2 STRUCT);
-CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_c_txt_n5(insert_num int, b STRING, s2 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n5;
-insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt;
+insert into table part_add_various_various_struct2_n5 partition(part=2) select * from complex_struct2_c_txt_n5;
-CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_d_txt_n5(insert_num int, b STRING, s2 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n5;
-insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt;
+insert into table part_add_various_various_struct2_n5 partition(part=1) select * from complex_struct2_d_txt_n5;
 explain vectorization detail
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n5;
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n5;
-drop table part_add_various_various_struct2;
+drop table part_add_various_various_struct2_n5;
@@ -126,40 +126,40 @@ drop table part_add_various_various_struct2;
 --
 -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns
 --
-CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT);
+CREATE TABLE part_add_to_various_various_struct4_n5(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT);
-CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_a_txt_n5(insert_num int, b STRING, s3 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n5;
-insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt;
+insert into table part_add_to_various_various_struct4_n5 partition(part=1) select * from complex_struct4_a_txt_n5;
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n5;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT);
+alter table part_add_to_various_various_struct4_n5 replace columns (insert_num int, b STRING, s3 STRUCT);
-CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_b_txt_n5(insert_num int, b STRING, s3 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n5;
-insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt;
+insert into table part_add_to_various_various_struct4_n5 partition(part=2) select * from complex_struct4_b_txt_n5;
-CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_c_txt_n5(insert_num int, b STRING, s3 STRUCT)
 row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n5;
-insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt;
+insert into table part_add_to_various_various_struct4_n5 partition(part=1) select * from complex_struct4_c_txt_n5;
 explain vectorization detail
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n5;
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n5;
-drop table part_add_to_various_various_struct4;
+drop table part_add_to_various_various_struct4_n5;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
index 336a9ad0d3..427734f970 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
@@ -22,13 +22,13 @@ set hive.llap.io.enabled=false;
 --
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n41(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n41;
-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_2_n14(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n14;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various:
@@ -38,7 +38,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_
 -- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT -2147483648 to 2147483647 and
 -- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807
 --
-CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int,
+CREATE TABLE part_change_various_various_boolean_to_bigint_n6(insert_num int,
             c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP,
             c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP,
             c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP,
@@ -46,18 +46,18 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int,
             c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP,
             b STRING) PARTITIONED BY(part INT);
-insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_boolean_to_bigint_n6 partition(part=1) SELECT insert_num,
             tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, boolean_str, timestamp1,
             boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1,
             boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1,
             boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1,
             boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str, bigint_str, bigint_str, timestamp1,
-            'original' FROM schema_evolution_data;
+            'original' FROM schema_evolution_data_n41;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n6;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int,
+alter table part_change_various_various_boolean_to_bigint_n6 replace columns (insert_num int,
             c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN,
             c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT,
             c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT,
@@ -65,20 +65,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser
             c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT,
             b STRING);
-insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_boolean_to_bigint_n6 partition(part=1) SELECT insert_num,
             boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1,
             tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
             smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1,
             int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1,
             bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1,
-            'new' FROM schema_evolution_data;
+            'new' FROM schema_evolution_data_n41;
 explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n6;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n6;
-drop table part_change_various_various_boolean_to_bigint;
+drop table part_change_various_various_boolean_to_bigint_n6;
@@ -88,39 +88,39 @@ drop table part_change_various_various_boolean_to_bigint;
 -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and
 -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and
 --
-CREATE TABLE part_change_various_various_decimal_to_double(insert_num int,
+CREATE TABLE part_change_various_various_decimal_to_double_n6(insert_num int,
             c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP,
             c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP,
             c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP,
             b STRING) PARTITIONED BY(part INT);
-insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_decimal_to_double_n6 partition(part=1) SELECT insert_num,
             boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1,
             boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1,
             boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1,
-            'original' FROM schema_evolution_data;
+            'original' FROM schema_evolution_data_n41;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n6;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_decimal_to_double replace columns (insert_num int,
+alter table part_change_various_various_decimal_to_double_n6 replace columns (insert_num int,
             c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18),
             c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT,
             c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE,
             b STRING);
-insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_decimal_to_double_n6 partition(part=1) SELECT insert_num,
             decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1,
             float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1,
             double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1,
-            'new' FROM schema_evolution_data_2 WHERE insert_num=111;
+            'new' FROM schema_evolution_data_2_n14 WHERE insert_num=111;
 explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n6;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n6;
-drop table part_change_various_various_decimal_to_double;
+drop table part_change_various_various_decimal_to_double_n6;
@@ -128,80 +128,80 @@ drop table part_change_various_various_decimal_to_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP
 --
-CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_various_various_timestamp_n6(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT);
-insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data;
+insert into table part_change_various_various_timestamp_n6 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n41;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n6;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING);
+alter table part_change_various_various_timestamp_n6 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING);
-insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
+insert into table part_change_various_various_timestamp_n6 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n14 WHERE insert_num=111;
 explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n6;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n6;
-drop table part_change_various_various_timestamp;
+drop table part_change_various_various_timestamp_n6;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP) --> DATE
 --
-CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_various_various_date_n6(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT);
-insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data;
+insert into table part_change_various_various_date_n6 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n41;
-select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
+select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n6;
 -- Table-Non-Cascade CHANGE COLUMNS ...
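-- NOTE (illustrative sketch, not part of this patch): the date conversion the next hunk
-- tests, on a hypothetical table t_to_date. STRING/CHAR/VARCHAR/TIMESTAMP columns are
-- retyped to DATE; stored values that cannot be interpreted as dates read back as NULL.
--   CREATE TABLE t_to_date (insert_num int, c1 string, c2 timestamp, b string) PARTITIONED BY (part int);
--   ALTER TABLE t_to_date REPLACE COLUMNS (insert_num int, c1 date, c2 date, b string);
--   SELECT insert_num, part, c1, c2, b FROM t_to_date;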
-alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING);
+alter table part_change_various_various_date_n6 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING);
-insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
+insert into table part_change_various_various_date_n6 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n14 WHERE insert_num=111;
 explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
+select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n6;
-select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
+select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n6;
-drop table part_change_various_various_date;
+drop table part_change_various_various_date_n6;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale
 --
-CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_same_type_different_params_n6(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT);
-CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING)
+CREATE TABLE same_type1_a_txt_n6(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt;
+load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n6;
-insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt;
+insert into table part_change_same_type_different_params_n6 partition(part=1) select * from same_type1_a_txt_n6;
-select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;
+select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n6;
 -- Table-Non-Cascade CHANGE COLUMNS ...
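-- NOTE (illustrative sketch, not part of this patch): the same-type reparameterization
-- the next hunk tests, on a hypothetical table t_resize. Only maxLength and
-- precision/scale change, so existing values are re-truncated or re-rounded to the new
-- parameters when read.
--   CREATE TABLE t_resize (insert_num int, c1 char(12), c2 decimal(12,4), b string) PARTITIONED BY (part int);
--   ALTER TABLE t_resize REPLACE COLUMNS (insert_num int, c1 char(8), c2 decimal(10,2), b string);
--   SELECT insert_num, part, c1, c2, b FROM t_resize;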
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); +alter table part_change_same_type_different_params_n6 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); -CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_b_txt_n6(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n6; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt; +insert into table part_change_same_type_different_params_n6 partition(part=1) select * from same_type1_b_txt_n6; -CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_c_txt_n6(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n6; -insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt; +insert into table part_change_same_type_different_params_n6 partition(part=2) select * from same_type1_c_txt_n6; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n6; -drop table part_change_same_type_different_params; +drop table part_change_same_type_different_params_n6; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q index b1b414d030..1eca9e302c 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q @@ -23,13 +23,13 @@ set hive.llap.io.encode.enabled=true; -- -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n40(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n40; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n13(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n13; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: @@ -39,7 +39,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_ -- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT –2147483648 to 2147483647 and -- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807 -- -CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, +CREATE TABLE part_change_various_various_boolean_to_bigint_n5(insert_num int, c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP, c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP, c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP, @@ -47,18 +47,18 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n5 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, float1, double1, 
decimal1, boolean_str, timestamp1, boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1, boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1, boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1, boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str, bigint_str, bigint_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n40; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n5; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int, +alter table part_change_various_various_boolean_to_bigint_n5 replace columns (insert_num int, c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN, c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT, c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT, @@ -66,20 +66,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT, b STRING); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n5 partition(part=1) SELECT insert_num, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, - 'new' FROM schema_evolution_data; + 'new' FROM schema_evolution_data_n40; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n5; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n5; -drop table part_change_various_various_boolean_to_bigint; +drop table part_change_various_various_boolean_to_bigint_n5; @@ -89,39 +89,39 @@ drop table part_change_various_various_boolean_to_bigint; -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and -- -CREATE TABLE part_change_various_various_decimal_to_double(insert_num int, +CREATE TABLE part_change_various_various_decimal_to_double_n5(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP, c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP, c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n5 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n40; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
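-- For context, a minimal sketch of the non-cascade mechanics that the REPLACE COLUMNS statements
-- in this file exercise (table name demo_dec is hypothetical, and this assumes the same session
-- settings as these tests): the ALTER rewrites only the metastore schema, files already written
-- keep their original physical types, and the reader converts each value to the new column type
-- at scan time.
CREATE TABLE demo_dec(insert_num int, c1 DECIMAL(38,18)) PARTITIONED BY(part INT) STORED AS ORC;
insert into table demo_dec partition(part=1) values (1, 1234.5678);
-- metadata-only change; the ORC file under part=1 still stores DECIMAL
alter table demo_dec replace columns (insert_num int, c1 DOUBLE);
-- 1234.5678 should come back as a DOUBLE, converted on read
select insert_num, c1 from demo_dec;
drop table demo_dec;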
-alter table part_change_various_various_decimal_to_double replace columns (insert_num int, +alter table part_change_various_various_decimal_to_double_n5 replace columns (insert_num int, c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18), c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT, c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE, b STRING); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n5 partition(part=1) SELECT insert_num, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, - 'new' FROM schema_evolution_data_2 WHERE insert_num=111; + 'new' FROM schema_evolution_data_2_n13 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n5; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n5; -drop table part_change_various_various_decimal_to_double; +drop table part_change_various_various_decimal_to_double_n5; @@ -129,80 +129,80 @@ drop table part_change_various_various_decimal_to_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP -- -CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_timestamp_n5(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_timestamp_n5 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n40; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n5; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); +alter table part_change_various_various_timestamp_n5 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_timestamp_n5 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n13 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n5; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n5; -drop table part_change_various_various_timestamp; +drop table part_change_various_various_timestamp_n5; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP) --> DATE -- -CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_date_n5(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_date_n5 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n40; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n5; -- Table-Non-Cascade CHANGE COLUMNS ...
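-- To make the DATE conversion below concrete, a hedged sketch (table name demo_date is
-- hypothetical): a well-formed date string should parse to DATE, and a TIMESTAMP should be
-- truncated to its date part.
CREATE TABLE demo_date(c1 STRING, c2 TIMESTAMP) PARTITIONED BY(part INT) STORED AS ORC;
insert into table demo_date partition(part=1) values ('2007-02-09', '2007-02-09 05:17:29.368756876');
alter table demo_date replace columns (c1 DATE, c2 DATE);
-- both columns should read back as 2007-02-09
select c1, c2 from demo_date;
drop table demo_date;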
-alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); +alter table part_change_various_various_date_n5 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_date_n5 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n13 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n5; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n5; -drop table part_change_various_various_date; +drop table part_change_various_various_date_n5; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale -- -CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_same_type_different_params_n5(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); -CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) +CREATE TABLE same_type1_a_txt_n5(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n5; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt; +insert into table part_change_same_type_different_params_n5 partition(part=1) select * from same_type1_a_txt_n5; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
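-- The parameter changes below shrink some columns and widen others; as a rough illustration
-- (table name demo_params is hypothetical), shrinking a VARCHAR should truncate on read and
-- reducing DECIMAL scale should round, while widening is lossless.
CREATE TABLE demo_params(c1 VARCHAR(25), c2 DECIMAL(12,4)) PARTITIONED BY(part INT) STORED AS ORC;
insert into table demo_params partition(part=1) values ('abcdefghijklmnopqrstuvwxy', 1234.5678);
alter table demo_params replace columns (c1 VARCHAR(15), c2 DECIMAL(10,2));
-- expect 'abcdefghijklmno' (first 15 characters) and 1234.57 (rounded to scale 2)
select c1, c2 from demo_params;
drop table demo_params;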
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); +alter table part_change_same_type_different_params_n5 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); -CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_b_txt_n5(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n5; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt; +insert into table part_change_same_type_different_params_n5 partition(part=1) select * from same_type1_b_txt_n5; -CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_c_txt_n5(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n5; -insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt; +insert into table part_change_same_type_different_params_n5 partition(part=2) select * from same_type1_c_txt_n5; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n5; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n5; -drop table part_change_same_type_different_params; +drop table part_change_same_type_different_params_n5; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_llap_io.q index 52f10e1050..59d9187d42 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_llap_io.q @@ -19,9 +19,9 @@ set hive.llap.io.encode.enabled=true; -- FILE VARIATION: ORC, Non-Vectorized, MapWork, Partitioned -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n11(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n11; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -30,51 +30,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_permute_select_n2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_permute_select_n2 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... -alter table part_add_int_permute_select add columns(c int); +alter table part_add_int_permute_select_n2 add columns(c int); -insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +insert into table part_add_int_permute_select_n2 partition(part=1) VALUES (2, 2222, 'new', 3333); explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n2; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_permute_select; -select insert_num,part,a,b,c from part_add_int_permute_select; -select insert_num,part,c from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n2; +select insert_num,part,a,b,c from part_add_int_permute_select_n2; +select insert_num,part,c from part_add_int_permute_select_n2; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n2; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_string_permute_select_n2(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n2 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... 
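-- The ADD COLUMNS pattern below is what the later "NULL defaulting" SELECTs verify; a minimal
-- sketch (table name demo_add is hypothetical): rows written before the ALTER carry no data for
-- the new column, so it reads back as NULL for them.
CREATE TABLE demo_add(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
insert into table demo_add partition(part=1) VALUES (1, 1111, 'old');
alter table demo_add add columns(c int);
insert into table demo_add partition(part=1) VALUES (2, 2222, 'new', 3333);
-- row 1 returns c = NULL; row 2 returns c = 3333
select insert_num,a,b,c from demo_add;
drop table demo_add;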
-alter table part_add_int_string_permute_select add columns(c int, d string); +alter table part_add_int_string_permute_select_n2 add columns(c int, d string); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +insert into table part_add_int_string_permute_select_n2 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n2; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_string_permute_select; -select insert_num,part,a,b,c from part_add_int_string_permute_select; -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; -select insert_num,part,a,c,d from part_add_int_string_permute_select; -select insert_num,part,a,d from part_add_int_string_permute_select; -select insert_num,part,c from part_add_int_string_permute_select; -select insert_num,part,d from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n2; +select insert_num,part,a,b,c from part_add_int_string_permute_select_n2; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n2; +select insert_num,part,a,c,d from part_add_int_string_permute_select_n2; +select insert_num,part,a,d from part_add_int_string_permute_select_n2; +select insert_num,part,c from part_add_int_string_permute_select_n2; +select insert_num,part,d from part_add_int_string_permute_select_n2; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n2; @@ -85,21 +85,21 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_string_group_double_n2(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n2 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
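-- As context for the STRING group --> DOUBLE change below, a hedged sketch (table name
-- demo_str_dbl is hypothetical): numeric-looking strings convert, and anything unparseable
-- should read back as NULL rather than fail the query.
CREATE TABLE demo_str_dbl(insert_num int, c1 STRING) PARTITIONED BY(part INT) STORED AS ORC;
insert into table demo_str_dbl partition(part=1) values (1, '789.321'), (2, 'not a double');
alter table demo_str_dbl replace columns (insert_num int, c1 DOUBLE);
-- expect 789.321 for row 1 and NULL for row 2
select insert_num,c1 from demo_str_dbl;
drop table demo_str_dbl;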
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table part_change_string_group_double_n2 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +insert into table part_change_string_group_double_n2 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n11 WHERE insert_num = 111; explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n2; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n2; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n2; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -108,21 +108,21 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n2(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n2 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
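-- Roughly what the DATE/TIMESTAMP --> STRING_GROUP change below produces (table name
-- demo_date_str is hypothetical): values are rendered in their canonical string form, and the
-- narrow CHAR/VARCHAR "trunc" targets cut the rendered timestamp short.
CREATE TABLE demo_date_str(c1 DATE, c2 TIMESTAMP) PARTITIONED BY(part INT) STORED AS ORC;
insert into table demo_date_str partition(part=1) values ('2007-02-09', '2007-02-09 05:17:29.368756876');
alter table demo_date_str replace columns (c1 STRING, c2 CHAR(15));
-- expect '2007-02-09' and the timestamp cut to 15 characters: '2007-02-09 05:1'
select c1, c2 from demo_date_str;
drop table demo_date_str;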
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table part_change_date_group_string_group_date_timestamp_n2 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table part_change_date_group_string_group_date_timestamp_n2 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n2; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n2; @@ -138,39 +138,39 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n2(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n2 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n11; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n2; -- Table-Non-Cascade CHANGE COLUMNS ... 
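-- The CHAR(5)/VARCHAR(5) "trunc" targets below are the interesting cases; a minimal sketch
-- (table name demo_int_str is hypothetical): the integer is rendered as digits first and then
-- cut to the declared length.
CREATE TABLE demo_int_str(c1 INT) PARTITIONED BY(part INT) STORED AS ORC;
insert into table demo_int_str partition(part=1) values (90000000);
alter table demo_int_str replace columns (c1 CHAR(5));
-- expect '90000': eight digits truncated to five
select c1 from demo_int_str;
drop table demo_int_str;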
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_multi_ints_string_group_n2 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n2 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n2; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n2; @@ -181,39 +181,39 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n2(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n2 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n11; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n2; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n2 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n2 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n2; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n2; @@ -225,37 +225,37 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n2(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n2 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n11; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n2; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_string_group_string_group_string replace columns (insert_num int, +alter table part_change_string_group_string_group_string_n2 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, +insert into table part_change_string_group_string_group_string_n2 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n2; -drop table part_change_string_group_string_group_string; +drop table part_change_string_group_string_group_string_n2; ------------------------------------------------------------------------------------------ @@ -269,31 +269,31 @@ drop table part_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2 partition(part=1) SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n11; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2; -- Table-Non-Cascade CHANGE COLUMNS ... 
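-- The lower-to-higher changes below are the safe, widening direction; a hedged sketch (table
-- name demo_widen is hypothetical): every TINYINT fits in BIGINT and every INT in DOUBLE, so no
-- NULLs or truncation are expected.
CREATE TABLE demo_widen(c1 TINYINT, c2 INT) PARTITIONED BY(part INT) STORED AS ORC;
insert into table demo_widen partition(part=1) values (100, 90000000);
alter table demo_widen replace columns (c1 BIGINT, c2 DOUBLE);
select c1, c2 from demo_widen;
drop table demo_widen;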
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2 partition(part=1) VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, @@ -301,11 +301,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2; -drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2; @@ -314,26 +314,26 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n2(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n2 partition(part=1) SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n11; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n2; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n2 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n2 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n2; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n2; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n2; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q index 592b4008b4..c9e27bec16 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q @@ -17,9 +17,9 @@ set hive.llap.io.enabled=false; -- FILE VARIATION: ORC, Non-Vectorized, MapWork, Table -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n36(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n36; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -28,51 +28,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. 
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING); +CREATE TABLE table_add_int_permute_select_n10(insert_num int, a INT, b STRING); -insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_permute_select_n10 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n36; -- Table-Non-Cascade ADD COLUMNS ... -alter table table_add_int_permute_select add columns(c int); +alter table table_add_int_permute_select_n10 add columns(c int); -insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000); +insert into table table_add_int_permute_select_n10 VALUES (111, 80000, 'new', 80000); explain vectorization detail -select insert_num,a,b from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n10; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_permute_select; -select insert_num,a,b,c from table_add_int_permute_select; -select insert_num,c from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n10; +select insert_num,a,b,c from table_add_int_permute_select_n10; +select insert_num,c from table_add_int_permute_select_n10; -drop table table_add_int_permute_select; +drop table table_add_int_permute_select_n10; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING); +CREATE TABLE table_add_int_string_permute_select_n10(insert_num int, a INT, b STRING); -insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_string_permute_select_n10 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n36; -- Table-Non-Cascade ADD COLUMNS ... 
-alter table table_add_int_string_permute_select add columns(c int, d string); +alter table table_add_int_string_permute_select_n10 add columns(c int, d string); -insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler'); +insert into table table_add_int_string_permute_select_n10 VALUES (111, 80000, 'new', 80000, 'filler'); explain vectorization detail -select insert_num,a,b from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n10; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_string_permute_select; -select insert_num,a,b,c from table_add_int_string_permute_select; -select insert_num,a,b,c,d from table_add_int_string_permute_select; -select insert_num,a,c,d from table_add_int_string_permute_select; -select insert_num,a,d from table_add_int_string_permute_select; -select insert_num,c from table_add_int_string_permute_select; -select insert_num,d from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n10; +select insert_num,a,b,c from table_add_int_string_permute_select_n10; +select insert_num,a,b,c,d from table_add_int_string_permute_select_n10; +select insert_num,a,c,d from table_add_int_string_permute_select_n10; +select insert_num,a,d from table_add_int_string_permute_select_n10; +select insert_num,c from table_add_int_string_permute_select_n10; +select insert_num,d from table_add_int_string_permute_select_n10; -drop table table_add_int_string_permute_select; +drop table table_add_int_string_permute_select_n10; @@ -83,21 +83,21 @@ drop table table_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING); +CREATE TABLE table_change_string_group_double_n10(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING); -insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table table_change_string_group_double_n10 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n36; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table table_change_string_group_double_n10 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new'); +insert into table table_change_string_group_double_n10 VALUES (111, 789.321, 789.321, 789.321, 'new'); explain vectorization detail -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n10; -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n10; -drop table table_change_string_group_double; +drop table table_change_string_group_double_n10; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -106,18 +106,18 @@ drop table table_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING); +CREATE TABLE table_change_date_group_string_group_date_group_n10(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING); -insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table table_change_date_group_string_group_date_group_n10 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n36; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table table_change_date_group_string_group_date_group_n10 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table table_change_date_group_string_group_date_group_n10 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n10; -drop table table_change_date_group_string_group_date_group; +drop table table_change_date_group_string_group_date_group_n10; @@ -132,39 +132,39 @@ drop table table_change_date_group_string_group_date_group; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n10(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING); -insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n10 SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n36; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n10; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_multi_ints_string_group_n10 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n10 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n10; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n10; -drop table table_change_numeric_group_string_group_multi_ints_string_group; +drop table table_change_numeric_group_string_group_multi_ints_string_group_n10; @@ -175,39 +175,39 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n10(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n10 SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n36; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n10; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_floating_string_group_n10 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_floating_string_group_n10 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n10; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n10; -drop table table_change_numeric_group_string_group_floating_string_group; +drop table table_change_numeric_group_string_group_floating_string_group_n10; ------------------------------------------------------------------------------------------ @@ -218,34 +218,34 @@ drop table table_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE table_change_string_group_string_group_string(insert_num int, +CREATE TABLE table_change_string_group_string_group_string_n10(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING); -insert into table table_change_string_group_string_group_string SELECT insert_num, +insert into table table_change_string_group_string_group_string_n10 SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n36; -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n10; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_string_group_string_group_string replace columns (insert_num int, +alter table table_change_string_group_string_group_string_n10 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table table_change_string_group_string_group_string VALUES (111, +insert into table table_change_string_group_string_group_string_n10 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n10; -drop table table_change_string_group_string_group_string; +drop table table_change_string_group_string_group_string_n10; @@ -260,40 +260,40 @@ drop table table_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING); -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10 SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n36; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10; -- Table-Non-Cascade CHANGE COLUMNS ... 
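-- (illustrative sketch with a hypothetical demo table: widening an integer
-- column is always value-preserving, since every tinyint fits in
-- smallint/int/bigint and in decimal/float/double, so old rows are expected
-- to read back unchanged after the metadata-only REPLACE COLUMNS)
CREATE TABLE t_widen_demo(v tinyint);
insert into table t_widen_demo values (100);
alter table t_widen_demo replace columns (v bigint);
select v from t_widen_demo;  -- expected: 100, now typed bigint
drop table t_widen_demo;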
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10 VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, 1234.5678, 9876.543, 789.321, 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10; -drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10; @@ -302,23 +302,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n10(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING); -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n10 SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n36; -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n10; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n10 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n10 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n10; -drop table table_change_lower_to_higher_numeric_group_decimal_to_float; +drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n10; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table_llap_io.q index 5312fbe947..fd3e87dc57 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table_llap_io.q @@ -18,9 +18,9 @@ set hive.llap.io.encode.enabled=true; -- FILE VARIATION: ORC, Non-Vectorized, MapWork, Table -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n19(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n19; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -29,51 +29,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING); +CREATE TABLE table_add_int_permute_select_n7(insert_num int, a INT, b STRING); -insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_permute_select_n7 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n19; -- Table-Non-Cascade ADD COLUMNS ... 
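-- (illustrative sketch with a hypothetical demo table: ADD COLUMNS without
-- CASCADE only appends to the table schema; rows written before the ALTER
-- store no value for the new column, so readers default it to NULL, which
-- is what the permutation SELECTs below verify)
CREATE TABLE t_add_demo(insert_num int, a int);
insert into table t_add_demo values (1, 10);
alter table t_add_demo add columns(c int);
insert into table t_add_demo values (2, 20, 200);
select insert_num, a, c from t_add_demo;  -- expected: (1, 10, NULL) and (2, 20, 200)
drop table t_add_demo;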
-alter table table_add_int_permute_select add columns(c int); +alter table table_add_int_permute_select_n7 add columns(c int); -insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000); +insert into table table_add_int_permute_select_n7 VALUES (111, 80000, 'new', 80000); explain vectorization detail -select insert_num,a,b from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n7; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_permute_select; -select insert_num,a,b,c from table_add_int_permute_select; -select insert_num,c from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n7; +select insert_num,a,b,c from table_add_int_permute_select_n7; +select insert_num,c from table_add_int_permute_select_n7; -drop table table_add_int_permute_select; +drop table table_add_int_permute_select_n7; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING); +CREATE TABLE table_add_int_string_permute_select_n7(insert_num int, a INT, b STRING); -insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_string_permute_select_n7 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n19; -- Table-Non-Cascade ADD COLUMNS ... -alter table table_add_int_string_permute_select add columns(c int, d string); +alter table table_add_int_string_permute_select_n7 add columns(c int, d string); -insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler'); +insert into table table_add_int_string_permute_select_n7 VALUES (111, 80000, 'new', 80000, 'filler'); explain vectorization detail -select insert_num,a,b from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n7; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_string_permute_select; -select insert_num,a,b,c from table_add_int_string_permute_select; -select insert_num,a,b,c,d from table_add_int_string_permute_select; -select insert_num,a,c,d from table_add_int_string_permute_select; -select insert_num,a,d from table_add_int_string_permute_select; -select insert_num,c from table_add_int_string_permute_select; -select insert_num,d from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n7; +select insert_num,a,b,c from table_add_int_string_permute_select_n7; +select insert_num,a,b,c,d from table_add_int_string_permute_select_n7; +select insert_num,a,c,d from table_add_int_string_permute_select_n7; +select insert_num,a,d from table_add_int_string_permute_select_n7; +select insert_num,c from table_add_int_string_permute_select_n7; +select insert_num,d from table_add_int_string_permute_select_n7; -drop table table_add_int_string_permute_select; +drop table table_add_int_string_permute_select_n7; @@ -84,21 +84,21 @@ drop table table_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING); +CREATE TABLE table_change_string_group_double_n7(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING); -insert into table 
table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table table_change_string_group_double_n7 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n19; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table table_change_string_group_double_n7 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new'); +insert into table table_change_string_group_double_n7 VALUES (111, 789.321, 789.321, 789.321, 'new'); explain vectorization detail -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n7; -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n7; -drop table table_change_string_group_double; +drop table table_change_string_group_double_n7; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -107,18 +107,18 @@ drop table table_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING); +CREATE TABLE table_change_date_group_string_group_date_group_n7(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING); -insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table table_change_date_group_string_group_date_group_n7 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n19; -- Table-Non-Cascade CHANGE COLUMNS ... 
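-- (illustrative sketch with a hypothetical demo table: DATE and TIMESTAMP
-- columns re-declared as a string type are expected to read back in their
-- canonical text form, truncated when the CHAR(n)/VARCHAR(n) target is
-- shorter than that rendering)
CREATE TABLE t_date_str_demo(d date, ts timestamp);
insert into table t_date_str_demo values ('2018-01-02', '2018-01-02 03:04:05');
alter table t_date_str_demo replace columns (d CHAR(7), ts STRING);
select d, ts from t_date_str_demo;  -- expected: '2018-01' and '2018-01-02 03:04:05'
drop table t_date_str_demo;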
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table table_change_date_group_string_group_date_group_n7 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table table_change_date_group_string_group_date_group_n7 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n7; -drop table table_change_date_group_string_group_date_group; +drop table table_change_date_group_string_group_date_group_n7; @@ -133,39 +133,39 @@ drop table table_change_date_group_string_group_date_group; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n7(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING); -insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n7 SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n19; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_multi_ints_string_group_n7 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n7 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n7; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n7; -drop table table_change_numeric_group_string_group_multi_ints_string_group; +drop table table_change_numeric_group_string_group_multi_ints_string_group_n7; @@ -176,39 +176,39 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n7(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n7 SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n19; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_floating_string_group_n7 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_floating_string_group_n7 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n7; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n7; -drop table table_change_numeric_group_string_group_floating_string_group; +drop table table_change_numeric_group_string_group_floating_string_group_n7; ------------------------------------------------------------------------------------------ @@ -219,34 +219,34 @@ drop table table_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE table_change_string_group_string_group_string(insert_num int, +CREATE TABLE table_change_string_group_string_group_string_n7(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING); -insert into table table_change_string_group_string_group_string SELECT insert_num, +insert into table table_change_string_group_string_group_string_n7 SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n19; -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_string_group_string_group_string replace columns (insert_num int, +alter table table_change_string_group_string_group_string_n7 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table table_change_string_group_string_group_string VALUES (111, +insert into table table_change_string_group_string_group_string_n7 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n7; -drop table table_change_string_group_string_group_string; +drop table table_change_string_group_string_group_string_n7; @@ -261,40 +261,40 @@ drop table table_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING); -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7 SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n19; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7 VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, 1234.5678, 9876.543, 789.321, 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7; -drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n7; @@ -303,23 +303,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n7(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING); -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n7 SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n19; -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n7 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n7 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n7; -drop table table_change_lower_to_higher_numeric_group_decimal_to_float; +drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n7; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q index d401679fa6..024bb3a7d4 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part.q @@ -19,9 +19,9 @@ set hive.llap.io.enabled=false; -- FILE VARIATION: ORC, Vectorized, MapWork, Partitioned -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n17(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n17; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -30,51 +30,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_permute_select_n4(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_permute_select_n4 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... 
-alter table part_add_int_permute_select add columns(c int); +alter table part_add_int_permute_select_n4 add columns(c int); -insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +insert into table part_add_int_permute_select_n4 partition(part=1) VALUES (2, 2222, 'new', 3333); explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n4; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_permute_select; -select insert_num,part,a,b,c from part_add_int_permute_select; -select insert_num,part,c from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n4; +select insert_num,part,a,b,c from part_add_int_permute_select_n4; +select insert_num,part,c from part_add_int_permute_select_n4; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n4; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_string_permute_select_n4(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n4 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... -alter table part_add_int_string_permute_select add columns(c int, d string); +alter table part_add_int_string_permute_select_n4 add columns(c int, d string); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +insert into table part_add_int_string_permute_select_n4 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n4; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_string_permute_select; -select insert_num,part,a,b,c from part_add_int_string_permute_select; -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; -select insert_num,part,a,c,d from part_add_int_string_permute_select; -select insert_num,part,a,d from part_add_int_string_permute_select; -select insert_num,part,c from part_add_int_string_permute_select; -select insert_num,part,d from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n4; +select insert_num,part,a,b,c from part_add_int_string_permute_select_n4; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n4; +select insert_num,part,a,c,d from part_add_int_string_permute_select_n4; +select insert_num,part,a,d from part_add_int_string_permute_select_n4; +select insert_num,part,c from part_add_int_string_permute_select_n4; +select insert_num,part,d from part_add_int_string_permute_select_n4; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n4; @@ -85,21 +85,21 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED 
BY(part INT); +CREATE TABLE part_change_string_group_double_n4(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n4 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n17; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table part_change_string_group_double_n4 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +insert into table part_change_string_group_double_n4 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n17 WHERE insert_num = 111; explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n4; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n4; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n4; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -108,21 +108,21 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n4(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n4 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n17; -- Table-Non-Cascade CHANGE COLUMNS ... 
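-- (illustrative sketch with a hypothetical demo table: on a partitioned
-- table, a non-cascade REPLACE COLUMNS changes only the table-level schema;
-- partitions created earlier keep their original column descriptors, and the
-- reader reconciles the two schemas per partition at scan time)
CREATE TABLE t_part_demo(v int) PARTITIONED BY (part int);
insert into table t_part_demo partition(part=1) values (1);
alter table t_part_demo replace columns (v bigint);
insert into table t_part_demo partition(part=2) values (2);
select part, v from t_part_demo;  -- both partitions expected to read v as bigint
drop table t_part_demo;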
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table part_change_date_group_string_group_date_timestamp_n4 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table part_change_date_group_string_group_date_timestamp_n4 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n4; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n4; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n4; @@ -138,39 +138,39 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n4(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n4 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n17; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_multi_ints_string_group_n4 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n4 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n4; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n4; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n4; @@ -181,39 +181,39 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n4(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n4 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n17; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n4 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n4 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n4; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n4; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n4; @@ -225,37 +225,37 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n4(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n4 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n17; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_string_group_string_group_string replace columns (insert_num int, +alter table part_change_string_group_string_group_string_n4 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, +insert into table part_change_string_group_string_group_string_n4 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n4; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n4; -drop table part_change_string_group_string_group_string; +drop table part_change_string_group_string_group_string_n4; ------------------------------------------------------------------------------------------ @@ -269,31 +269,31 @@ drop table part_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4 partition(part=1) SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n17; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4 partition(part=1) VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, @@ -301,11 +301,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4; -drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4; @@ -314,26 +314,26 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n4(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n4 partition(part=1) SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n17; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n4 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n4 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n4; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n4; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n4; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q index 91b3c4efe9..794b4b323e 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q @@ -21,103 +21,103 @@ set hive.llap.io.enabled=false; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_struct1_n8(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); -CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_a_txt_n8(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n8; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt; +insert into table part_change_various_various_struct1_n8 partition(part=1) select * from complex_struct1_a_txt_n8; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING); +alter table part_change_various_various_struct1_n8 replace columns (insert_num int, s1 STRUCT, b STRING); -CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_b_txt_n8(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n8; -insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt; +insert into table part_change_various_various_struct1_n8 partition(part=2) select * from complex_struct1_b_txt_n8; -CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_c_txt_n8(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n8; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt; +insert into table part_change_various_various_struct1_n8 partition(part=1) select * from complex_struct1_c_txt_n8; explain vectorization detail -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n8; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n8; -drop table part_change_various_various_struct1; +drop table part_change_various_various_struct1_n8; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT -- -CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_various_various_struct2_n8(insert_num int, b STRING) PARTITIONED BY(part INT); -insert into table part_add_various_various_struct2 partition(part=1) +insert into table part_add_various_various_struct2_n8 partition(part=1) values(1, 'original'), (2, 'original'); -select insert_num,part,b from part_add_various_various_struct2; +select insert_num,part,b from part_add_various_various_struct2_n8; -- Table-Non-Cascade ADD COLUMN ... 
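-- (illustrative sketch with a hypothetical struct type: a complex column
-- added after data exists behaves like any scalar added column; rows from
-- before the ALTER are expected to return NULL for the whole struct, while
-- later rows carry fully populated values)
CREATE TABLE t_struct_add_demo(insert_num int, b string);
insert into table t_struct_add_demo values (1, 'original');
alter table t_struct_add_demo add columns (s2 struct<c1:int, c2:string>);
select insert_num, b, s2 from t_struct_add_demo;  -- expected: (1, 'original', NULL)
drop table t_struct_add_demo;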
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT); +alter table part_add_various_various_struct2_n8 ADD columns (s2 STRUCT); -CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_a_txt_n8(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n8; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt; +insert into table part_add_various_various_struct2_n8 partition(part=1) select * from complex_struct2_a_txt_n8; -CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_b_txt_n8(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n8; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt; +insert into table part_add_various_various_struct2_n8 partition(part=2) select * from complex_struct2_b_txt_n8; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT); +alter table part_add_various_various_struct2_n8 REPLACE columns (insert_num int, b STRING, s2 STRUCT); -CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_c_txt_n8(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n8; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt; +insert into table part_add_various_various_struct2_n8 partition(part=2) select * from complex_struct2_c_txt_n8; -CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_d_txt_n8(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n8; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt; +insert into table part_add_various_various_struct2_n8 partition(part=1) select * from complex_struct2_d_txt_n8; explain vectorization detail -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n8; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n8; -drop table part_add_various_various_struct2; +drop table part_add_various_various_struct2_n8; @@ -125,40 +125,40 @@ drop table part_add_various_various_struct2; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns -- -CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); +CREATE TABLE part_add_to_various_various_struct4_n8(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); -CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_a_txt_n8(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n8; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt; +insert into table part_add_to_various_various_struct4_n8 partition(part=1) select * from complex_struct4_a_txt_n8; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT); +alter table part_add_to_various_various_struct4_n8 replace columns (insert_num int, b STRING, s3 STRUCT); -CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_b_txt_n8(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n8; -insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt; +insert into table part_add_to_various_various_struct4_n8 partition(part=2) select * from complex_struct4_b_txt_n8; -CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_c_txt_n8(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n8; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt; +insert into table part_add_to_various_various_struct4_n8 partition(part=1) select * from complex_struct4_c_txt_n8; explain vectorization detail -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n8; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n8; -drop table part_add_to_various_various_struct4; +drop table part_add_to_various_various_struct4_n8; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex_llap_io.q index d79bc826b8..e42a92d8ce 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex_llap_io.q @@ -22,103 +22,103 @@ set hive.llap.io.encode.enabled=true; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT -- -CREATE TABLE part_change_various_various_struct1(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_struct1_n0(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); -CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_a_txt_n0(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n0; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt; +insert into table part_change_various_various_struct1_n0
partition(part=1) select * from complex_struct1_a_txt_n0; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n0; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING); +alter table part_change_various_various_struct1_n0 replace columns (insert_num int, s1 STRUCT, b STRING); -CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_b_txt_n0(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n0; -insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt; +insert into table part_change_various_various_struct1_n0 partition(part=2) select * from complex_struct1_b_txt_n0; -CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_c_txt_n0(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n0; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt; +insert into table part_change_various_various_struct1_n0 partition(part=1) select * from complex_struct1_c_txt_n0; explain vectorization detail -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n0; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n0; -drop table part_change_various_various_struct1; +drop table part_change_various_various_struct1_n0; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT -- -CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_various_various_struct2_n0(insert_num int, b STRING) PARTITIONED BY(part INT); -insert into table part_add_various_various_struct2 partition(part=1) +insert into table part_add_various_various_struct2_n0 partition(part=1) values(1, 'original'), (2, 'original'); -select insert_num,part,b from part_add_various_various_struct2; +select insert_num,part,b from part_add_various_various_struct2_n0; -- Table-Non-Cascade ADD COLUMN ... 
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT); +alter table part_add_various_various_struct2_n0 ADD columns (s2 STRUCT); -CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_a_txt_n0(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n0; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt; +insert into table part_add_various_various_struct2_n0 partition(part=1) select * from complex_struct2_a_txt_n0; -CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_b_txt_n0(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n0; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt; +insert into table part_add_various_various_struct2_n0 partition(part=2) select * from complex_struct2_b_txt_n0; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n0; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT); +alter table part_add_various_various_struct2_n0 REPLACE columns (insert_num int, b STRING, s2 STRUCT); -CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_c_txt_n0(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n0; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt; +insert into table part_add_various_various_struct2_n0 partition(part=2) select * from complex_struct2_c_txt_n0; -CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_d_txt_n0(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n0; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt; +insert into table part_add_various_various_struct2_n0 partition(part=1) select * from complex_struct2_d_txt_n0; explain vectorization detail -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n0; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n0; -drop table part_add_various_various_struct2; +drop table part_add_various_various_struct2_n0; @@ -126,40 +126,40 @@ drop table part_add_various_various_struct2; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns -- -CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); +CREATE TABLE part_add_to_various_various_struct4_n0(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); -CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_a_txt_n0(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n0; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt; +insert into table part_add_to_various_various_struct4_n0 partition(part=1) select * from complex_struct4_a_txt_n0; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n0; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT); +alter table part_add_to_various_various_struct4_n0 replace columns (insert_num int, b STRING, s3 STRUCT); -CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_b_txt_n0(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n0; -insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt; +insert into table part_add_to_various_various_struct4_n0 partition(part=2) select * from complex_struct4_b_txt_n0; -CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_c_txt_n0(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n0; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt; +insert into table part_add_to_various_various_struct4_n0 partition(part=1) select * from complex_struct4_c_txt_n0; explain vectorization detail -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n0; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n0; -drop table part_add_to_various_various_struct4; +drop table part_add_to_various_various_struct4_n0; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q index 552c25d656..6e35f5a539 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q @@ -22,13 +22,13 @@ set hive.llap.io.enabled=false; -- -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n7(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as 
textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n7; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n1(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n1; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: @@ -38,7 +38,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_ -- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT -2147483648 to 2147483647 and -- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807 -- -CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, +CREATE TABLE part_change_various_various_boolean_to_bigint_n0(insert_num int, c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP, c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP, c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP, @@ -46,18 +46,18 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n0 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, boolean_str, timestamp1, boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1, boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1, boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1, boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str,
bigint_str, bigint_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n0; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int, +alter table part_change_various_various_boolean_to_bigint_n0 replace columns (insert_num int, c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN, c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT, c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT, @@ -65,20 +65,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT, b STRING); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n0 partition(part=1) SELECT insert_num, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, - 'new' FROM schema_evolution_data; + 'new' FROM schema_evolution_data_n7; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n0; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n0; -drop table part_change_various_various_boolean_to_bigint; +drop 
table part_change_various_various_boolean_to_bigint_n0; @@ -88,39 +88,39 @@ drop table part_change_various_various_boolean_to_bigint; -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and -- -CREATE TABLE part_change_various_various_decimal_to_double(insert_num int, +CREATE TABLE part_change_various_various_decimal_to_double_n0(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP, c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP, c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n0 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n0; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_decimal_to_double replace columns (insert_num int, +alter table part_change_various_various_decimal_to_double_n0 replace columns (insert_num int, c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18), c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT, c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE, b STRING); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n0 partition(part=1) SELECT insert_num, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, - 'new' FROM schema_evolution_data_2 WHERE insert_num=111; + 'new' FROM schema_evolution_data_2_n1 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n0; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n0; -drop table part_change_various_various_decimal_to_double; +drop table part_change_various_various_decimal_to_double_n0; @@ -128,80 +128,80 @@ drop table part_change_various_various_decimal_to_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP -- -CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_timestamp_n0(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_timestamp_n0 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n7; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n0; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); +alter table part_change_various_various_timestamp_n0 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_timestamp_n0 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n1 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n0; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n0; -drop table part_change_various_various_timestamp; +drop table part_change_various_various_timestamp_n0; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP) --> DATE -- -CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_date_n0(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_date_n0 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n7; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n0; -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); +alter table part_change_various_various_date_n0 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_date_n0 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n1 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n0; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n0; -drop table part_change_various_various_date; +drop table part_change_various_various_date_n0; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale -- -CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_same_type_different_params_n0(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); -CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) +CREATE TABLE same_type1_a_txt_n0(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n0; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt; +insert into table part_change_same_type_different_params_n0 partition(part=1) select * from same_type1_a_txt_n0; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n0; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); +alter table part_change_same_type_different_params_n0 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); -CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_b_txt_n0(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n0; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt; +insert into table part_change_same_type_different_params_n0 partition(part=1) select * from same_type1_b_txt_n0; -CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_c_txt_n0(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n0; -insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt; +insert into table part_change_same_type_different_params_n0 partition(part=2) select * from same_type1_c_txt_n0; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n0; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n0; -drop table part_change_same_type_different_params; +drop table part_change_same_type_different_params_n0; diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q index 2132cfa758..576f9948d0 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q @@ -23,13 +23,13 @@ set hive.llap.io.encode.enabled=true; -- -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n35(insert_num int, boolean1 boolean, tinyint1 tinyint, 
smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n35; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n11(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n11; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: @@ -39,7 +39,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_ -- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT -2147483648 to 2147483647 and -- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807 -- -CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, +CREATE TABLE part_change_various_various_boolean_to_bigint_n4(insert_num int, c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP, c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP, c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP, @@ -47,18 +47,18 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n4 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, boolean_str,
timestamp1, boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1, boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1, boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1, boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str, bigint_str, bigint_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n35; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n4; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int, +alter table part_change_various_various_boolean_to_bigint_n4 replace columns (insert_num int, c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN, c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT, c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT, @@ -66,20 +66,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT, b STRING); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n4 partition(part=1) SELECT insert_num, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, - 'new' FROM schema_evolution_data; + 'new' FROM schema_evolution_data_n35; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n4; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n4; -drop table part_change_various_various_boolean_to_bigint; +drop table part_change_various_various_boolean_to_bigint_n4; @@ -89,39 +89,39 @@ drop table part_change_various_various_boolean_to_bigint; -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and -- -CREATE TABLE part_change_various_various_decimal_to_double(insert_num int, +CREATE TABLE part_change_various_various_decimal_to_double_n4(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP, c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP, c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n4 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n35; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_decimal_to_double replace columns (insert_num int, +alter table part_change_various_various_decimal_to_double_n4 replace columns (insert_num int, c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18), c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT, c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE, b STRING); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n4 partition(part=1) SELECT insert_num, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, - 'new' FROM schema_evolution_data_2 WHERE insert_num=111; + 'new' FROM schema_evolution_data_2_n11 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n4; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n4; -drop table part_change_various_various_decimal_to_double; +drop table part_change_various_various_decimal_to_double_n4; @@ -129,80 +129,80 @@ drop table part_change_various_various_decimal_to_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP -- -CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_timestamp_n4(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_timestamp_n4 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n35; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n4; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); +alter table part_change_various_various_timestamp_n4 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_timestamp_n4 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n11 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n4; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n4; -drop table part_change_various_various_timestamp; +drop table part_change_various_various_timestamp_n4; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP) --> DATE -- -CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_date_n4(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_date_n4 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n35; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n4; -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); +alter table part_change_various_various_date_n4 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_date_n4 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n11 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n4; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n4; -drop table part_change_various_various_date; +drop table part_change_various_various_date_n4; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale -- -CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_same_type_different_params_n4(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); -CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) +CREATE TABLE same_type1_a_txt_n4(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n4; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt; +insert into table part_change_same_type_different_params_n4 partition(part=1) select * from same_type1_a_txt_n4; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING);
+alter table part_change_same_type_different_params_n4 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING);
-CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING)
+CREATE TABLE same_type1_b_txt_n4(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING)
row format delimited fields terminated by '|'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt;
+load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n4;
-insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt;
+insert into table part_change_same_type_different_params_n4 partition(part=1) select * from same_type1_b_txt_n4;
-CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING)
+CREATE TABLE same_type1_c_txt_n4(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING)
row format delimited fields terminated by '|'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt;
+load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n4;
-insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt;
+insert into table part_change_same_type_different_params_n4 partition(part=2) select * from same_type1_c_txt_n4;
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;
+select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n4;
-select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params;
+select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n4;
-drop table part_change_same_type_different_params;
+drop table part_change_same_type_different_params_n4;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q
index 99782277bf..c99ae38600 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q
@@ -20,9 +20,9 @@ set hive.llap.io.encode.enabled=true;
-- FILE VARIATION: ORC, Vectorized, MapWork, Partitioned
--
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n42(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
row format delimited fields terminated by '|'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n42;
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE ADD COLUMNS
@@ -31,51 +31,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
--
--
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_int_permute_select_n12(insert_num int, a INT, b STRING) PARTITIONED BY(part INT);
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_permute_select_n12 partition(part=1) VALUES (1, 1111, 'new');
-- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n12 add columns(c int);
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333);
+insert into table part_add_int_permute_select_n12 partition(part=1) VALUES (2, 2222, 'new', 3333);
explain vectorization detail
-select insert_num,part,a,b from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n12;
-- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n12;
+select insert_num,part,a,b,c from part_add_int_permute_select_n12;
+select insert_num,part,c from part_add_int_permute_select_n12;
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n12;
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
--
--
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_int_string_permute_select_n12(insert_num int, a INT, b STRING) PARTITIONED BY(part INT);
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_string_permute_select_n12 partition(part=1) VALUES (1, 1111, 'new');
-- Table-Non-Cascade ADD COLUMNS ...
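-- A non-cascade ADD COLUMNS only extends the table-level schema; rows already
-- written have no value for the new column, so the permuted SELECTs above and
-- below should return NULL for it on the 'original' rows. A minimal sketch on
-- a hypothetical table (not part of this test):
create table add_col_sketch(a int) partitioned by (part int);
insert into table add_col_sketch partition(part=1) values (1);
alter table add_col_sketch add columns(c int);
select a, c from add_col_sketch;  -- expect c = NULL for the pre-existing row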
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n12 add columns(c int, d string);
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
+insert into table part_add_int_string_permute_select_n12 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
explain vectorization detail
-select insert_num,part,a,b from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n12;
-- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_string_permute_select;
-select insert_num,part,a,b,c from part_add_int_string_permute_select;
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,d from part_add_int_string_permute_select;
-select insert_num,part,c from part_add_int_string_permute_select;
-select insert_num,part,d from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n12;
+select insert_num,part,a,b,c from part_add_int_string_permute_select_n12;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n12;
+select insert_num,part,a,c,d from part_add_int_string_permute_select_n12;
+select insert_num,part,a,d from part_add_int_string_permute_select_n12;
+select insert_num,part,c from part_add_int_string_permute_select_n12;
+select insert_num,part,d from part_add_int_string_permute_select_n12;
-drop table part_add_int_string_permute_select;
+drop table part_add_int_string_permute_select_n12;
@@ -86,21 +86,21 @@ drop table part_add_int_string_permute_select;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
--
-CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_string_group_double_n12(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT);
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table part_change_string_group_double_n12 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n42;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table part_change_string_group_double_n12 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
+insert into table part_change_string_group_double_n12 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n42 WHERE insert_num = 111;
explain vectorization detail
-select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
+select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n12;
-select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
+select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n12;
-drop table part_change_string_group_double;
+drop table part_change_string_group_double_n12;
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -109,21 +109,21 @@ drop table part_change_string_group_double;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
--
-CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_date_group_string_group_date_timestamp_n12(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT);
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table part_change_date_group_string_group_date_timestamp_n12 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n42;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table part_change_date_group_string_group_date_timestamp_n12 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table part_change_date_group_string_group_date_timestamp_n12 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n12;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n12;
-drop table part_change_date_group_string_group_date_timestamp;
+drop table part_change_date_group_string_group_date_timestamp_n12;
@@ -139,39 +139,39 @@ drop table part_change_date_group_string_group_date_timestamp;
-- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
--
--
-CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n12(insert_num int,
c1 tinyint, c2 smallint, c3 int, c4 bigint,
c5 tinyint, c6 smallint, c7 int, c8 bigint,
c9 tinyint, c10 smallint, c11 int, c12 bigint,
c13 tinyint, c14 smallint, c15 int, c16 bigint,
c17 tinyint, c18 smallint, c19 int, c20 bigint,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n12 partition(part=1) SELECT insert_num,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n42;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n12;
-- Table-Non-Cascade CHANGE COLUMNS ...
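-- Converting the integer types into the string group renders each number as
-- text; the CHAR(5)/VARCHAR(5) targets are the "trunc" cases, where the
-- rendered text is then cut to maxLength. A minimal sketch on a hypothetical
-- table (not part of this test):
create table int_str_sketch(c1 int);
insert into table int_str_sketch values (1234567);
alter table int_str_sketch replace columns (c1 varchar(5));
select c1 from int_str_sketch;  -- expect '12345'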
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_multi_ints_string_group_n12 replace columns (insert_num int,
c1 STRING, c2 STRING, c3 STRING, c4 STRING,
c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50),
c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50),
c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
b STRING) ;
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n12 partition(part=1) VALUES (111,
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n12;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n12;
-drop table part_change_numeric_group_string_group_multi_ints_string_group;
+drop table part_change_numeric_group_string_group_multi_ints_string_group_n12;
@@ -182,39 +182,39 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group;
-- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
--
--
-CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n12(insert_num int,
c1 decimal(38,18), c2 float, c3 double,
c4 decimal(38,18), c5 float, c6 double,
c7 decimal(38,18), c8 float, c9 double,
c10 decimal(38,18), c11 float, c12 double,
c13 decimal(38,18), c14 float, c15 double,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_floating_string_group_n12 partition(part=1) SELECT insert_num,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n42;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n12;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_floating_string_group_n12 replace columns (insert_num int,
c1 STRING, c2 STRING, c3 STRING,
c4 CHAR(50), c5 CHAR(50), c6 CHAR(50),
c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50),
c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
b STRING);
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_floating_string_group_n12 partition(part=1) VALUES (111,
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n12;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n12;
-drop table part_change_numeric_group_string_group_floating_string_group;
+drop table part_change_numeric_group_string_group_floating_string_group_n12;
@@ -226,37 +226,37 @@ drop table part_change_numeric_group_string_group_floating_string_group;
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
-- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
--
-CREATE TABLE part_change_string_group_string_group_string(insert_num int,
+CREATE TABLE part_change_string_group_string_group_string_n12(insert_num int,
c1 string, c2 string, c3 string, c4 string,
c5 CHAR(50), c6 CHAR(50), c7 CHAR(50),
c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50),
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num,
+insert into table part_change_string_group_string_group_string_n12 partition(part=1) SELECT insert_num,
string2, string2, string2, string2,
string2, string2, string2,
string2, string2, string2,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n42;
-select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n12;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_string_group_string replace columns (insert_num int,
+alter table part_change_string_group_string_group_string_n12 replace columns (insert_num int,
c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
c8 CHAR(50), c9 CHAR(9), c10 STRING,
b STRING) ;
-insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111,
+insert into table part_change_string_group_string_group_string_n12 partition(part=1) VALUES (111,
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n12;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n12;
-drop table part_change_string_group_string_group_string;
+drop table part_change_string_group_string_group_string_n12;
------------------------------------------------------------------------------------------
@@ -270,31 +270,31 @@ drop table part_change_string_group_string_group_string;
-- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
-- BIGINT, (DECIMAL, FLOAT, DOUBLE)
--
-CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12(insert_num int,
c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
c12 int, c13 int, c14 int, c15 int,
c16 bigint, c17 bigint, c18 bigint,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12 partition(part=1) SELECT insert_num,
tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
smallint1, smallint1, smallint1, smallint1, smallint1,
int1, int1, int1, int1,
bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n42;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12;
-- Table-Non-Cascade CHANGE COLUMNS ...
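-- Every change in this subsection widens within the numeric group (tinyint up
-- to smallint/int/bigint/decimal/float/double, and so on), so old values are
-- promoted on read. A minimal sketch on a hypothetical table (not part of
-- this test):
create table widen_sketch(c1 tinyint);
insert into table widen_sketch values (100);
alter table widen_sketch replace columns (c1 bigint);
select c1 from widen_sketch;  -- expect 100, now read as a BIGINT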
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12 replace columns (insert_num int,
c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
b STRING) ;
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12 partition(part=1) VALUES (111,
7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
80000, 90000000, 1234.5678, 9876.543, 789.321,
90000000, 1234.5678, 9876.543, 789.321,
@@ -302,11 +302,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa
'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12;
-drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n12;
@@ -315,26 +315,26 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
-- DECIMAL, (FLOAT, DOUBLE) and
-- FLOAT, (DOUBLE)
--
-CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n12(insert_num int,
c1 decimal(38,18), c2 decimal(38,18), c3 float,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n12 partition(part=1) SELECT insert_num,
decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n42;
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n12;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n12 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n12 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n12;
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n12;
-drop table part_change_lower_to_higher_numeric_group_decimal_to_float;
\ No newline at end of file
+drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n12;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q
index 617c76e48e..cf89ab195c 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table.q
@@ -16,9 +16,9 @@ set hive.llap.io.enabled=false;
-- FILE VARIATION: ORC, Vectorized, MapWork, Table
--
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n13(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
row format delimited fields terminated by '|'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n13;
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE ADD COLUMNS
@@ -27,51 +27,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
--
--
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n3(insert_num int, a INT, b STRING);
-insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n3 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n13;
-- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n3 add columns(c int);
-insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
+insert into table table_add_int_permute_select_n3 VALUES (111, 80000, 'new', 80000);
explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n3;
-- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n3;
+select insert_num,a,b,c from table_add_int_permute_select_n3;
+select insert_num,c from table_add_int_permute_select_n3;
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n3;
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
--
--
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_string_permute_select_n3(insert_num int, a INT, b STRING);
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n3 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n13;
-- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n3 add columns(c int, d string);
-insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
+insert into table table_add_int_string_permute_select_n3 VALUES (111, 80000, 'new', 80000, 'filler');
explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n3;
-- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_string_permute_select;
-select insert_num,a,c,d from table_add_int_string_permute_select;
-select insert_num,a,d from table_add_int_string_permute_select;
-select insert_num,c from table_add_int_string_permute_select;
-select insert_num,d from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n3;
+select insert_num,a,b,c from table_add_int_string_permute_select_n3;
+select insert_num,a,b,c,d from table_add_int_string_permute_select_n3;
+select insert_num,a,c,d from table_add_int_string_permute_select_n3;
+select insert_num,a,d from table_add_int_string_permute_select_n3;
+select insert_num,c from table_add_int_string_permute_select_n3;
+select insert_num,d from table_add_int_string_permute_select_n3;
-drop table table_add_int_string_permute_select;
+drop table table_add_int_string_permute_select_n3;
@@ -82,21 +82,21 @@ drop table table_add_int_string_permute_select;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
--
-CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
+CREATE TABLE table_change_string_group_double_n3(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
-insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table table_change_string_group_double_n3 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n13;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table table_change_string_group_double_n3 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
+insert into table table_change_string_group_double_n3 VALUES (111, 789.321, 789.321, 789.321, 'new');
explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n3;
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n3;
-drop table table_change_string_group_double;
+drop table table_change_string_group_double_n3;
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -105,18 +105,18 @@ drop table table_change_string_group_double;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
--
-CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
+CREATE TABLE table_change_date_group_string_group_date_group_n3(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
-insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table table_change_date_group_string_group_date_group_n3 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n13;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table table_change_date_group_string_group_date_group_n3 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table table_change_date_group_string_group_date_group_n3 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n3;
-drop table table_change_date_group_string_group_date_group;
+drop table table_change_date_group_string_group_date_group_n3;
@@ -131,39 +131,39 @@ drop table table_change_date_group_string_group_date_group;
-- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
--
--
-CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n3(insert_num int,
c1 tinyint, c2 smallint, c3 int, c4 bigint,
c5 tinyint, c6 smallint, c7 int, c8 bigint,
c9 tinyint, c10 smallint, c11 int, c12 bigint,
c13 tinyint, c14 smallint, c15 int, c16 bigint,
c17 tinyint, c18 smallint, c19 int, c20 bigint,
b STRING);
-insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n3 SELECT insert_num,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n13;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n3;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_multi_ints_string_group_n3 replace columns (insert_num int,
c1 STRING, c2 STRING, c3 STRING, c4 STRING,
c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50),
c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50),
c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
b STRING) ;
-insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n3 VALUES (111,
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n3;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n3;
-drop table table_change_numeric_group_string_group_multi_ints_string_group;
+drop table table_change_numeric_group_string_group_multi_ints_string_group_n3;
@@ -174,39 +174,39 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group;
-- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
--
--
-CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n3(insert_num int,
c1 decimal(38,18), c2 float, c3 double,
c4 decimal(38,18), c5 float, c6 double,
c7 decimal(38,18), c8 float, c9 double,
c10 decimal(38,18), c11 float, c12 double,
c13 decimal(38,18), c14 float, c15 double,
b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_floating_string_group_n3 SELECT insert_num,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n13;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n3;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_floating_string_group_n3 replace columns (insert_num int,
c1 STRING, c2 STRING, c3 STRING,
c4 CHAR(50), c5 CHAR(50), c6 CHAR(50),
c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50),
c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_floating_string_group_n3 VALUES (111,
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n3;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n3;
-drop table table_change_numeric_group_string_group_floating_string_group;
+drop table table_change_numeric_group_string_group_floating_string_group_n3;
------------------------------------------------------------------------------------------
@@ -217,34 +217,34 @@ drop table table_change_numeric_group_string_group_floating_string_group;
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
-- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
--
-CREATE TABLE table_change_string_group_string_group_string(insert_num int,
+CREATE TABLE table_change_string_group_string_group_string_n3(insert_num int,
c1 string, c2 string, c3 string, c4 string,
c5 CHAR(50), c6 CHAR(50), c7 CHAR(50),
c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50),
b STRING);
-insert into table table_change_string_group_string_group_string SELECT insert_num,
+insert into table table_change_string_group_string_group_string_n3 SELECT insert_num,
string2, string2, string2, string2,
string2, string2, string2,
string2, string2, string2,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n13;
-select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n3;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_string_group_string replace columns (insert_num int,
+alter table table_change_string_group_string_group_string_n3 replace columns (insert_num int,
c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
c8 CHAR(50), c9 CHAR(9), c10 STRING,
b STRING) ;
-insert into table table_change_string_group_string_group_string VALUES (111,
+insert into table table_change_string_group_string_group_string_n3 VALUES (111,
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n3;
-drop table table_change_string_group_string_group_string;
+drop table table_change_string_group_string_group_string_n3;
@@ -259,40 +259,40 @@ drop table table_change_string_group_string_group_string;
-- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
-- BIGINT, (DECIMAL, FLOAT, DOUBLE)
--
-CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3(insert_num int,
c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
c12 int, c13 int, c14 int, c15 int,
c16 bigint, c17 bigint, c18 bigint,
b STRING);
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3 SELECT insert_num,
tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
smallint1, smallint1, smallint1, smallint1, smallint1,
int1, int1, int1, int1,
bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n13;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3 replace columns (insert_num int,
c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3 VALUES (111,
7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
80000, 90000000, 1234.5678, 9876.543, 789.321,
90000000, 1234.5678, 9876.543, 789.321,
1234.5678, 9876.543, 789.321,
'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3;
-drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n3;
@@ -301,23 +301,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
-- DECIMAL, (FLOAT, DOUBLE) and
-- FLOAT, (DOUBLE)
--
-CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n3(insert_num int,
c1 decimal(38,18), c2 decimal(38,18), c3 float,
b STRING);
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n3 SELECT insert_num,
decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n13;
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n3;
-- Table-Non-Cascade CHANGE COLUMNS ...
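-- DECIMAL -> FLOAT/DOUBLE and FLOAT -> DOUBLE are the floating-point
-- widenings; exact decimal values can pick up binary rounding when read back
-- as float or double. A minimal sketch on a hypothetical table (not part of
-- this test):
create table dec_float_sketch(c1 decimal(38,18));
insert into table dec_float_sketch values (1234.5678);
alter table dec_float_sketch replace columns (c1 double);
select c1 from dec_float_sketch;  -- expect 1234.5678, now read as a DOUBLE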
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n3 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n3 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n3;
-drop table table_change_lower_to_higher_numeric_group_decimal_to_float;
+drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n3;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table_llap_io.q
index ac206f0716..230d57c761 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_table_llap_io.q
@@ -17,9 +17,9 @@ set hive.llap.io.encode.enabled=true;
-- FILE VARIATION: ORC, Vectorized, MapWork, Table
--
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n16(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
row format delimited fields terminated by '|'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n16;
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE ADD COLUMNS
@@ -28,51 +28,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
--
--
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n5(insert_num int, a INT, b STRING);
-insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n5 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n16;
-- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n5 add columns(c int);
-insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
+insert into table table_add_int_permute_select_n5 VALUES (111, 80000, 'new', 80000);
explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n5;
-- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n5;
+select insert_num,a,b,c from table_add_int_permute_select_n5;
+select insert_num,c from table_add_int_permute_select_n5;
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n5;
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
--
--
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_string_permute_select_n5(insert_num int, a INT, b STRING);
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n5 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n16;
-- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n5 add columns(c int, d string);
-insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
+insert into table table_add_int_string_permute_select_n5 VALUES (111, 80000, 'new', 80000, 'filler');
explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n5;
-- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_string_permute_select;
-select insert_num,a,c,d from table_add_int_string_permute_select;
-select insert_num,a,d from table_add_int_string_permute_select;
-select insert_num,c from table_add_int_string_permute_select;
-select insert_num,d from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n5;
+select insert_num,a,b,c from table_add_int_string_permute_select_n5;
+select insert_num,a,b,c,d from table_add_int_string_permute_select_n5;
+select insert_num,a,c,d from table_add_int_string_permute_select_n5;
+select insert_num,a,d from table_add_int_string_permute_select_n5;
+select insert_num,c from table_add_int_string_permute_select_n5;
+select insert_num,d from table_add_int_string_permute_select_n5;
-drop table table_add_int_string_permute_select;
+drop table table_add_int_string_permute_select_n5;
@@ -83,21 +83,21 @@ drop table table_add_int_string_permute_select;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
--
-CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
+CREATE TABLE table_change_string_group_double_n5(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
-insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table table_change_string_group_double_n5 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n16;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table table_change_string_group_double_n5 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
+insert into table table_change_string_group_double_n5 VALUES (111, 789.321, 789.321, 789.321, 'new');
explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n5;
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n5;
-drop table table_change_string_group_double;
+drop table table_change_string_group_double_n5;
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -106,18 +106,18 @@ drop table table_change_string_group_double;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
--
-CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
+CREATE TABLE table_change_date_group_string_group_date_group_n5(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
-insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table table_change_date_group_string_group_date_group_n5 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n16;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table table_change_date_group_string_group_date_group_n5 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table table_change_date_group_string_group_date_group_n5 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n5; -drop table table_change_date_group_string_group_date_group; +drop table table_change_date_group_string_group_date_group_n5; @@ -132,39 +132,39 @@ drop table table_change_date_group_string_group_date_group; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n5(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING); -insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n5 SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n16; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
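The CHAR(5)/VARCHAR(5) targets in the next REPLACE COLUMNS are deliberate: an integer that renders wider than five characters is truncated, so it no longer round-trips as a number. A standalone sketch (int_clip_demo is a hypothetical table, not part of the patch):

CREATE TABLE int_clip_demo (c1 INT) STORED AS TEXTFILE;
INSERT INTO int_clip_demo VALUES (90000000);
ALTER TABLE int_clip_demo REPLACE COLUMNS (c1 CHAR(5));
-- '90000000' is clipped to the first five characters: '90000'
SELECT c1 FROM int_clip_demo;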
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_multi_ints_string_group_n5 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n5 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n5; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n5; -drop table table_change_numeric_group_string_group_multi_ints_string_group; +drop table table_change_numeric_group_string_group_multi_ints_string_group_n5; @@ -175,39 +175,39 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n5(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n5 SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n16; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_floating_string_group_n5 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_floating_string_group_n5 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n5; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n5; -drop table table_change_numeric_group_string_group_floating_string_group; +drop table table_change_numeric_group_string_group_floating_string_group_n5; ------------------------------------------------------------------------------------------ @@ -218,34 +218,34 @@ drop table table_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE table_change_string_group_string_group_string(insert_num int, +CREATE TABLE table_change_string_group_string_group_string_n5(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING); -insert into table table_change_string_group_string_group_string SELECT insert_num, +insert into table table_change_string_group_string_group_string_n5 SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n16; -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
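The string-to-string conversions in the next REPLACE COLUMNS are lossless except where the target is narrower than the data; the CHAR(9)/VARCHAR(9) targets keep only the first nine characters. A minimal illustration (str_clip_demo is a made-up name):

CREATE TABLE str_clip_demo (c1 STRING) STORED AS TEXTFILE;
INSERT INTO str_clip_demo VALUES ('new fourteen 14');
ALTER TABLE str_clip_demo REPLACE COLUMNS (c1 VARCHAR(9));
-- only the first nine characters survive: 'new fourt'
SELECT c1 FROM str_clip_demo;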
-alter table table_change_string_group_string_group_string replace columns (insert_num int, +alter table table_change_string_group_string_group_string_n5 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table table_change_string_group_string_group_string VALUES (111, +insert into table table_change_string_group_string_group_string_n5 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n5; -drop table table_change_string_group_string_group_string; +drop table table_change_string_group_string_group_string_n5; @@ -260,40 +260,40 @@ drop table table_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING); -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n16; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
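Every conversion in the next REPLACE COLUMNS widens the type (tinyint to smallint/int/bigint/decimal/float/double, and so on up the groups), so stored values are preserved exactly rather than truncated. A compact sketch of that value-preserving promotion (widen_demo is hypothetical):

CREATE TABLE widen_demo (c1 TINYINT, c2 SMALLINT) STORED AS TEXTFILE;
INSERT INTO widen_demo VALUES (100, 32000);
ALTER TABLE widen_demo REPLACE COLUMNS (c1 BIGINT, c2 DECIMAL(38,18));
-- widening promotions are value-preserving: 100 and 32000 read back unchanged
SELECT c1, c2 FROM widen_demo;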
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, 1234.5678, 9876.543, 789.321, 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5; -drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5; @@ -302,23 +302,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n5(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING); -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n5 SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n16; -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n5; -- Table-Non-Cascade CHANGE COLUMNS ... 
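Going from DECIMAL(38,18) to FLOAT in the next REPLACE COLUMNS is also a lower-to-higher group move, but not an exact one: a 32-bit float cannot represent most decimal fractions, so values can shift in the last digits, while DOUBLE retains many more of them. A sketch, not part of the patch (dec_demo is a made-up name):

CREATE TABLE dec_demo (c1 DECIMAL(38,18), c2 DECIMAL(38,18)) STORED AS TEXTFILE;
INSERT INTO dec_demo VALUES (1234.5678, 1234.5678);
ALTER TABLE dec_demo REPLACE COLUMNS (c1 FLOAT, c2 DOUBLE);
-- c1 passes through 32-bit float precision and may read back as 1234.5677...;
-- c2 (double) preserves this particular value as 1234.5678
SELECT c1, c2 FROM dec_demo;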
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n5 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n5 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n5; -drop table table_change_lower_to_higher_numeric_group_decimal_to_float; +drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n5; diff --git a/ql/src/test/queries/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q b/ql/src/test/queries/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q index 22a455c8bc..a8a16d2966 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q +++ b/ql/src/test/queries/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q @@ -4,93 +4,93 @@ set parquet.enable.dictionary=false; -- SORT_QUERY_RESULTS -drop table test_alter; -drop table test_alter2; -drop table test_alter3; +drop table test_alter_n0; +drop table test_alter2_n0; +drop table test_alter3_n0; -create table test_alter (id string) stored as parquet; -insert into test_alter values ('1'), ('2'), ('3'); -select * from test_alter; +create table test_alter_n0 (id string) stored as parquet; +insert into test_alter_n0 values ('1'), ('2'), ('3'); +select * from test_alter_n0; -- add new column -> empty col values should return NULL -alter table test_alter add columns (newCol string); -select * from test_alter; +alter table test_alter_n0 add columns (newCol string); +select * from test_alter_n0; -- insert data into new column -> New data should be returned -insert into test_alter values ('4', '100'); -select * from test_alter; +insert into test_alter_n0 values ('4', '100'); +select * from test_alter_n0; -- remove the newly added column -- this works in vectorized execution -alter table test_alter replace columns (id string); -select * from test_alter; +alter table test_alter_n0 replace columns (id string); +select * from test_alter_n0; -- add column using replace column syntax -alter table test_alter replace columns (id string, id2 string); +alter table test_alter_n0 replace columns (id string, id2 string); -- this surprisingly doesn't return the 100 added to 4th row above -select * from test_alter; -insert into test_alter values ('5', '100'); -select * from test_alter; +select * from test_alter_n0; +insert into test_alter_n0 values ('5', '100'); +select * from test_alter_n0; -- use the same column name and datatype -alter table test_alter replace columns (id string, id2 string); -select * from test_alter; +alter table test_alter_n0 replace columns (id string, id2 string); +select * from test_alter_n0; -- change string to char -alter table test_alter replace columns (id char(10), id2 string); -select * from test_alter; +alter table test_alter_n0 replace columns (id char(10), id2 string); +select * from test_alter_n0; -- change string to varchar -alter table test_alter replace columns (id string, id2 string); -alter table test_alter replace columns (id 
varchar(10), id2 string); -select * from test_alter; +alter table test_alter_n0 replace columns (id string, id2 string); +alter table test_alter_n0 replace columns (id varchar(10), id2 string); +select * from test_alter_n0; -- change columntype and column name -alter table test_alter replace columns (id string, id2 string); -alter table test_alter replace columns (idv varchar(10), id2 string); -select * from test_alter; +alter table test_alter_n0 replace columns (id string, id2 string); +alter table test_alter_n0 replace columns (idv varchar(10), id2 string); +select * from test_alter_n0; -- test int to long type conversion -create table test_alter2 (id int) stored as parquet; -insert into test_alter2 values (1); -alter table test_alter2 replace columns (id bigint); -select * from test_alter2; +create table test_alter2_n0 (id int) stored as parquet; +insert into test_alter2_n0 values (1); +alter table test_alter2_n0 replace columns (id bigint); +select * from test_alter2_n0; -- test float to double type conversion -drop table test_alter2; -create table test_alter2 (id float) stored as parquet; -insert into test_alter2 values (1.5); -alter table test_alter2 replace columns (id double); -select * from test_alter2; - -drop table test_alter2; -create table test_alter2 (ts timestamp) stored as parquet; -insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); -select * from test_alter2; -alter table test_alter2 replace columns (ts string); -select * from test_alter2; - -drop table test_alter2; -create table test_alter2 (ts timestamp) stored as parquet; -insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); -select * from test_alter2; -alter table test_alter2 replace columns (ts varchar(19)); +drop table test_alter2_n0; +create table test_alter2_n0 (id float) stored as parquet; +insert into test_alter2_n0 values (1.5); +alter table test_alter2_n0 replace columns (id double); +select * from test_alter2_n0; + +drop table test_alter2_n0; +create table test_alter2_n0 (ts timestamp) stored as parquet; +insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2_n0; +alter table test_alter2_n0 replace columns (ts string); +select * from test_alter2_n0; + +drop table test_alter2_n0; +create table test_alter2_n0 (ts timestamp) stored as parquet; +insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2_n0; +alter table test_alter2_n0 replace columns (ts varchar(19)); -- this should truncate the microseconds -select * from test_alter2; +select * from test_alter2_n0; -drop table test_alter2; -create table test_alter2 (ts timestamp) stored as parquet; -insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); -select * from test_alter2; -alter table test_alter2 replace columns (ts char(25)); -select * from test_alter2; +drop table test_alter2_n0; +create table test_alter2_n0 (ts timestamp) stored as parquet; +insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456'); +select * from test_alter2_n0; +alter table test_alter2_n0 replace columns (ts char(25)); +select * from test_alter2_n0; -- test integer types upconversion -create table 
test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet; -insert into test_alter3 values (10, 20, 30, 40); -alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4)); +create table test_alter3_n0 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet; +insert into test_alter3_n0 values (10, 20, 30, 40); +alter table test_alter3_n0 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4)); -- this fails mostly due to bigint to decimal --- select * from test_alter3; -select id1, id2, id3 from test_alter3; +-- select * from test_alter3_n0; +select id1, id2, id3 from test_alter3_n0; diff --git a/ql/src/test/queries/clientpositive/schema_evol_stats.q b/ql/src/test/queries/clientpositive/schema_evol_stats.q index 6a5688a216..d608267e15 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_stats.q +++ b/ql/src/test/queries/clientpositive/schema_evol_stats.q @@ -3,50 +3,50 @@ set hive.mapred.mode=nonstrict; SET hive.exec.schema.evolution=true; set hive.llap.io.enabled=false; -CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE; +CREATE TABLE partitioned1_n0(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE; -insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); +insert into table partitioned1_n0 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); -- Table-Non-Cascade ADD COLUMNS ... -alter table partitioned1 add columns(c int, d string); +alter table partitioned1_n0 add columns(c int, d string); -insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', NULL, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); +insert into table partitioned1_n0 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', NULL, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); -analyze table partitioned1 compute statistics for columns; +analyze table partitioned1_n0 compute statistics for columns; -desc formatted partitioned1; +desc formatted partitioned1_n0; -desc formatted partitioned1 PARTITION(part=1); +desc formatted partitioned1_n0 PARTITION(part=1); -desc formatted partitioned1 PARTITION(part=2); +desc formatted partitioned1_n0 PARTITION(part=2); set hive.compute.query.using.stats=true; -explain select count(c) from partitioned1; +explain select count(c) from partitioned1_n0; -select count(c) from partitioned1; +select count(c) from partitioned1_n0; -drop table partitioned1; +drop table partitioned1_n0; -CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC; +CREATE TABLE partitioned1_n0(a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC; -insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); +insert into table partitioned1_n0 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original'); -- Table-Non-Cascade ADD COLUMNS ... 
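As in the TEXTFILE half above, the ADD COLUMNS below runs without CASCADE: only the table-level schema (and partitions created afterwards) pick up c and d, while partition part=1 keeps the schema it was written with and reads the new columns as NULL; the column statistics gathered afterwards let count(c) be answered from metadata alone. A condensed sketch of that flow (part_stats_demo is a hypothetical table):

CREATE TABLE part_stats_demo (a INT) PARTITIONED BY (part INT) STORED AS TEXTFILE;
INSERT INTO part_stats_demo PARTITION (part=1) VALUES (1), (2);
ALTER TABLE part_stats_demo ADD COLUMNS (c INT);   -- no CASCADE: part=1 keeps its old schema
INSERT INTO part_stats_demo PARTITION (part=2) VALUES (3, 30);
SELECT part, c FROM part_stats_demo;               -- part=1 rows read c as NULL
ANALYZE TABLE part_stats_demo COMPUTE STATISTICS FOR COLUMNS;
SET hive.compute.query.using.stats=true;
EXPLAIN SELECT COUNT(c) FROM part_stats_demo;      -- answerable from column stats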
-alter table partitioned1 add columns(c int, d string); +alter table partitioned1_n0 add columns(c int, d string); -insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', NULL, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); +insert into table partitioned1_n0 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', NULL, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty'); -analyze table partitioned1 compute statistics for columns; +analyze table partitioned1_n0 compute statistics for columns; -desc formatted partitioned1; +desc formatted partitioned1_n0; -desc formatted partitioned1 PARTITION(part=1); +desc formatted partitioned1_n0 PARTITION(part=1); -desc formatted partitioned1 PARTITION(part=2); +desc formatted partitioned1_n0 PARTITION(part=2); set hive.compute.query.using.stats=true; -explain select count(c) from partitioned1; +explain select count(c) from partitioned1_n0; -select count(c) from partitioned1; +select count(c) from partitioned1_n0; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q index 159f326de3..dbc3a3318e 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q @@ -18,9 +18,9 @@ set hive.llap.io.enabled=false; -- FILE VARIATION: TEXTFILE, Non-Vectorized, MapWork, Partitioned -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n27(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n27; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -29,51 +29,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_permute_select_n8(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_permute_select_n8 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... 
-alter table part_add_int_permute_select add columns(c int); +alter table part_add_int_permute_select_n8 add columns(c int); -insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +insert into table part_add_int_permute_select_n8 partition(part=1) VALUES (2, 2222, 'new', 3333); explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n8; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_permute_select; -select insert_num,part,a,b,c from part_add_int_permute_select; -select insert_num,part,c from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n8; +select insert_num,part,a,b,c from part_add_int_permute_select_n8; +select insert_num,part,c from part_add_int_permute_select_n8; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n8; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_string_permute_select_n8(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n8 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... -alter table part_add_int_string_permute_select add columns(c int, d string); +alter table part_add_int_string_permute_select_n8 add columns(c int, d string); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +insert into table part_add_int_string_permute_select_n8 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n8; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_string_permute_select; -select insert_num,part,a,b,c from part_add_int_string_permute_select; -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; -select insert_num,part,a,c,d from part_add_int_string_permute_select; -select insert_num,part,a,d from part_add_int_string_permute_select; -select insert_num,part,c from part_add_int_string_permute_select; -select insert_num,part,d from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n8; +select insert_num,part,a,b,c from part_add_int_string_permute_select_n8; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n8; +select insert_num,part,a,c,d from part_add_int_string_permute_select_n8; +select insert_num,part,a,d from part_add_int_string_permute_select_n8; +select insert_num,part,c from part_add_int_string_permute_select_n8; +select insert_num,part,d from part_add_int_string_permute_select_n8; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n8; @@ -84,21 +84,21 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED 
BY(part INT); +CREATE TABLE part_change_string_group_double_n8(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n8 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n27; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table part_change_string_group_double_n8 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +insert into table part_change_string_group_double_n8 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n27 WHERE insert_num = 111; explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n8; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n8; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n8; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -107,21 +107,21 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n8(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n8 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n27; -- Table-Non-Cascade CHANGE COLUMNS ... 
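The partitioned variants exercise one extra mechanism: each partition records the column types it was created with in its own descriptor, and a table-level REPLACE COLUMNS like the one below only changes how those stored bytes are interpreted at read time; no data files are rewritten. A minimal sketch of that (part_read_demo is a made-up name):

CREATE TABLE part_read_demo (c1 DATE) PARTITIONED BY (part INT) STORED AS TEXTFILE;
INSERT INTO part_read_demo PARTITION (part=1) VALUES ('2013-03-01');
ALTER TABLE part_read_demo REPLACE COLUMNS (c1 STRING);
-- part=1 still records c1 as DATE; the reader converts DATE -> STRING on the fly
SELECT part, c1 FROM part_read_demo;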
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table part_change_date_group_string_group_date_timestamp_n8 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table part_change_date_group_string_group_date_timestamp_n8 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n8; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n8; @@ -137,39 +137,39 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n8(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n8 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n27; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_multi_ints_string_group_n8 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n8 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n8; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n8; @@ -180,39 +180,39 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n8(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n8 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n27; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n8 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n8 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n8; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n8; @@ -224,37 +224,37 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n8(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n8 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n27; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_string_group_string_group_string replace columns (insert_num int, +alter table part_change_string_group_string_group_string_n8 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, +insert into table part_change_string_group_string_group_string_n8 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n8; -drop table part_change_string_group_string_group_string; +drop table part_change_string_group_string_group_string_n8; ------------------------------------------------------------------------------------------ @@ -268,31 +268,31 @@ drop table part_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8 partition(part=1) SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n27; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8 partition(part=1) VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, @@ -300,11 +300,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8; -drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8; @@ -313,26 +313,26 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n8(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n8 partition(part=1) SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n27; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n8 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n8 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n8; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n8; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n8; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_complex.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_complex.q index c444246d3a..1323c250e0 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_complex.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_complex.q @@ -21,103 +21,103 @@ set hive.llap.io.enabled=false; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT -- -CREATE TABLE part_change_various_various_struct1(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_struct1_n3(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); -CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_a_txt_n3(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n3; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt; +insert into table part_change_various_various_struct1_n3 partition(part=1) select * from complex_struct1_a_txt_n3; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n3; -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING); +alter table part_change_various_various_struct1_n3 replace columns (insert_num int, s1 STRUCT, b STRING); -CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_b_txt_n3(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n3; -insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt; +insert into table part_change_various_various_struct1_n3 partition(part=2) select * from complex_struct1_b_txt_n3; -CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_c_txt_n3(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n3; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt; +insert into table part_change_various_various_struct1_n3 partition(part=1) select * from complex_struct1_c_txt_n3; explain vectorization detail -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n3; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n3; -drop table part_change_various_various_struct1; +drop table part_change_various_various_struct1_n3; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT -- -CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_various_various_struct2_n3(insert_num int, b STRING) PARTITIONED BY(part INT); -insert into table part_add_various_various_struct2 partition(part=1) +insert into table part_add_various_various_struct2_n3 partition(part=1) values(1, 'original'), (2, 'original'); -select insert_num,part,b from part_add_various_various_struct2; +select insert_num,part,b from part_add_various_various_struct2_n3; -- Table-Non-Cascade ADD COLUMN ... 
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT); +alter table part_add_various_various_struct2_n3 ADD columns (s2 STRUCT); -CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_a_txt_n3(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n3; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt; +insert into table part_add_various_various_struct2_n3 partition(part=1) select * from complex_struct2_a_txt_n3; -CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_b_txt_n3(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n3; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt; +insert into table part_add_various_various_struct2_n3 partition(part=2) select * from complex_struct2_b_txt_n3; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
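The REPLACE COLUMNS below grows the struct column itself, and rows written before the change read the added fields as NULL. A generic sketch of that pattern with invented field names, since the STRUCT field lists are not reproduced here (struct_evolve_demo and struct_dual are made up; struct_dual mirrors the one-row helper-table trick these tests use for constant SELECTs):

CREATE TABLE struct_dual (a INT);
INSERT INTO struct_dual VALUES (1);
CREATE TABLE struct_evolve_demo (insert_num INT, s2 STRUCT<c1:BOOLEAN, c2:INT>) STORED AS TEXTFILE;
INSERT INTO struct_evolve_demo SELECT 1, NAMED_STRUCT('c1', true, 'c2', 7) FROM struct_dual;
ALTER TABLE struct_evolve_demo REPLACE COLUMNS (insert_num INT, s2 STRUCT<c1:BOOLEAN, c2:INT, c3:STRING>);
-- the row written before the REPLACE reads the new field as NULL
SELECT insert_num, s2.c3 FROM struct_evolve_demo;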
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT); +alter table part_add_various_various_struct2_n3 REPLACE columns (insert_num int, b STRING, s2 STRUCT); -CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_c_txt_n3(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n3; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt; +insert into table part_add_various_various_struct2_n3 partition(part=2) select * from complex_struct2_c_txt_n3; -CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_d_txt_n3(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n3; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt; +insert into table part_add_various_various_struct2_n3 partition(part=1) select * from complex_struct2_d_txt_n3; explain vectorization detail -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n3; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n3; -drop table part_add_various_various_struct2; +drop table part_add_various_various_struct2_n3; @@ -125,40 +125,40 @@ drop table part_add_various_various_struct2; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns -- -CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); +CREATE TABLE part_add_to_various_various_struct4_n3(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); -CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_a_txt_n3(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n3; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt; +insert into table part_add_to_various_various_struct4_n3 partition(part=1) select * from complex_struct4_a_txt_n3; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT); +alter table part_add_to_various_various_struct4_n3 replace columns (insert_num int, b STRING, s3 STRUCT); -CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_b_txt_n3(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n3; -insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt; +insert into table part_add_to_various_various_struct4_n3 partition(part=2) select * from complex_struct4_b_txt_n3; -CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_c_txt_n3(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n3; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt; +insert into table part_add_to_various_various_struct4_n3 partition(part=1) select * from complex_struct4_c_txt_n3; explain vectorization detail -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n3; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n3; -drop table part_add_to_various_various_struct4; +drop table part_add_to_various_various_struct4_n3; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_complex_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_complex_llap_io.q index 0bc9f83f30..338cb5d162 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_complex_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_complex_llap_io.q @@ -22,103 +22,103 @@ set hive.llap.io.encode.enabled=true; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT -- -CREATE TABLE part_change_various_various_struct1(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_struct1_n1(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); -CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_a_txt_n1(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n1; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt; +insert into table part_change_various_various_struct1_n1
partition(part=1) select * from complex_struct1_a_txt_n1; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n1; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING); +alter table part_change_various_various_struct1_n1 replace columns (insert_num int, s1 STRUCT, b STRING); -CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_b_txt_n1(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n1; -insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt; +insert into table part_change_various_various_struct1_n1 partition(part=2) select * from complex_struct1_b_txt_n1; -CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_c_txt_n1(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n1; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt; +insert into table part_change_various_various_struct1_n1 partition(part=1) select * from complex_struct1_c_txt_n1; explain vectorization detail -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n1; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n1; -drop table part_change_various_various_struct1; +drop table part_change_various_various_struct1_n1; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT -- -CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_various_various_struct2_n1(insert_num int, b STRING) PARTITIONED BY(part INT); -insert into table part_add_various_various_struct2 partition(part=1) +insert into table part_add_various_various_struct2_n1 partition(part=1) values(1, 'original'), (2, 'original'); -select insert_num,part,b from part_add_various_various_struct2; +select insert_num,part,b from part_add_various_various_struct2_n1; -- Table-Non-Cascade ADD COLUMN ... 
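Every data load in these files follows the same staging pattern, shown below with placeholder names: a textfile table matching the input layout is created, the local file is loaded into it verbatim, and its rows are then copied into one partition of the table under test. The indirection is needed because LOAD DATA only moves files and does not go through the write path of the target table, while INSERT ... SELECT does.

CREATE TABLE staging_txt(insert_num int, b STRING)  -- placeholder staging table
row format delimited fields terminated by '|' stored as textfile;
load data local inpath '../../data/files/example.txt' overwrite into table staging_txt;  -- illustrative path
insert into table target_part partition(part=1) select * from staging_txt;  -- target_part is a placeholder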
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT); +alter table part_add_various_various_struct2_n1 ADD columns (s2 STRUCT); -CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_a_txt_n1(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n1; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt; +insert into table part_add_various_various_struct2_n1 partition(part=1) select * from complex_struct2_a_txt_n1; -CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_b_txt_n1(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n1; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt; +insert into table part_add_various_various_struct2_n1 partition(part=2) select * from complex_struct2_b_txt_n1; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT); +alter table part_add_various_various_struct2_n1 REPLACE columns (insert_num int, b STRING, s2 STRUCT); -CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_c_txt_n1(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n1; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt; +insert into table part_add_various_various_struct2_n1 partition(part=2) select * from complex_struct2_c_txt_n1; -CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_d_txt_n1(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n1; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt; +insert into table part_add_various_various_struct2_n1 partition(part=1) select * from complex_struct2_d_txt_n1; explain vectorization detail -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n1; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n1; -drop table part_add_various_various_struct2; +drop table part_add_various_various_struct2_n1; @@ -126,40 +126,40 @@ drop table part_add_various_various_struct2; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns -- -CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); +CREATE TABLE part_add_to_various_various_struct4_n1(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); -CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_a_txt_n1(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n1; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt; +insert into table part_add_to_various_various_struct4_n1 partition(part=1) select * from complex_struct4_a_txt_n1; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT); +alter table part_add_to_various_various_struct4_n1 replace columns (insert_num int, b STRING, s3 STRUCT); -CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_b_txt_n1(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n1; -insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt; +insert into table part_add_to_various_various_struct4_n1 partition(part=2) select * from complex_struct4_b_txt_n1; -CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_c_txt_n1(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n1; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt; +insert into table part_add_to_various_various_struct4_n1 partition(part=1) select * from complex_struct4_c_txt_n1; explain vectorization detail -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n1; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n1; -drop table part_add_to_various_various_struct4; +drop table part_add_to_various_various_struct4_n1; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive.q index 5d91055459..bb7d37e42e 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive.q @@ -22,13 +22,13 @@ set hive.llap.io.enabled=false; -- -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n26(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated 
by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n26; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n7(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n7; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: @@ -38,7 +38,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_ -- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT –2147483648 to 2147483647 and -- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807 -- -CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, +CREATE TABLE part_change_various_various_boolean_to_bigint_n2(insert_num int, c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP, c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP, c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP, @@ -46,18 +46,18 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n2 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, boolean_str, timestamp1, boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1, boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1, boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1, boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, 
bigint_str, bigint_str, bigint_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n26; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n2; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int, +alter table part_change_various_various_boolean_to_bigint_n2 replace columns (insert_num int, c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN, c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT, c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT, @@ -65,20 +65,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT, b STRING); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n2 partition(part=1) SELECT insert_num, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, - 'new' FROM schema_evolution_data; + 'new' FROM schema_evolution_data_n26; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n2; -drop table 
part_change_various_various_boolean_to_bigint; +drop table part_change_various_various_boolean_to_bigint_n2; @@ -88,39 +88,39 @@ drop table part_change_various_various_boolean_to_bigint; -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and -- -CREATE TABLE part_change_various_various_decimal_to_double(insert_num int, +CREATE TABLE part_change_various_various_decimal_to_double_n2(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP, c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP, c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n2 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n26; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n2; -- Table-Non-Cascade CHANGE COLUMNS ... 
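The decimal_to_double tests that follow rely on Hive coercing stored values to the newly declared column type at read time, returning NULL where a value cannot be converted. Compactly, with a hypothetical table:

CREATE TABLE demo_conv(c1 STRING) PARTITIONED BY(part INT);  -- hypothetical
insert into table demo_conv partition(part=1) values ('123.25'), ('not a number');
alter table demo_conv replace columns (c1 DECIMAL(10,2));
select c1 from demo_conv;  -- returns 123.25 and NULL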
-alter table part_change_various_various_decimal_to_double replace columns (insert_num int, +alter table part_change_various_various_decimal_to_double_n2 replace columns (insert_num int, c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18), c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT, c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE, b STRING); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n2 partition(part=1) SELECT insert_num, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, - 'new' FROM schema_evolution_data_2 WHERE insert_num=111; + 'new' FROM schema_evolution_data_2_n7 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n2; -drop table part_change_various_various_decimal_to_double; +drop table part_change_various_various_decimal_to_double_n2; @@ -128,80 +128,80 @@ drop table part_change_various_various_decimal_to_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP -- -CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_timestamp_n2(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_timestamp_n2 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n26; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n2; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); +alter table part_change_various_various_timestamp_n2 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_timestamp_n2 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n7 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n2; -drop table part_change_various_various_timestamp; +drop table part_change_various_various_timestamp_n2; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP --> DATE -- -CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_date_n2(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_date_n2 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n26; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n2; -- Table-Non-Cascade CHANGE COLUMNS ... 
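The date subsection that follows checks the STRING/CHAR/VARCHAR/TIMESTAMP to DATE read-time conversions; a timestamp simply drops its time-of-day part. A hypothetical two-column version:

CREATE TABLE demo_dates(c1 STRING, c2 TIMESTAMP) PARTITIONED BY(part INT);  -- hypothetical
insert into table demo_dates partition(part=1) values ('2007-02-09', '2007-02-09 05:17:29.3687');
alter table demo_dates replace columns (c1 DATE, c2 DATE);
select c1, c2 from demo_dates;  -- both read back as 2007-02-09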
-alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); +alter table part_change_various_various_date_n2 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_date_n2 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n7 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n2; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n2; -drop table part_change_various_various_date; +drop table part_change_various_various_date_n2; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale -- -CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_same_type_different_params_n2(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); -CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) +CREATE TABLE same_type1_a_txt_n2(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n2; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt; +insert into table part_change_same_type_different_params_n2 partition(part=1) select * from same_type1_a_txt_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n2; -- Table-Non-Cascade CHANGE COLUMNS ... 
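In the same-type cases below only the length or precision/scale parameters change, and the stored values are re-checked against the new parameters on read, so a narrower VARCHAR truncates and a reduced DECIMAL scale rounds. A hypothetical sketch:

CREATE TABLE demo_params(c1 VARCHAR(25), c5 DECIMAL(12,4)) PARTITIONED BY(part INT);  -- hypothetical
insert into table demo_params partition(part=1) values ('abcdefghijklmnopqrst', 1234.5678);
alter table demo_params replace columns (c1 VARCHAR(15), c5 DECIMAL(10,2));
select c1, c5 from demo_params;  -- c1 truncated to 15 characters, c5 rounded to 1234.57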
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); +alter table part_change_same_type_different_params_n2 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); -CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_b_txt_n2(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n2; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt; +insert into table part_change_same_type_different_params_n2 partition(part=1) select * from same_type1_b_txt_n2; -CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_c_txt_n2(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n2; -insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt; +insert into table part_change_same_type_different_params_n2 partition(part=2) select * from same_type1_c_txt_n2; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n2; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n2; -drop table part_change_same_type_different_params; +drop table part_change_same_type_different_params_n2; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q index 6298c85396..487829b901 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q @@ -23,13 +23,13 @@ set hive.llap.io.encode.enabled=true; -- -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n8(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n8; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n2; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: @@ -39,7 +39,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_ -- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT –2147483648 to 2147483647 and -- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807 -- -CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, +CREATE TABLE part_change_various_various_boolean_to_bigint_n1(insert_num int, c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP, c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP, c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP, @@ -47,21 +47,21 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n1 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, float1, double1, 
decimal1, boolean_str, timestamp1, boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1, boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1, boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1, boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str, bigint_str, bigint_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n8; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n1; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int, +alter table part_change_various_various_boolean_to_bigint_n1 replace columns (insert_num int, c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN, c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT, c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT, @@ -69,20 +69,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT, b STRING); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n1 partition(part=1) SELECT insert_num, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, - 'new' FROM schema_evolution_data; + 'new' FROM schema_evolution_data_n8; explain vectorization detail -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n1; -drop table part_change_various_various_boolean_to_bigint; +drop table part_change_various_various_boolean_to_bigint_n1; @@ -92,42 +92,42 @@ drop table part_change_various_various_boolean_to_bigint; -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and -- -CREATE TABLE part_change_various_various_decimal_to_double(insert_num int, +CREATE TABLE part_change_various_various_decimal_to_double_n1(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP, c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP, c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n1 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n8; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n1; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_decimal_to_double replace columns (insert_num int, +alter table part_change_various_various_decimal_to_double_n1 replace columns (insert_num int, c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18), c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT, c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE, b STRING); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n1 partition(part=1) SELECT insert_num, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, - 'new' FROM schema_evolution_data_2 WHERE insert_num=111; + 'new' FROM schema_evolution_data_2_n2 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n1; -drop table part_change_various_various_decimal_to_double; +drop table part_change_various_various_decimal_to_double_n1; @@ -135,89 +135,89 @@ drop table part_change_various_various_decimal_to_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP -- -CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_timestamp_n1(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data; +insert into table 
part_change_various_various_timestamp_n1 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n8; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n1; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); +alter table part_change_various_various_timestamp_n1 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_timestamp_n1 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n2 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n1; -drop table part_change_various_various_timestamp; +drop table part_change_various_various_timestamp_n1; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP --> DATE -- -CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_date_n1(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_date_n1 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n8; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n1; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from 
part_change_various_various_date_n1; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); +alter table part_change_various_various_date_n1 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_date_n1 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n2 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n1; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n1; -drop table part_change_various_various_date; +drop table part_change_various_various_date_n1; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale -- -CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_same_type_different_params_n1(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); -CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) +CREATE TABLE same_type1_a_txt_n1(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n1; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt; +insert into table part_change_same_type_different_params_n1 partition(part=1) select * from same_type1_a_txt_n1; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); +alter table part_change_same_type_different_params_n1 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); -CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_b_txt_n1(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n1; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt; +insert into table part_change_same_type_different_params_n1 partition(part=1) select * from same_type1_b_txt_n1; -CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_c_txt_n1(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n1; -insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt; +insert into table part_change_same_type_different_params_n1 partition(part=2) select * from same_type1_c_txt_n1; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n1; -drop table part_change_same_type_different_params; +drop table part_change_same_type_different_params_n1; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_llap_io.q index 821f8cfe17..9b14b2131b 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_llap_io.q @@ -19,9 +19,9 @@ set hive.llap.io.encode.enabled=true; -- FILE VARIATION: TEXTFILE, Non-Vectorized, MapWork, Partitioned -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n6(insert_num int, boolean1 
boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n6; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -30,51 +30,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_permute_select_n1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_permute_select_n1 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... -alter table part_add_int_permute_select add columns(c int); +alter table part_add_int_permute_select_n1 add columns(c int); -insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +insert into table part_add_int_permute_select_n1 partition(part=1) VALUES (2, 2222, 'new', 3333); explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n1; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_permute_select; -select insert_num,part,a,b,c from part_add_int_permute_select; -select insert_num,part,c from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n1; +select insert_num,part,a,b,c from part_add_int_permute_select_n1; +select insert_num,part,c from part_add_int_permute_select_n1; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n1; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_string_permute_select_n1(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n1 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... 
-alter table part_add_int_string_permute_select add columns(c int, d string); +alter table part_add_int_string_permute_select_n1 add columns(c int, d string); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +insert into table part_add_int_string_permute_select_n1 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n1; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_string_permute_select; -select insert_num,part,a,b,c from part_add_int_string_permute_select; -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; -select insert_num,part,a,c,d from part_add_int_string_permute_select; -select insert_num,part,a,d from part_add_int_string_permute_select; -select insert_num,part,c from part_add_int_string_permute_select; -select insert_num,part,d from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n1; +select insert_num,part,a,b,c from part_add_int_string_permute_select_n1; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n1; +select insert_num,part,a,c,d from part_add_int_string_permute_select_n1; +select insert_num,part,a,d from part_add_int_string_permute_select_n1; +select insert_num,part,c from part_add_int_string_permute_select_n1; +select insert_num,part,d from part_add_int_string_permute_select_n1; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n1; @@ -85,21 +85,21 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_string_group_double_n1(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n1 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table part_change_string_group_double_n1 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +insert into table part_change_string_group_double_n1 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n6 WHERE insert_num = 111; explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n1; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n1; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n1; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -108,21 +108,21 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n1(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n1 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n6; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table part_change_date_group_string_group_date_timestamp_n1 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table part_change_date_group_string_group_date_timestamp_n1 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n1; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n1; @@ -138,39 +138,39 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n1(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n1 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_multi_ints_string_group_n1 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n1 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n1; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n1; @@ -181,39 +181,39 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n1(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n1 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n1 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n1 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n1; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n1; @@ -225,37 +225,37 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n1(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n1 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n6; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_string_group_string_group_string replace columns (insert_num int, +alter table part_change_string_group_string_group_string_n1 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, +insert into table part_change_string_group_string_group_string_n1 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n1; -drop table part_change_string_group_string_group_string; +drop table part_change_string_group_string_group_string_n1; ------------------------------------------------------------------------------------------ @@ -269,31 +269,31 @@ drop table part_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1 partition(part=1) SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n6; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1 partition(part=1) VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, @@ -301,11 +301,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1; -drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1; @@ -314,26 +314,26 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n1(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n1 partition(part=1) SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n6; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n1; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n1 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n1 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n1; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n1; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n1; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table.q index ae0e001658..c940e0dbad 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table.q @@ -16,9 +16,9 @@ set hive.llap.io.enabled=false; -- FILE VARIATION: TEXTFILE, Non-Vectorized, MapWork, Table -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n32(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n32; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -27,51 +27,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. 
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING); +CREATE TABLE table_add_int_permute_select_n9(insert_num int, a INT, b STRING); -insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_permute_select_n9 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n32; -- Table-Non-Cascade ADD COLUMNS ... -alter table table_add_int_permute_select add columns(c int); +alter table table_add_int_permute_select_n9 add columns(c int); -insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000); +insert into table table_add_int_permute_select_n9 VALUES (111, 80000, 'new', 80000); explain vectorization detail -select insert_num,a,b from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n9; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_permute_select; -select insert_num,a,b,c from table_add_int_permute_select; -select insert_num,c from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n9; +select insert_num,a,b,c from table_add_int_permute_select_n9; +select insert_num,c from table_add_int_permute_select_n9; -drop table table_add_int_permute_select; +drop table table_add_int_permute_select_n9; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING); +CREATE TABLE table_add_int_string_permute_select_n9(insert_num int, a INT, b STRING); -insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_string_permute_select_n9 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n32; -- Table-Non-Cascade ADD COLUMNS ... 
-alter table table_add_int_string_permute_select add columns(c int, d string); +alter table table_add_int_string_permute_select_n9 add columns(c int, d string); -insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler'); +insert into table table_add_int_string_permute_select_n9 VALUES (111, 80000, 'new', 80000, 'filler'); explain vectorization detail -select insert_num,a,b from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n9; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_string_permute_select; -select insert_num,a,b,c from table_add_int_string_permute_select; -select insert_num,a,b,c,d from table_add_int_string_permute_select; -select insert_num,a,c,d from table_add_int_string_permute_select; -select insert_num,a,d from table_add_int_string_permute_select; -select insert_num,c from table_add_int_string_permute_select; -select insert_num,d from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n9; +select insert_num,a,b,c from table_add_int_string_permute_select_n9; +select insert_num,a,b,c,d from table_add_int_string_permute_select_n9; +select insert_num,a,c,d from table_add_int_string_permute_select_n9; +select insert_num,a,d from table_add_int_string_permute_select_n9; +select insert_num,c from table_add_int_string_permute_select_n9; +select insert_num,d from table_add_int_string_permute_select_n9; -drop table table_add_int_string_permute_select; +drop table table_add_int_string_permute_select_n9; @@ -82,21 +82,21 @@ drop table table_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING); +CREATE TABLE table_change_string_group_double_n9(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING); -insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table table_change_string_group_double_n9 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n32; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table table_change_string_group_double_n9 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new'); +insert into table table_change_string_group_double_n9 VALUES (111, 789.321, 789.321, 789.321, 'new'); explain vectorization detail -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n9; -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n9; -drop table table_change_string_group_double; +drop table table_change_string_group_double_n9; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -105,18 +105,18 @@ drop table table_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING); +CREATE TABLE table_change_date_group_string_group_date_group_n9(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING); -insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table table_change_date_group_string_group_date_group_n9 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n32; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table table_change_date_group_string_group_date_group_n9 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table table_change_date_group_string_group_date_group_n9 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n9; -drop table table_change_date_group_string_group_date_group; +drop table table_change_date_group_string_group_date_group_n9; @@ -131,39 +131,39 @@ drop table table_change_date_group_string_group_date_group; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n9(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING); -insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n9 SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n32; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n9; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_multi_ints_string_group_n9 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n9 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n9; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n9; -drop table table_change_numeric_group_string_group_multi_ints_string_group; +drop table table_change_numeric_group_string_group_multi_ints_string_group_n9; @@ -174,39 +174,39 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n9(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n9 SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n32; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n9; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_floating_string_group_n9 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_floating_string_group_n9 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n9; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n9; -drop table table_change_numeric_group_string_group_floating_string_group; +drop table table_change_numeric_group_string_group_floating_string_group_n9; ------------------------------------------------------------------------------------------ @@ -217,34 +217,34 @@ drop table table_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE table_change_string_group_string_group_string(insert_num int, +CREATE TABLE table_change_string_group_string_group_string_n9(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING); -insert into table table_change_string_group_string_group_string SELECT insert_num, +insert into table table_change_string_group_string_group_string_n9 SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n32; -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n9; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_string_group_string_group_string replace columns (insert_num int, +alter table table_change_string_group_string_group_string_n9 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table table_change_string_group_string_group_string VALUES (111, +insert into table table_change_string_group_string_group_string_n9 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n9; -drop table table_change_string_group_string_group_string; +drop table table_change_string_group_string_group_string_n9; @@ -259,40 +259,40 @@ drop table table_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING); -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n32; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, 1234.5678, 9876.543, 789.321, 'new'); -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9; -drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9; @@ -301,23 +301,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n9(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING); -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n9 SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n32; -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n9; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n9 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n9 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n9; -drop table table_change_lower_to_higher_numeric_group_decimal_to_float; +drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n9; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q index 5d5ea3837f..9cc3a8950b 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q @@ -17,9 +17,9 @@ set hive.llap.io.encode.enabled=true; -- FILE VARIATION: TEXTFILE, Non-Vectorized, MapWork, Table -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n10(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n10; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -28,51 +28,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING); +CREATE TABLE table_add_int_permute_select_n2(insert_num int, a INT, b STRING); -insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_permute_select_n2 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n10; -- Table-Non-Cascade ADD COLUMNS ... 
-alter table table_add_int_permute_select add columns(c int); +alter table table_add_int_permute_select_n2 add columns(c int); -insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000); +insert into table table_add_int_permute_select_n2 VALUES (111, 80000, 'new', 80000); explain vectorization detail -select insert_num,a,b from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n2; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_permute_select; -select insert_num,a,b,c from table_add_int_permute_select; -select insert_num,c from table_add_int_permute_select; +select insert_num,a,b from table_add_int_permute_select_n2; +select insert_num,a,b,c from table_add_int_permute_select_n2; +select insert_num,c from table_add_int_permute_select_n2; -drop table table_add_int_permute_select; +drop table table_add_int_permute_select_n2; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING); +CREATE TABLE table_add_int_string_permute_select_n2(insert_num int, a INT, b STRING); -insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data; +insert into table table_add_int_string_permute_select_n2 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n10; -- Table-Non-Cascade ADD COLUMNS ... -alter table table_add_int_string_permute_select add columns(c int, d string); +alter table table_add_int_string_permute_select_n2 add columns(c int, d string); -insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler'); +insert into table table_add_int_string_permute_select_n2 VALUES (111, 80000, 'new', 80000, 'filler'); explain vectorization detail -select insert_num,a,b from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n2; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,a,b from table_add_int_string_permute_select; -select insert_num,a,b,c from table_add_int_string_permute_select; -select insert_num,a,b,c,d from table_add_int_string_permute_select; -select insert_num,a,c,d from table_add_int_string_permute_select; -select insert_num,a,d from table_add_int_string_permute_select; -select insert_num,c from table_add_int_string_permute_select; -select insert_num,d from table_add_int_string_permute_select; +select insert_num,a,b from table_add_int_string_permute_select_n2; +select insert_num,a,b,c from table_add_int_string_permute_select_n2; +select insert_num,a,b,c,d from table_add_int_string_permute_select_n2; +select insert_num,a,c,d from table_add_int_string_permute_select_n2; +select insert_num,a,d from table_add_int_string_permute_select_n2; +select insert_num,c from table_add_int_string_permute_select_n2; +select insert_num,d from table_add_int_string_permute_select_n2; -drop table table_add_int_string_permute_select; +drop table table_add_int_string_permute_select_n2; @@ -83,21 +83,21 @@ drop table table_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING); +CREATE TABLE table_change_string_group_double_n2(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING); -insert into table 
table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table table_change_string_group_double_n2 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n10; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table table_change_string_group_double_n2 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new'); +insert into table table_change_string_group_double_n2 VALUES (111, 789.321, 789.321, 789.321, 'new'); explain vectorization detail -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n2; -select insert_num,c1,c2,c3,b from table_change_string_group_double; +select insert_num,c1,c2,c3,b from table_change_string_group_double_n2; -drop table table_change_string_group_double; +drop table table_change_string_group_double_n2; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -106,18 +106,18 @@ drop table table_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING); +CREATE TABLE table_change_date_group_string_group_date_group_n2(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING); -insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table table_change_date_group_string_group_date_group_n2 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n10; -- Table-Non-Cascade CHANGE COLUMNS ... 
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -106,18 +106,18 @@ drop table table_change_string_group_double;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
--
-CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
+CREATE TABLE table_change_date_group_string_group_date_group_n2(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
-insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table table_change_date_group_string_group_date_group_n2 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n10;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table table_change_date_group_string_group_date_group_n2 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table table_change_date_group_string_group_date_group_n2 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n2;
-drop table table_change_date_group_string_group_date_group;
+drop table table_change_date_group_string_group_date_group_n2;
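(Sketch for the date group, with a hypothetical table: DATE and TIMESTAMP values are rendered in their canonical text form, and the CHAR(15)/VARCHAR(15) targets truncate that rendering.)
CREATE TABLE t_dt2str(c1 date, c2 timestamp) STORED AS TEXTFILE;
INSERT INTO t_dt2str VALUES (CAST('2001-01-01' AS DATE), CAST('2001-01-01 01:02:03' AS TIMESTAMP));
ALTER TABLE t_dt2str REPLACE COLUMNS (c1 string, c2 varchar(15));
SELECT c1, c2 FROM t_dt2str;          -- expected: '2001-01-01' and '2001-01-01 01:0' (cut at 15 chars)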
@@ -132,39 +132,39 @@ drop table table_change_date_group_string_group_date_group;
-- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
--
--
-CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n2(insert_num int,
c1 tinyint, c2 smallint, c3 int, c4 bigint,
c5 tinyint, c6 smallint, c7 int, c8 bigint,
c9 tinyint, c10 smallint, c11 int, c12 bigint,
c13 tinyint, c14 smallint, c15 int, c16 bigint,
c17 tinyint, c18 smallint, c19 int, c20 bigint,
b STRING);
-insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n2 SELECT insert_num,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n10;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n2;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_multi_ints_string_group_n2 replace columns (insert_num int,
c1 STRING, c2 STRING, c3 STRING, c4 STRING,
c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50),
c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50),
c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
b STRING) ;
-insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n2 VALUES (111,
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n2;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n2;
-drop table table_change_numeric_group_string_group_multi_ints_string_group;
+drop table table_change_numeric_group_string_group_multi_ints_string_group_n2;
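(The CHAR(5)/VARCHAR(5) targets above check truncation of the rendered integer; roughly, with a hypothetical table:)
CREATE TABLE t_int2char(c1 int) STORED AS TEXTFILE;
INSERT INTO t_int2char VALUES (90000000);
ALTER TABLE t_int2char REPLACE COLUMNS (c1 char(5));
SELECT c1 FROM t_int2char;            -- expected: '90000', digits past length 5 dropped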
@@ -175,39 +175,39 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group;
-- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
--
--
-CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n2(insert_num int,
c1 decimal(38,18), c2 float, c3 double,
c4 decimal(38,18), c5 float, c6 double,
c7 decimal(38,18), c8 float, c9 double,
c10 decimal(38,18), c11 float, c12 double,
c13 decimal(38,18), c14 float, c15 double,
b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_floating_string_group_n2 SELECT insert_num,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n10;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n2;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_floating_string_group_n2 replace columns (insert_num int,
c1 STRING, c2 STRING, c3 STRING,
c4 CHAR(50), c5 CHAR(50), c6 CHAR(50),
c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50),
c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_floating_string_group_n2 VALUES (111,
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n2;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n2;
-drop table table_change_numeric_group_string_group_floating_string_group;
+drop table table_change_numeric_group_string_group_floating_string_group_n2;
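(Same idea for the floating group: the number is rendered as text first, so CHAR(7)/VARCHAR(7) keep only a prefix of that rendering. Hypothetical sketch; the exact rendering of a double is version-dependent:)
CREATE TABLE t_dbl2char(c1 double) STORED AS TEXTFILE;
INSERT INTO t_dbl2char VALUES (9876.543);
ALTER TABLE t_dbl2char REPLACE COLUMNS (c1 varchar(7));
SELECT c1 FROM t_dbl2char;            -- expected: '9876.54', the first 7 characters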
@@ -218,34 +218,34 @@ drop table table_change_numeric_group_string_group_floating_string_group;
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
-- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
--
-CREATE TABLE table_change_string_group_string_group_string(insert_num int,
+CREATE TABLE table_change_string_group_string_group_string_n2(insert_num int,
c1 string, c2 string, c3 string, c4 string,
c5 CHAR(50), c6 CHAR(50), c7 CHAR(50),
c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING);
-insert into table table_change_string_group_string_group_string SELECT insert_num,
+insert into table table_change_string_group_string_group_string_n2 SELECT insert_num,
string2, string2, string2, string2,
string2, string2, string2,
string2, string2, string2,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n10;
-select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n2;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_string_group_string replace columns (insert_num int,
+alter table table_change_string_group_string_group_string_n2 replace columns (insert_num int,
c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
c8 CHAR(50), c9 CHAR(9), c10 STRING,
b STRING) ;
-insert into table table_change_string_group_string_group_string VALUES (111,
+insert into table table_change_string_group_string_group_string_n2 VALUES (111,
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n2;
-drop table table_change_string_group_string_group_string;
+drop table table_change_string_group_string_group_string_n2;
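(Within the string group the conversions are purely length semantics: CHAR(n) pads with trailing spaces, and both CHAR(n) and VARCHAR(n) truncate longer data. Hypothetical sketch:)
CREATE TABLE t_str2char(c1 string) STORED AS TEXTFILE;
INSERT INTO t_str2char VALUES ('twelve chars');
ALTER TABLE t_str2char REPLACE COLUMNS (c1 char(9));
SELECT c1 FROM t_str2char;            -- expected: 'twelve ch', 12 characters cut to 9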
@@ -260,40 +260,40 @@ drop table table_change_string_group_string_group_string;
-- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
-- BIGINT, (DECIMAL, FLOAT, DOUBLE)
--
-CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2(insert_num int,
c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
c12 int, c13 int, c14 int, c15 int,
c16 bigint, c17 bigint, c18 bigint,
b STRING);
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2 SELECT insert_num,
tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
smallint1, smallint1, smallint1, smallint1, smallint1,
int1, int1, int1, int1,
bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n10;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2 replace columns (insert_num int,
c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2 VALUES (111,
7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
80000, 90000000, 1234.5678, 9876.543, 789.321,
90000000, 1234.5678, 9876.543, 789.321,
1234.5678, 9876.543, 789.321,
'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2;
-drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n2;
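(The lower-to-higher cases are lossless widenings: tinyint fits in smallint/int/bigint, and the integer types fit in decimal(38,18)/float/double, so pre-ALTER rows read back numerically unchanged. Hypothetical sketch:)
CREATE TABLE t_widen(c1 tinyint) STORED AS TEXTFILE;
INSERT INTO t_widen VALUES (100Y);    -- 100 as a tinyint literal
ALTER TABLE t_widen REPLACE COLUMNS (c1 bigint);
SELECT c1 FROM t_widen;               -- expected: 100, unchanged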
@@ -302,23 +302,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
-- DECIMAL, (FLOAT, DOUBLE) and
-- FLOAT, (DOUBLE)
--
-CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n2(insert_num int,
c1 decimal(38,18), c2 decimal(38,18), c3 float,
b STRING);
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n2 SELECT insert_num,
decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n10;
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n2;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n2 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n2 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n2;
-drop table table_change_lower_to_higher_numeric_group_decimal_to_float;
+drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n2;
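(decimal -> float is the one step in this file that can lose precision, since float keeps only about 7 significant decimal digits; that is presumably why the values are re-read after the ALTER. Hypothetical sketch:)
CREATE TABLE t_dec2flt(c1 decimal(38,18)) STORED AS TEXTFILE;
INSERT INTO t_dec2flt VALUES (1234.5678);
ALTER TABLE t_dec2flt REPLACE COLUMNS (c1 float);
SELECT c1 FROM t_dec2flt;             -- expected: approximately 1234.5678, subject to float rounding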
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q
index 142ab28780..fb216f88d6 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part.q
@@ -20,9 +20,9 @@ set hive.llap.io.enabled=false;
-- vectorized reading of TEXTFILE format files using the vector SERDE methods.
--
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n31(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n31;
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE ADD COLUMNS
@@ -31,51 +31,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
--
--
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_int_permute_select_n10(insert_num int, a INT, b STRING) PARTITIONED BY(part INT);
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_permute_select_n10 partition(part=1) VALUES (1, 1111, 'new');
-- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n10 add columns(c int);
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333);
+insert into table part_add_int_permute_select_n10 partition(part=1) VALUES (2, 2222, 'new', 3333);
explain vectorization detail
-select insert_num,part,a,b from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n10;
-- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n10;
+select insert_num,part,a,b,c from part_add_int_permute_select_n10;
+select insert_num,part,c from part_add_int_permute_select_n10;
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n10;
-- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
--
--
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_int_string_permute_select_n10(insert_num int, a INT, b STRING) PARTITIONED BY(part INT);
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_string_permute_select_n10 partition(part=1) VALUES (1, 1111, 'new');
-- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n10 add columns(c int, d string);
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
+insert into table part_add_int_string_permute_select_n10 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
explain vectorization detail
-select insert_num,part,a,b from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n10;
-- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_string_permute_select;
-select insert_num,part,a,b,c from part_add_int_string_permute_select;
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,d from part_add_int_string_permute_select;
-select insert_num,part,c from part_add_int_string_permute_select;
-select insert_num,part,d from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n10;
+select insert_num,part,a,b,c from part_add_int_string_permute_select_n10;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n10;
+select insert_num,part,a,c,d from part_add_int_string_permute_select_n10;
+select insert_num,part,a,d from part_add_int_string_permute_select_n10;
+select insert_num,part,c from part_add_int_string_permute_select_n10;
+select insert_num,part,d from part_add_int_string_permute_select_n10;
-drop table part_add_int_string_permute_select;
+drop table part_add_int_string_permute_select_n10;
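(These partitioned variants matter because ALTER TABLE without CASCADE changes only the table-level schema; existing partitions keep the descriptor they were created with, and the reader reconciles the two per partition. Hypothetical sketch:)
CREATE TABLE t_part(insert_num int, a int) PARTITIONED BY (part int) STORED AS TEXTFILE;
INSERT INTO t_part PARTITION (part=1) VALUES (1, 1111);
ALTER TABLE t_part ADD COLUMNS (c int);        -- no CASCADE: partition part=1 keeps its old schema
INSERT INTO t_part PARTITION (part=1) VALUES (2, 2222, 3333);
SELECT insert_num, part, a, c FROM t_part;     -- expected: (1, 1, 1111, NULL) and (2, 1, 2222, 3333)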
@@ -86,21 +86,21 @@ drop table part_add_int_string_permute_select;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
--
-CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_string_group_double_n10(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT);
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table part_change_string_group_double_n10 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n31;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table part_change_string_group_double_n10 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
+insert into table part_change_string_group_double_n10 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n31 WHERE insert_num = 111;
explain vectorization detail
-select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
+select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n10;
-select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
+select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n10;
-drop table part_change_string_group_double;
+drop table part_change_string_group_double_n10;
------------------------------------------------------------------------------------------
-- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -109,21 +109,21 @@ drop table part_change_string_group_double;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
--
-CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_date_group_string_group_date_timestamp_n10(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT);
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table part_change_date_group_string_group_date_timestamp_n10 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n31;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table part_change_date_group_string_group_date_timestamp_n10 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table part_change_date_group_string_group_date_timestamp_n10 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n10;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n10;
-drop table part_change_date_group_string_group_date_timestamp;
+drop table part_change_date_group_string_group_date_timestamp_n10;
@@ -139,39 +139,39 @@ drop table part_change_date_group_string_group_date_timestamp;
-- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
--
--
-CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n10(insert_num int,
c1 tinyint, c2 smallint, c3 int, c4 bigint,
c5 tinyint, c6 smallint, c7 int, c8 bigint,
c9 tinyint, c10 smallint, c11 int, c12 bigint,
c13 tinyint, c14 smallint, c15 int, c16 bigint,
c17 tinyint, c18 smallint, c19 int, c20 bigint,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n10 partition(part=1) SELECT insert_num,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n31;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n10;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_multi_ints_string_group_n10 replace columns (insert_num int,
c1 STRING, c2 STRING, c3 STRING, c4 STRING,
c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50),
c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50),
c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
b STRING) ;
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n10 partition(part=1) VALUES (111,
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n10;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n10;
-drop table part_change_numeric_group_string_group_multi_ints_string_group;
+drop table part_change_numeric_group_string_group_multi_ints_string_group_n10;
@@ -182,39 +182,39 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group;
-- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
--
--
-CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n10(insert_num int,
c1 decimal(38,18), c2 float, c3 double,
c4 decimal(38,18), c5 float, c6 double,
c7 decimal(38,18), c8 float, c9 double,
c10 decimal(38,18), c11 float, c12 double,
c13 decimal(38,18), c14 float, c15 double,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_floating_string_group_n10 partition(part=1) SELECT insert_num,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
decimal1, float1, double1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n31;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n10;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_floating_string_group_n10 replace columns (insert_num int,
c1 STRING, c2 STRING, c3 STRING,
c4 CHAR(50), c5 CHAR(50), c6 CHAR(50),
c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50),
c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
b STRING);
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_floating_string_group_n10 partition(part=1) VALUES (111,
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n10;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n10;
-drop table part_change_numeric_group_string_group_floating_string_group;
+drop table part_change_numeric_group_string_group_floating_string_group_n10;
@@ -226,37 +226,37 @@ drop table part_change_numeric_group_string_group_floating_string_group;
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
-- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
--
-CREATE TABLE part_change_string_group_string_group_string(insert_num int,
+CREATE TABLE part_change_string_group_string_group_string_n10(insert_num int,
c1 string, c2 string, c3 string, c4 string,
c5 CHAR(50), c6 CHAR(50), c7 CHAR(50),
c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50),
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num,
+insert into table part_change_string_group_string_group_string_n10 partition(part=1) SELECT insert_num,
string2, string2, string2, string2,
string2, string2, string2,
string2, string2, string2,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n31;
-select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n10;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_string_group_string replace columns (insert_num int,
+alter table part_change_string_group_string_group_string_n10 replace columns (insert_num int,
c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
c8 CHAR(50), c9 CHAR(9), c10 STRING,
b STRING) ;
-insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111,
+insert into table part_change_string_group_string_group_string_n10 partition(part=1) VALUES (111,
'filler', 'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'filler', 'filler', 'filler',
'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n10;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n10;
-drop table part_change_string_group_string_group_string;
+drop table part_change_string_group_string_group_string_n10;
------------------------------------------------------------------------------------------
@@ -270,31 +270,31 @@ drop table part_change_string_group_string_group_string;
-- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
-- BIGINT, (DECIMAL, FLOAT, DOUBLE)
--
-CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10(insert_num int,
c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
c12 int, c13 int, c14 int, c15 int,
c16 bigint, c17 bigint, c18 bigint,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10 partition(part=1) SELECT insert_num,
tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
smallint1, smallint1, smallint1, smallint1, smallint1,
int1, int1, int1, int1,
bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n31;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10 replace columns (insert_num int,
c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
b STRING) ;
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10 partition(part=1) VALUES (111,
7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
80000, 90000000, 1234.5678, 9876.543, 789.321,
90000000, 1234.5678, 9876.543, 789.321,
@@ -302,11 +302,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa
'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10;
-drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n10;
@@ -315,26 +315,26 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
-- DECIMAL, (FLOAT, DOUBLE) and
-- FLOAT, (DOUBLE)
--
-CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n10(insert_num int,
c1 decimal(38,18), c2 decimal(38,18), c3 float,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n10 partition(part=1) SELECT insert_num,
decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n31;
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n10;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n10 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n10 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
explain vectorization detail
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n10;
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n10;
-drop table part_change_lower_to_higher_numeric_group_decimal_to_float;
\ No newline at end of file
+drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n10;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q
index dc870bee76..97b8601cd7 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_complex.q
@@ -23,103 +23,103 @@ set hive.llap.io.enabled=false;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT
-CREATE TABLE part_change_various_various_struct1(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_various_various_struct1_n6(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT);
-CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_a_txt_n6(insert_num int, s1 STRUCT, b STRING)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n6;
-insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt;
+insert into table part_change_various_various_struct1_n6 partition(part=1) select * from complex_struct1_a_txt_n6;
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n6;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING);
+alter table part_change_various_various_struct1_n6 replace columns (insert_num int, s1 STRUCT, b STRING);
-CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_b_txt_n6(insert_num int, s1 STRUCT, b STRING)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n6;
-insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt;
+insert into table part_change_various_various_struct1_n6 partition(part=2) select * from complex_struct1_b_txt_n6;
-CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_c_txt_n6(insert_num int, s1 STRUCT, b STRING)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n6;
-insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt;
+insert into table part_change_various_various_struct1_n6 partition(part=1) select * from complex_struct1_c_txt_n6;
explain vectorization detail
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n6;
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n6;
-drop table part_change_various_various_struct1;
+drop table part_change_various_various_struct1_n6;
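(The struct cases apply the same read-time rules to nested types; the STRUCT field lists are elided in this copy, so the sketch below uses hypothetical, much shorter fields. Growing a struct via REPLACE COLUMNS makes the appended field read as NULL for pre-ALTER rows. The one-row helper mirrors the mydual trick used elsewhere in these tests:)
create table dual_x(a int);
insert into dual_x values(1);
CREATE TABLE t_struct(insert_num int, s1 struct<c1:int,c2:string>) PARTITIONED BY (part int);
insert into table t_struct partition(part=1) select 1, named_struct('c1', 7, 'c2', 'old') from dual_x;
ALTER TABLE t_struct REPLACE COLUMNS (insert_num int, s1 struct<c1:int,c2:string,c3:double>);
select insert_num, s1.c3 from t_struct;   -- expected: NULL for the pre-ALTER row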
--
-- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT
--
-CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_various_various_struct2_n6(insert_num int, b STRING) PARTITIONED BY(part INT);
-insert into table part_add_various_various_struct2 partition(part=1)
+insert into table part_add_various_various_struct2_n6 partition(part=1)
values(1, 'original'),
(2, 'original');
-select insert_num,part,b from part_add_various_various_struct2;
+select insert_num,part,b from part_add_various_various_struct2_n6;
-- Table-Non-Cascade ADD COLUMN ...
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT);
+alter table part_add_various_various_struct2_n6 ADD columns (s2 STRUCT);
-CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_a_txt_n6(insert_num int, b STRING, s2 STRUCT)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n6;
-insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt;
+insert into table part_add_various_various_struct2_n6 partition(part=1) select * from complex_struct2_a_txt_n6;
-CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_b_txt_n6(insert_num int, b STRING, s2 STRUCT)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n6;
-insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt;
+insert into table part_add_various_various_struct2_n6 partition(part=2) select * from complex_struct2_b_txt_n6;
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n6;
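(Adding a complex-typed column behaves like the scalar case: rows written before the ALTER read the whole struct as NULL. Continuing the hypothetical t_struct sketch above:)
ALTER TABLE t_struct ADD COLUMNS (s2 struct<a:int,b:string>);
select insert_num, s2 from t_struct;      -- expected: s2 is NULL for every pre-ALTER row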
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT);
+alter table part_add_various_various_struct2_n6 REPLACE columns (insert_num int, b STRING, s2 STRUCT);
-CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_c_txt_n6(insert_num int, b STRING, s2 STRUCT)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n6;
-insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt;
+insert into table part_add_various_various_struct2_n6 partition(part=2) select * from complex_struct2_c_txt_n6;
-CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT)
+CREATE TABLE complex_struct2_d_txt_n6(insert_num int, b STRING, s2 STRUCT)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n6;
-insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt;
+insert into table part_add_various_various_struct2_n6 partition(part=1) select * from complex_struct2_d_txt_n6;
explain vectorization detail
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n6;
-select insert_num,part,b,s2 from part_add_various_various_struct2;
+select insert_num,part,b,s2 from part_add_various_various_struct2_n6;
-drop table part_add_various_various_struct2;
+drop table part_add_various_various_struct2_n6;
@@ -127,40 +127,40 @@ drop table part_add_various_various_struct2;
--
-- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns
--
-CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT);
+CREATE TABLE part_add_to_various_various_struct4_n6(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT);
-CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_a_txt_n6(insert_num int, b STRING, s3 STRUCT)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n6;
-insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt;
+insert into table part_add_to_various_various_struct4_n6 partition(part=1) select * from complex_struct4_a_txt_n6;
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n6;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT);
+alter table part_add_to_various_various_struct4_n6 replace columns (insert_num int, b STRING, s3 STRUCT);
-CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_b_txt_n6(insert_num int, b STRING, s3 STRUCT)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n6;
-insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt;
+insert into table part_add_to_various_various_struct4_n6 partition(part=2) select * from complex_struct4_b_txt_n6;
-CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT)
+CREATE TABLE complex_struct4_c_txt_n6(insert_num int, b STRING, s3 STRUCT)
row format delimited fields terminated by '|'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt;
+load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n6;
-insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt;
+insert into table part_add_to_various_various_struct4_n6 partition(part=1) select * from complex_struct4_c_txt_n6;
explain vectorization detail
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n6;
-select insert_num,part,b,s3 from part_add_to_various_various_struct4;
+select insert_num,part,b,s3 from part_add_to_various_various_struct4_n6;
-drop table part_add_to_various_various_struct4;
+drop table part_add_to_various_various_struct4_n6;
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
index b86423beb4..ea1f610c51 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
@@ -24,13 +24,13 @@ set hive.llap.io.enabled=false;
--
--
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n44(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
row format delimited fields terminated by '|'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n44;
-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_2_n16(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
row format delimited fields terminated by '|'
stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n16;
--
-- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various:
@@ -40,7 +40,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_
-- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT -2147483648 to 2147483647 and
-- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807
--
-CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int,
+CREATE TABLE part_change_various_various_boolean_to_bigint_n8(insert_num int,
c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP,
c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP,
c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP,
@@ -48,18 +48,18 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int,
c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP,
b STRING) PARTITIONED BY(part INT);
-insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_boolean_to_bigint_n8 partition(part=1) SELECT insert_num,
tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, boolean_str, timestamp1,
boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1,
boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1,
boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1,
boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str, bigint_str, bigint_str, timestamp1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n44;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n8;
-- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int,
+alter table part_change_various_various_boolean_to_bigint_n8 replace columns (insert_num int,
c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN,
c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT,
c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT,
@@ -67,20 +67,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser
c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT,
b STRING);
-insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_boolean_to_bigint_n8 partition(part=1) SELECT insert_num,
boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1,
tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1,
int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1,
bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1,
- 'new' FROM schema_evolution_data;
+ 'new' FROM schema_evolution_data_n44;
explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n8;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n8;
-drop table part_change_various_various_boolean_to_bigint;
+drop table part_change_various_various_boolean_to_bigint_n8;
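(This subsection pushes one wide table through every source type -> integer-family target. The general read-time rule being exercised: text that parses as the target type converts, and unparseable values surface as NULL rather than failing the query. Minimal hypothetical sketch:)
CREATE TABLE t_various(c1 string, c2 string) STORED AS TEXTFILE;
INSERT INTO t_various VALUES ('100', 'not-a-number');
ALTER TABLE t_various REPLACE COLUMNS (c1 bigint, c2 bigint);
SELECT c1, c2 FROM t_various;             -- expected: 100 and NULL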
part_change_various_various_boolean_to_bigint; +drop table part_change_various_various_boolean_to_bigint_n8; @@ -90,39 +90,39 @@ drop table part_change_various_various_boolean_to_bigint; -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and -- -CREATE TABLE part_change_various_various_decimal_to_double(insert_num int, +CREATE TABLE part_change_various_various_decimal_to_double_n8(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP, c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP, c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n8 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n44; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_decimal_to_double replace columns (insert_num int, +alter table part_change_various_various_decimal_to_double_n8 replace columns (insert_num int, c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18), c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT, c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE, b STRING); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n8 partition(part=1) SELECT insert_num, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, - 'new' FROM schema_evolution_data_2 WHERE insert_num=111; + 'new' FROM schema_evolution_data_2_n16 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n8; -drop table part_change_various_various_decimal_to_double; +drop table part_change_various_various_decimal_to_double_n8; @@ -130,80 +130,80 @@ drop table part_change_various_various_decimal_to_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP -- -CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_timestamp_n8(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_timestamp_n8 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n44; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n8; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); +alter table part_change_various_various_timestamp_n8 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_timestamp_n8 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n16 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n8; -drop table part_change_various_various_timestamp; +drop table part_change_various_various_timestamp_n8; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP --> DATE -- -CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_date_n8(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_date_n8 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n44; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); +alter table part_change_various_various_date_n8 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_date_n8 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n16 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n8; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n8; -drop table part_change_various_various_date; +drop table part_change_various_various_date_n8; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale -- -CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_same_type_different_params_n8(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); -CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) +CREATE TABLE same_type1_a_txt_n8(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n8; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt; +insert into table part_change_same_type_different_params_n8 partition(part=1) select * from same_type1_a_txt_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n8; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); +alter table part_change_same_type_different_params_n8 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); -CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_b_txt_n8(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n8; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt; +insert into table part_change_same_type_different_params_n8 partition(part=1) select * from same_type1_b_txt_n8; -CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_c_txt_n8(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n8; -insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt; +insert into table part_change_same_type_different_params_n8 partition(part=2) select * from same_type1_c_txt_n8; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n8; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n8; -drop table part_change_same_type_different_params; +drop table part_change_same_type_different_params_n8; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive_llap_io.q index 63a4df7844..e1f8ffe281 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive_llap_io.q @@ -24,13 +24,13 @@ set hive.llap.io.enabled=true;set hive.llap.io.encode.enabled=true; -- -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n30(insert_num int, 
boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n30; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n9(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n9; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: @@ -40,7 +40,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_ -- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT –2147483648 to 2147483647 and -- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807 -- -CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, +CREATE TABLE part_change_various_various_boolean_to_bigint_n3(insert_num int, c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP, c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP, c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP, @@ -48,21 +48,21 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n3 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, 
float1, double1, decimal1, boolean_str, timestamp1, boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1, boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1, boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1, boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str, bigint_str, bigint_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n30; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int, +alter table part_change_various_various_boolean_to_bigint_n3 replace columns (insert_num int, c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN, c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT, c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT, @@ -70,20 +70,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT, b STRING); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n3 partition(part=1) SELECT insert_num, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, - 'new' FROM schema_evolution_data; + 'new' FROM schema_evolution_data_n30; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n3; -drop table part_change_various_various_boolean_to_bigint; +drop table part_change_various_various_boolean_to_bigint_n3; @@ -93,42 +93,42 @@ drop table part_change_various_various_boolean_to_bigint; -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and -- -CREATE TABLE part_change_various_various_decimal_to_double(insert_num int, +CREATE TABLE part_change_various_various_decimal_to_double_n3(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP, c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 
DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP, c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n3 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n30; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n3; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_decimal_to_double replace columns (insert_num int, +alter table part_change_various_various_decimal_to_double_n3 replace columns (insert_num int, c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18), c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT, c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE, b STRING); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n3 partition(part=1) SELECT insert_num, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, - 'new' FROM schema_evolution_data_2 WHERE insert_num=111; + 'new' FROM schema_evolution_data_2_n9 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n3; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n3; -drop table part_change_various_various_decimal_to_double; +drop table part_change_various_various_decimal_to_double_n3; @@ -136,89 +136,89 @@ drop table part_change_various_various_decimal_to_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP -- -CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_timestamp_n3(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_timestamp_n3 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n30; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); +alter table part_change_various_various_timestamp_n3 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_timestamp_n3 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n9 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n3; -drop table part_change_various_various_timestamp; +drop table part_change_various_various_timestamp_n3; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP --> DATE -- -CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_date_n3(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_date_n3 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n30; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n3; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); +alter table part_change_various_various_date_n3 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_date_n3 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n9 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n3; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n3; -drop table part_change_various_various_date; +drop table part_change_various_various_date_n3; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale -- -CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_same_type_different_params_n3(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); -CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) +CREATE TABLE same_type1_a_txt_n3(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n3; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt; +insert into table part_change_same_type_different_params_n3 partition(part=1) select * from same_type1_a_txt_n3; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n3; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); +alter table part_change_same_type_different_params_n3 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); -CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_b_txt_n3(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n3; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt; +insert into table part_change_same_type_different_params_n3 partition(part=1) select * from same_type1_b_txt_n3; -CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_c_txt_n3(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n3; -insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt; +insert into table part_change_same_type_different_params_n3 partition(part=2) select * from same_type1_c_txt_n3; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n3; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n3; -drop table part_change_same_type_different_params; +drop table part_change_same_type_different_params_n3; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_llap_io.q index 4cf867c49c..2c4c25f134 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_llap_io.q @@ -20,9 +20,9 @@ set hive.llap.io.enabled=true;set hive.llap.io.encode.enabled=true; -- vectorized reading of TEXTFILE format files using the vector SERDE methods. 
-- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n3(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n3; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE ADD COLUMNS @@ -31,61 +31,61 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data. -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT -- -- -CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_permute_select_n0(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_permute_select_n0 partition(part=1) VALUES (1, 1111, 'new'); explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n0; -select insert_num,part,a,b from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n0; -- Table-Non-Cascade ADD COLUMNS ... 
---** alter table part_add_int_permute_select add columns(c int); +--** alter table part_add_int_permute_select_n0 add columns(c int); ---** insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +--** insert into table part_add_int_permute_select_n0 partition(part=1) VALUES (2, 2222, 'new', 3333); --** explain vectorization detail ---** select insert_num,part,a,b from part_add_int_permute_select; +--** select insert_num,part,a,b from part_add_int_permute_select_n0; -- SELECT permutation columns to make sure NULL defaulting works right ---** select insert_num,part,a,b from part_add_int_permute_select; ---** select insert_num,part,a,b,c from part_add_int_permute_select; ---** select insert_num,part,c from part_add_int_permute_select; +--** select insert_num,part,a,b from part_add_int_permute_select_n0; +--** select insert_num,part,a,b,c from part_add_int_permute_select_n0; +--** select insert_num,part,c from part_add_int_permute_select_n0; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n0; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_string_permute_select_n0(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n0 partition(part=1) VALUES (1, 1111, 'new'); explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n0; -select insert_num,part,a,b from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n0; -- Table-Non-Cascade ADD COLUMNS ... 
---** alter table part_add_int_string_permute_select add columns(c int, d string); +--** alter table part_add_int_string_permute_select_n0 add columns(c int, d string); ---** insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +--** insert into table part_add_int_string_permute_select_n0 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); --** explain vectorization detail ---** select insert_num,part,a,b from part_add_int_string_permute_select; +--** select insert_num,part,a,b from part_add_int_string_permute_select_n0; -- SELECT permutation columns to make sure NULL defaulting works right ---** select insert_num,part,a,b from part_add_int_string_permute_select; ---** select insert_num,part,a,b,c from part_add_int_string_permute_select; ---** select insert_num,part,a,b,c,d from part_add_int_string_permute_select; ---** select insert_num,part,a,c,d from part_add_int_string_permute_select; ---** select insert_num,part,a,d from part_add_int_string_permute_select; ---** select insert_num,part,c from part_add_int_string_permute_select; ---** select insert_num,part,d from part_add_int_string_permute_select; +--** select insert_num,part,a,b from part_add_int_string_permute_select_n0; +--** select insert_num,part,a,b,c from part_add_int_string_permute_select_n0; +--** select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n0; +--** select insert_num,part,a,c,d from part_add_int_string_permute_select_n0; +--** select insert_num,part,a,d from part_add_int_string_permute_select_n0; +--** select insert_num,part,c from part_add_int_string_permute_select_n0; +--** select insert_num,part,d from part_add_int_string_permute_select_n0; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n0; @@ -96,26 +96,26 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_string_group_double_n0(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n0 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n3; explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n0; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n0; -- Table-Non-Cascade CHANGE COLUMNS ... 
---** alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +--** alter table part_change_string_group_double_n0 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); ---** insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +--** insert into table part_change_string_group_double_n0 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n3 WHERE insert_num = 111; --** explain vectorization detail ---** select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +--** select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n0; ---** select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +--** select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n0; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n0; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -124,26 +124,26 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n0(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n0 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n3; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n0; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n0; -- Table-Non-Cascade CHANGE COLUMNS ... 
---** alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +--** alter table part_change_date_group_string_group_date_timestamp_n0 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); ---** insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +--** insert into table part_change_date_group_string_group_date_timestamp_n0 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); --** explain vectorization detail ---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n0; ---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n0; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n0; @@ -159,42 +159,42 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n0(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n0 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n3; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n0; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n0; -- Table-Non-Cascade CHANGE COLUMNS ... 
---** alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +--** alter table part_change_numeric_group_string_group_multi_ints_string_group_n0 replace columns (insert_num int, --** c1 STRING, c2 STRING, c3 STRING, c4 STRING, --** c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), --** c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), --** b STRING) ; ---** insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +--** insert into table part_change_numeric_group_string_group_multi_ints_string_group_n0 partition(part=1) VALUES (111, --** 'filler', 'filler', 'filler', 'filler', --** 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', --** 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', --** 'new'); --** explain vectorization detail ---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n0; ---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n0; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n0; @@ -205,42 +205,42 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n0(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n0 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n3; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n0; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n0; -- 
Table-Non-Cascade CHANGE COLUMNS ... ---** alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +--** alter table part_change_numeric_group_string_group_floating_string_group_n0 replace columns (insert_num int, --** c1 STRING, c2 STRING, c3 STRING, --** c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), --** c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), --** b STRING); ---** insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +--** insert into table part_change_numeric_group_string_group_floating_string_group_n0 partition(part=1) VALUES (111, --** 'filler', 'filler', 'filler', --** 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', --** 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', --** 'new'); --** explain vectorization detail ---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n0; ---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n0; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n0; @@ -252,40 +252,40 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n0(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n0 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n3; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n0; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n0; -- Table-Non-Cascade CHANGE COLUMNS ... 
---** alter table part_change_string_group_string_group_string replace columns (insert_num int,
+--** alter table part_change_string_group_string_group_string_n0 replace columns (insert_num int,
 --** c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
 --** c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
 --** c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ;
---** insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111,
+--** insert into table part_change_string_group_string_group_string_n0 partition(part=1) VALUES (111,
 --** 'filler', 'filler', 'filler', 'filler',
 --** 'filler', 'filler', 'filler',
 --** 'filler', 'filler', 'filler',
 --** 'new');
 --** explain vectorization detail
---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n0;
---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n0;
-drop table part_change_string_group_string_group_string;
+drop table part_change_string_group_string_group_string_n0;
 ------------------------------------------------------------------------------------------
@@ -299,34 +299,34 @@ drop table part_change_string_group_string_group_string;
 -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
 -- BIGINT, (DECIMAL, FLOAT, DOUBLE)
 --
-CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0(insert_num int,
 c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint,
 b STRING) PARTITIONED BY(part INT);
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0 partition(part=1) SELECT insert_num,
 tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n3;
 explain vectorization detail
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0;
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0;
 -- Table-Non-Cascade CHANGE COLUMNS ...
---** alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+--** alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0 replace columns (insert_num int,
 --** c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
 --** c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
 --** c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
 --** c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
 --** b STRING) ;
---** insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111,
+--** insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0 partition(part=1) VALUES (111,
 --** 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
 --** 80000, 90000000, 1234.5678, 9876.543, 789.321,
 --** 90000000, 1234.5678, 9876.543, 789.321,
@@ -334,11 +334,11 @@ select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c1
 --** 'new');
 --** explain vectorization detail
---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0;
---** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+--** select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0;
-drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n0;
@@ -347,29 +347,29 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 -- DECIMAL, (FLOAT, DOUBLE) and
 -- FLOAT, (DOUBLE)
 --
-CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n0(insert_num int,
 c1 decimal(38,18), c2 decimal(38,18), c3 float,
 b STRING) PARTITIONED BY(part INT);
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n0 partition(part=1) SELECT insert_num,
 decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n3;
 explain vectorization detail
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n0;
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n0;
 -- Table-Non-Cascade CHANGE COLUMNS ...
---** alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+--** alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n0 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
---** insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+--** insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n0 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
 --** explain vectorization detail
---** select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+--** select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n0;
---** select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+--** select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n0;
-drop table part_change_lower_to_higher_numeric_group_decimal_to_float;
\ No newline at end of file
+drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n0;
\ No newline at end of file
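Note on the pattern above: every "Table-Non-Cascade CHANGE COLUMNS" block relies on ALTER TABLE ... REPLACE COLUMNS being a metadata-only change, so rows already written keep their old on-disk types and the reader converts them at scan time. A minimal sketch of that pattern, assuming a hypothetical table name demo_part and the hive.metastore.disallow.incompatible.col.type.changes=false setting these q-files run with:

CREATE TABLE demo_part(insert_num int, c1 tinyint, b string) PARTITIONED BY(part int);
INSERT INTO TABLE demo_part PARTITION(part=1) VALUES (1, 100, 'original');
-- Metadata-only change: partition part=1 still stores c1 as tinyint on disk.
ALTER TABLE demo_part REPLACE COLUMNS (insert_num int, c1 string, b string);
INSERT INTO TABLE demo_part PARTITION(part=1) VALUES (2, 'filler', 'new');
SELECT insert_num, part, c1, b FROM demo_part;  -- old tinyint values read back as strings
DROP TABLE demo_part;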
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
index 166b34a4c7..3088a8dc69 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
@@ -18,9 +18,9 @@ set hive.llap.io.enabled=false;
 -- vectorized reading of TEXTFILE format files using the vector SERDE methods.
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n15(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n15;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -29,51 +29,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n4(insert_num int, a INT, b STRING);
-insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n4 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n15;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n4 add columns(c int);
-insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
+insert into table table_add_int_permute_select_n4 VALUES (111, 80000, 'new', 80000);
 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n4;
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n4;
+select insert_num,a,b,c from table_add_int_permute_select_n4;
+select insert_num,c from table_add_int_permute_select_n4;
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n4;
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_string_permute_select_n4(insert_num int, a INT, b STRING);
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n4 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n15;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n4 add columns(c int, d string);
-insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
+insert into table table_add_int_string_permute_select_n4 VALUES (111, 80000, 'new', 80000, 'filler');
 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n4;
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_string_permute_select;
-select insert_num,a,c,d from table_add_int_string_permute_select;
-select insert_num,a,d from table_add_int_string_permute_select;
-select insert_num,c from table_add_int_string_permute_select;
-select insert_num,d from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n4;
+select insert_num,a,b,c from table_add_int_string_permute_select_n4;
+select insert_num,a,b,c,d from table_add_int_string_permute_select_n4;
+select insert_num,a,c,d from table_add_int_string_permute_select_n4;
+select insert_num,a,d from table_add_int_string_permute_select_n4;
+select insert_num,c from table_add_int_string_permute_select_n4;
+select insert_num,d from table_add_int_string_permute_select_n4;
-drop table table_add_int_string_permute_select;
+drop table table_add_int_string_permute_select_n4;
@@ -84,21 +84,21 @@ drop table table_add_int_string_permute_select;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
 --
-CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
+CREATE TABLE table_change_string_group_double_n4(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
-insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table table_change_string_group_double_n4 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n15;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table table_change_string_group_double_n4 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
+insert into table table_change_string_group_double_n4 VALUES (111, 789.321, 789.321, 789.321, 'new');
 explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n4;
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n4;
-drop table table_change_string_group_double;
+drop table table_change_string_group_double_n4;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -107,18 +107,18 @@ drop table table_change_string_group_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
 --
-CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
+CREATE TABLE table_change_date_group_string_group_date_group_n4(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
-insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table table_change_date_group_string_group_date_group_n4 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n15;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table table_change_date_group_string_group_date_group_n4 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table table_change_date_group_string_group_date_group_n4 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n4;
-drop table table_change_date_group_string_group_date_group;
+drop table table_change_date_group_string_group_date_group_n4;
@@ -133,39 +133,39 @@ drop table table_change_date_group_string_group_date_group;
 -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n4(insert_num int,
 c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint,
 b STRING);
-insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n4 SELECT insert_num,
 tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n15;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n4;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_multi_ints_string_group_n4 replace columns (insert_num int,
 c1 STRING, c2 STRING, c3 STRING, c4 STRING,
 c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
 c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
 b STRING) ;
-insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n4 VALUES (111,
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
 'new');
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n4;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n4;
-drop table table_change_numeric_group_string_group_multi_ints_string_group;
+drop table table_change_numeric_group_string_group_multi_ints_string_group_n4;
@@ -176,39 +176,39 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group;
 -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n4(insert_num int,
 c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double,
 b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_floating_string_group_n4 SELECT insert_num,
 decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n15;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n4;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_floating_string_group_n4 replace columns (insert_num int,
 c1 STRING, c2 STRING, c3 STRING,
 c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
 c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
 b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_floating_string_group_n4 VALUES (111,
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
 'new');
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n4;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n4;
-drop table table_change_numeric_group_string_group_floating_string_group;
+drop table table_change_numeric_group_string_group_floating_string_group_n4;
 ------------------------------------------------------------------------------------------
@@ -219,34 +219,34 @@ drop table table_change_numeric_group_string_group_floating_string_group;
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
 -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
 --
-CREATE TABLE table_change_string_group_string_group_string(insert_num int,
+CREATE TABLE table_change_string_group_string_group_string_n4(insert_num int,
 c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50),
 b STRING);
-insert into table table_change_string_group_string_group_string SELECT insert_num,
+insert into table table_change_string_group_string_group_string_n4 SELECT insert_num,
 string2, string2, string2, string2, string2, string2, string2, string2, string2, string2,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n15;
-select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n4;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_string_group_string replace columns (insert_num int,
+alter table table_change_string_group_string_group_string_n4 replace columns (insert_num int,
 c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
 c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
 c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ;
-insert into table table_change_string_group_string_group_string VALUES (111,
+insert into table table_change_string_group_string_group_string_n4 VALUES (111,
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n4;
-drop table table_change_string_group_string_group_string;
+drop table table_change_string_group_string_group_string_n4;
@@ -261,40 +261,40 @@ drop table table_change_string_group_string_group_string;
 -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
 -- BIGINT, (DECIMAL, FLOAT, DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4(insert_num int,
 c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint,
 b STRING);
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4 SELECT insert_num,
 tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n15;
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4 replace columns (insert_num int,
 c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
 c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
 c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
 c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
 b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4 VALUES (111,
 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
 80000, 90000000, 1234.5678, 9876.543, 789.321,
 90000000, 1234.5678, 9876.543, 789.321,
 1234.5678, 9876.543, 789.321,
 'new');
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4;
-drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n4;
@@ -303,23 +303,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 -- DECIMAL, (FLOAT, DOUBLE) and
 -- FLOAT, (DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n4(insert_num int,
 c1 decimal(38,18), c2 decimal(38,18), c3 float,
 b STRING);
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n4 SELECT insert_num,
 decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n15;
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n4;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n4 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n4 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n4;
-drop table table_change_lower_to_higher_numeric_group_decimal_to_float;
+drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n4;
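The unpartitioned tables above also exercise ADD COLUMNS with NULL defaulting: rows written before the ALTER carry no value for the new column and must come back as NULL. A minimal sketch, with the hypothetical table name demo_add:

CREATE TABLE demo_add(insert_num int, a int, b string);
INSERT INTO TABLE demo_add VALUES (1, 1111, 'original');
-- Metadata-only change: the first row has no stored value for c.
ALTER TABLE demo_add ADD COLUMNS (c int);
INSERT INTO TABLE demo_add VALUES (2, 2222, 'new', 3333);
SELECT insert_num, a, b, c FROM demo_add;  -- row 1 returns c = NULL, row 2 returns 3333
DROP TABLE demo_add;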
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_vec_table_llap_io.q
index cdd4dd4acc..5777f39005 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_table_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_table_llap_io.q
@@ -18,9 +18,9 @@ set hive.llap.io.enabled=true;set hive.llap.io.encode.enabled=true;
 -- vectorized reading of TEXTFILE format files using the vector SERDE methods.
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n5(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n5;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -29,61 +29,61 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n1(insert_num int, a INT, b STRING);
-insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n1 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n1;
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n1;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n1 add columns(c int);
-insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
+insert into table table_add_int_permute_select_n1 VALUES (111, 80000, 'new', 80000);
 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n1;
 -- SELECT permutation columns to make sure NULL defaulting works right
---** select insert_num,a,b from table_add_int_permute_select;
---** select insert_num,a,b,c from table_add_int_permute_select;
---** select insert_num,c from table_add_int_permute_select;
+--** select insert_num,a,b from table_add_int_permute_select_n1;
+--** select insert_num,a,b,c from table_add_int_permute_select_n1;
+--** select insert_num,c from table_add_int_permute_select_n1;
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n1;
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_string_permute_select_n1(insert_num int, a INT, b STRING);
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n1 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n1;
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n1;
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n1 add columns(c int, d string);
-insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
+insert into table table_add_int_string_permute_select_n1 VALUES (111, 80000, 'new', 80000, 'filler');
 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n1;
 -- SELECT permutation columns to make sure NULL defaulting works right
---** select insert_num,a,b from table_add_int_string_permute_select;
---** select insert_num,a,b,c from table_add_int_string_permute_select;
---** select insert_num,a,b,c,d from table_add_int_string_permute_select;
---** select insert_num,a,c,d from table_add_int_string_permute_select;
---** select insert_num,a,d from table_add_int_string_permute_select;
---** select insert_num,c from table_add_int_string_permute_select;
---** select insert_num,d from table_add_int_string_permute_select;
+--** select insert_num,a,b from table_add_int_string_permute_select_n1;
+--** select insert_num,a,b,c from table_add_int_string_permute_select_n1;
+--** select insert_num,a,b,c,d from table_add_int_string_permute_select_n1;
+--** select insert_num,a,c,d from table_add_int_string_permute_select_n1;
+--** select insert_num,a,d from table_add_int_string_permute_select_n1;
+--** select insert_num,c from table_add_int_string_permute_select_n1;
+--** select insert_num,d from table_add_int_string_permute_select_n1;
-drop table table_add_int_string_permute_select;
+drop table table_add_int_string_permute_select_n1;
@@ -94,26 +94,26 @@ drop table table_add_int_string_permute_select;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
 --
-CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
+CREATE TABLE table_change_string_group_double_n1(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
-insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table table_change_string_group_double_n1 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n1;
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n1;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table table_change_string_group_double_n1 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
-insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
+insert into table table_change_string_group_double_n1 VALUES (111, 789.321, 789.321, 789.321, 'new');
 explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n1;
---** select insert_num,c1,c2,c3,b from table_change_string_group_double;
+--** select insert_num,c1,c2,c3,b from table_change_string_group_double_n1;
-drop table table_change_string_group_double;
+drop table table_change_string_group_double_n1;
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -122,23 +122,23 @@ drop table table_change_string_group_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
 --
-CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
+CREATE TABLE table_change_date_group_string_group_date_group_n1(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
-insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table table_change_date_group_string_group_date_group_n1 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n1;
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n1;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table table_change_date_group_string_group_date_group_n1 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
-insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table table_change_date_group_string_group_date_group_n1 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n1;
-drop table table_change_date_group_string_group_date_group;
+drop table table_change_date_group_string_group_date_group_n1;
@@ -153,42 +153,42 @@ drop table table_change_date_group_string_group_date_group;
 -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n1(insert_num int,
 c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint,
 b STRING);
-insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n1 SELECT insert_num,
 tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n1;
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n1;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_multi_ints_string_group_n1 replace columns (insert_num int,
 c1 STRING, c2 STRING, c3 STRING, c4 STRING,
 c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
 c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
 b STRING) ;
-insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n1 VALUES (111,
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
 'new');
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n1;
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n1;
-drop table table_change_numeric_group_string_group_multi_ints_string_group;
+drop table table_change_numeric_group_string_group_multi_ints_string_group_n1;
@@ -199,42 +199,42 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group;
 -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n1(insert_num int,
 c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double,
 b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_floating_string_group_n1 SELECT insert_num,
 decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n1;
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n1;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_floating_string_group_n1 replace columns (insert_num int,
 c1 STRING, c2 STRING, c3 STRING,
 c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
 c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
 b STRING);
-insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_floating_string_group_n1 VALUES (111,
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
 'new');
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n1;
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n1;
-drop table table_change_numeric_group_string_group_floating_string_group;
+drop table table_change_numeric_group_string_group_floating_string_group_n1;
 ------------------------------------------------------------------------------------------
@@ -245,37 +245,37 @@ drop table table_change_numeric_group_string_group_floating_string_group;
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
 -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
 --
-CREATE TABLE table_change_string_group_string_group_string(insert_num int,
+CREATE TABLE table_change_string_group_string_group_string_n1(insert_num int,
 c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50),
 b STRING);
-insert into table table_change_string_group_string_group_string SELECT insert_num,
+insert into table table_change_string_group_string_group_string_n1 SELECT insert_num,
 string2, string2, string2, string2, string2, string2, string2, string2, string2, string2,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n1;
---** select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string;
+--** select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n1;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_string_group_string replace columns (insert_num int,
+alter table table_change_string_group_string_group_string_n1 replace columns (insert_num int,
 c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
 c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
 c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ;
-insert into table table_change_string_group_string_group_string VALUES (111,
+insert into table table_change_string_group_string_group_string_n1 VALUES (111,
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'new');
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n1;
-drop table table_change_string_group_string_group_string;
+drop table table_change_string_group_string_group_string_n1;
@@ -290,43 +290,43 @@ drop table table_change_string_group_string_group_string;
 -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
 -- BIGINT, (DECIMAL, FLOAT, DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1(insert_num int,
 c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint,
 b STRING);
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1 SELECT insert_num,
 tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1;
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1 replace columns (insert_num int,
 c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
 c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
 c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
 c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
 b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1 VALUES (111,
 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
 80000, 90000000, 1234.5678, 9876.543, 789.321,
 90000000, 1234.5678, 9876.543, 789.321,
 1234.5678, 9876.543, 789.321,
 'new');
---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1;
-drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n1;
@@ -335,26 +335,26 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 -- DECIMAL, (FLOAT, DOUBLE) and
 -- FLOAT, (DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n1(insert_num int,
 c1 decimal(38,18), c2 decimal(38,18), c3 float,
 b STRING);
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n1 SELECT insert_num,
 decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n5;
 explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n1;
---** select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+--** select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n1;
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n1 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n1 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
---** select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+--** select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n1;
-drop table table_change_lower_to_higher_numeric_group_decimal_to_float;
+drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n1;
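Each statement above is paired with explain vectorization detail, which is what the expected .q.out files assert on: per operator it reports whether the work was vectorized, the conditions met, or a notVectorizedReason. A sketch of the probe against a hypothetical table demo_vec:

CREATE TABLE demo_vec(insert_num int, c1 double, b string) STORED AS TEXTFILE;
-- Prints the vectorization decision and chosen vector expressions per operator.
EXPLAIN VECTORIZATION DETAIL
SELECT insert_num, c1, b FROM demo_vec;
DROP TABLE demo_vec;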
-alter table part_add_int_permute_select add columns(c int); +alter table part_add_int_permute_select_n11 add columns(c int); -insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333); +insert into table part_add_int_permute_select_n11 partition(part=1) VALUES (2, 2222, 'new', 3333); explain vectorization detail -select insert_num,part,a,b from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n11; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_permute_select; -select insert_num,part,a,b,c from part_add_int_permute_select; -select insert_num,part,c from part_add_int_permute_select; +select insert_num,part,a,b from part_add_int_permute_select_n11; +select insert_num,part,a,b,c from part_add_int_permute_select_n11; +select insert_num,part,c from part_add_int_permute_select_n11; -drop table part_add_int_permute_select; +drop table part_add_int_permute_select_n11; -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT -- -- -CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_int_string_permute_select_n11(insert_num int, a INT, b STRING) PARTITIONED BY(part INT); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new'); +insert into table part_add_int_string_permute_select_n11 partition(part=1) VALUES (1, 1111, 'new'); -- Table-Non-Cascade ADD COLUMNS ... -alter table part_add_int_string_permute_select add columns(c int, d string); +alter table part_add_int_string_permute_select_n11 add columns(c int, d string); -insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); +insert into table part_add_int_string_permute_select_n11 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444'); explain vectorization detail -select insert_num,part,a,b from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n11; -- SELECT permutation columns to make sure NULL defaulting works right -select insert_num,part,a,b from part_add_int_string_permute_select; -select insert_num,part,a,b,c from part_add_int_string_permute_select; -select insert_num,part,a,b,c,d from part_add_int_string_permute_select; -select insert_num,part,a,c,d from part_add_int_string_permute_select; -select insert_num,part,a,d from part_add_int_string_permute_select; -select insert_num,part,c from part_add_int_string_permute_select; -select insert_num,part,d from part_add_int_string_permute_select; +select insert_num,part,a,b from part_add_int_string_permute_select_n11; +select insert_num,part,a,b,c from part_add_int_string_permute_select_n11; +select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n11; +select insert_num,part,a,c,d from part_add_int_string_permute_select_n11; +select insert_num,part,a,d from part_add_int_string_permute_select_n11; +select insert_num,part,c from part_add_int_string_permute_select_n11; +select insert_num,part,d from part_add_int_string_permute_select_n11; -drop table part_add_int_string_permute_select; +drop table part_add_int_string_permute_select_n11; @@ -86,21 +86,21 @@ drop table part_add_int_string_permute_select; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR) -- -CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b 
STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_string_group_double_n11(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data; +insert into table part_change_string_group_double_n11 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n33; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); +alter table part_change_string_group_double_n11 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING); -insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111; +insert into table part_change_string_group_double_n11 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n33 WHERE insert_num = 111; explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n11; -select insert_num,part,c1,c2,c3,b from part_change_string_group_double; +select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n11; -drop table part_change_string_group_double; +drop table part_change_string_group_double_n11; ------------------------------------------------------------------------------------------ -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP @@ -109,21 +109,21 @@ drop table part_change_string_group_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) -- -CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_date_group_string_group_date_timestamp_n11(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_date_group_string_group_date_timestamp_n11 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n33; -- Table-Non-Cascade CHANGE COLUMNS ... 
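The ALTER that follows retypes DATE/TIMESTAMP columns to the string group; each stored value is rendered in its canonical text form, and the CHAR(15)/VARCHAR(15) targets then truncate that text, which is what the "trunc" variants verify. On a reasonably recent Hive this looks like (demo_date2str is invented):

CREATE TABLE demo_date2str(id int, d date, t timestamp) STORED AS TEXTFILE;
insert into table demo_date2str values (1, DATE '2014-09-23', TIMESTAMP '2014-09-23 12:34:56');
alter table demo_date2str replace columns (id int, d string, t varchar(15));
select id, d, t from demo_date2str;  -- '2014-09-23' and '2014-09-23 12:3' (clipped to 15 chars)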
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); +alter table part_change_date_group_string_group_date_timestamp_n11 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING); -insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); +insert into table part_change_date_group_string_group_date_timestamp_n11 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n11; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n11; -drop table part_change_date_group_string_group_date_timestamp; +drop table part_change_date_group_string_group_date_timestamp_n11; @@ -139,39 +139,39 @@ drop table part_change_date_group_string_group_date_timestamp; -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n11(insert_num int, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint, c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n11 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n33; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
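Here the direction reverses: integer columns become STRING/CHAR/VARCHAR, so values are rendered as text and the CHAR(5)/VARCHAR(5) targets clip anything longer than five characters. For instance (demo_num2str is a placeholder):

CREATE TABLE demo_num2str(id int, v int) STORED AS TEXTFILE;
insert into table demo_num2str values (1, 1234567);
alter table demo_num2str replace columns (id int, v varchar(5));
select id, v from demo_num2str;  -- '12345'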
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_multi_ints_string_group_n11 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_multi_ints_string_group_n11 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n11; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n11; -drop table part_change_numeric_group_string_group_multi_ints_string_group; +drop table part_change_numeric_group_string_group_multi_ints_string_group_n11; @@ -182,39 +182,39 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n11(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING) PARTITIONED BY(part INT); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num, +insert into table part_change_numeric_group_string_group_floating_string_group_n11 partition(part=1) SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n33; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table part_change_numeric_group_string_group_floating_string_group_n11 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111, +insert into table part_change_numeric_group_string_group_floating_string_group_n11 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n11; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n11; -drop table part_change_numeric_group_string_group_floating_string_group; +drop table part_change_numeric_group_string_group_floating_string_group_n11; @@ -226,37 +226,37 @@ drop table part_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE part_change_string_group_string_group_string(insert_num int, +CREATE TABLE part_change_string_group_string_group_string_n11(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT); -insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num, +insert into table part_change_string_group_string_group_string_n11 partition(part=1) SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n33; -select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
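The string-to-string cases that follow are about length semantics rather than parsing: moving STRING into CHAR(9)/VARCHAR(9) truncates longer values, and CHAR is additionally blank-padded to its declared length. Something like (demo_s2s is invented):

CREATE TABLE demo_s2s(id int, v string) STORED AS TEXTFILE;
insert into table demo_s2s values (1, 'abcdefghijkl');
alter table demo_s2s replace columns (id int, v char(9));
select id, v from demo_s2s;  -- 'abcdefghi'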
-alter table part_change_string_group_string_group_string replace columns (insert_num int, +alter table part_change_string_group_string_group_string_n11 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111, +insert into table part_change_string_group_string_group_string_n11 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n11; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n11; -drop table part_change_string_group_string_group_string; +drop table part_change_string_group_string_group_string_n11; ------------------------------------------------------------------------------------------ @@ -270,31 +270,31 @@ drop table part_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11 partition(part=1) SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n33; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
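"Table-Non-Cascade" in these comments refers to the default behavior of ALTER TABLE on partitioned tables: without the CASCADE keyword only the table-level schema changes, existing partitions keep the column metadata they were created with, and the reader reconciles the old partition schema with the new table schema at scan time. The CASCADE form pushes the change into every partition's metadata as well (some_part_table is a placeholder name):

-- default (RESTRICT): existing partitions keep their original column list
alter table some_part_table replace columns (insert_num int, c1 bigint, b string);
-- CASCADE: partition metadata is updated along with the table's
alter table some_part_table replace columns (insert_num int, c1 bigint, b string) CASCADE;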
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111, +insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11 partition(part=1) VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, @@ -302,11 +302,11 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11; -drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11; @@ -315,26 +315,26 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n11(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING) PARTITIONED BY(part INT); -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num, +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n11 partition(part=1) SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n33; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; +alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n11 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ; -insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); +insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n11 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new'); explain vectorization detail -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n11; -select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n11; -drop table part_change_lower_to_higher_numeric_group_decimal_to_float; \ No newline at end of file +drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n11; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q index 55163c8297..2b4fe22204 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q @@ -24,103 +24,103 @@ set hive.llap.io.enabled=false; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_struct1_n4(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); -CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_a_txt_n4(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n4; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt; +insert into table part_change_various_various_struct1_n4 partition(part=1) select * from complex_struct1_a_txt_n4; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
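The complex-type file applies the same ideas inside a STRUCT: fields can be retyped or appended, and rows written under the narrower struct surface the new fields as NULL. In outline (demo_struct is invented; the FROM-less SELECT assumes Hive 0.13 or later):

CREATE TABLE demo_struct(id int, s struct<a:int>)
row format delimited fields terminated by '|' collection items terminated by ',' stored as textfile;
insert into table demo_struct select 1, named_struct('a', 10);
alter table demo_struct replace columns (id int, s struct<a:int, b:string>);
select id, s.a, s.b from demo_struct;  -- s.b is NULL for the pre-existing row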
-alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING); +alter table part_change_various_various_struct1_n4 replace columns (insert_num int, s1 STRUCT, b STRING); -CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_b_txt_n4(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n4; -insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt; +insert into table part_change_various_various_struct1_n4 partition(part=2) select * from complex_struct1_b_txt_n4; -CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_c_txt_n4(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n4; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt; +insert into table part_change_various_various_struct1_n4 partition(part=1) select * from complex_struct1_c_txt_n4; explain vectorization detail -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n4; -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n4; -drop table part_change_various_various_struct1; +drop table part_change_various_various_struct1_n4; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT -- -CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_various_various_struct2_n4(insert_num int, b STRING) PARTITIONED BY(part INT); -insert into table part_add_various_various_struct2 partition(part=1) +insert into table part_add_various_various_struct2_n4 partition(part=1) values(1, 'original'), (2, 'original'); -select insert_num,part,b from part_add_various_various_struct2; +select insert_num,part,b from part_add_various_various_struct2_n4; -- Table-Non-Cascade ADD COLUMN ... 
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT); +alter table part_add_various_various_struct2_n4 ADD columns (s2 STRUCT); -CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_a_txt_n4(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n4; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt; +insert into table part_add_various_various_struct2_n4 partition(part=1) select * from complex_struct2_a_txt_n4; -CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_b_txt_n4(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n4; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt; +insert into table part_add_various_various_struct2_n4 partition(part=2) select * from complex_struct2_b_txt_n4; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT); +alter table part_add_various_various_struct2_n4 REPLACE columns (insert_num int, b STRING, s2 STRUCT); -CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_c_txt_n4(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n4; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt; +insert into table part_add_various_various_struct2_n4 partition(part=2) select * from complex_struct2_c_txt_n4; -CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_d_txt_n4(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n4; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt; +insert into table part_add_various_various_struct2_n4 partition(part=1) select * from complex_struct2_d_txt_n4; explain vectorization detail -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n4; -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n4; -drop table part_add_various_various_struct2; +drop table part_add_various_various_struct2_n4; @@ -128,40 +128,40 @@ drop table part_add_various_various_struct2; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns -- -CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); +CREATE TABLE part_add_to_various_various_struct4_n4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); -CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_a_txt_n4(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n4; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt; +insert into table part_add_to_various_various_struct4_n4 partition(part=1) select * from complex_struct4_a_txt_n4; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n4; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT); +alter table part_add_to_various_various_struct4_n4 replace columns (insert_num int, b STRING, s3 STRUCT); -CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_b_txt_n4(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n4; -insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt; +insert into table part_add_to_various_various_struct4_n4 partition(part=2) select * from complex_struct4_b_txt_n4; -CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_c_txt_n4(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n4; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt; +insert into table part_add_to_various_various_struct4_n4 partition(part=1) select * from complex_struct4_c_txt_n4; explain vectorization detail -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n4; -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n4; -drop table part_add_to_various_various_struct4; +drop table part_add_to_various_various_struct4_n4; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex_llap_io.q index f349a8eb42..c1e503656f 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex_llap_io.q @@ -24,109 +24,109 @@ set hive.llap.io.enabled=true;set hive.llap.io.encode.enabled=true; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_struct1_n7(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT); -CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_a_txt_n7(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table complex_struct1_a_txt_n7; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_a_txt; +insert into table 
part_change_various_various_struct1_n7 partition(part=1) select * from complex_struct1_a_txt_n7; explain vectorization detail -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n7; ---** select insert_num,part,s1,b from part_change_various_various_struct1; +--** select insert_num,part,s1,b from part_change_various_various_struct1_n7; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_struct1 replace columns (insert_num int, s1 STRUCT, b STRING); +alter table part_change_various_various_struct1_n7 replace columns (insert_num int, s1 STRUCT, b STRING); -CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_b_txt_n7(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table complex_struct1_b_txt_n7; -insert into table part_change_various_various_struct1 partition(part=2) select * from complex_struct1_b_txt; +insert into table part_change_various_various_struct1_n7 partition(part=2) select * from complex_struct1_b_txt_n7; -CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING) +CREATE TABLE complex_struct1_c_txt_n7(insert_num int, s1 STRUCT, b STRING) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table complex_struct1_c_txt_n7; -insert into table part_change_various_various_struct1 partition(part=1) select * from complex_struct1_c_txt; +insert into table part_change_various_various_struct1_n7 partition(part=1) select * from complex_struct1_c_txt_n7; explain vectorization detail -select insert_num,part,s1,b from part_change_various_various_struct1; +select insert_num,part,s1,b from part_change_various_various_struct1_n7; ---** select insert_num,part,s1,b from part_change_various_various_struct1; +--** select insert_num,part,s1,b from part_change_various_various_struct1_n7; -drop table part_change_various_various_struct1; +drop table part_change_various_various_struct1_n7; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT -- -CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_add_various_various_struct2_n7(insert_num int, b STRING) PARTITIONED BY(part INT); -insert into table part_add_various_various_struct2 partition(part=1) +insert into table part_add_various_various_struct2_n7 partition(part=1) values(1, 'original'), (2, 'original'); explain vectorization detail -select insert_num,part,b from part_add_various_various_struct2; +select insert_num,part,b from part_add_various_various_struct2_n7; ---** select insert_num,part,b from part_add_various_various_struct2; +--** select insert_num,part,b from part_add_various_various_struct2_n7; -- Table-Non-Cascade ADD COLUMN ... 
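This _llap_io variant re-runs the same scenarios with the LLAP IO path switched on, so schema reconciliation is exercised in the encoded-cache reader as well as the plain row SERDE path; apart from the masked-out selects, the functional difference is only the header settings:

set hive.llap.io.enabled=true;
set hive.llap.io.encode.enabled=true;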
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT); +alter table part_add_various_various_struct2_n7 ADD columns (s2 STRUCT); -CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_a_txt_n7(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_a.txt' overwrite into table complex_struct2_a_txt_n7; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_a_txt; +insert into table part_add_various_various_struct2_n7 partition(part=1) select * from complex_struct2_a_txt_n7; -CREATE TABLE complex_struct2_b_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_b_txt_n7(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_b.txt' overwrite into table complex_struct2_b_txt_n7; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_b_txt; +insert into table part_add_various_various_struct2_n7 partition(part=2) select * from complex_struct2_b_txt_n7; ---** select insert_num,part,b,s2 from part_add_various_various_struct2; +--** select insert_num,part,b,s2 from part_add_various_various_struct2_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_add_various_various_struct2 REPLACE columns (insert_num int, b STRING, s2 STRUCT); +alter table part_add_various_various_struct2_n7 REPLACE columns (insert_num int, b STRING, s2 STRUCT); -CREATE TABLE complex_struct2_c_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_c_txt_n7(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_c.txt' overwrite into table complex_struct2_c_txt_n7; -insert into table part_add_various_various_struct2 partition(part=2) select * from complex_struct2_c_txt; +insert into table part_add_various_various_struct2_n7 partition(part=2) select * from complex_struct2_c_txt_n7; -CREATE TABLE complex_struct2_d_txt(insert_num int, b STRING, s2 STRUCT) +CREATE TABLE complex_struct2_d_txt_n7(insert_num int, b STRING, s2 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct2_d.txt' overwrite into table complex_struct2_d_txt_n7; -insert into table part_add_various_various_struct2 partition(part=1) select * from complex_struct2_d_txt; +insert into table part_add_various_various_struct2_n7 partition(part=1) select * from complex_struct2_d_txt_n7; explain vectorization detail -select insert_num,part,b,s2 from part_add_various_various_struct2; +select insert_num,part,b,s2 from part_add_various_various_struct2_n7; ---** select insert_num,part,b,s2 from part_add_various_various_struct2; +--** select insert_num,part,b,s2 from part_add_various_various_struct2_n7; -drop table part_add_various_various_struct2; +drop table part_add_various_various_struct2_n7; @@ -134,43 +134,43 @@ drop table part_add_various_various_struct2; -- -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: ADD COLUMNS to STRUCT type as LAST column of 3 columns -- -CREATE TABLE part_add_to_various_various_struct4(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); +CREATE TABLE part_add_to_various_various_struct4_n7(insert_num int, b STRING, s3 STRUCT) PARTITIONED BY(part INT); -CREATE TABLE complex_struct4_a_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_a_txt_n7(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_a.txt' overwrite into table complex_struct4_a_txt_n7; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_a_txt; +insert into table part_add_to_various_various_struct4_n7 partition(part=1) select * from complex_struct4_a_txt_n7; explain vectorization detail -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n7; ---** select insert_num,part,b,s3 from part_add_to_various_various_struct4; 
+--** select insert_num,part,b,s3 from part_add_to_various_various_struct4_n7; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_add_to_various_various_struct4 replace columns (insert_num int, b STRING, s3 STRUCT); +alter table part_add_to_various_various_struct4_n7 replace columns (insert_num int, b STRING, s3 STRUCT); -CREATE TABLE complex_struct4_b_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_b_txt_n7(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_b.txt' overwrite into table complex_struct4_b_txt_n7; -insert into table part_add_to_various_various_struct4 partition(part=2) select * from complex_struct4_b_txt; +insert into table part_add_to_various_various_struct4_n7 partition(part=2) select * from complex_struct4_b_txt_n7; -CREATE TABLE complex_struct4_c_txt(insert_num int, b STRING, s3 STRUCT) +CREATE TABLE complex_struct4_c_txt_n7(insert_num int, b STRING, s3 STRUCT) row format delimited fields terminated by '|' collection items terminated by ',' map keys terminated by ':' stored as textfile; -load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt; +load data local inpath '../../data/files/schema_evolution/complex_struct4_c.txt' overwrite into table complex_struct4_c_txt_n7; -insert into table part_add_to_various_various_struct4 partition(part=1) select * from complex_struct4_c_txt; +insert into table part_add_to_various_various_struct4_n7 partition(part=1) select * from complex_struct4_c_txt_n7; explain vectorization detail -select insert_num,part,b,s3 from part_add_to_various_various_struct4; +select insert_num,part,b,s3 from part_add_to_various_various_struct4_n7; ---** select insert_num,part,b,s3 from part_add_to_various_various_struct4; +--** select insert_num,part,b,s3 from part_add_to_various_various_struct4_n7; -drop table part_add_to_various_various_struct4; +drop table part_add_to_various_various_struct4_n7; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q index d9ff65e3b3..ae87ba0db1 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive.q @@ -24,13 +24,13 @@ set hive.llap.io.enabled=false; -- -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_n43(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n43; -CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE schema_evolution_data_2_n15(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2; +load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n15; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: @@ -40,7 +40,7 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data_ -- (BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> INT –2147483648 to 2147483647 and -- (BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT -9223372036854775808 to 9223372036854775807 -- -CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, +CREATE TABLE part_change_various_various_boolean_to_bigint_n7(insert_num int, c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP, c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 TIMESTAMP, c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 TIMESTAMP, @@ -48,18 +48,18 @@ CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int, c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n7 partition(part=1) SELECT insert_num, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, boolean_str, timestamp1, boolean1, smallint1, int1, bigint1, float1, double1, decimal1, tinyint_str, tinyint_str, tinyint_str, timestamp1, boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1, boolean1, tinyint1, smallint1, bigint1, 
float1, double1, decimal1, int_str, int_str, int_str, timestamp1, boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str, bigint_str, bigint_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n43; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n7; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_boolean_to_bigint replace columns (insert_num int, +alter table part_change_various_various_boolean_to_bigint_n7 replace columns (insert_num int, c1 BOOLEAN, c2 BOOLEAN, c3 BOOLEAN, c4 BOOLEAN, c5 BOOLEAN, c6 BOOLEAN, c7 BOOLEAN, c8 BOOLEAN, c9 BOOLEAN, c10 TINYINT, c11 TINYINT, c12 TINYINT, c13 TINYINT, c14 TINYINT, c15 TINYINT, c16 TINYINT, c17 TINYINT, c18 TINYINT, c19 TINYINT, c20 TINYINT, c21 SMALLINT, c22 SMALLINT, c23 SMALLINT, c24 SMALLINT, c25 SMALLINT, c26 SMALLINT, c27 SMALLINT, c28 SMALLINT, c29 SMALLINT, c30 SMALLINT, c31 SMALLINT, @@ -67,20 +67,20 @@ alter table part_change_various_various_boolean_to_bigint replace columns (inser c43 BIGINT, c44 BIGINT, c45 BIGINT, c46 BIGINT, c47 BIGINT, c48 BIGINT, c49 BIGINT, c50 BIGINT, c51 BIGINT, c52 BIGINT, c53 BIGINT, b STRING); -insert into table part_change_various_various_boolean_to_bigint partition(part=1) SELECT insert_num, +insert into table part_change_various_various_boolean_to_bigint_n7 partition(part=1) SELECT insert_num, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, boolean1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, - 'new' FROM schema_evolution_data; + 'new' FROM schema_evolution_data_n43; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint; +select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint_n7; -drop table part_change_various_various_boolean_to_bigint; +drop table part_change_various_various_boolean_to_bigint_n7; @@ -90,39 +90,39 @@ drop table part_change_various_various_boolean_to_bigint; -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, DOUBLE, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> FLOAT and -- (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DECIMAL, STRING, CHAR, VARCHAR, TIMESTAMP) --> DOUBLE and -- -CREATE TABLE part_change_various_various_decimal_to_double(insert_num int, +CREATE TABLE part_change_various_various_decimal_to_double_n7(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 STRING, c9 CHAR(25), c10 VARCHAR(25), c11 TIMESTAMP, c12 BOOLEAN, c13 TINYINT, c14 SMALLINT, c15 INT, c16 BIGINT, c17 DECIMAL(38,18), c18 DOUBLE, c19 STRING, c20 CHAR(25), c21 VARCHAR(25), c22 TIMESTAMP, c23 BOOLEAN, c24 TINYINT, c25 SMALLINT, c26 INT, c27 BIGINT, c28 DECIMAL(38,18), c29 FLOAT, c30 STRING, c31 CHAR(25), c32 VARCHAR(25), c33 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n7 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1, boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n43; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
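The ALTER that follows retypes string columns to DECIMAL/FLOAT/DOUBLE; each stored value is parsed on read, and text that does not parse as a number comes back as NULL rather than failing the query. Minimal form (demo_str2num is invented):

CREATE TABLE demo_str2num(id int, v string) STORED AS TEXTFILE;
insert into table demo_str2num values (1, '123.25'), (2, 'nine');
alter table demo_str2num replace columns (id int, v double);
select id, v from demo_str2num;  -- 123.25 and NULL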
-alter table part_change_various_various_decimal_to_double replace columns (insert_num int, +alter table part_change_various_various_decimal_to_double_n7 replace columns (insert_num int, c1 DECIMAL(38,18), c2 DECIMAL(38,18), c3 DECIMAL(38,18), c4 DECIMAL(38,18), c5 DECIMAL(38,18), c6 DECIMAL(38,18), c7 DECIMAL(38,18), c8 DECIMAL(38,18), c9 DECIMAL(38,18), c10 DECIMAL(38,18), c11 DECIMAL(38,18), c12 FLOAT, c13 FLOAT, c14 FLOAT, c15 FLOAT, c16 FLOAT, c17 FLOAT, c18 FLOAT, c19 FLOAT, c20 FLOAT, c21 FLOAT, c22 FLOAT, c23 DOUBLE, c24 DOUBLE, c25 DOUBLE, c26 DOUBLE, c27 DOUBLE, c28 DOUBLE, c29 DOUBLE, c30 DOUBLE, c31 DOUBLE, c32 DOUBLE, c33 DOUBLE, b STRING); -insert into table part_change_various_various_decimal_to_double partition(part=1) SELECT insert_num, +insert into table part_change_various_various_decimal_to_double_n7 partition(part=1) SELECT insert_num, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, - 'new' FROM schema_evolution_data_2 WHERE insert_num=111; + 'new' FROM schema_evolution_data_2_n15 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double_n7; -drop table part_change_various_various_decimal_to_double; +drop table part_change_various_various_decimal_to_double_n7; @@ -130,80 +130,80 @@ drop table part_change_various_various_decimal_to_double; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (BOOLEAN, TINYINT, SMALLINT, INT, LONG, FLOAT, DOUBLE, DECIMAL(38,18), STRING, CHAR, VARCHAR, DATE) --> TIMESTAMP -- -CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_timestamp_n7(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_timestamp_n7 partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n43; -select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n7; -- Table-Non-Cascade CHANGE COLUMNS ... -alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); +alter table part_change_various_various_timestamp_n7 replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING); -insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_timestamp_n7 partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n15 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp; +select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp_n7; -drop table part_change_various_various_timestamp; +drop table part_change_various_various_timestamp_n7; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: (STRING, CHAR, VARCHAR, TIMESTAMP --> DATE -- -CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_various_various_date_n7(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data; +insert into table part_change_various_various_date_n7 partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n43; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); +alter table part_change_various_various_date_n7 replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING); -insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111; +insert into table part_change_various_various_date_n7 partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n15 WHERE insert_num=111; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n7; -select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date; +select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date_n7; -drop table part_change_various_various_date; +drop table part_change_various_various_date_n7; -- -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Same Type (CHAR, VARCHAR, DECIMAL) --> Different maxLength or precision/scale -- -CREATE TABLE part_change_same_type_different_params(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); +CREATE TABLE part_change_same_type_different_params_n7(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) PARTITIONED BY(part INT); -CREATE TABLE same_type1_a_txt(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) +CREATE TABLE same_type1_a_txt_n7(insert_num int, c1 CHAR(12), c2 CHAR(25), c3 VARCHAR(25), c4 VARCHAR(10), c5 DECIMAL(12,4), c6 DECIMAL(20,10), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_a.txt' overwrite into table same_type1_a_txt_n7; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_a_txt; +insert into table part_change_same_type_different_params_n7 partition(part=1) select * from same_type1_a_txt_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n7; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table part_change_same_type_different_params replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); +alter table part_change_same_type_different_params_n7 replace columns (insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING); -CREATE TABLE same_type1_b_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_b_txt_n7(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_b.txt' overwrite into table same_type1_b_txt_n7; -insert into table part_change_same_type_different_params partition(part=1) select * from same_type1_b_txt; +insert into table part_change_same_type_different_params_n7 partition(part=1) select * from same_type1_b_txt_n7; -CREATE TABLE same_type1_c_txt(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) +CREATE TABLE same_type1_c_txt_n7(insert_num int, c1 CHAR(8), c2 CHAR(32), c3 VARCHAR(15), c4 VARCHAR(18), c5 DECIMAL(10,2), c6 DECIMAL(25,15), b STRING) row format delimited fields terminated by '|' stored as textfile; -load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt; +load data local inpath '../../data/files/schema_evolution/same_type1_c.txt' overwrite into table same_type1_c_txt_n7; -insert into table part_change_same_type_different_params partition(part=2) select * from same_type1_c_txt; +insert into table part_change_same_type_different_params_n7 partition(part=2) select * from same_type1_c_txt_n7; explain vectorization detail -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n7; -select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params; +select insert_num,part,c1,c2,c3,c4,c5,c6,b from part_change_same_type_different_params_n7; -drop table part_change_same_type_different_params; +drop table part_change_same_type_different_params_n7; diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive_llap_io.q index eac3d0df12..686b2ed4e9 100644 --- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive_llap_io.q +++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive_llap_io.q @@ -24,13 +24,13 @@ set hive.llap.io.enabled=true;set hive.llap.io.encode.enabled=true; -- -- -CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string) +CREATE TABLE 
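Every hunk in the file above renames tables inside the same test skeleton: write typed data, run ALTER TABLE ... REPLACE COLUMNS without CASCADE, then read the old partition back through the new column types. A minimal sketch of that round trip, using a hypothetical demo_ts table rather than any of the renamed test tables:

create table demo_ts(insert_num int, c1 STRING) partitioned by (part int);
insert into table demo_ts partition(part=1) values (1, '2000-01-01 00:00:00');
-- non-cascade: only the table-level schema changes; partition part=1 keeps its
-- STRING metadata and each stored value is converted to TIMESTAMP during the scan
alter table demo_ts replace columns (insert_num int, c1 TIMESTAMP);
select insert_num, c1 from demo_ts;
drop table demo_ts;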
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive_llap_io.q
index eac3d0df12..686b2ed4e9 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_primitive_llap_io.q
@@ -24,13 +24,13 @@ set hive.llap.io.enabled=true;
 set hive.llap.io.encode.enabled=true;
 --
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n2;

-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_2_n0(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into table schema_evolution_data_2_n0;

 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various:
@@ -54,7 +54,7 @@ insert into table part_change_various_various_boolean_to_bigint partition(part=1
 boolean1, tinyint1, int1, bigint1, float1, double1, decimal1, smallint_str, smallint_str, smallint_str, timestamp1,
 boolean1, tinyint1, smallint1, bigint1, float1, double1, decimal1, int_str, int_str, int_str, timestamp1,
 boolean1, tinyint1, smallint1, int1, float1, double1, decimal1, bigint_str, bigint_str, bigint_str, timestamp1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n2;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
@@ -76,7 +76,7 @@ insert into table part_change_various_various_boolean_to_bigint partition(part=1
 smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1, smallint1,
 int1, int1, int1, int1, int1, int1, int1, int1, int1, int1, int1,
 bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1, bigint1,
- 'new' FROM schema_evolution_data;
+ 'new' FROM schema_evolution_data_n2;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b from part_change_various_various_boolean_to_bigint;
@@ -103,7 +103,7 @@ insert into table part_change_various_various_decimal_to_double partition(part=1
 boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal_str, decimal_str, decimal_str, timestamp1,
 boolean1, tinyint1, smallint1, int1, bigint1, decimal1, double1, float_str, float_str, float_str, timestamp1,
 boolean1, tinyint1, smallint1, int1, bigint1, decimal1, float1, double_str, double_str, double_str, timestamp1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n2;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
@@ -121,7 +121,7 @@ insert into table part_change_various_various_decimal_to_double partition(part=1
 decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1, decimal1,
 float1, float1, float1, float1, float1, float1, float1, float1, float1, float1, float1,
 double1, double1, double1, double1, double1, double1, double1, double1, double1, double1, double1,
- 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
+ 'new' FROM schema_evolution_data_2_n0 WHERE insert_num=111;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b from part_change_various_various_decimal_to_double;
@@ -138,7 +138,7 @@ drop table part_change_various_various_decimal_to_double;
 --
 CREATE TABLE part_change_various_various_timestamp(insert_num int, c1 BOOLEAN, c2 TINYINT, c3 SMALLINT, c4 INT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 DECIMAL(38,18), c9 STRING, c10 CHAR(25), c11 VARCHAR(25), c12 DATE, b STRING) PARTITIONED BY(part INT);

-insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data;
+insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, boolean1, tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, timestamp_str, timestamp_str, timestamp_str, date1, 'original' FROM schema_evolution_data_n2;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
@@ -148,7 +148,7 @@ select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change
 -- Table-Non-Cascade CHANGE COLUMNS ...
 alter table part_change_various_various_timestamp replace columns (insert_num int, c1 TIMESTAMP, c2 TIMESTAMP, c3 TIMESTAMP, c4 TIMESTAMP, c5 TIMESTAMP, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, c11 TIMESTAMP, c12 TIMESTAMP, b STRING);

-insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
+insert into table part_change_various_various_timestamp partition(part=1) SELECT insert_num, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'new' FROM schema_evolution_data_2_n0 WHERE insert_num=111;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from part_change_various_various_timestamp;
@@ -162,7 +162,7 @@ drop table part_change_various_various_timestamp;
 --
 CREATE TABLE part_change_various_various_date(insert_num int, c1 STRING, c2 CHAR(25), c3 VARCHAR(25), c4 TIMESTAMP, b STRING) PARTITIONED BY(part INT);

-insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data;
+insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date_str, date_str, date_str, timestamp1, 'original' FROM schema_evolution_data_n2;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
@@ -172,7 +172,7 @@ select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
 -- Table-Non-Cascade CHANGE COLUMNS ...
 alter table part_change_various_various_date replace columns (insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, b STRING);

-insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2 WHERE insert_num=111;
+insert into table part_change_various_various_date partition(part=1) SELECT insert_num, date1, date1, date1, date1, 'new' FROM schema_evolution_data_2_n0 WHERE insert_num=111;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date;
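In the _llap_io twin above only the seed tables gain a suffix; the part_change_* tables keep their old names, and the file differs from the base variant only in its settings preamble. The two switches that distinguish it, exactly as they appear in the hunk headers:

set hive.llap.io.enabled=true;
set hive.llap.io.encode.enabled=true;
-- the non-LLAP variant later in this patch runs the same statements with
-- hive.llap.io.enabled=false, taking the plain vectorized row-SERDE TEXTFILE path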
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_llap_io.q
index 9fa6908c91..389353a021 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_llap_io.q
@@ -20,9 +20,9 @@ set hive.llap.io.enabled=true;
 set hive.llap.io.encode.enabled=true;

 -- vectorized reading of TEXTFILE format files using the row SERDE methods.
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n0(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n0;

 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -98,7 +98,7 @@ drop table part_add_int_string_permute_select;
 --
 CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT);

-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n0;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
@@ -108,7 +108,7 @@ select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
 -- Table-Non-Cascade CHANGE COLUMNS ...
 alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);

-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
+insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n0 WHERE insert_num = 111;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
@@ -126,7 +126,7 @@ drop table part_change_string_group_double;
 --
 CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT);

-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n0;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
@@ -169,7 +169,7 @@ insert into table part_change_numeric_group_string_group_multi_ints_string_group
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n0;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
@@ -215,7 +215,7 @@ insert into table part_change_numeric_group_string_group_floating_string_group p
 decimal1, float1, double1,
 decimal1, float1, double1,
 decimal1, float1, double1,
 decimal1, float1, double1,
 decimal1, float1, double1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n0;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
@@ -261,7 +261,7 @@ insert into table part_change_string_group_string_group_string partition(part=1)
 string2, string2, string2, string2,
 string2, string2, string2,
 string2, string2, string2,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n0;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string;
@@ -311,7 +311,7 @@ insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint pa
 smallint1, smallint1, smallint1, smallint1, smallint1,
 int1, int1, int1, int1,
 bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n0;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
@@ -355,7 +355,7 @@ CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_n
 insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num,
 decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n0;

 explain vectorization detail
 select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
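The STRING_GROUP -> DOUBLE subsection touched above is a good illustration of what all of these read-side conversions mean in practice: a partition written as text is re-parsed under the new numeric type on every scan. A compact sketch with a hypothetical demo table (the 789.321 literal is the same value the tests feed in):

create table demo_str_double(insert_num int, c1 string) partitioned by (part int);
insert into table demo_str_double partition(part=1) values (1, '789.321');
alter table demo_str_double replace columns (insert_num int, c1 double);
-- the stored text '789.321' is parsed to the double 789.321 at read time
select insert_num, c1 from demo_str_double;
drop table demo_str_double;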
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
index 3059604dcc..5256597a98 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
@@ -18,9 +18,9 @@ set hive.llap.io.enabled=false;

 -- vectorized reading of TEXTFILE format files using the row SERDE methods.
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n24(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n24;

 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -29,51 +29,51 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n8(insert_num int, a INT, b STRING);

-insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n8 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n24;

 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n8 add columns(c int);

-insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
+insert into table table_add_int_permute_select_n8 VALUES (111, 80000, 'new', 80000);

 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n8;

 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n8;
+select insert_num,a,b,c from table_add_int_permute_select_n8;
+select insert_num,c from table_add_int_permute_select_n8;

-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n8;


 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_string_permute_select_n8(insert_num int, a INT, b STRING);

-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n8 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n24;

 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n8 add columns(c int, d string);

-insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
+insert into table table_add_int_string_permute_select_n8 VALUES (111, 80000, 'new', 80000, 'filler');

 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n8;

 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_string_permute_select;
-select insert_num,a,c,d from table_add_int_string_permute_select;
-select insert_num,a,d from table_add_int_string_permute_select;
-select insert_num,c from table_add_int_string_permute_select;
-select insert_num,d from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n8;
+select insert_num,a,b,c from table_add_int_string_permute_select_n8;
+select insert_num,a,b,c,d from table_add_int_string_permute_select_n8;
+select insert_num,a,c,d from table_add_int_string_permute_select_n8;
+select insert_num,a,d from table_add_int_string_permute_select_n8;
+select insert_num,c from table_add_int_string_permute_select_n8;
+select insert_num,d from table_add_int_string_permute_select_n8;

-drop table table_add_int_string_permute_select;
+drop table table_add_int_string_permute_select_n8;
@@ -84,21 +84,21 @@ drop table table_add_int_string_permute_select;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
 --
-CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
+CREATE TABLE table_change_string_group_double_n8(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);

-insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table table_change_string_group_double_n8 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n24;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table table_change_string_group_double_n8 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);

-insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
+insert into table table_change_string_group_double_n8 VALUES (111, 789.321, 789.321, 789.321, 'new');

 explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n8;

-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n8;

-drop table table_change_string_group_double;
+drop table table_change_string_group_double_n8;

 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -107,18 +107,18 @@ drop table table_change_string_group_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
 --
-CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
+CREATE TABLE table_change_date_group_string_group_date_group_n8(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);

-insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table table_change_date_group_string_group_date_group_n8 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n24;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table table_change_date_group_string_group_date_group_n8 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);

-insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table table_change_date_group_string_group_date_group_n8 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n8;

-drop table table_change_date_group_string_group_date_group;
+drop table table_change_date_group_string_group_date_group_n8;
@@ -133,39 +133,39 @@ drop table table_change_date_group_string_group_date_group;
 -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n8(insert_num int,
 c1 tinyint, c2 smallint, c3 int, c4 bigint,
 c5 tinyint, c6 smallint, c7 int, c8 bigint,
 c9 tinyint, c10 smallint, c11 int, c12 bigint,
 c13 tinyint, c14 smallint, c15 int, c16 bigint,
 c17 tinyint, c18 smallint, c19 int, c20 bigint,
 b STRING);

-insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n8 SELECT insert_num,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n24;

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n8;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_multi_ints_string_group_n8 replace columns (insert_num int,
 c1 STRING, c2 STRING, c3 STRING, c4 STRING,
 c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50),
 c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
 c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50),
 c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
 b STRING) ;

-insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n8 VALUES (111,
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler', 'filler',
 'new');

 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n8;

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n8;

-drop table table_change_numeric_group_string_group_multi_ints_string_group;
+drop table table_change_numeric_group_string_group_multi_ints_string_group_n8;
@@ -176,39 +176,39 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group;
 -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n8(insert_num int,
 c1 decimal(38,18), c2 float, c3 double,
 c4 decimal(38,18), c5 float, c6 double,
 c7 decimal(38,18), c8 float, c9 double,
 c10 decimal(38,18), c11 float, c12 double,
 c13 decimal(38,18), c14 float, c15 double,
 b STRING);

-insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_floating_string_group_n8 SELECT insert_num,
 decimal1, float1, double1,
 decimal1, float1, double1,
 decimal1, float1, double1,
 decimal1, float1, double1,
 decimal1, float1, double1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n24;

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n8;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table table_change_numeric_group_string_group_floating_string_group_n8 replace columns (insert_num int,
 c1 STRING, c2 STRING, c3 STRING,
 c4 CHAR(50), c5 CHAR(50), c6 CHAR(50),
 c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
 c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50),
 c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
 b STRING);

-insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111,
+insert into table table_change_numeric_group_string_group_floating_string_group_n8 VALUES (111,
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'new');

 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n8;

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n8;

-drop table table_change_numeric_group_string_group_floating_string_group;
+drop table table_change_numeric_group_string_group_floating_string_group_n8;

 ------------------------------------------------------------------------------------------
@@ -219,34 +219,34 @@ drop table table_change_numeric_group_string_group_floating_string_group;
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
 --   CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
 --
-CREATE TABLE table_change_string_group_string_group_string(insert_num int,
+CREATE TABLE table_change_string_group_string_group_string_n8(insert_num int,
 c1 string, c2 string, c3 string, c4 string,
 c5 CHAR(50), c6 CHAR(50), c7 CHAR(50),
 c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING);

-insert into table table_change_string_group_string_group_string SELECT insert_num,
+insert into table table_change_string_group_string_group_string_n8 SELECT insert_num,
 string2, string2, string2, string2,
 string2, string2, string2,
 string2, string2, string2,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n24;

-select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n8;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_string_group_string replace columns (insert_num int,
+alter table table_change_string_group_string_group_string_n8 replace columns (insert_num int,
 c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
 c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
 c8 CHAR(50), c9 CHAR(9), c10 STRING,
 b STRING) ;

-insert into table table_change_string_group_string_group_string VALUES (111,
+insert into table table_change_string_group_string_group_string_n8 VALUES (111,
 'filler', 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'filler', 'filler', 'filler',
 'new');

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n8;

-drop table table_change_string_group_string_group_string;
+drop table table_change_string_group_string_group_string_n8;
@@ -261,40 +261,40 @@ drop table table_change_string_group_string_group_string;
 --   SMALLINT, (INT, BIGINT, DECIMAL, FLOAT, DOUBLE) and
 --   INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
 --   BIGINT, (DECIMAL, FLOAT, DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8(insert_num int,
 c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
 c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
 c12 int, c13 int, c14 int, c15 int,
 c16 bigint, c17 bigint, c18 bigint,
 b STRING);

-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8 SELECT insert_num,
 tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
 smallint1, smallint1, smallint1, smallint1, smallint1,
 int1, int1, int1, int1,
 bigint1, bigint1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n24;

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8 replace columns (insert_num int,
 c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
 c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
 c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
 c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
 b STRING) ;

-insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111,
+insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8 VALUES (111,
 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
 80000, 90000000, 1234.5678, 9876.543, 789.321,
 90000000, 1234.5678, 9876.543, 789.321,
 1234.5678, 9876.543, 789.321,
 'new');

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8;

-drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n8;
@@ -303,23 +303,23 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 --   DECIMAL, (FLOAT, DOUBLE) and
 --   FLOAT, (DOUBLE)
 --
-CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n8(insert_num int,
 c1 decimal(38,18), c2 decimal(38,18), c3 float,
 b STRING);

-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num,
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n8 SELECT insert_num,
 decimal1, decimal1, float1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n24;

-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n8;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table table_change_lower_to_higher_numeric_group_decimal_to_float_n8 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;

-insert into table table_change_lower_to_higher_numeric_group_decimal_to_float VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n8 VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');

-select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n8;

-drop table table_change_lower_to_higher_numeric_group_decimal_to_float;
+drop table table_change_lower_to_higher_numeric_group_decimal_to_float_n8;
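Both the file above and its _llap_io twin below open with the ADD COLUMNS subsections, whose point is NULL defaulting: rows written before the ALTER must come back with NULL in the newly added column, while rows written afterwards carry real values. A minimal sketch, using a hypothetical demo_add table but the same 111/80000 values the tests insert:

create table demo_add(insert_num int, a int, b string);
insert into table demo_add values (1, 10, 'original');
alter table demo_add add columns(c int);
insert into table demo_add values (111, 80000, 'new', 80000);
-- first row returns c = NULL; second row returns c = 80000
select insert_num, a, b, c from demo_add;
drop table demo_add;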
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table_llap_io.q
index 3fa9388166..9607896f5c 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table_llap_io.q
@@ -18,9 +18,9 @@ set hive.llap.io.enabled=true;
 set hive.llap.io.encode.enabled=true;

 -- vectorized reading of TEXTFILE format files using the row SERDE methods.
 --
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n37(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n37;

 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -29,61 +29,61 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n11(insert_num int, a INT, b STRING);

-insert into table table_add_int_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n11 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n37;

 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n11;

-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n11;

 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n11 add columns(c int);

-insert into table table_add_int_permute_select VALUES (111, 80000, 'new', 80000);
+insert into table table_add_int_permute_select_n11 VALUES (111, 80000, 'new', 80000);

 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n11;

 -- SELECT permutation columns to make sure NULL defaulting works right
---** select insert_num,a,b from table_add_int_permute_select;
---** select insert_num,a,b,c from table_add_int_permute_select;
---** select insert_num,c from table_add_int_permute_select;
+--** select insert_num,a,b from table_add_int_permute_select_n11;
+--** select insert_num,a,b,c from table_add_int_permute_select_n11;
+--** select insert_num,c from table_add_int_permute_select_n11;

-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n11;


 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_string_permute_select_n11(insert_num int, a INT, b STRING);

-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n11 SELECT insert_num, int1, 'original' FROM schema_evolution_data_n37;

 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n11;

-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n11;

 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n11 add columns(c int, d string);

-insert into table table_add_int_string_permute_select VALUES (111, 80000, 'new', 80000, 'filler');
+insert into table table_add_int_string_permute_select_n11 VALUES (111, 80000, 'new', 80000, 'filler');

 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n11;

 -- SELECT permutation columns to make sure NULL defaulting works right
---** select insert_num,a,b from table_add_int_string_permute_select;
---** select insert_num,a,b,c from table_add_int_string_permute_select;
---** select insert_num,a,b,c,d from table_add_int_string_permute_select;
---** select insert_num,a,c,d from table_add_int_string_permute_select;
---** select insert_num,a,d from table_add_int_string_permute_select;
---** select insert_num,c from table_add_int_string_permute_select;
---** select insert_num,d from table_add_int_string_permute_select;
+--** select insert_num,a,b from table_add_int_string_permute_select_n11;
+--** select insert_num,a,b,c from table_add_int_string_permute_select_n11;
+--** select insert_num,a,b,c,d from table_add_int_string_permute_select_n11;
+--** select insert_num,a,c,d from table_add_int_string_permute_select_n11;
+--** select insert_num,a,d from table_add_int_string_permute_select_n11;
+--** select insert_num,c from table_add_int_string_permute_select_n11;
+--** select insert_num,d from table_add_int_string_permute_select_n11;

-drop table table_add_int_string_permute_select;
+drop table table_add_int_string_permute_select_n11;
@@ -94,26 +94,26 @@ drop table table_add_int_string_permute_select;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
 --
-CREATE TABLE table_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);
+CREATE TABLE table_change_string_group_double_n11(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING);

-insert into table table_change_string_group_double SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table table_change_string_group_double_n11 SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n37;

 explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n11;

-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n11;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table table_change_string_group_double_n11 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);

-insert into table table_change_string_group_double VALUES (111, 789.321, 789.321, 789.321, 'new');
+insert into table table_change_string_group_double_n11 VALUES (111, 789.321, 789.321, 789.321, 'new');

 explain vectorization detail
-select insert_num,c1,c2,c3,b from table_change_string_group_double;
+select insert_num,c1,c2,c3,b from table_change_string_group_double_n11;

---** select insert_num,c1,c2,c3,b from table_change_string_group_double;
+--** select insert_num,c1,c2,c3,b from table_change_string_group_double_n11;

-drop table table_change_string_group_double;
+drop table table_change_string_group_double_n11;

 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -122,26 +122,26 @@ drop table table_change_string_group_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
 --
-CREATE TABLE table_change_date_group_string_group_date_group(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);
+CREATE TABLE table_change_date_group_string_group_date_group_n11(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING);

-insert into table table_change_date_group_string_group_date_group SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table table_change_date_group_string_group_date_group_n11 SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n37;

 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n11;

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n11;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_date_group_string_group_date_group replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table table_change_date_group_string_group_date_group_n11 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);

-insert into table table_change_date_group_string_group_date_group VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table table_change_date_group_string_group_date_group_n11 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');

 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n11;

---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group;
+--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_date_group_string_group_date_group_n11;

-drop table table_change_date_group_string_group_date_group;
+drop table table_change_date_group_string_group_date_group_n11;
@@ -156,42 +156,42 @@ drop table table_change_date_group_string_group_date_group;
 -- (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE table_change_numeric_group_string_group_multi_ints_string_group_n11(insert_num int,
 c1 tinyint, c2 smallint, c3 int, c4 bigint,
 c5 tinyint, c6 smallint, c7 int, c8 bigint,
 c9 tinyint, c10 smallint, c11 int, c12 bigint,
 c13 tinyint, c14 smallint, c15 int, c16 bigint,
 c17 tinyint, c18 smallint, c19 int, c20 bigint,
 b STRING);

-insert into table table_change_numeric_group_string_group_multi_ints_string_group SELECT insert_num,
+insert into table table_change_numeric_group_string_group_multi_ints_string_group_n11 SELECT insert_num,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
 tinyint1, smallint1, int1, bigint1,
- 'original' FROM schema_evolution_data;
+ 'original' FROM schema_evolution_data_n37;

 explain vectorization detail
-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n11;

-select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n11;

 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table table_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_multi_ints_string_group_n11 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5), c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5), b STRING) ; -insert into table table_change_numeric_group_string_group_multi_ints_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_multi_ints_string_group_n11 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n11; ---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group; +--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group_n11; -drop table table_change_numeric_group_string_group_multi_ints_string_group; +drop table table_change_numeric_group_string_group_multi_ints_string_group_n11; @@ -202,42 +202,42 @@ drop table table_change_numeric_group_string_group_multi_ints_string_group; -- (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc -- -- -CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int, +CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n11(insert_num int, c1 decimal(38,18), c2 float, c3 double, c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double, c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double, b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num, +insert into table table_change_numeric_group_string_group_floating_string_group_n11 SELECT insert_num, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, decimal1, float1, double1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n37; explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n11; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_numeric_group_string_group_floating_string_group replace columns (insert_num int, +alter table table_change_numeric_group_string_group_floating_string_group_n11 replace columns (insert_num int, c1 STRING, c2 STRING, c3 STRING, c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7), c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7), b STRING); -insert into table table_change_numeric_group_string_group_floating_string_group VALUES (111, +insert into table table_change_numeric_group_string_group_floating_string_group_n11 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n11; ---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group; +--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n11; -drop table table_change_numeric_group_string_group_floating_string_group; +drop table table_change_numeric_group_string_group_floating_string_group_n11; ------------------------------------------------------------------------------------------ @@ -248,37 +248,37 @@ drop table table_change_numeric_group_string_group_floating_string_group; -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and -- CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING) -- -CREATE TABLE table_change_string_group_string_group_string(insert_num int, +CREATE TABLE table_change_string_group_string_group_string_n11(insert_num int, c1 string, c2 string, c3 string, c4 string, c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING); -insert into table table_change_string_group_string_group_string SELECT insert_num, +insert into table table_change_string_group_string_group_string_n11 SELECT insert_num, string2, string2, string2, string2, string2, string2, string2, string2, string2, string2, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n37; explain vectorization detail -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n11; -select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string; +select insert_num,c1,c2,c3,c4,b from table_change_string_group_string_group_string_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_string_group_string_group_string replace columns (insert_num int, +alter table table_change_string_group_string_group_string_n11 replace columns (insert_num int, c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9), c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING, c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ; -insert into table table_change_string_group_string_group_string VALUES (111, +insert into table table_change_string_group_string_group_string_n11 VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new'); ---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string; +--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from table_change_string_group_string_group_string_n11; -drop table table_change_string_group_string_group_string; +drop table table_change_string_group_string_group_string_n11; @@ -293,43 +293,43 @@ drop table table_change_string_group_string_group_string; -- INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and -- BIGINT, (DECIMAL, FLOAT, DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11(insert_num int, c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint, c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint, c12 int, c13 int, c14 int, c15 int, c16 bigint, c17 bigint, c18 bigint, b STRING); -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11 SELECT insert_num, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, smallint1, smallint1, smallint1, smallint1, smallint1, int1, int1, int1, int1, bigint1, bigint1, bigint1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n37; explain vectorization detail -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11; -select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
-alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int, +alter table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11 replace columns (insert_num int, c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE, c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE, c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE, c16 decimal(38,18), c17 FLOAT, c18 DOUBLE, b STRING) ; -insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint VALUES (111, +insert into table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11 VALUES (111, 7000, 80000, 90000000, 1234.5678, 9876.543, 789.321, 80000, 90000000, 1234.5678, 9876.543, 789.321, 90000000, 1234.5678, 9876.543, 789.321, 1234.5678, 9876.543, 789.321, 'new'); ---** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +--** select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11; -drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; +drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint_n11; @@ -338,26 +338,26 @@ drop table table_change_lower_to_higher_numeric_group_tinyint_to_bigint; -- DECIMAL, (FLOAT, DOUBLE) and -- FLOAT, (DOUBLE) -- -CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int, +CREATE TABLE table_change_lower_to_higher_numeric_group_decimal_to_float_n11(insert_num int, c1 decimal(38,18), c2 decimal(38,18), c3 float, b STRING); -insert into table table_change_lower_to_higher_numeric_group_decimal_to_float SELECT insert_num, +insert into table table_change_lower_to_higher_numeric_group_decimal_to_float_n11 SELECT insert_num, decimal1, decimal1, float1, - 'original' FROM schema_evolution_data; + 'original' FROM schema_evolution_data_n37; explain vectorization detail -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n11; -select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float; +select insert_num,c1,c2,c3,b from table_change_lower_to_higher_numeric_group_decimal_to_float_n11; -- Table-Non-Cascade CHANGE COLUMNS ... 
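What all of the subsections above exercise, in one picture: a non-CASCADE ALTER TABLE only rewrites table metadata, so rows laid down under the old schema are read back through the new one -- added columns surface as NULL, and widened column types are converted by the reader on the fly. A minimal standalone sketch (the table name evolve_demo is illustrative, not one of the renamed test tables):

-- rows written before ADD COLUMNS carry no data for c; they read back as NULL
create table evolve_demo (insert_num int, a int) stored as orc;
insert into table evolve_demo values (1, 10);        -- old-schema row
alter table evolve_demo add columns (c int);         -- metadata-only, no file rewrite
insert into table evolve_demo values (2, 20, 200);   -- new-schema row
select insert_num, a, c from evolve_demo;            -- row 1 yields c = NULL
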
diff --git a/ql/src/test/queries/clientpositive/schemeAuthority2.q b/ql/src/test/queries/clientpositive/schemeAuthority2.q
index cfb4be8a79..562d07fe0d 100644
--- a/ql/src/test/queries/clientpositive/schemeAuthority2.q
+++ b/ql/src/test/queries/clientpositive/schemeAuthority2.q
@@ -3,12 +3,12 @@ set hive.mapred.mode=nonstrict;
dfs ${system:test.dfs.mkdir} file:///tmp/test;
dfs ${system:test.dfs.mkdir} hdfs:///tmp/test;
-create external table dynPart (key string) partitioned by (value string, value2 string) row format delimited fields terminated by '\\t' stored as textfile;
+create external table dynPart_n0 (key string) partitioned by (value string, value2 string) row format delimited fields terminated by '\\t' stored as textfile;
insert overwrite local directory "/tmp/test" select key from src where (key = 10) order by key;
insert overwrite directory "/tmp/test" select key from src where (key = 20) order by key;
-alter table dynPart add partition (value='0', value2='clusterA') location 'file:///tmp/test';
-alter table dynPart add partition (value='0', value2='clusterB') location 'hdfs:///tmp/test';
-select value2, key from dynPart where value='0';
+alter table dynPart_n0 add partition (value='0', value2='clusterA') location 'file:///tmp/test';
+alter table dynPart_n0 add partition (value='0', value2='clusterB') location 'hdfs:///tmp/test';
+select value2, key from dynPart_n0 where value='0';
dfs -rmr file:///tmp/test;
dfs -rmr hdfs:///tmp/test;
diff --git a/ql/src/test/queries/clientpositive/scriptfile1.q b/ql/src/test/queries/clientpositive/scriptfile1.q
index 243c65ce09..073f5407b1 100644
--- a/ql/src/test/queries/clientpositive/scriptfile1.q
+++ b/ql/src/test/queries/clientpositive/scriptfile1.q
@@ -7,7 +7,7 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-- NO_SESSION_REUSE
-CREATE TABLE dest1(key INT, value STRING);
+CREATE TABLE dest1_n22(key INT, value STRING);
ADD FILE ../../ql/src/test/scripts/testgrep;
@@ -17,6 +17,6 @@ FROM (
USING 'testgrep' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
+INSERT OVERWRITE TABLE dest1_n22 SELECT tmap.tkey, tmap.tvalue;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n22.* FROM dest1_n22;
diff --git a/ql/src/test/queries/clientpositive/select_unquote_and.q b/ql/src/test/queries/clientpositive/select_unquote_and.q
index 79894eab39..e647bc045d 100644
--- a/ql/src/test/queries/clientpositive/select_unquote_and.q
+++ b/ql/src/test/queries/clientpositive/select_unquote_and.q
@@ -1,17 +1,17 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-CREATE TABLE npe_test (key STRING, value STRING) PARTITIONED BY (ds STRING);
+CREATE TABLE npe_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING);
-INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-11')
+INSERT OVERWRITE TABLE npe_test_n0 PARTITION(ds='2012-12-11')
SELECT src.key, src.value FROM src WHERE key < '200';
-INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-12')
+INSERT OVERWRITE TABLE npe_test_n0 PARTITION(ds='2012-12-12')
SELECT src.key, src.value FROM src WHERE key > '200';
-SELECT count(*) FROM npe_test;
+SELECT count(*) FROM npe_test_n0;
-EXPLAIN SELECT * FROM npe_test WHERE ds > 2012-11-31 AND ds < 2012-12-15;
+EXPLAIN SELECT * FROM npe_test_n0 WHERE ds > 2012-11-31 AND ds < 2012-12-15;
-SELECT count(*) FROM npe_test WHERE ds > 2012-11-31 AND ds < 2012-12-15;
+SELECT count(*) FROM npe_test_n0 WHERE ds > 2012-11-31 AND ds < 2012-12-15;
-DROP TABLE npe_test;
+DROP TABLE npe_test_n0;
diff --git a/ql/src/test/queries/clientpositive/select_unquote_not.q b/ql/src/test/queries/clientpositive/select_unquote_not.q
index ee5d304b99..463c1548ea 100644
--- a/ql/src/test/queries/clientpositive/select_unquote_not.q
+++ b/ql/src/test/queries/clientpositive/select_unquote_not.q
@@ -1,17 +1,17 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-CREATE TABLE npe_test (key STRING, value STRING) PARTITIONED BY (ds STRING);
+CREATE TABLE npe_test_n1 (key STRING, value STRING) PARTITIONED BY (ds STRING);
-INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-11')
+INSERT OVERWRITE TABLE npe_test_n1 PARTITION(ds='2012-12-11')
SELECT src.key, src.value FROM src WHERE key < '200';
-INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-12')
+INSERT OVERWRITE TABLE npe_test_n1 PARTITION(ds='2012-12-12')
SELECT src.key, src.value FROM src WHERE key > '200';
-SELECT count(*) FROM npe_test;
+SELECT count(*) FROM npe_test_n1;
-EXPLAIN SELECT * FROM npe_test WHERE NOT ds < 2012-11-31;
+EXPLAIN SELECT * FROM npe_test_n1 WHERE NOT ds < 2012-11-31;
-SELECT count(*) FROM npe_test WHERE NOT ds < 2012-11-31;
+SELECT count(*) FROM npe_test_n1 WHERE NOT ds < 2012-11-31;
-DROP TABLE npe_test;
+DROP TABLE npe_test_n1;
diff --git a/ql/src/test/queries/clientpositive/semijoin.q b/ql/src/test/queries/clientpositive/semijoin.q
index 6c6135c996..144069bbe6 100644
--- a/ql/src/test/queries/clientpositive/semijoin.q
+++ b/ql/src/test/queries/clientpositive/semijoin.q
@@ -3,82 +3,82 @@ SET hive.vectorized.execution.enabled=false;
set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
-create table t1 as select cast(key as int) key, value from src where key <= 10;
+create table t1_n55 as select cast(key as int) key, value from src where key <= 10;
-select * from t1 sort by key;
+select * from t1_n55 sort by key;
-create table t2 as select cast(2*key as int) key, value from t1;
+create table t2_n33 as select cast(2*key as int) key, value from t1_n55;
-select * from t2 sort by key;
+select * from t2_n33 sort by key;
-create table t3 as select * from (select * from t1 union all select * from t2) b;
-select * from t3 sort by key, value;
+create table t3_n12 as select * from (select * from t1_n55 union all select * from t2_n33) b;
+select * from t3_n12 sort by key, value;
-create table t4 (key int, value string);
-select * from t4;
+create table t4_n5 (key int, value string);
+select * from t4_n5;
-explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+explain select * from t1_n55 a left semi join t2_n33 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n55 a left semi join t2_n33 b on a.key=b.key sort by a.key, a.value;
-explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+explain select * from t2_n33 a left semi join t1_n55 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n33 a left semi join t1_n55 b on b.key=a.key sort by a.key, a.value;
-explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+explain select * from t1_n55 a left semi join t4_n5 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n55 a left semi join t4_n5 b on b.key=a.key sort by a.key, a.value;
-explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+explain select a.value from t1_n55 a left semi join t3_n12 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n55 a left semi join t3_n12 b on (b.key = a.key and b.key < '15') sort by a.value;
-explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+explain select * from t1_n55 a left semi join t2_n33 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n55 a left semi join t2_n33 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
-explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+explain select a.value from t1_n55 a left semi join (select key from t3_n12 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n55 a left semi join (select key from t3_n12 where key > 5) b on a.key = b.key sort by a.value;
-explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+explain select a.value from t1_n55 a left semi join (select key , value from t2_n33 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n55 a left semi join (select key , value from t2_n33 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
-explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+explain select * from t2_n33 a left semi join (select key , value from t1_n55 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n33 a left semi join (select key , value from t1_n55 where key > 2) b on a.key = b.key sort by a.key, a.value;
-explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+explain select /*+ mapjoin(b) */ a.key from t3_n12 a left semi join t1_n55 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n12 a left semi join t1_n55 b on a.key = b.key sort by a.key;
-explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+explain select * from t1_n55 a left semi join t2_n33 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n55 a left semi join t2_n33 b on a.key = 2*b.key sort by a.key, a.value;
-explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+explain select * from t1_n55 a join t2_n33 b on a.key = b.key left semi join t3_n12 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n55 a join t2_n33 b on a.key = b.key left semi join t3_n12 c on b.key = c.key sort by a.key, a.value;
-explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+explain select * from t3_n12 a left semi join t1_n55 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n12 a left semi join t1_n55 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
-explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+explain select /*+ mapjoin(b, c) */ a.key from t3_n12 a left semi join t1_n55 b on a.key = b.key left semi join t2_n33 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n12 a left semi join t1_n55 b on a.key = b.key left semi join t2_n33 c on a.key = c.key sort by a.key;
-explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t3_n12 a left outer join t1_n55 b on a.key = b.key left semi join t2_n33 c on b.key = c.key sort by a.key;
+select a.key from t3_n12 a left outer join t1_n55 b on a.key = b.key left semi join t2_n33 c on b.key = c.key sort by a.key;
-explain select a.key from t1 a right outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t1 a right outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t1_n55 a right outer join t3_n12 b on a.key = b.key left semi join t2_n33 c on b.key = c.key sort by a.key;
+select a.key from t1_n55 a right outer join t3_n12 b on a.key = b.key left semi join t2_n33 c on b.key = c.key sort by a.key;
-explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+explain select a.key from t1_n55 a full outer join t3_n12 b on a.key = b.key left semi join t2_n33 c on b.key = c.key sort by a.key;
+select a.key from t1_n55 a full outer join t3_n12 b on a.key = b.key left semi join t2_n33 c on b.key = c.key sort by a.key;
-explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3_n12 a left semi join t2_n33 b on a.key = b.key left outer join t1_n55 c on a.key = c.key sort by a.key;
+select a.key from t3_n12 a left semi join t2_n33 b on a.key = b.key left outer join t1_n55 c on a.key = c.key sort by a.key;
-explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+explain select a.key from t3_n12 a left semi join t2_n33 b on a.key = b.key right outer join t1_n55 c on a.key = c.key sort by a.key;
+select a.key from t3_n12 a left semi join t2_n33 b on a.key = b.key right outer join t1_n55 c on a.key = c.key sort by a.key;
-explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+explain select a.key from t3_n12 a left semi join t1_n55 b on a.key = b.key full outer join t2_n33 c on a.key = c.key sort by a.key;
+select a.key from t3_n12 a left semi join t1_n55 b on a.key = b.key full outer join t2_n33 c on a.key = c.key sort by a.key;
-explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+explain select a.key from t3_n12 a left semi join t2_n33 b on a.key = b.key left outer join t1_n55 c on a.value = c.value sort by a.key;
+select a.key from t3_n12 a left semi join t2_n33 b on a.key = b.key left outer join t1_n55 c on a.value = c.value sort by a.key;
-explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+explain select a.key from t3_n12 a left semi join t2_n33 b on a.value = b.value where a.key > 100;
+select a.key from t3_n12 a left semi join t2_n33 b on a.value = b.value where a.key > 100;
explain select key, value from src outr left semi join
(select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
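For readers new to the construct being renamed all over semijoin.q: LEFT SEMI JOIN emits each left-side row at most once when at least one right-side match exists, and only left-side columns may appear in the SELECT list -- it is the join-shaped spelling of an IN/EXISTS filter. A minimal sketch with illustrative table names l and r:

-- both queries return the same rows; duplicate matches in r do not duplicate output
select l.key, l.value from l left semi join r on l.key = r.key;
select l.key, l.value from l where l.key in (select r.key from r);
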
diff --git a/ql/src/test/queries/clientpositive/semijoin2.q b/ql/src/test/queries/clientpositive/semijoin2.q
index 6f852c4431..5579fb1436 100644
--- a/ql/src/test/queries/clientpositive/semijoin2.q
+++ b/ql/src/test/queries/clientpositive/semijoin2.q
@@ -1,6 +1,6 @@
-CREATE TABLE table_1 (boolean_col_1 BOOLEAN, float_col_2 FLOAT, bigint_col_3 BIGINT, varchar0111_col_4 VARCHAR(111), bigint_col_5 BIGINT, float_col_6 FLOAT, boolean_col_7 BOOLEAN, decimal0101_col_8 DECIMAL(1, 1), decimal0904_col_9 DECIMAL(9, 4), char0112_col_10 CHAR(112), double_col_11 DOUBLE, boolean_col_12 BOOLEAN, double_col_13 DOUBLE, varchar0142_col_14 VARCHAR(142), timestamp_col_15 TIMESTAMP, decimal0502_col_16 DECIMAL(5, 2), smallint_col_25 SMALLINT, decimal3222_col_18 DECIMAL(32, 22), boolean_col_19 BOOLEAN, decimal2012_col_20 DECIMAL(20, 12), char0204_col_21 CHAR(204), double_col_61 DOUBLE, timestamp_col_23 TIMESTAMP, int_col_24 INT, float_col_25 FLOAT, smallint_col_26 SMALLINT, double_col_27 DOUBLE, char0180_col_28 CHAR(180), decimal1503_col_29 DECIMAL(15, 3), timestamp_col_30 TIMESTAMP, smallint_col_31 SMALLINT, decimal2020_col_32 DECIMAL(20, 20), timestamp_col_33 TIMESTAMP, boolean_col_34 BOOLEAN, decimal3025_col_35 DECIMAL(30, 25), decimal3117_col_36 DECIMAL(31, 17), timestamp_col_37 TIMESTAMP, varchar0146_col_38 VARCHAR(146), boolean_col_39 BOOLEAN, double_col_40 DOUBLE, float_col_41 FLOAT, timestamp_col_42 TIMESTAMP, double_col_43 DOUBLE, boolean_col_44 BOOLEAN, timestamp_col_45 TIMESTAMP, tinyint_col_8 TINYINT, int_col_47 INT, decimal0401_col_48 DECIMAL(4, 1), varchar0064_col_49 VARCHAR(64), string_col_50 STRING, double_col_51 DOUBLE, string_col_52 STRING, boolean_col_53 BOOLEAN, int_col_54 INT, boolean_col_55 BOOLEAN, string_col_56 STRING, double_col_57 DOUBLE, varchar0131_col_58 VARCHAR(131), boolean_col_59 BOOLEAN, bigint_col_22 BIGINT, char0184_col_61 CHAR(184), varchar0173_col_62 VARCHAR(173), timestamp_col_63 TIMESTAMP, decimal1709_col_26 DECIMAL(20, 5), timestamp_col_65 TIMESTAMP, timestamp_col_66 TIMESTAMP, timestamp_col_67 TIMESTAMP, boolean_col_68 BOOLEAN, decimal1208_col_20 DECIMAL(33, 11), decimal1605_col_70 DECIMAL(16, 5), varchar0010_col_71 VARCHAR(10), tinyint_col_72 TINYINT, timestamp_col_10 TIMESTAMP, decimal2714_col_74 DECIMAL(27, 14), double_col_75 DOUBLE, boolean_col_76 BOOLEAN, double_col_77 DOUBLE, string_col_78 STRING, boolean_col_79 BOOLEAN, boolean_col_80 BOOLEAN, decimal0803_col_81 DECIMAL(8, 3), decimal1303_col_82 DECIMAL(13, 3), tinyint_col_83 TINYINT, decimal3424_col_84 DECIMAL(34, 24), float_col_85 FLOAT, boolean_col_86 BOOLEAN, char0233_col_87 CHAR(233));
+CREATE TABLE table_1_n0 (boolean_col_1 BOOLEAN, float_col_2 FLOAT, bigint_col_3 BIGINT, varchar0111_col_4 VARCHAR(111), bigint_col_5 BIGINT, float_col_6 FLOAT, boolean_col_7 BOOLEAN, decimal0101_col_8 DECIMAL(1, 1), decimal0904_col_9 DECIMAL(9, 4), char0112_col_10 CHAR(112), double_col_11 DOUBLE, boolean_col_12 BOOLEAN, double_col_13 DOUBLE, varchar0142_col_14 VARCHAR(142), timestamp_col_15 TIMESTAMP, decimal0502_col_16 DECIMAL(5, 2), smallint_col_25 SMALLINT, decimal3222_col_18 DECIMAL(32, 22), boolean_col_19 BOOLEAN, decimal2012_col_20 DECIMAL(20, 12), char0204_col_21 CHAR(204), double_col_61 DOUBLE, timestamp_col_23 TIMESTAMP, int_col_24 INT, float_col_25 FLOAT, smallint_col_26 SMALLINT, double_col_27 DOUBLE, char0180_col_28 CHAR(180), decimal1503_col_29 DECIMAL(15, 3), timestamp_col_30 TIMESTAMP, smallint_col_31 SMALLINT, decimal2020_col_32 DECIMAL(20, 20), timestamp_col_33 TIMESTAMP, boolean_col_34 BOOLEAN, decimal3025_col_35 DECIMAL(30, 25), decimal3117_col_36 DECIMAL(31, 17), timestamp_col_37 TIMESTAMP, varchar0146_col_38 VARCHAR(146), boolean_col_39 BOOLEAN, double_col_40 DOUBLE, float_col_41 FLOAT, timestamp_col_42 TIMESTAMP, double_col_43 DOUBLE, boolean_col_44 BOOLEAN, timestamp_col_45 TIMESTAMP, tinyint_col_8 TINYINT, int_col_47 INT, decimal0401_col_48 DECIMAL(4, 1), varchar0064_col_49 VARCHAR(64), string_col_50 STRING, double_col_51 DOUBLE, string_col_52 STRING, boolean_col_53 BOOLEAN, int_col_54 INT, boolean_col_55 BOOLEAN, string_col_56 STRING, double_col_57 DOUBLE, varchar0131_col_58 VARCHAR(131), boolean_col_59 BOOLEAN, bigint_col_22 BIGINT, char0184_col_61 CHAR(184), varchar0173_col_62 VARCHAR(173), timestamp_col_63 TIMESTAMP, decimal1709_col_26 DECIMAL(20, 5), timestamp_col_65 TIMESTAMP, timestamp_col_66 TIMESTAMP, timestamp_col_67 TIMESTAMP, boolean_col_68 BOOLEAN, decimal1208_col_20 DECIMAL(33, 11), decimal1605_col_70 DECIMAL(16, 5), varchar0010_col_71 VARCHAR(10), tinyint_col_72 TINYINT, timestamp_col_10 TIMESTAMP, decimal2714_col_74 DECIMAL(27, 14), double_col_75 DOUBLE, boolean_col_76 BOOLEAN, double_col_77 DOUBLE, string_col_78 STRING, boolean_col_79 BOOLEAN, boolean_col_80 BOOLEAN, decimal0803_col_81 DECIMAL(8, 3), decimal1303_col_82 DECIMAL(13, 3), tinyint_col_83 TINYINT, decimal3424_col_84 DECIMAL(34, 24), float_col_85 FLOAT, boolean_col_86 BOOLEAN, char0233_col_87 CHAR(233));
-CREATE TABLE table_18 (timestamp_col_1 TIMESTAMP, double_col_2 DOUBLE, boolean_col_3 BOOLEAN, timestamp_col_4 TIMESTAMP, decimal2103_col_5 DECIMAL(21, 3), char0221_col_6 CHAR(221), tinyint_col_7 TINYINT, float_col_8 FLOAT, int_col_2 INT, timestamp_col_10 TIMESTAMP, char0228_col_11 CHAR(228), timestamp_col_12 TIMESTAMP, double_col_13 DOUBLE, tinyint_col_6 TINYINT, tinyint_col_33 TINYINT, smallint_col_38 SMALLINT, boolean_col_17 BOOLEAN, double_col_18 DOUBLE, boolean_col_19 BOOLEAN, bigint_col_20 BIGINT, decimal0504_col_37 DECIMAL(37, 34), boolean_col_22 BOOLEAN, double_col_23 DOUBLE, timestamp_col_24 TIMESTAMP, varchar0076_col_25 VARCHAR(76), timestamp_col_18 TIMESTAMP, boolean_col_27 BOOLEAN, decimal1611_col_22 DECIMAL(37, 5), boolean_col_29 BOOLEAN);
+CREATE TABLE table_18_n0 (timestamp_col_1 TIMESTAMP, double_col_2 DOUBLE, boolean_col_3 BOOLEAN, timestamp_col_4 TIMESTAMP, decimal2103_col_5 DECIMAL(21, 3), char0221_col_6 CHAR(221), tinyint_col_7 TINYINT, float_col_8 FLOAT, int_col_2 INT, timestamp_col_10 TIMESTAMP, char0228_col_11 CHAR(228), timestamp_col_12 TIMESTAMP, double_col_13 DOUBLE, tinyint_col_6 TINYINT, tinyint_col_33 TINYINT, smallint_col_38 SMALLINT, boolean_col_17 BOOLEAN, double_col_18 DOUBLE, boolean_col_19 BOOLEAN, bigint_col_20 BIGINT, decimal0504_col_37 DECIMAL(37, 34), boolean_col_22 BOOLEAN, double_col_23 DOUBLE, timestamp_col_24 TIMESTAMP, varchar0076_col_25 VARCHAR(76), timestamp_col_18 TIMESTAMP, boolean_col_27 BOOLEAN, decimal1611_col_22 DECIMAL(37, 5), boolean_col_29 BOOLEAN);
set hive.cbo.enable = false;
@@ -10,12 +10,12 @@ COALESCE(498, LEAD(COALESCE(-973, -684, 515)) OVER (PARTITION BY (t2.int_col_2 +
(t2.int_col_2) + (t1.smallint_col_25) AS int_col_1,
FLOOR(t1.double_col_61) AS float_col,
COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_2 + t1.smallint_col_25) ORDER BY (t2.int_col_2 + t1.smallint_col_25) DESC, FLOOR(t1.double_col_61) DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2
-FROM table_1 t1
-INNER JOIN table_18 t2 ON (((t2.tinyint_col_6) = (t1.bigint_col_22)) AND ((t2.decimal0504_col_37) = (t1.decimal1709_col_26))) AND ((t2.tinyint_col_33) = (t1.tinyint_col_8))
+FROM table_1_n0 t1
+INNER JOIN table_18_n0 t2 ON (((t2.tinyint_col_6) = (t1.bigint_col_22)) AND ((t2.decimal0504_col_37) = (t1.decimal1709_col_26))) AND ((t2.tinyint_col_33) = (t1.tinyint_col_8))
WHERE (t2.smallint_col_38) IN (SELECT COALESCE(-92, -994) AS int_col
-FROM table_1 tt1
-INNER JOIN table_18 tt2 ON (tt2.decimal1611_col_22) = (tt1.decimal1208_col_20)
+FROM table_1_n0 tt1
+INNER JOIN table_18_n0 tt2 ON (tt2.decimal1611_col_22) = (tt1.decimal1208_col_20)
WHERE (t1.timestamp_col_10) = (tt2.timestamp_col_18));
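The query above (and its twin in semijoin5.q below) leans on an explicit window frame: ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING makes the SUM range over every row from the start of the partition through 48 rows past the current one, in the ORDER BY ... DESC order -- the frame, not the whole partition, bounds the aggregate. A stripped-down sketch (demo_counts and its columns are illustrative):

select grp, k,
       sum(cnt) over (partition by grp
                      order by k desc
                      rows between unbounded preceding and 48 following) as framed_sum
from demo_counts;
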
diff --git a/ql/src/test/queries/clientpositive/semijoin3.q b/ql/src/test/queries/clientpositive/semijoin3.q
index 14442acc94..d77927271f 100644
--- a/ql/src/test/queries/clientpositive/semijoin3.q
+++ b/ql/src/test/queries/clientpositive/semijoin3.q
@@ -1,7 +1,7 @@
--! qt:dataset:src
-create table t1 as select cast(key as int) key, value from src;
+create table t1_n59 as select cast(key as int) key, value from src;
-create table t2 as select cast(key as int) key, value from src;
+create table t2_n37 as select cast(key as int) key, value from src;
set hive.cbo.enable=false;
@@ -9,21 +9,21 @@ explain
select count(1)
from (select key
- from t1
- where key = 0) t1
+ from t1_n59
+ where key = 0) t1_n59
left semi join (select key
- from t2
- where key = 0) t2
+ from t2_n37
+ where key = 0) t2_n37
on 1 = 1;
select count(1)
from (select key
- from t1
- where key = 0) t1
+ from t1_n59
+ where key = 0) t1_n59
left semi join (select key
- from t2
- where key = 0) t2
+ from t2_n37
+ where key = 0) t2_n37
on 1 = 1;
diff --git a/ql/src/test/queries/clientpositive/semijoin5.q b/ql/src/test/queries/clientpositive/semijoin5.q
index 3e7c20a389..19a64b3923 100644
--- a/ql/src/test/queries/clientpositive/semijoin5.q
+++ b/ql/src/test/queries/clientpositive/semijoin5.q
@@ -1,6 +1,6 @@
-CREATE TABLE table_1 (timestamp_col_1 TIMESTAMP, decimal3003_col_2 DECIMAL(30, 3), tinyint_col_3 TINYINT, decimal0101_col_4 DECIMAL(1, 1), boolean_col_5 BOOLEAN, float_col_6 FLOAT, bigint_col_7 BIGINT, varchar0098_col_8 VARCHAR(98), timestamp_col_9 TIMESTAMP, bigint_col_10 BIGINT, decimal0903_col_11 DECIMAL(9, 3), timestamp_col_12 TIMESTAMP, timestamp_col_13 TIMESTAMP, float_col_14 FLOAT, char0254_col_15 CHAR(254), double_col_16 DOUBLE, timestamp_col_17 TIMESTAMP, boolean_col_18 BOOLEAN, decimal2608_col_19 DECIMAL(26, 8), varchar0216_col_20 VARCHAR(216), string_col_21 STRING, bigint_col_22 BIGINT, boolean_col_23 BOOLEAN, timestamp_col_24 TIMESTAMP, boolean_col_25 BOOLEAN, decimal2016_col_26 DECIMAL(20, 16), string_col_27 STRING, decimal0202_col_28 DECIMAL(2, 2), float_col_29 FLOAT, decimal2020_col_30 DECIMAL(20, 20), boolean_col_31 BOOLEAN, double_col_32 DOUBLE, varchar0148_col_33 VARCHAR(148), decimal2121_col_34 DECIMAL(21, 21), tinyint_col_35 TINYINT, boolean_col_36 BOOLEAN, boolean_col_37 BOOLEAN, string_col_38 STRING, decimal3420_col_39 DECIMAL(34, 20), timestamp_col_40 TIMESTAMP, decimal1408_col_41 DECIMAL(14, 8), string_col_42 STRING, decimal0902_col_43 DECIMAL(9, 2), varchar0204_col_44 VARCHAR(204), boolean_col_45 BOOLEAN, timestamp_col_46 TIMESTAMP, boolean_col_47 BOOLEAN, bigint_col_48 BIGINT, boolean_col_49 BOOLEAN, smallint_col_50 SMALLINT, decimal0704_col_51 DECIMAL(7, 4), timestamp_col_52 TIMESTAMP, boolean_col_53 BOOLEAN, timestamp_col_54 TIMESTAMP, int_col_55 INT, decimal0505_col_56 DECIMAL(5, 5), char0155_col_57 CHAR(155), boolean_col_58 BOOLEAN, bigint_col_59 BIGINT, boolean_col_60 BOOLEAN, boolean_col_61 BOOLEAN, char0249_col_62 CHAR(249), boolean_col_63 BOOLEAN, timestamp_col_64 TIMESTAMP, decimal1309_col_65 DECIMAL(13, 9), int_col_66 INT, float_col_67 FLOAT, timestamp_col_68 TIMESTAMP, timestamp_col_69 TIMESTAMP, boolean_col_70 BOOLEAN, timestamp_col_71 TIMESTAMP, double_col_72 DOUBLE, boolean_col_73 BOOLEAN, char0222_col_74 CHAR(222), float_col_75 FLOAT, string_col_76 STRING, decimal2612_col_77 DECIMAL(26, 12), timestamp_col_78 TIMESTAMP, char0128_col_79 CHAR(128), timestamp_col_80 TIMESTAMP, double_col_81 DOUBLE, timestamp_col_82 TIMESTAMP, float_col_83 FLOAT, decimal2622_col_84 DECIMAL(26, 22), double_col_85 DOUBLE, float_col_86 FLOAT, decimal0907_col_87 DECIMAL(9, 7)) STORED AS orc;
+CREATE TABLE table_1_n1 (timestamp_col_1 TIMESTAMP, decimal3003_col_2 DECIMAL(30, 3), tinyint_col_3 TINYINT, decimal0101_col_4 DECIMAL(1, 1), boolean_col_5 BOOLEAN, float_col_6 FLOAT, bigint_col_7 BIGINT, varchar0098_col_8 VARCHAR(98), timestamp_col_9 TIMESTAMP, bigint_col_10 BIGINT, decimal0903_col_11 DECIMAL(9, 3), timestamp_col_12 TIMESTAMP, timestamp_col_13 TIMESTAMP, float_col_14 FLOAT, char0254_col_15 CHAR(254), double_col_16 DOUBLE, timestamp_col_17 TIMESTAMP, boolean_col_18 BOOLEAN, decimal2608_col_19 DECIMAL(26, 8), varchar0216_col_20 VARCHAR(216), string_col_21 STRING, bigint_col_22 BIGINT, boolean_col_23 BOOLEAN, timestamp_col_24 TIMESTAMP, boolean_col_25 BOOLEAN, decimal2016_col_26 DECIMAL(20, 16), string_col_27 STRING, decimal0202_col_28 DECIMAL(2, 2), float_col_29 FLOAT, decimal2020_col_30 DECIMAL(20, 20), boolean_col_31 BOOLEAN, double_col_32 DOUBLE, varchar0148_col_33 VARCHAR(148), decimal2121_col_34 DECIMAL(21, 21), tinyint_col_35 TINYINT, boolean_col_36 BOOLEAN, boolean_col_37 BOOLEAN, string_col_38 STRING, decimal3420_col_39 DECIMAL(34, 20), timestamp_col_40 TIMESTAMP, decimal1408_col_41 DECIMAL(14, 8), string_col_42 STRING, decimal0902_col_43 DECIMAL(9, 2), varchar0204_col_44 VARCHAR(204), boolean_col_45 BOOLEAN, timestamp_col_46 TIMESTAMP, boolean_col_47 BOOLEAN, bigint_col_48 BIGINT, boolean_col_49 BOOLEAN, smallint_col_50 SMALLINT, decimal0704_col_51 DECIMAL(7, 4), timestamp_col_52 TIMESTAMP, boolean_col_53 BOOLEAN, timestamp_col_54 TIMESTAMP, int_col_55 INT, decimal0505_col_56 DECIMAL(5, 5), char0155_col_57 CHAR(155), boolean_col_58 BOOLEAN, bigint_col_59 BIGINT, boolean_col_60 BOOLEAN, boolean_col_61 BOOLEAN, char0249_col_62 CHAR(249), boolean_col_63 BOOLEAN, timestamp_col_64 TIMESTAMP, decimal1309_col_65 DECIMAL(13, 9), int_col_66 INT, float_col_67 FLOAT, timestamp_col_68 TIMESTAMP, timestamp_col_69 TIMESTAMP, boolean_col_70 BOOLEAN, timestamp_col_71 TIMESTAMP, double_col_72 DOUBLE, boolean_col_73 BOOLEAN, char0222_col_74 CHAR(222), float_col_75 FLOAT, string_col_76 STRING, decimal2612_col_77 DECIMAL(26, 12), timestamp_col_78 TIMESTAMP, char0128_col_79 CHAR(128), timestamp_col_80 TIMESTAMP, double_col_81 DOUBLE, timestamp_col_82 TIMESTAMP, float_col_83 FLOAT, decimal2622_col_84 DECIMAL(26, 22), double_col_85 DOUBLE, float_col_86 FLOAT, decimal0907_col_87 DECIMAL(9, 7)) STORED AS orc;
-CREATE TABLE table_18 (boolean_col_1 BOOLEAN, boolean_col_2 BOOLEAN, decimal2518_col_3 DECIMAL(25, 18), float_col_4 FLOAT, timestamp_col_5 TIMESTAMP, double_col_6 DOUBLE, double_col_7 DOUBLE, char0035_col_8 CHAR(35), decimal2709_col_9 DECIMAL(27, 9), int_col_10 INT, timestamp_col_11 TIMESTAMP, decimal3604_col_12 DECIMAL(36, 4), string_col_13 STRING, int_col_14 INT, tinyint_col_15 TINYINT, decimal1911_col_16 DECIMAL(19, 11), float_col_17 FLOAT, timestamp_col_18 TIMESTAMP, smallint_col_19 SMALLINT, tinyint_col_20 TINYINT, timestamp_col_21 TIMESTAMP, boolean_col_22 BOOLEAN, int_col_23 INT) STORED AS orc;
+CREATE TABLE table_18_n1 (boolean_col_1 BOOLEAN, boolean_col_2 BOOLEAN, decimal2518_col_3 DECIMAL(25, 18), float_col_4 FLOAT, timestamp_col_5 TIMESTAMP, double_col_6 DOUBLE, double_col_7 DOUBLE, char0035_col_8 CHAR(35), decimal2709_col_9 DECIMAL(27, 9), int_col_10 INT, timestamp_col_11 TIMESTAMP, decimal3604_col_12 DECIMAL(36, 4), string_col_13 STRING, int_col_14 INT, tinyint_col_15 TINYINT, decimal1911_col_16 DECIMAL(19, 11), float_col_17 FLOAT, timestamp_col_18 TIMESTAMP, smallint_col_19 SMALLINT, tinyint_col_20 TINYINT, timestamp_col_21 TIMESTAMP, boolean_col_22 BOOLEAN, int_col_23 INT) STORED AS orc;
explain
SELECT
@@ -8,14 +8,14 @@ SELECT
(t2.int_col_10) + (t1.smallint_col_50) AS int_col_1,
FLOOR(t1.double_col_16) AS float_col,
COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_10 + t1.smallint_col_50) ORDER BY (t2.int_col_10 + t1.smallint_col_50) DESC, FLOOR(t1.double_col_16) DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2
-FROM table_1 t1
-INNER JOIN table_18 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND
+FROM table_1_n1 t1
+INNER JOIN table_18_n1 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND
((t2.decimal2709_col_9) = (t1.decimal2016_col_26))) AND ((t2.tinyint_col_20) = (t1.tinyint_col_3))
WHERE (t2.smallint_col_19) IN (SELECT COALESCE(-92, -994) AS int_col
- FROM table_1 tt1
- INNER JOIN table_18 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77)
+ FROM table_1_n1 tt1
+ INNER JOIN table_18_n1 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77)
WHERE (t1.timestamp_col_9) = (tt2.timestamp_col_18));
SELECT
@@ -23,13 +23,13 @@ SELECT
(t2.int_col_10) + (t1.smallint_col_50) AS int_col_1,
FLOOR(t1.double_col_16) AS float_col,
COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_10 + t1.smallint_col_50) ORDER BY (t2.int_col_10 + t1.smallint_col_50) DESC, FLOOR(t1.double_col_16) DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2
-FROM table_1 t1
-INNER JOIN table_18 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND
+FROM table_1_n1 t1
+INNER JOIN table_18_n1 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND
((t2.decimal2709_col_9) = (t1.decimal2016_col_26))) AND ((t2.tinyint_col_20) = (t1.tinyint_col_3))
WHERE (t2.smallint_col_19) IN (SELECT COALESCE(-92, -994) AS int_col
- FROM table_1 tt1
- INNER JOIN table_18 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77)
+ FROM table_1_n1 tt1
+ INNER JOIN table_18_n1 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77)
WHERE (t1.timestamp_col_9) = (tt2.timestamp_col_18));
diff --git a/ql/src/test/queries/clientpositive/semijoin6.q b/ql/src/test/queries/clientpositive/semijoin6.q
index f90d757a76..86404f5aaa 100644
--- a/ql/src/test/queries/clientpositive/semijoin6.q
+++ b/ql/src/test/queries/clientpositive/semijoin6.q
@@ -1,50 +1,50 @@
set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
-create table tx1 (a integer,b integer);
-insert into tx1 values (1, 105), (2, 203), (3, 300), (4, 400), (null, 400), (null, null);
+create table tx1_n1 (a integer,b integer);
+insert into tx1_n1 values (1, 105), (2, 203), (3, 300), (4, 400), (null, 400), (null, null);
-create table tx2 (a int, b int);
-insert into tx2 values (1, 105), (1, 1900), (2, 1995), (2, 1996), (4, 400), (4, null);
+create table tx2_n0 (a int, b int);
+insert into tx2_n0 values (1, 105), (1, 1900), (2, 1995), (2, 1996), (4, 400), (4, null);
explain
-select * from tx1 u left semi join tx2 v on u.a=v.a;
+select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a;
-select * from tx1 u left semi join tx2 v on u.a=v.a;
+select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a;
explain
-select * from tx1 u left semi join tx2 v on u.b <=> v.b;
+select * from tx1_n1 u left semi join tx2_n0 v on u.b <=> v.b;
-select * from tx1 u left semi join tx2 v on u.b <=> v.b;
+select * from tx1_n1 u left semi join tx2_n0 v on u.b <=> v.b;
explain
-select * from tx1 u left semi join tx2 v on u.b <> v.b;
+select * from tx1_n1 u left semi join tx2_n0 v on u.b <> v.b;
-select * from tx1 u left semi join tx2 v on u.b <> v.b;
+select * from tx1_n1 u left semi join tx2_n0 v on u.b <> v.b;
explain
-select * from tx1 u left semi join tx2 v on u.a=v.a and u.b <> v.b;
+select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a and u.b <> v.b;
-select * from tx1 u left semi join tx2 v on u.a=v.a and u.b <> v.b;
+select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a and u.b <> v.b;
explain
-select * from tx1 u left semi join tx2 v on u.a=v.a or u.b <> v.b;
+select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a or u.b <> v.b;
-select * from tx1 u left semi join tx2 v on u.a=v.a or u.b <> v.b;
+select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a or u.b <> v.b;
explain
-select * from tx1 u left semi join tx1 v on u.a=v.a;
+select * from tx1_n1 u left semi join tx1_n1 v on u.a=v.a;
-select * from tx1 u left semi join tx1 v on u.a=v.a;
+select * from tx1_n1 u left semi join tx1_n1 v on u.a=v.a;
explain
-select * from tx1 u left semi join tx2 v
+select * from tx1_n1 u left semi join tx2_n0 v
on (u.a + v.b > 400)
and ((case when u.a > 3 then true when v.b > 1900 then true else false end)
or (coalesce(u.a) + coalesce(v.b) > 1900))
and u.a = v.a;
-select * from tx1 u left semi join tx2 v
+select * from tx1_n1 u left semi join tx2_n0 v
on (u.a + v.b > 400)
and ((case when u.a > 3 then true when v.b > 1900 then true else false end)
or (coalesce(u.a) + coalesce(v.b) > 1900))
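semijoin6.q is mostly about the operators in its join conditions: <=> is null-safe equality (two NULLs compare as true, NULL against a value as false), while plain = and <> yield NULL when either side is NULL, so NULL-keyed rows silently drop out of an ordinary equi-join. A quick illustration:

select 1 <=> 1,        -- true
       null <=> null,  -- true  (1 = null and null = null would both yield NULL)
       1 <=> null;     -- false
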
diff --git a/ql/src/test/queries/clientpositive/setop_no_distinct.q b/ql/src/test/queries/clientpositive/setop_no_distinct.q
index 207954ac13..798f9c76c8 100644
--- a/ql/src/test/queries/clientpositive/setop_no_distinct.q
+++ b/ql/src/test/queries/clientpositive/setop_no_distinct.q
@@ -1,51 +1,51 @@
set hive.mapred.mode=nonstrict;
set hive.cbo.enable=true;
-create table a(key int, value int);
+create table a_n1(key int, value int);
-insert into table a values (1,2),(1,2),(1,2),(1,3),(2,3);
+insert into table a_n1 values (1,2),(1,2),(1,2),(1,3),(2,3);
-create table b(key int, value int);
+create table b_n1(key int, value int);
-insert into table b values (1,2),(1,2),(2,3);
+insert into table b_n1 values (1,2),(1,2),(2,3);
-select * from a intersect select * from b;
+select * from a_n1 intersect select * from b_n1;
-(select * from b intersect (select * from a)) intersect select * from b;
+(select * from b_n1 intersect (select * from a_n1)) intersect select * from b_n1;
-select * from b intersect all select * from a intersect select * from b;
+select * from b_n1 intersect all select * from a_n1 intersect select * from b_n1;
-(select * from b) intersect all ((select * from a) intersect select * from b);
+(select * from b_n1) intersect all ((select * from a_n1) intersect select * from b_n1);
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1
+select * from (select a_n1.key, b_n1.value from a_n1 join b_n1 on a_n1.key=b_n1.key)sub1
intersect
-select * from (select a.key, b.value from a join b on a.key=b.key)sub2;
+select * from (select a_n1.key, b_n1.value from a_n1 join b_n1 on a_n1.key=b_n1.key)sub2;
-drop table a;
+drop table a_n1;
-drop table b;
+drop table b_n1;
-create table a(key int);
+create table a_n1(key int);
-insert into table a values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL),(NULL),(NULL),(NULL);
+insert into table a_n1 values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL),(NULL),(NULL),(NULL);
-create table b(key bigint);
+create table b_n1(key bigint);
-insert into table b values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL);
+insert into table b_n1 values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL);
-select * from a except select * from b;
+select * from a_n1 except select * from b_n1;
-(select * from a) minus select * from b union (select * from a) minus select * from b;
+(select * from a_n1) minus select * from b_n1 union (select * from a_n1) minus select * from b_n1;
-(select * from a) minus select * from b union all ((select * from a) minus select * from b);
+(select * from a_n1) minus select * from b_n1 union all ((select * from a_n1) minus select * from b_n1);
-(select * from a) minus select * from b union all (select * from a) minus all select * from b;
+(select * from a_n1) minus select * from b_n1 union all (select * from a_n1) minus all select * from b_n1;
-select * from a minus select * from b minus (select * from a minus select * from b);
+select * from a_n1 minus select * from b_n1 minus (select * from a_n1 minus select * from b_n1);
-(select * from a) minus (select * from b minus (select * from a minus select * from b));
+(select * from a_n1) minus (select * from b_n1 minus (select * from a_n1 minus select * from b_n1));
-drop table a;
+drop table a_n1;
-drop table b;
+drop table b_n1;
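setop_no_distinct.q exercises Hive's set operators: INTERSECT and EXCEPT default to DISTINCT (set) semantics, the ALL variants keep duplicate multiplicity (bag semantics), and MINUS is simply a synonym for EXCEPT. Worked out on tiny hypothetical inputs, with a holding {1, 1, 2} and b holding {1, 3}:

-- select * from a intersect     select * from b;  -> 1     (deduplicated)
-- select * from a intersect all select * from b;  -> 1     (min(2,1) copies of 1)
-- select * from a except        select * from b;  -> 2
-- select * from a minus         select * from b;  -> 2     (MINUS behaves as EXCEPT)
-- select * from a except all    select * from b;  -> 1, 2  (max(2-1,0) copies of 1)
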
diff --git a/ql/src/test/queries/clientpositive/show_columns.q b/ql/src/test/queries/clientpositive/show_columns.q
index fdd1ea85c2..aa45bae9d5 100644
--- a/ql/src/test/queries/clientpositive/show_columns.q
+++ b/ql/src/test/queries/clientpositive/show_columns.q
@@ -8,32 +8,32 @@ SHOW COLUMNS from shcol_test;
-- SHOW COLUMNS
CREATE DATABASE test_db;
USE test_db;
-CREATE TABLE foo(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT);
+CREATE TABLE foo_n7(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT);

-- SHOW COLUMNS basic syntax tests
USE test_db;
-SHOW COLUMNS from foo;
-SHOW COLUMNS in foo;
-SHOW COLUMNS in foo 'col*';
-SHOW COLUMNS in foo "col*";
-SHOW COLUMNS from foo 'col*';
-SHOW COLUMNS from foo "col*";
-SHOW COLUMNS from foo "col1|cola";
+SHOW COLUMNS from foo_n7;
+SHOW COLUMNS in foo_n7;
+SHOW COLUMNS in foo_n7 'col*';
+SHOW COLUMNS in foo_n7 "col*";
+SHOW COLUMNS from foo_n7 'col*';
+SHOW COLUMNS from foo_n7 "col*";
+SHOW COLUMNS from foo_n7 "col1|cola";

-- SHOW COLUMNS from a database with a name that requires escaping
CREATE DATABASE `database`;
USE `database`;
-CREATE TABLE foo(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT);
-SHOW COLUMNS from foo;
-SHOW COLUMNS in foo "col*";
+CREATE TABLE foo_n7(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT);
+SHOW COLUMNS from foo_n7;
+SHOW COLUMNS in foo_n7 "col*";

-- Non existing column pattern
-SHOW COLUMNS in foo "nomatch*";
-SHOW COLUMNS in foo "col+";
-SHOW COLUMNS in foo "nomatch";
+SHOW COLUMNS in foo_n7 "nomatch*";
+SHOW COLUMNS in foo_n7 "col+";
+SHOW COLUMNS in foo_n7 "nomatch";

use default;
-SHOW COLUMNS from test_db.foo;
-SHOW COLUMNS from foo from test_db;
-SHOW COLUMNS from foo from test_db "col*";
-SHOW COLUMNS from foo from test_db like 'col*';
+SHOW COLUMNS from test_db.foo_n7;
+SHOW COLUMNS from foo_n7 from test_db;
+SHOW COLUMNS from foo_n7 from test_db "col*";
+SHOW COLUMNS from foo_n7 from test_db like 'col*';
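-- As the show_columns.q queries above suggest, the pattern argument follows
-- Hive's usual SHOW ... LIKE matching: '*' matches any character sequence and
-- '|' separates alternatives. Against the nine-column foo_n7 the test creates:
--   SHOW COLUMNS in foo_n7 'col*';      -- col1, col2, col3, cola, colb, colc
--   SHOW COLUMNS in foo_n7 'col1|cola'; -- col1, cola
-- A pattern with no match, such as 'nomatch', simply returns an empty list.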
diff --git a/ql/src/test/queries/clientpositive/show_create_table_alter.q b/ql/src/test/queries/clientpositive/show_create_table_alter.q
index 1158a69ba3..3218ade008 100644
--- a/ql/src/test/queries/clientpositive/show_create_table_alter.q
+++ b/ql/src/test/queries/clientpositive/show_create_table_alter.q
@@ -1,23 +1,23 @@
-- Test SHOW CREATE TABLE on an external, clustered and sorted table. Then test the query again after ALTERs.
-CREATE EXTERNAL TABLE tmp_showcrt1 (key smallint, value float)
+CREATE EXTERNAL TABLE tmp_showcrt1_n1 (key smallint, value float)
CLUSTERED BY (key) SORTED BY (value DESC) INTO 5 BUCKETS;
-SHOW CREATE TABLE tmp_showcrt1;
+SHOW CREATE TABLE tmp_showcrt1_n1;

-- Add a comment to the table, change the EXTERNAL property, and test SHOW CREATE TABLE on the change.
-ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE');
-SHOW CREATE TABLE tmp_showcrt1;
+ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE');
+SHOW CREATE TABLE tmp_showcrt1_n1;

-- Alter the table comment, change the EXTERNAL property back and test SHOW CREATE TABLE on the change.
-ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE');
-SHOW CREATE TABLE tmp_showcrt1;
+ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE');
+SHOW CREATE TABLE tmp_showcrt1_n1;

-- Change the 'SORTBUCKETCOLSPREFIX' property and test SHOW CREATE TABLE. The output should not change.
-ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE');
-SHOW CREATE TABLE tmp_showcrt1;
+ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE');
+SHOW CREATE TABLE tmp_showcrt1_n1;

-- Alter the storage handler of the table, and test SHOW CREATE TABLE.
-ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler');
-SHOW CREATE TABLE tmp_showcrt1;
-DROP TABLE tmp_showcrt1;
+ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler');
+SHOW CREATE TABLE tmp_showcrt1_n1;
+DROP TABLE tmp_showcrt1_n1;
diff --git a/ql/src/test/queries/clientpositive/show_create_table_partitioned.q b/ql/src/test/queries/clientpositive/show_create_table_partitioned.q
index 3b691544e5..34ae2e7a28 100644
--- a/ql/src/test/queries/clientpositive/show_create_table_partitioned.q
+++ b/ql/src/test/queries/clientpositive/show_create_table_partitioned.q
@@ -1,8 +1,8 @@
-- Test SHOW CREATE TABLE on a table with partitions and column comments.
-CREATE EXTERNAL TABLE tmp_showcrt1 (key string, newvalue boolean COMMENT 'a new value')
+CREATE EXTERNAL TABLE tmp_showcrt1_n2 (key string, newvalue boolean COMMENT 'a new value')
COMMENT 'temporary table'
PARTITIONED BY (value bigint COMMENT 'some value');
-SHOW CREATE TABLE tmp_showcrt1;
-DROP TABLE tmp_showcrt1;
+SHOW CREATE TABLE tmp_showcrt1_n2;
+DROP TABLE tmp_showcrt1_n2;
diff --git a/ql/src/test/queries/clientpositive/show_create_table_serde.q b/ql/src/test/queries/clientpositive/show_create_table_serde.q
index a94379bc92..c692b0756d 100644
--- a/ql/src/test/queries/clientpositive/show_create_table_serde.q
+++ b/ql/src/test/queries/clientpositive/show_create_table_serde.q
@@ -1,33 +1,33 @@
-- Test SHOW CREATE TABLE on a table with serde.
-CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint);
-ALTER TABLE tmp_showcrt1 SET SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2');
-SHOW CREATE TABLE tmp_showcrt1;
-DROP TABLE tmp_showcrt1;
+CREATE TABLE tmp_showcrt1_n0 (key int, value string, newvalue bigint);
+ALTER TABLE tmp_showcrt1_n0 SET SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2');
+SHOW CREATE TABLE tmp_showcrt1_n0;
+DROP TABLE tmp_showcrt1_n0;

-- without a storage handler
-CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint)
+CREATE TABLE tmp_showcrt1_n0 (key int, value string, newvalue bigint)
COMMENT 'temporary table'
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat';
-SHOW CREATE TABLE tmp_showcrt1;
-DROP TABLE tmp_showcrt1;
+SHOW CREATE TABLE tmp_showcrt1_n0;
+DROP TABLE tmp_showcrt1_n0;

-- without a storage handler / with custom serde params
-CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint)
+CREATE TABLE tmp_showcrt1_n0 (key int, value string, newvalue bigint)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
WITH SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2')
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat';
-SHOW CREATE TABLE tmp_showcrt1;
-DROP TABLE tmp_showcrt1;
+SHOW CREATE TABLE tmp_showcrt1_n0;
+DROP TABLE tmp_showcrt1_n0;

-- with a storage handler and serde properties
-CREATE EXTERNAL TABLE tmp_showcrt1 (key string, value boolean)
+CREATE EXTERNAL TABLE tmp_showcrt1_n0 (key string, value boolean)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler'
WITH SERDEPROPERTIES ('field.delim'=',', 'serialization.format'='$');
-SHOW CREATE TABLE tmp_showcrt1;
-DROP TABLE tmp_showcrt1;
+SHOW CREATE TABLE tmp_showcrt1_n0;
+DROP TABLE tmp_showcrt1_n0;
diff --git a/ql/src/test/queries/clientpositive/show_materialized_views.q b/ql/src/test/queries/clientpositive/show_materialized_views.q
index df7485feea..50d03155c2 100644
--- a/ql/src/test/queries/clientpositive/show_materialized_views.q
+++ b/ql/src/test/queries/clientpositive/show_materialized_views.q
@@ -47,14 +47,14 @@ SHOW MATERIALIZED VIEWS IN test2 LIKE "nomatch";
-- SHOW MATERIALIZED VIEWS from a database with a name that requires escaping
CREATE DATABASE `database`;
USE `database`;
-CREATE TABLE foo(a INT);
+CREATE TABLE foo_n0(a INT);
CREATE VIEW fooview AS
-SELECT * FROM foo;
+SELECT * FROM foo_n0;
USE default;
SHOW MATERIALIZED VIEWS FROM `database` LIKE "fooview";
DROP MATERIALIZED VIEW fooview;
-DROP TABLE foo;
+DROP TABLE foo_n0;

USE test1;
DROP MATERIALIZED VIEW shtb_test1_view1;
diff --git a/ql/src/test/queries/clientpositive/show_tables.q b/ql/src/test/queries/clientpositive/show_tables.q
index 619065711d..772d63ac9e 100644
--- a/ql/src/test/queries/clientpositive/show_tables.q
+++ b/ql/src/test/queries/clientpositive/show_tables.q
@@ -1,5 +1,5 @@
-CREATE TABLE shtb_test1(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-CREATE TABLE shtb_test2(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+CREATE TABLE shtb_test1_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+CREATE TABLE shtb_test2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;

EXPLAIN
SHOW TABLES 'shtb_*';
@@ -7,15 +7,15 @@ SHOW TABLES 'shtb_*';
SHOW TABLES 'shtb_*';

EXPLAIN
-SHOW TABLES LIKE 'shtb_test1|shtb_test2';
+SHOW TABLES LIKE 'shtb_test1_n0|shtb_test2_n0';

-SHOW TABLES LIKE 'shtb_test1|shtb_test2';
+SHOW TABLES LIKE 'shtb_test1_n0|shtb_test2_n0';

-- SHOW TABLES FROM/IN database
CREATE DATABASE test_db;
USE test_db;
-CREATE TABLE foo(a INT);
-CREATE TABLE bar(a INT);
+CREATE TABLE foo_n4(a INT);
+CREATE TABLE bar_n0(a INT);
CREATE TABLE baz(a INT);

-- SHOW TABLES basic syntax tests
@@ -26,10 +26,10 @@ SHOW TABLES IN test_db "test*";
SHOW TABLES IN test_db LIKE "nomatch";

-- SHOW TABLE EXTENDED basic syntax tests and wildcard
-SHOW TABLE EXTENDED IN test_db LIKE foo;
-SHOW TABLE EXTENDED IN test_db LIKE "foo";
-SHOW TABLE EXTENDED IN test_db LIKE 'foo';
-SHOW TABLE EXTENDED IN test_db LIKE `foo`;
+SHOW TABLE EXTENDED IN test_db LIKE foo_n4;
+SHOW TABLE EXTENDED IN test_db LIKE "foo_n4";
+SHOW TABLE EXTENDED IN test_db LIKE 'foo_n4';
+SHOW TABLE EXTENDED IN test_db LIKE `foo_n4`;
SHOW TABLE EXTENDED IN test_db LIKE 'ba*';
SHOW TABLE EXTENDED IN test_db LIKE "ba*";
SHOW TABLE EXTENDED IN test_db LIKE `ba*`;
@@ -37,6 +37,6 @@ SHOW TABLE EXTENDED IN test_db LIKE `ba*`;
-- SHOW TABLES from a database with a name that requires escaping
CREATE DATABASE `database`;
USE `database`;
-CREATE TABLE foo(a INT);
+CREATE TABLE foo_n4(a INT);
USE default;
-SHOW TABLES FROM `database` LIKE "foo";
+SHOW TABLES FROM `database` LIKE "foo_n4";
diff --git a/ql/src/test/queries/clientpositive/show_views.q b/ql/src/test/queries/clientpositive/show_views.q
index 61aad43461..726ad4b22e 100644
--- a/ql/src/test/queries/clientpositive/show_views.q
+++ b/ql/src/test/queries/clientpositive/show_views.q
@@ -2,15 +2,15 @@ CREATE DATABASE test1;
CREATE DATABASE test2;

USE test1;
-CREATE TABLE shtb_test1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-CREATE VIEW shtb_test1_view1 AS SELECT * FROM shtb_test1 where KEY > 1000 and KEY < 2000;
-CREATE VIEW shtb_test1_view2 AS SELECT * FROM shtb_test1 where KEY > 100 and KEY < 200;
-CREATE VIEW shtb_full_view2 AS SELECT * FROM shtb_test1;
+CREATE TABLE shtb_test1_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+CREATE VIEW shtb_test1_view1_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 1000 and KEY < 2000;
+CREATE VIEW shtb_test1_view2_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 100 and KEY < 200;
+CREATE VIEW shtb_full_view2_n0 AS SELECT * FROM shtb_test1_n1;
USE test2;
-CREATE TABLE shtb_test1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-CREATE TABLE shtb_test2(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-CREATE VIEW shtb_test1_view1 AS SELECT * FROM shtb_test1 where KEY > 1000 and KEY < 2000;
-CREATE VIEW shtb_test2_view2 AS SELECT * FROM shtb_test2 where KEY > 100 and KEY < 200;
+CREATE TABLE shtb_test1_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+CREATE TABLE shtb_test2_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+CREATE VIEW shtb_test1_view1_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 1000 and KEY < 2000;
+CREATE VIEW shtb_test2_view2_n0 AS SELECT * FROM shtb_test2_n1 where KEY > 100 and KEY < 200;

USE test1;
SHOW VIEWS;
@@ -33,24 +33,24 @@ SHOW VIEWS IN test2 LIKE "nomatch";
-- SHOW VIEWS from a database with a name that requires escaping
CREATE DATABASE `database`;
USE `database`;
-CREATE TABLE foo(a INT);
-CREATE VIEW fooview AS SELECT * FROM foo;
+CREATE TABLE foo_n8(a INT);
+CREATE VIEW fooview_n0 AS SELECT * FROM foo_n8;
USE default;
-SHOW VIEWS FROM `database` LIKE "fooview";
+SHOW VIEWS FROM `database` LIKE "fooview_n0";

-DROP VIEW fooview;
-DROP TABLE foo;
+DROP VIEW fooview_n0;
+DROP TABLE foo_n8;

USE test1;
-DROP VIEW shtb_test1_view1;
-DROP VIEW shtb_test1_view2;
-DROP VIEW shtb_full_view2;
-DROP TABLE shtb_test1;
+DROP VIEW shtb_test1_view1_n0;
+DROP VIEW shtb_test1_view2_n0;
+DROP VIEW shtb_full_view2_n0;
+DROP TABLE shtb_test1_n1;
DROP DATABASE test1;
USE test2;
-DROP VIEW shtb_test1_view1;
-DROP VIEW shtb_test2_view2;
-DROP TABLE shtb_test1;
-DROP TABLE shtb_test2;
+DROP VIEW shtb_test1_view1_n0;
+DROP VIEW shtb_test2_view2_n0;
+DROP TABLE shtb_test1_n1;
+DROP TABLE shtb_test2_n1;
DROP DATABASE test2;
diff --git a/ql/src/test/queries/clientpositive/skewjoin.q b/ql/src/test/queries/clientpositive/skewjoin.q
index 6e35e48881..d7c0570e58 100644
--- a/ql/src/test/queries/clientpositive/skewjoin.q
+++ b/ql/src/test/queries/clientpositive/skewjoin.q
@@ -8,50 +8,50 @@ set hive.skewjoin.key = 2;

-- SORT_QUERY_RESULTS

-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n128(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n76(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T3_n30(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T4_n17(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n17(key INT, value STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n128;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n76;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n30;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n17;

EXPLAIN
FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
+INSERT OVERWRITE TABLE dest_j1_n17 SELECT src1.key, src2.value;

FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
+INSERT OVERWRITE TABLE dest_j1_n17 SELECT src1.key, src2.value;

-SELECT sum(hash(key)), sum(hash(value)) FROM dest_j1;
+SELECT sum(hash(key)), sum(hash(value)) FROM dest_j1_n17;

set hive.cbo.enable=false;
EXPLAIN
SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
- JOIN T3 c ON b.key = c.key
- JOIN T4 d ON c.key = d.key;
+FROM T1_n128 a JOIN T2_n76 b ON a.key = b.key
+ JOIN T3_n30 c ON b.key = c.key
+ JOIN T4_n17 d ON c.key = d.key;

SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
- JOIN T3 c ON b.key = c.key
- JOIN T4 d ON c.key = d.key;
+FROM T1_n128 a JOIN T2_n76 b ON a.key = b.key
+ JOIN T3_n30 c ON b.key = c.key
+ JOIN T4_n17 d ON c.key = d.key;

EXPLAIN
SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
- JOIN T3 c ON b.key = c.key
- JOIN T4 d ON c.key = d.key;
+FROM T1_n128 a JOIN T2_n76 b ON a.key = b.key
+ JOIN T3_n30 c ON b.key = c.key
+ JOIN T4_n17 d ON c.key = d.key;

SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
- JOIN T3 c ON b.key = c.key
- JOIN T4 d ON c.key = d.key;
+FROM T1_n128 a JOIN T2_n76 b ON a.key = b.key
+ JOIN T3_n30 c ON b.key = c.key
+ JOIN T4_n17 d ON c.key = d.key;

-EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
-FROM T1 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+EXPLAIN FROM T1_n128 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+FROM T1_n128 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));

EXPLAIN
FROM
(SELECT src.* FROM src) x
@@ -103,28 +103,28 @@ JOIN
ON src1.c1 = src3.c5 AND src3.c5 < 80;

EXPLAIN
-SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1 k LEFT OUTER JOIN T1 v ON k.key+1=v.key;
-SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1 k LEFT OUTER JOIN T1 v ON k.key+1=v.key;
+SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1_n128 k LEFT OUTER JOIN T1_n128 v ON k.key+1=v.key;
+SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1_n128 k LEFT OUTER JOIN T1_n128 v ON k.key+1=v.key;

-select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val;
+select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n128 k join T1_n128 v on k.key=v.val;

-select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key;
+select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n128 k join T1_n128 v on k.key=v.key;

-select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key;
+select sum(hash(k.key)), sum(hash(v.val)) from T1_n128 k join T1_n128 v on k.key=v.key;

-select count(1) from T1 a join T1 b on a.key = b.key;
+select count(1) from T1_n128 a join T1_n128 b on a.key = b.key;

-FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key SELECT sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+FROM T1_n128 a LEFT OUTER JOIN T2_n76 c ON c.key+1=a.key SELECT sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));

-FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+FROM T1_n128 a RIGHT OUTER JOIN T2_n76 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));

-FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+FROM T1_n128 a FULL OUTER JOIN T2_n76 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));

-SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1 src1 LEFT OUTER JOIN T2 src2 ON src1.key+1 = src2.key RIGHT OUTER JOIN T2 src3 ON src2.key = src3.key;
+SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1_n128 src1 LEFT OUTER JOIN T2_n76 src2 ON src1.key+1 = src2.key RIGHT OUTER JOIN T2_n76 src3 ON src2.key = src3.key;

-SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1 src1 JOIN T2 src2 ON src1.key+1 = src2.key JOIN T2 src3 ON src2.key = src3.key;
+SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1_n128 src1 JOIN T2_n76 src2 ON src1.key+1 = src2.key JOIN T2_n76 src3 ON src2.key = src3.key;

-select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key;
+select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n128 k left outer join T1_n128 v on k.key+1=v.key;
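-- skewjoin.q above drives the runtime skew join, as opposed to the
-- compile-time rewrite covered by the skewjoin_mapjoin* and skewjoinopt*
-- tests that follow. A minimal sketch of the switches involved (assuming the
-- settings at the head of the file, which this hunk only partially shows):
--   set hive.optimize.skewjoin = true; -- handle skew while the join runs
--   set hive.skewjoin.key = 2;         -- more than 2 rows per key counts as skewed
-- Keys that cross the threshold are spilled to temporary files during the
-- shuffle join and are joined afterwards by a follow-up map join, so one hot
-- key no longer pins the whole query on a single reducer.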
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin1.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin1.q
index 70feb9d26e..4e8057bae4 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin1.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin1.q
@@ -2,15 +2,15 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n67(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n67;

-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n40(key STRING, val STRING)
SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n40;

-- copy from skewjoinopt1
-- test compile time skew join and auto map join
@@ -18,27 +18,27 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- adding an order by at the end to make the results deterministic

EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n67 a JOIN T2_n40 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n67 a JOIN T2_n40 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-- test outer joins also
EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n67 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n67 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-- an aggregation at the end should not change anything
EXPLAIN
-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n67 a JOIN T2_n40 b ON a.key = b.key;

-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n67 a JOIN T2_n40 b ON a.key = b.key;

EXPLAIN
-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n67 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key;

-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n67 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin10.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin10.q
index abfde79554..1d96347593 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin10.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin10.q
@@ -2,21 +2,21 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE tmpT1_n0(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1_n0;

-- testing skew on other data types - int
-CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2));
-INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1;
+CREATE TABLE T1_n151(key INT, val STRING) SKEWED BY (key) ON ((2));
+INSERT OVERWRITE TABLE T1_n151 SELECT key, val FROM tmpT1_n0;

-CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE tmpT2_n0(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2_n0;

-CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3));
+CREATE TABLE T2_n88(key INT, val STRING) SKEWED BY (key) ON ((3));

-INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2;
+INSERT OVERWRITE TABLE T2_n88 SELECT key, val FROM tmpT2_n0;

-- copy from skewjoinopt15
-- test compile time skew join and auto map join
@@ -27,27 +27,27 @@ INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2;
-- adding a order by at the end to make the results deterministic

EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n151 a JOIN T2_n88 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n151 a JOIN T2_n88 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-- test outer joins also
EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n151 a RIGHT OUTER JOIN T2_n88 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n151 a RIGHT OUTER JOIN T2_n88 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-- an aggregation at the end should not change anything
EXPLAIN
-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n151 a JOIN T2_n88 b ON a.key = b.key;

-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n151 a JOIN T2_n88 b ON a.key = b.key;

EXPLAIN
-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n151 a RIGHT OUTER JOIN T2_n88 b ON a.key = b.key;

-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n151 a RIGHT OUTER JOIN T2_n88 b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin11.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin11.q
index f1f826b189..77a94e6e61 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin11.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin11.q
@@ -2,15 +2,15 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n31(key STRING, val STRING)
CLUSTERED BY (key) INTO 4 BUCKETS
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n31;

-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n21(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n21;

-- copy from skewjoinopt19
-- test compile time skew join and auto map join
@@ -20,7 +20,7 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- adding a order by at the end to make the results deterministic

EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n31 a JOIN T2_n21 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n31 a JOIN T2_n21 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin2.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin2.q
index f4f4489b17..581b9b3b04 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin2.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin2.q
@@ -2,15 +2,15 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n25(key STRING, val STRING)
SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n25;

-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n16(key STRING, val STRING)
SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n16;

-- copy from skewjoinopt3
-- test compile time skew join and auto map join
@@ -20,15 +20,15 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- adding a order by at the end to make the results deterministic

EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n25 a JOIN T2_n16 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n25 a JOIN T2_n16 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-- test outer joins also
EXPLAIN
-SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n25 a FULL OUTER JOIN T2_n16 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n25 a FULL OUTER JOIN T2_n16 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin3.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin3.q
index 280b8a512e..1d5d2ddaf1 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin3.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin3.q
@@ -2,15 +2,15 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n155(key STRING, val STRING)
SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n155;

-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n91(key STRING, val STRING)
SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n91;

-- copy from skewjoinopt6
-- test compile time skew join and auto map join
@@ -20,7 +20,7 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- adding a order by at the end to make the results deterministic

EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n155 a JOIN T2_n91 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n155 a JOIN T2_n91 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;
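-- What the skewjoin_mapjoin* EXPLAINs above assert, sketched informally: with
-- hive.optimize.skewjoin.compiletime=true the planner uses the SKEWED BY
-- metadata to split the join at compile time. Conceptually, for T1_n67
-- (skewed on key 2) joined with T2_n40 (skewed on key 3), the generated plan
-- behaves like the union below (illustrative only, not a query from the tests):
--   SELECT a.*, b.* FROM T1_n67 a JOIN T2_n40 b
--   ON a.key = b.key WHERE a.key IN (2, 3)
--   UNION ALL
--   SELECT a.*, b.* FROM T1_n67 a JOIN T2_n40 b
--   ON a.key = b.key WHERE a.key NOT IN (2, 3);
-- With hive.auto.convert.join=true each branch may additionally be converted
-- to a map join, which is the combination these tests verify.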
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin4.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin4.q
index c9aa9e6ef8..0badb37f7b 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin4.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin4.q
@@ -2,19 +2,19 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n124(key STRING, val STRING)
SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n124;

-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n73(key STRING, val STRING)
SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n73;

-CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T3_n27(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n27;

-- copy from skewjoinopt7
-- test compile time skew join and auto map join
@@ -24,7 +24,7 @@ LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-- adding a order by at the end to make the results deterministic

EXPLAIN
-SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key;
+SELECT a.*, b.*, c.* FROM T1_n124 a JOIN T2_n73 b ON a.key = b.key JOIN T3_n27 c on a.key = c.key;

-SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+SELECT a.*, b.*, c.* FROM T1_n124 a JOIN T2_n73 b ON a.key = b.key JOIN T3_n27 c on a.key = c.key
ORDER BY a.key, b.key, c.key, a.val, b.val, c.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin5.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin5.q
index 1c49b5ff7d..4ad3006a11 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin5.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin5.q
@@ -2,14 +2,14 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n87(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n87;

-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n54(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n54;

-- copy from skewjoinopt9
-- test compile time skew join and auto map join
@@ -19,19 +19,19 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
EXPLAIN
select * from
(
-select key, val from T1
+select key, val from T1_n87
union all
-select key, val from T1
+select key, val from T1_n87
) subq1
-join T2 b on subq1.key = b.key;
+join T2_n54 b on subq1.key = b.key;

select * from
(
-select key, val from T1
+select key, val from T1_n87
union all
-select key, val from T1
+select key, val from T1_n87
) subq1
-join T2 b on subq1.key = b.key
+join T2_n54 b on subq1.key = b.key
ORDER BY subq1.key, b.key, subq1.val, b.val;

-- no skew join compile time optimization would be performed if one of the
@@ -39,13 +39,13 @@ ORDER BY subq1.key, b.key, subq1.val, b.val;
EXPLAIN
select * from
(
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n87 group by key
) subq1
-join T2 b on subq1.key = b.key;
+join T2_n54 b on subq1.key = b.key;

select * from
(
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n87 group by key
) subq1
-join T2 b on subq1.key = b.key
+join T2_n54 b on subq1.key = b.key
ORDER BY subq1.key, b.key, subq1.cnt, b.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin6.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin6.q
index 2f263bf495..9923c801bc 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin6.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin6.q
@@ -2,13 +2,13 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n39(key STRING, value STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n39;

-drop table array_valued_T1;
-create table array_valued_T1 (key string, value array<string>) SKEWED BY (key) ON ((8));
-insert overwrite table array_valued_T1 select key, array(value) from T1;
+drop table array_valued_T1_n39;
+create table array_valued_T1_n39 (key string, value array<string>) SKEWED BY (key) ON ((8));
+insert overwrite table array_valued_T1_n39 select key, array(value) from T1_n39;

-- copy from skewjoinopt10
-- test compile time skew join and auto map join
@@ -16,7 +16,7 @@ insert overwrite table array_valued_T1 select key, array(value) from T1;
-- adding a order by at the end to make the results deterministic

explain
-select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val;
+select * from (select a.key as key, b.value as array_val from T1_n39 a join array_valued_T1_n39 b on a.key=b.key) i lateral view explode (array_val) c as val;

-select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val
+select * from (select a.key as key, b.value as array_val from T1_n39 a join array_valued_T1_n39 b on a.key=b.key) i lateral view explode (array_val) c as val
ORDER BY key, val;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin7.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin7.q
index 78dcc908a3..67fa143f98 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin7.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin7.q
@@ -2,14 +2,14 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n157(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n157;

-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n92(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n92;

-- copy from skewjoinopt11
-- test compile time skew join and auto map join
@@ -21,15 +21,15 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
EXPLAIN
select * from
(
- select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+ select a.key, a.val as val1, b.val as val2 from T1_n157 a join T2_n92 b on a.key = b.key
union all
- select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+ select a.key, a.val as val1, b.val as val2 from T1_n157 a join T2_n92 b on a.key = b.key
) subq1;

select * from
(
- select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+ select a.key, a.val as val1, b.val as val2 from T1_n157 a join T2_n92 b on a.key = b.key
union all
- select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+ select a.key, a.val as val1, b.val as val2 from T1_n157 a join T2_n92 b on a.key = b.key
) subq1
ORDER BY key, val1, val2;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin8.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin8.q
index 34a8f54dfe..ea0dc86185 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin8.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin8.q
@@ -2,18 +2,18 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n29(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n29;

-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n20(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n20;

-CREATE TABLE T3(key STRING, val STRING)
+CREATE TABLE T3_n7(key STRING, val STRING)
SKEWED BY (val) ON ((12)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n7;

-- copy from skewjoinopt13
-- test compile time skew join and auto map join
@@ -27,12 +27,12 @@ LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
EXPLAIN
select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val;
+T1_n29 a join T2_n20 b on a.key = b.key
+join T3_n7 c on a.val = c.val;

select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n29 a join T2_n20 b on a.key = b.key
+join T3_n7 c on a.val = c.val
order by a.key, b.key, c.key, a.val, b.val, c.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_mapjoin9.q b/ql/src/test/queries/clientpositive/skewjoin_mapjoin9.q
index c612974d77..ff164fc23d 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_mapjoin9.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_mapjoin9.q
@@ -2,19 +2,19 @@ set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;
set hive.auto.convert.join=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n152(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n152;

-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n89(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n89;

-CREATE TABLE T3(key STRING, val STRING)
+CREATE TABLE T3_n36(key STRING, val STRING)
SKEWED BY (val) ON ((12)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n36;

-- copy from skewjoinopt14
-- test compile time skew join and auto map join
@@ -29,12 +29,12 @@ LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
EXPLAIN
select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val;
+T1_n152 a join T2_n89 b on a.key = b.key
+join T3_n36 c on a.val = c.val;

select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n152 a join T2_n89 b on a.key = b.key
+join T3_n36 c on a.val = c.val
order by a.key, b.key, a.val, b.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_onesideskew.q b/ql/src/test/queries/clientpositive/skewjoin_onesideskew.q
index 6d6b9cc192..099b7912ce 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_onesideskew.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_onesideskew.q
@@ -15,8 +15,8 @@ INSERT INTO TABLE nonskewtable VALUES ("1", "val_1");
INSERT INTO TABLE nonskewtable VALUES ("2", "val_2");

EXPLAIN
-CREATE TABLE result AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key;
-CREATE TABLE result AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key;
+CREATE TABLE result_n1 AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key;
+CREATE TABLE result_n1 AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key;

-SELECT * FROM result;
+SELECT * FROM result_n1;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q b/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
index 5e688d79a5..9d5571bea8 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
@@ -17,52 +17,52 @@ set mapred.input.dir.recursive=true;
-- Since this test creates sub-directories for the output, it might be easier to run the test
-- only on hadoop 23

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n57(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n57;

-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n35(key STRING, val STRING)
SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n35;

-- a simple join query with skew on both the tables on the join key

EXPLAIN
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT * FROM T1_n57 a JOIN T2_n35 b ON a.key = b.key;

set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;

-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT * FROM T1_n57 a JOIN T2_n35 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-- test outer joins also

EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n57 a RIGHT OUTER JOIN T2_n35 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n57 a RIGHT OUTER JOIN T2_n35 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-create table DEST1(key1 STRING, val1 STRING, key2 STRING, val2 STRING);
+create table DEST1_n58(key1 STRING, val1 STRING, key2 STRING, val2 STRING);

EXPLAIN
-INSERT OVERWRITE TABLE DEST1
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key;
+INSERT OVERWRITE TABLE DEST1_n58
+SELECT * FROM T1_n57 a JOIN T2_n35 b ON a.key = b.key;

-INSERT OVERWRITE TABLE DEST1
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key;
+INSERT OVERWRITE TABLE DEST1_n58
+SELECT * FROM T1_n57 a JOIN T2_n35 b ON a.key = b.key;

-SELECT * FROM DEST1
+SELECT * FROM DEST1_n58
ORDER BY key1, key2, val1, val2;

EXPLAIN
-INSERT OVERWRITE TABLE DEST1
-SELECT * FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+INSERT OVERWRITE TABLE DEST1_n58
+SELECT * FROM T1_n57 a RIGHT OUTER JOIN T2_n35 b ON a.key = b.key;

-INSERT OVERWRITE TABLE DEST1
-SELECT * FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+INSERT OVERWRITE TABLE DEST1_n58
+SELECT * FROM T1_n57 a RIGHT OUTER JOIN T2_n35 b ON a.key = b.key;

-SELECT * FROM DEST1
+SELECT * FROM DEST1_n58
ORDER BY key1, key2, val1, val2;
diff --git a/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q b/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
index 3e2610ff6c..06ebfdcb53 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
@@ -9,19 +9,19 @@ set hive.merge.mapredfiles=false;
set hive.merge.sparkfiles=false;
set mapred.input.dir.recursive=true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n8(key STRING, val STRING)
SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n8;

-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n4(key STRING, val STRING)
SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n4;

-CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T3_n2(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n2;

-- This is to test the union->selectstar->filesink and skewjoin optimization
-- Union of 3 map-reduce subqueries is performed for the skew join
@@ -32,9 +32,9 @@ LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-- to run the test only on hadoop 23

EXPLAIN
-SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key;
+SELECT a.*, b.*, c.* FROM T1_n8 a JOIN T2_n4 b ON a.key = b.key JOIN T3_n2 c on a.key = c.key;

set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;

-SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+SELECT a.*, b.*, c.* FROM T1_n8 a JOIN T2_n4 b ON a.key = b.key JOIN T3_n2 c on a.key = c.key
ORDER BY a.key, b.key, c.key, a.val, b.val, c.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoinopt1.q b/ql/src/test/queries/clientpositive/skewjoinopt1.q
index e32a583f45..2679462aa1 100644
--- a/ql/src/test/queries/clientpositive/skewjoinopt1.q
+++ b/ql/src/test/queries/clientpositive/skewjoinopt1.q
@@ -1,41 +1,41 @@
set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n101(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n101;

-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n64(key STRING, val STRING)
SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n64;

-- a simple join query with skew on both the tables on the join key
-- adding a order by at the end to make the results deterministic

EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n101 a JOIN T2_n64 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n101 a JOIN T2_n64 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-- test outer joins also
EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n101 a RIGHT OUTER JOIN T2_n64 b ON a.key = b.key;

-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n101 a RIGHT OUTER JOIN T2_n64 b ON a.key = b.key
ORDER BY a.key, b.key, a.val, b.val;

-- an aggregation at the end should not change anything
EXPLAIN
-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n101 a JOIN T2_n64 b ON a.key = b.key;

-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n101 a JOIN T2_n64 b ON a.key = b.key;

EXPLAIN
-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n101 a RIGHT OUTER JOIN T2_n64 b ON a.key = b.key;

-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key;
+SELECT count(1) FROM T1_n101 a RIGHT OUTER JOIN T2_n64 b ON a.key = b.key;
diff --git a/ql/src/test/queries/clientpositive/skewjoinopt10.q b/ql/src/test/queries/clientpositive/skewjoinopt10.q
index 16de44c27b..3695d17ec4 100644
--- a/ql/src/test/queries/clientpositive/skewjoinopt10.q
+++ b/ql/src/test/queries/clientpositive/skewjoinopt10.q
@@ -1,19 +1,19 @@
set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;

-CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n88(key STRING, value STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n88;

-drop table array_valued_T1;
-create table array_valued_T1 (key string, value array<string>) SKEWED BY (key) ON ((8));
-insert overwrite table array_valued_T1 select key, array(value) from T1;
+drop table array_valued_T1_n0;
+create table array_valued_T1_n0 (key string, value array<string>) SKEWED BY (key) ON ((8));
+insert overwrite table array_valued_T1_n0 select key, array(value) from T1_n88;

-- This test is to verify the skew join compile optimization when the join is followed by a lateral view
-- adding a order by at the end to make the results deterministic

explain
-select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val;
+select * from (select a.key as key, b.value as array_val from T1_n88 a join array_valued_T1_n0 b on a.key=b.key) i lateral view explode (array_val) c as val;

-select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val
+select * from (select a.key as key, b.value as array_val from T1_n88 a join array_valued_T1_n0 b on a.key=b.key) i lateral view explode (array_val) c as val
ORDER BY key, val;
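-- A short illustration of the lateral view that skewjoinopt10.q above (and
-- its skewjoin_mapjoin6.q copy) layers on the skew-optimized join. Suppose
-- T1_n88 held a single row ('8', 'eight') (hypothetical; the real rows come
-- from T1.txt). Then array_valued_T1_n0 holds ('8', array('eight')), and
--   select * from (select a.key as key, b.value as array_val
--                  from T1_n88 a join array_valued_T1_n0 b on a.key = b.key) i
--   lateral view explode(array_val) c as val;
-- emits one row per array element: ('8', ["eight"], 'eight'). The compile-time
-- skew rewrite applies to the inner join; explode then runs over the union of
-- its branches.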
diff --git a/ql/src/test/queries/clientpositive/skewjoinopt11.q b/ql/src/test/queries/clientpositive/skewjoinopt11.q
index 880e8f1411..074165f2a7 100644
--- a/ql/src/test/queries/clientpositive/skewjoinopt11.q
+++ b/ql/src/test/queries/clientpositive/skewjoinopt11.q
@@ -1,14 +1,14 @@
set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n122(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n122;

-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n72(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n72;

-- This test is to verify the skew join compile optimization when the join is followed
-- by a union. Both sides of a union consist of a join, which should have used
@@ -18,15 +18,15 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
EXPLAIN
select * from
(
- select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+ select a.key, a.val as val1, b.val as val2 from T1_n122 a join T2_n72 b on a.key = b.key
union all
- select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+ select a.key, a.val as val1, b.val as val2 from T1_n122 a join T2_n72 b on a.key = b.key
) subq1;

select * from
(
- select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+ select a.key, a.val as val1, b.val as val2 from T1_n122 a join T2_n72 b on a.key = b.key
union all
- select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+ select a.key, a.val as val1, b.val as val2 from T1_n122 a join T2_n72 b on a.key = b.key
) subq1
ORDER BY key, val1, val2;
diff --git a/ql/src/test/queries/clientpositive/skewjoinopt12.q b/ql/src/test/queries/clientpositive/skewjoinopt12.q
index 3d7b170508..e1e4689644 100644
--- a/ql/src/test/queries/clientpositive/skewjoinopt12.q
+++ b/ql/src/test/queries/clientpositive/skewjoinopt12.q
@@ -1,22 +1,22 @@
set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n159(key STRING, val STRING)
SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n159;

-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n93(key STRING, val STRING)
SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n93;

-- Both the join tables are skewed by 2 keys, and one of the skewed values
-- is common to both the tables. The join key matches the skewed key set.
-- adding a order by at the end to make the results deterministic

EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val;
+SELECT a.*, b.* FROM T1_n159 a JOIN T2_n93 b ON a.key = b.key and a.val = b.val;

-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+SELECT a.*, b.* FROM T1_n159 a JOIN T2_n93 b ON a.key = b.key and a.val = b.val
ORDER BY a.key, b.key, a.val, b.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoinopt13.q b/ql/src/test/queries/clientpositive/skewjoinopt13.q
index 36ad3c664b..ef6f7a546b 100644
--- a/ql/src/test/queries/clientpositive/skewjoinopt13.q
+++ b/ql/src/test/queries/clientpositive/skewjoinopt13.q
@@ -1,18 +1,18 @@
set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;

-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n38(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n38;

-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n25(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n25;

-CREATE TABLE T3(key STRING, val STRING)
+CREATE TABLE T3_n9(key STRING, val STRING)
SKEWED BY (val) ON ((12)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n9;

-- This test is for skewed join compile time optimization for more than 2 tables.
-- The join key for table 3 is different from the join key used for joining
@@ -24,12 +24,12 @@ LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
EXPLAIN
select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val;
+T1_n38 a join T2_n25 b on a.key = b.key
+join T3_n9 c on a.val = c.val;

select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n38 a join T2_n25 b on a.key = b.key
+join T3_n9 c on a.val = c.val
order by a.key, b.key, c.key, a.val, b.val, c.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoinopt14.q b/ql/src/test/queries/clientpositive/skewjoinopt14.q
index 94dab199b2..5fac94eee5 100644
--- a/ql/src/test/queries/clientpositive/skewjoinopt14.q
+++ b/ql/src/test/queries/clientpositive/skewjoinopt14.q
@@ -1,19 +1,19 @@
set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;

-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n65(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n65;

-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n39(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n39;

-CREATE TABLE T3(key STRING, val STRING)
+CREATE TABLE T3_n14(key STRING, val STRING)
SKEWED BY (val) ON ((12)) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n14;

-- This test is for skewed join compile time optimization for more than 2 tables.
-- The join key for table 3 is different from the join key used for joining
@@ -26,12 +26,12 @@ LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
EXPLAIN
select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val;
+T1_n65 a join T2_n39 b on a.key = b.key
+join T3_n14 c on a.val = c.val;

select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n65 a join T2_n39 b on a.key = b.key
+join T3_n14 c on a.val = c.val
order by a.key, b.key, a.val, b.val;
diff --git a/ql/src/test/queries/clientpositive/skewjoinopt15.q b/ql/src/test/queries/clientpositive/skewjoinopt15.q
index ba862565f1..69934d8c47 100644
--- a/ql/src/test/queries/clientpositive/skewjoinopt15.q
+++ b/ql/src/test/queries/clientpositive/skewjoinopt15.q
@@ -1,21 +1,21 @@
set hive.mapred.mode=nonstrict;
set hive.optimize.skewjoin.compiletime = true;

-CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE tmpT1_n109(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1_n109;

-- testing skew on other data types - int
-CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2));
-INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1;
+CREATE TABLE T1_n109(key INT, val STRING) SKEWED BY (key) ON ((2));
+INSERT OVERWRITE TABLE T1_n109 SELECT key, val FROM tmpT1_n109;

-CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE tmpT2_n66(key STRING, val STRING) STORED AS TEXTFILE;

-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2_n66;

-CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3));
+CREATE TABLE T2_n66(key INT, val STRING) SKEWED BY (key) ON ((3));

-INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2;
+INSERT OVERWRITE TABLE T2_n66 SELECT key, val FROM tmpT2_n66;

-- The skewed key is a integer column.
-- Otherwise this test is similar to skewjoinopt1.q @@ -24,27 +24,27 @@ INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n109 a JOIN T2_n66 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n109 a JOIN T2_n66 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; -- test outer joins also EXPLAIN -SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n109 a RIGHT OUTER JOIN T2_n66 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n109 a RIGHT OUTER JOIN T2_n66 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; -- an aggregation at the end should not change anything EXPLAIN -SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT count(1) FROM T1_n109 a JOIN T2_n66 b ON a.key = b.key; -SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT count(1) FROM T1_n109 a JOIN T2_n66 b ON a.key = b.key; EXPLAIN -SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; +SELECT count(1) FROM T1_n109 a RIGHT OUTER JOIN T2_n66 b ON a.key = b.key; -SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key; +SELECT count(1) FROM T1_n109 a RIGHT OUTER JOIN T2_n66 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt16.q b/ql/src/test/queries/clientpositive/skewjoinopt16.q index 1345334a4c..44ac708f3b 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt16.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt16.q @@ -1,22 +1,22 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n154(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n154; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n90(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n90; -- One of the tables is skewed by 2 columns, and the other table is -- skewed by one column. 
Ths join is performed on the both the columns -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val; +SELECT a.*, b.* FROM T1_n154 a JOIN T2_n90 b ON a.key = b.key and a.val = b.val; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +SELECT a.*, b.* FROM T1_n154 a JOIN T2_n90 b ON a.key = b.key and a.val = b.val ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt17.q b/ql/src/test/queries/clientpositive/skewjoinopt17.q index 1c866a5d8f..476e40ea8e 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt17.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt17.q @@ -1,15 +1,15 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n27(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n27; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n18(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n18; -- One of the tables is skewed by 2 columns, and the other table is -- skewed by one column. Ths join is performed on the first skewed column @@ -18,31 +18,31 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n27 a JOIN T2_n18 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n27 a JOIN T2_n18 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; -DROP TABLE T1; -DROP TABLE T2; +DROP TABLE T1_n27; +DROP TABLE T2_n18; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n27(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n27; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n18(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n18; -- One of the tables is skewed by 2 columns, and the other table is -- skewed by one column. Ths join is performed on the both the columns -- In this case, the skewed join value is repeated in the filter. 
EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val; +SELECT a.*, b.* FROM T1_n27 a JOIN T2_n18 b ON a.key = b.key and a.val = b.val; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +SELECT a.*, b.* FROM T1_n27 a JOIN T2_n18 b ON a.key = b.key and a.val = b.val ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt18.q b/ql/src/test/queries/clientpositive/skewjoinopt18.q index a2a2618b15..cd323934a8 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt18.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt18.q @@ -1,21 +1,21 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE tmpT1_n1(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1_n1; -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)); -INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1; +CREATE TABLE T1_n160(key INT, val STRING) SKEWED BY (key) ON ((2)); +INSERT OVERWRITE TABLE T1_n160 SELECT key, val FROM tmpT1_n1; -- Tke skewed column is same in both the tables, however it is -- INT in one of the tables, and STRING in the other table -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n94(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n94; -- Once HIVE-3445 is fixed, the compile time skew join optimization would be -- applicable here. Till the above jira is fixed, it would be performed as a @@ -23,7 +23,7 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n160 a JOIN T2_n94 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n160 a JOIN T2_n94 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt19.q b/ql/src/test/queries/clientpositive/skewjoinopt19.q index 784c4fc9d1..02cadda7f5 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt19.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt19.q @@ -1,15 +1,15 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n34(key STRING, val STRING) CLUSTERED BY (key) INTO 4 BUCKETS SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n34; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n22(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n22; -- add a test where the skewed key is also the bucketized key -- it should not matter, and the compile time skewed join @@ -17,7 +17,7 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM 
T1_n34 a JOIN T2_n22 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n34 a JOIN T2_n22 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt2.q b/ql/src/test/queries/clientpositive/skewjoinopt2.q index 7cca946056..c1b59d3c6d 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt2.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt2.q @@ -3,15 +3,15 @@ set hive.optimize.skewjoin.compiletime = true; -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n139(key STRING, val STRING) SKEWED BY (key) ON ((2), (7)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n139; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n81(key STRING, val STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n81; -- a simple query with skew on both the tables on the join key -- multiple skew values are present for the skewed keys @@ -20,27 +20,27 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val; +SELECT a.*, b.* FROM T1_n139 a JOIN T2_n81 b ON a.key = b.key and a.val = b.val; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +SELECT a.*, b.* FROM T1_n139 a JOIN T2_n81 b ON a.key = b.key and a.val = b.val ORDER BY a.key, b.key, a.val, b.val; -- test outer joins also EXPLAIN -SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val; +SELECT a.*, b.* FROM T1_n139 a LEFT OUTER JOIN T2_n81 b ON a.key = b.key and a.val = b.val; -SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val +SELECT a.*, b.* FROM T1_n139 a LEFT OUTER JOIN T2_n81 b ON a.key = b.key and a.val = b.val ORDER BY a.key, b.key, a.val, b.val; -- a group by at the end should not change anything EXPLAIN -SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key; +SELECT a.key, count(1) FROM T1_n139 a JOIN T2_n81 b ON a.key = b.key and a.val = b.val group by a.key; -SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key; +SELECT a.key, count(1) FROM T1_n139 a JOIN T2_n81 b ON a.key = b.key and a.val = b.val group by a.key; EXPLAIN -SELECT a.key, count(1) FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key; +SELECT a.key, count(1) FROM T1_n139 a LEFT OUTER JOIN T2_n81 b ON a.key = b.key and a.val = b.val group by a.key; -SELECT a.key, count(1) FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key; +SELECT a.key, count(1) FROM T1_n139 a LEFT OUTER JOIN T2_n81 b ON a.key = b.key and a.val = b.val group by a.key; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt20.q b/ql/src/test/queries/clientpositive/skewjoinopt20.q index 190cd98210..160e5b82d9 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt20.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt20.q @@ -1,15 +1,15 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n103(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS SKEWED BY (key) 
ON ((2)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n103; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n65(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n65; -- add a test where the skewed key is also the bucketized/sorted key -- it should not matter, and the compile time skewed join @@ -17,7 +17,7 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n103 a JOIN T2_n65 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n103 a JOIN T2_n65 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt21.q b/ql/src/test/queries/clientpositive/skewjoinopt21.q index 7ff086d144..692c87f7e0 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt21.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt21.q @@ -1,15 +1,15 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n63(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n63; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n38(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n38; -- a simple join query with skew on both the tables on the join key -- adding a order by at the end to make the results deterministic @@ -17,15 +17,15 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; EXPLAIN SELECT a.*, b.* FROM - (SELECT key as k, val as v FROM T1) a + (SELECT key as k, val as v FROM T1_n63) a JOIN - (SELECT key as k, val as v FROM T2) b + (SELECT key as k, val as v FROM T2_n38) b ON a.k = b.k; SELECT a.*, b.* FROM - (SELECT key as k, val as v FROM T1) a + (SELECT key as k, val as v FROM T1_n63) a JOIN - (SELECT key as k, val as v FROM T2) b + (SELECT key as k, val as v FROM T2_n38) b ON a.k = b.k ORDER BY a.k, b.k, a.v, b.v; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt3.q b/ql/src/test/queries/clientpositive/skewjoinopt3.q index 35cc41aa85..cfb2080a3d 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt3.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt3.q @@ -1,15 +1,15 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n12(key STRING, val STRING) SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n12; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n7(key STRING, val STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n7; -- a simple query with skew on both the tables. 
One of the skewed -- value is common to both the tables. The skewed value should not be @@ -17,15 +17,15 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n12 a JOIN T2_n7 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n12 a JOIN T2_n7 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; -- test outer joins also EXPLAIN -SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n12 a FULL OUTER JOIN T2_n7 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n12 a FULL OUTER JOIN T2_n7 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt4.q b/ql/src/test/queries/clientpositive/skewjoinopt4.q index b66a02ab59..ba47d64600 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt4.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt4.q @@ -1,28 +1,28 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n52(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n52; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n32(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n32; -- only of the tables of the join (the left table of the join) is skewed -- the skewed filter would still be applied to both the tables -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n52 a JOIN T2_n32 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n52 a JOIN T2_n32 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; -- the order of the join should not matter, just confirming EXPLAIN -SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key; +SELECT a.*, b.* FROM T2_n32 a JOIN T1_n52 b ON a.key = b.key; -SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key +SELECT a.*, b.* FROM T2_n32 a JOIN T1_n52 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt5.q b/ql/src/test/queries/clientpositive/skewjoinopt5.q index 30249039ad..6469e29f42 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt5.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt5.q @@ -1,22 +1,22 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n100(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n100; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n63(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n63; -- One of the tables is skewed by 2 columns, and the other table is -- skewed by one 
column. Ths join is performed on the first skewed column -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n100 a JOIN T2_n63 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n100 a JOIN T2_n63 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt6.q b/ql/src/test/queries/clientpositive/skewjoinopt6.q index 152fbe7e35..fc4b766854 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt6.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt6.q @@ -1,15 +1,15 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n130(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n130; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n77(key STRING, val STRING) SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n77; -- Both the join tables are skewed by 2 keys, and one of the skewed values -- is common to both the tables. The join key is a subset of the skewed key set: @@ -17,7 +17,7 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +SELECT a.*, b.* FROM T1_n130 a JOIN T2_n77 b ON a.key = b.key; -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n130 a JOIN T2_n77 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt8.q b/ql/src/test/queries/clientpositive/skewjoinopt8.q index 6f9807d765..fd05a6b4e3 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt8.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt8.q @@ -1,18 +1,18 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n140(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n140; -CREATE TABLE T2(key STRING, val STRING) +CREATE TABLE T2_n82(key STRING, val STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n82; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n33(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n33; -- This test is for validating skewed join compile time optimization for more than -- 2 tables. The join key is the same, and so a 3-way join would be performed. 
@@ -20,7 +20,7 @@ LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; -- adding a order by at the end to make the results deterministic EXPLAIN -SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key; +SELECT a.*, b.*, c.* FROM T1_n140 a JOIN T2_n82 b ON a.key = b.key JOIN T3_n33 c on a.key = c.key; -SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +SELECT a.*, b.*, c.* FROM T1_n140 a JOIN T2_n82 b ON a.key = b.key JOIN T3_n33 c on a.key = c.key ORDER BY a.key, b.key, c.key, a.val, b.val, c.val; diff --git a/ql/src/test/queries/clientpositive/skewjoinopt9.q b/ql/src/test/queries/clientpositive/skewjoinopt9.q index 626b1b2b5e..0409ad65b1 100644 --- a/ql/src/test/queries/clientpositive/skewjoinopt9.q +++ b/ql/src/test/queries/clientpositive/skewjoinopt9.q @@ -1,14 +1,14 @@ set hive.mapred.mode=nonstrict; set hive.optimize.skewjoin.compiletime = true; -CREATE TABLE T1(key STRING, val STRING) +CREATE TABLE T1_n9(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n9; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n5(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n5; -- no skew join compile time optimization would be performed if one of the -- join sources is a sub-query consisting of a union all @@ -16,19 +16,19 @@ LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; EXPLAIN select * from ( -select key, val from T1 +select key, val from T1_n9 union all -select key, val from T1 +select key, val from T1_n9 ) subq1 -join T2 b on subq1.key = b.key; +join T2_n5 b on subq1.key = b.key; select * from ( -select key, val from T1 +select key, val from T1_n9 union all -select key, val from T1 +select key, val from T1_n9 ) subq1 -join T2 b on subq1.key = b.key +join T2_n5 b on subq1.key = b.key ORDER BY subq1.key, b.key, subq1.val, b.val; -- no skew join compile time optimization would be performed if one of the @@ -36,13 +36,13 @@ ORDER BY subq1.key, b.key, subq1.val, b.val; EXPLAIN select * from ( -select key, count(1) as cnt from T1 group by key +select key, count(1) as cnt from T1_n9 group by key ) subq1 -join T2 b on subq1.key = b.key; +join T2_n5 b on subq1.key = b.key; select * from ( -select key, count(1) as cnt from T1 group by key +select key, count(1) as cnt from T1_n9 group by key ) subq1 -join T2 b on subq1.key = b.key +join T2_n5 b on subq1.key = b.key ORDER BY subq1.key, b.key, subq1.cnt, b.val; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_1.q b/ql/src/test/queries/clientpositive/smb_mapjoin_1.q index 7cc96563a7..7a98d2349e 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_1.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_1.q @@ -4,16 +4,16 @@ set hive.strict.checks.bucketing=false; -create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_1_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table 
smb_bucket_2_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_3_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1; -load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2; -load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3; +load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n3; +load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n3; +load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n3; -desc formatted smb_bucket_1; -select count(*) from smb_bucket_1; +desc formatted smb_bucket_1_n3; +select count(*) from smb_bucket_1_n3; set hive.cbo.enable=false; set hive.optimize.bucketmapjoin = true; @@ -23,37 +23,37 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -- SORT_QUERY_RESULTS explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key; 
-select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_11.q b/ql/src/test/queries/clientpositive/smb_mapjoin_11.q index ebab7b9811..6ce49b83c2 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_11.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_11.q @@ -13,32 +13,32 @@ set hive.merge.mapredfiles=false; -- This test verifies that the output of a sort merge join on 2 partitions (one on each side of the join) is bucketed -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; +CREATE TABLE test_table1_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; +CREATE TABLE test_table2_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *; +INSERT OVERWRITE TABLE test_table1_n1 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n1 PARTITION (ds = '1') SELECT *; -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS; +CREATE TABLE test_table3_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS; -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced EXPLAIN EXTENDED -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'; +INSERT OVERWRITE TABLE test_table3_n1 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n1 a JOIN test_table2_n1 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'; +INSERT OVERWRITE TABLE test_table3_n1 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n1 a JOIN test_table2_n1 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'; -SELECT * FROM test_table1 ORDER BY key; -SELECT * FROM test_table3 ORDER BY key; -EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16); -EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16); -SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16); -SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16); +SELECT * FROM test_table1_n1 ORDER BY key; +SELECT * FROM test_table3_n1 
ORDER BY key; +EXPLAIN EXTENDED SELECT * FROM test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16); +EXPLAIN EXTENDED SELECT * FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16); +SELECT * FROM test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16); +SELECT * FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16); -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; +SELECT COUNT(*) FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_12.q b/ql/src/test/queries/clientpositive/smb_mapjoin_12.q index 136b4238ce..753e4d3c9a 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_12.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_12.q @@ -11,29 +11,29 @@ set hive.cbo.enable=false; -- This test verifies that the output of a sort merge join on 1 big partition with multiple small partitions is bucketed and sorted -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; +CREATE TABLE test_table1_n6 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; +CREATE TABLE test_table2_n6 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *; +INSERT OVERWRITE TABLE test_table1_n6 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '2') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '3') SELECT *; -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; +CREATE TABLE test_table3_n4 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced EXPLAIN EXTENDED -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'; +INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n6 a JOIN test_table2_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'; +INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n6 a JOIN test_table2_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'; -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 
TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; +SELECT COUNT(*) FROM test_table3_n4 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1_n6 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; set hive.optimize.bucketmapjoin = true; set hive.optimize.bucketmapjoin.sortedmerge = true; @@ -41,10 +41,10 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -- Join data from the sampled buckets of 2 tables to verify the data is bucketed and sorted explain extended -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') -SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; +INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '2') +SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3_n4 a JOIN test_table1_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') -SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; +INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '2') +SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3_n4 a JOIN test_table1_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; -SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2'; +SELECT count(*) from test_table3_n4 tablesample (bucket 2 out of 16) a where ds = '2'; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_13.q b/ql/src/test/queries/clientpositive/smb_mapjoin_13.q index 728ec157e7..6a1b937b74 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_13.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_13.q @@ -11,27 +11,27 @@ set hive.merge.mapredfiles=false; -- This test verifies that the sort merge join optimizer works when the tables are joined on columns with different names -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS; -CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS; -CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS; -CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS; +CREATE TABLE test_table1_n12 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS; +CREATE TABLE test_table2_n12 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS; +CREATE TABLE test_table3_n6 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS; +CREATE TABLE test_table4_n0 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 SELECT * -INSERT OVERWRITE TABLE test_table2 SELECT * -INSERT OVERWRITE TABLE test_table3 SELECT * -INSERT OVERWRITE TABLE test_table4 SELECT *; +INSERT OVERWRITE TABLE test_table1_n12 SELECT * +INSERT OVERWRITE TABLE test_table2_n12 SELECT * +INSERT OVERWRITE TABLE test_table3_n6 SELECT * +INSERT OVERWRITE TABLE test_table4_n0 SELECT *; -- Join data from 2 tables on their respective sorted columns (one each, with different names) and -- verify sort merge join is used EXPLAIN EXTENDED -SELECT /*+ 
MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10; +SELECT /*+ MAPJOIN(b) */ * FROM test_table1_n12 a JOIN test_table2_n12 b ON a.key = b.value ORDER BY a.key LIMIT 10; -SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10; +SELECT /*+ MAPJOIN(b) */ * FROM test_table1_n12 a JOIN test_table2_n12 b ON a.key = b.value ORDER BY a.key LIMIT 10; -- Join data from 2 tables on their respective columns (two each, with the same names but sorted -- with different priorities) and verify sort merge join is not used EXPLAIN EXTENDED -SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10; +SELECT /*+ MAPJOIN(b) */ * FROM test_table3_n6 a JOIN test_table4_n0 b ON a.key = b.value ORDER BY a.key LIMIT 10; -SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10; +SELECT /*+ MAPJOIN(b) */ * FROM test_table3_n6 a JOIN test_table4_n0 b ON a.key = b.value ORDER BY a.key LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/smb_mapjoin_14.q index 25457d5744..4d7c52f14b 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_14.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_14.q @@ -3,13 +3,13 @@ set hive.mapred.mode=nonstrict; set hive.exec.reducers.max = 1; -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl1_n14(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE tbl2_n13(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -insert overwrite table tbl1 +insert overwrite table tbl1_n14 select * from src where key < 10; -insert overwrite table tbl2 +insert overwrite table tbl2_n13 select * from src where key < 10; set hive.optimize.bucketmapjoin = true; @@ -19,11 +19,11 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join explain select count(*) from ( - select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1_n14 a join tbl2_n13 b on a.key = b.key ) subq1; select count(*) from ( - select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1_n14 a join tbl2_n13 b on a.key = b.key ) subq1; -- The mapjoin is being performed as part of sub-query. 
It should be converted to a sort-merge join @@ -31,14 +31,14 @@ select count(*) from ( explain select key, count(*) from ( - select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1_n14 a join tbl2_n13 b on a.key = b.key ) subq1 group by key order by key; select key, count(*) from ( - select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1_n14 a join tbl2_n13 b on a.key = b.key ) subq1 group by key order by key; @@ -49,7 +49,7 @@ select count(*) from ( select key, count(*) from ( - select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1_n14 a join tbl2_n13 b on a.key = b.key ) subq1 group by key ) subq2; @@ -58,7 +58,7 @@ select count(*) from ( select key, count(*) from ( - select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1_n14 a join tbl2_n13 b on a.key = b.key ) subq1 group by key ) subq2; @@ -67,15 +67,15 @@ select count(*) from -- be converted to a sort-merge join. explain select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n14 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n13 a where key < 6) subq2 on subq1.key = subq2.key; select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n14 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n13 a where key < 6) subq2 on subq1.key = subq2.key; -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should @@ -85,22 +85,22 @@ select /*+mapjoin(subq2)*/ count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n14 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n13 b on subq2.key = b.key; select /*+mapjoin(subq2)*/ count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n14 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n13 b on subq2.key = b.key; -- Both the big table and the small table are nested sub-queries i.e more then 1 level of sub-query. 
@@ -110,7 +110,7 @@ select /*+mapjoin(subq2)*/ count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n14 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -118,7 +118,7 @@ select /*+mapjoin(subq2)*/ count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n14 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -128,7 +128,7 @@ select /*+mapjoin(subq2)*/ count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n14 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -136,7 +136,7 @@ select /*+mapjoin(subq2)*/ count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n14 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -147,73 +147,73 @@ select /*+mapjoin(subq2)*/ count(*) from -- item, but that is not part of the join key. explain select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n14 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n13 a where key < 8) subq2 on subq1.key = subq2.key; select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n14 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n13 a where key < 8) subq2 on subq1.key = subq2.key; -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side -- join should be performed explain select /*+mapjoin(subq1)*/ count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n14 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n13 a) subq2 on subq1.key = subq2.key; select /*+mapjoin(subq1)*/ count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n14 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n13 a) subq2 on subq1.key = subq2.key; -- The small table is a sub-query and the big table is not. -- It should be converted to a sort-merge join. 
explain select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n14 a where key < 6) subq1 + join tbl2_n13 a on subq1.key = a.key; select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n14 a where key < 6) subq1 + join tbl2_n13 a on subq1.key = a.key; -- The big table is a sub-query and the small table is not. -- It should be converted to a sort-merge join. explain select /*+mapjoin(a)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n14 a where key < 6) subq1 + join tbl2_n13 a on subq1.key = a.key; select /*+mapjoin(a)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n14 a where key < 6) subq1 + join tbl2_n13 a on subq1.key = a.key; -- There are more than 2 inputs to the join, all of them being sub-queries. -- It should be converted to to a sort-merge join explain select /*+mapjoin(subq1, subq2)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n14 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n13 a where key < 6) subq2 on (subq1.key = subq2.key) join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n13 a where key < 6) subq3 on (subq1.key = subq3.key); select /*+mapjoin(subq1, subq2)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n14 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n13 a where key < 6) subq2 on subq1.key = subq2.key join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n13 a where key < 6) subq3 on (subq1.key = subq3.key); -- The mapjoin is being performed on a nested sub-query, and an aggregation is performed after that. 
@@ -224,11 +224,11 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n14 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n13 b on subq2.key = b.key) a; select count(*) from ( @@ -236,9 +236,9 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n14 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n13 b on subq2.key = b.key) a; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_15.q b/ql/src/test/queries/clientpositive/smb_mapjoin_15.q index b4539c5632..04e4e0dffa 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_15.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_15.q @@ -12,45 +12,45 @@ set hive.merge.mapredfiles=false; -- of join columns -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS; +CREATE TABLE test_table1_n4 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS; +CREATE TABLE test_table2_n4 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 SELECT * -INSERT OVERWRITE TABLE test_table2 SELECT *; +INSERT OVERWRITE TABLE test_table1_n4 SELECT * +INSERT OVERWRITE TABLE test_table2_n4 SELECT *; -- it should be converted to a sort-merge join, since the first sort column (#join columns = 1) contains the join columns EXPLAIN EXTENDED -SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key ORDER BY a.key LIMIT 10; -SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key ORDER BY a.key LIMIT 10; +SELECT /*+mapjoin(b)*/ * FROM test_table1_n4 a JOIN test_table2_n4 b ON a.key = b.key ORDER BY a.key LIMIT 10; +SELECT /*+mapjoin(b)*/ * FROM test_table1_n4 a JOIN test_table2_n4 b ON a.key = b.key ORDER BY a.key LIMIT 10; -DROP TABLE test_table1; -DROP TABLE test_table2; +DROP TABLE test_table1_n4; +DROP TABLE test_table2_n4; -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS; -CREATE TABLE test_table2 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS; +CREATE TABLE test_table1_n4 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS; +CREATE TABLE test_table2_n4 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 SELECT key, key, value -INSERT OVERWRITE TABLE test_table2 SELECT key, key, value; +INSERT OVERWRITE TABLE test_table1_n4 SELECT key, key, value +INSERT OVERWRITE TABLE test_table2_n4 SELECT key, key, value; -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns EXPLAIN EXTENDED -SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10; -SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 
10; +SELECT /*+mapjoin(b)*/ * FROM test_table1_n4 a JOIN test_table2_n4 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10; +SELECT /*+mapjoin(b)*/ * FROM test_table1_n4 a JOIN test_table2_n4 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10; -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns -- even if the order is not the same EXPLAIN EXTENDED -SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10; -SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10; +SELECT /*+mapjoin(b)*/ * FROM test_table1_n4 a JOIN test_table2_n4 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10; +SELECT /*+mapjoin(b)*/ * FROM test_table1_n4 a JOIN test_table2_n4 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10; -- it should not be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) do not contain all -- the join columns EXPLAIN EXTENDED -SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10; -SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10; +SELECT /*+mapjoin(b)*/ * FROM test_table1_n4 a JOIN test_table2_n4 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10; +SELECT /*+mapjoin(b)*/ * FROM test_table1_n4 a JOIN test_table2_n4 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10; -DROP TABLE test_table1; -DROP TABLE test_table2; +DROP TABLE test_table1_n4; +DROP TABLE test_table2_n4; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_16.q b/ql/src/test/queries/clientpositive/smb_mapjoin_16.q index b4fe0749cb..a15c3e5e32 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_16.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_16.q @@ -9,14 +9,14 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE test_table1_n9 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE test_table2_n9 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 SELECT * -INSERT OVERWRITE TABLE test_table2 SELECT *; +INSERT OVERWRITE TABLE test_table1_n9 SELECT * +INSERT OVERWRITE TABLE test_table2_n9 SELECT *; -- Mapjoin followed by a aggregation should be performed in a single MR job EXPLAIN -SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key; -SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key; +SELECT /*+mapjoin(b)*/ count(*) FROM test_table1_n9 a JOIN test_table2_n9 b ON a.key = b.key; +SELECT /*+mapjoin(b)*/ count(*) FROM test_table1_n9 a JOIN test_table2_n9 b ON a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_17.q b/ql/src/test/queries/clientpositive/smb_mapjoin_17.q index 2b7d7d1704..d68f5f3139 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_17.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_17.q @@ -10,25 +10,25 @@ set hive.merge.mapfiles=false; set 
hive.merge.mapredfiles=false; -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE test_table1_n15 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE test_table2_n14 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE test_table3_n7 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE test_table4_n1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; CREATE TABLE test_table5 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; CREATE TABLE test_table6 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; CREATE TABLE test_table7 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; CREATE TABLE test_table8 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -INSERT OVERWRITE TABLE test_table1 +INSERT OVERWRITE TABLE test_table1_n15 SELECT * FROM src WHERE key < 10; -INSERT OVERWRITE TABLE test_table2 +INSERT OVERWRITE TABLE test_table2_n14 SELECT * FROM src WHERE key < 10; -INSERT OVERWRITE TABLE test_table3 +INSERT OVERWRITE TABLE test_table3_n7 SELECT * FROM src WHERE key < 10; -INSERT OVERWRITE TABLE test_table4 +INSERT OVERWRITE TABLE test_table4_n1 SELECT * FROM src WHERE key < 10; INSERT OVERWRITE TABLE test_table5 @@ -46,17 +46,17 @@ SELECT * FROM src WHERE key < 10; -- Mapjoin followed by a aggregation should be performed in a single MR job upto 7 tables EXPLAIN SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*) -FROM test_table1 a JOIN test_table2 b ON a.key = b.key -JOIN test_table3 c ON a.key = c.key -JOIN test_table4 d ON a.key = d.key +FROM test_table1_n15 a JOIN test_table2_n14 b ON a.key = b.key +JOIN test_table3_n7 c ON a.key = c.key +JOIN test_table4_n1 d ON a.key = d.key JOIN test_table5 e ON a.key = e.key JOIN test_table6 f ON a.key = f.key JOIN test_table7 g ON a.key = g.key; SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*) -FROM test_table1 a JOIN test_table2 b ON a.key = b.key -JOIN test_table3 c ON a.key = c.key -JOIN test_table4 d ON a.key = d.key +FROM test_table1_n15 a JOIN test_table2_n14 b ON a.key = b.key +JOIN test_table3_n7 c ON a.key = c.key +JOIN test_table4_n1 d ON a.key = d.key JOIN test_table5 e ON a.key = e.key JOIN test_table6 f ON a.key = f.key JOIN test_table7 g ON a.key = g.key; @@ -68,35 +68,35 @@ set hive.auto.convert.sortmerge.join=true; -- a single MR job EXPLAIN SELECT count(*) -FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key -LEFT OUTER JOIN test_table3 c ON a.key = c.key -LEFT OUTER JOIN test_table4 d ON a.key = d.key +FROM test_table1_n15 a LEFT OUTER JOIN test_table2_n14 b ON a.key = b.key +LEFT OUTER JOIN test_table3_n7 c ON a.key = c.key +LEFT OUTER JOIN test_table4_n1 d ON a.key = d.key LEFT OUTER JOIN test_table5 e ON a.key = e.key LEFT OUTER JOIN test_table6 f ON a.key = f.key LEFT OUTER JOIN test_table7 g ON a.key = g.key; SELECT count(*) -FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key -LEFT OUTER JOIN test_table3 c ON a.key = c.key -LEFT OUTER JOIN test_table4 d ON a.key = d.key +FROM 
test_table1_n15 a LEFT OUTER JOIN test_table2_n14 b ON a.key = b.key +LEFT OUTER JOIN test_table3_n7 c ON a.key = c.key +LEFT OUTER JOIN test_table4_n1 d ON a.key = d.key LEFT OUTER JOIN test_table5 e ON a.key = e.key LEFT OUTER JOIN test_table6 f ON a.key = f.key LEFT OUTER JOIN test_table7 g ON a.key = g.key; EXPLAIN SELECT count(*) -FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key -LEFT OUTER JOIN test_table3 c ON a.key = c.key -LEFT OUTER JOIN test_table4 d ON a.key = d.key +FROM test_table1_n15 a LEFT OUTER JOIN test_table2_n14 b ON a.key = b.key +LEFT OUTER JOIN test_table3_n7 c ON a.key = c.key +LEFT OUTER JOIN test_table4_n1 d ON a.key = d.key LEFT OUTER JOIN test_table5 e ON a.key = e.key LEFT OUTER JOIN test_table6 f ON a.key = f.key LEFT OUTER JOIN test_table7 g ON a.key = g.key LEFT OUTER JOIN test_table8 h ON a.key = h.key; SELECT count(*) -FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key -LEFT OUTER JOIN test_table3 c ON a.key = c.key -LEFT OUTER JOIN test_table4 d ON a.key = d.key +FROM test_table1_n15 a LEFT OUTER JOIN test_table2_n14 b ON a.key = b.key +LEFT OUTER JOIN test_table3_n7 c ON a.key = c.key +LEFT OUTER JOIN test_table4_n1 d ON a.key = d.key LEFT OUTER JOIN test_table5 e ON a.key = e.key LEFT OUTER JOIN test_table6 f ON a.key = f.key LEFT OUTER JOIN test_table7 g ON a.key = g.key @@ -105,22 +105,22 @@ LEFT OUTER JOIN test_table8 h ON a.key = h.key; -- outer join with max 16 aliases EXPLAIN SELECT a.* -FROM test_table1 a -LEFT OUTER JOIN test_table2 b ON a.key = b.key -LEFT OUTER JOIN test_table3 c ON a.key = c.key -LEFT OUTER JOIN test_table4 d ON a.key = d.key +FROM test_table1_n15 a +LEFT OUTER JOIN test_table2_n14 b ON a.key = b.key +LEFT OUTER JOIN test_table3_n7 c ON a.key = c.key +LEFT OUTER JOIN test_table4_n1 d ON a.key = d.key LEFT OUTER JOIN test_table5 e ON a.key = e.key LEFT OUTER JOIN test_table6 f ON a.key = f.key LEFT OUTER JOIN test_table7 g ON a.key = g.key LEFT OUTER JOIN test_table8 h ON a.key = h.key -LEFT OUTER JOIN test_table4 i ON a.key = i.key +LEFT OUTER JOIN test_table4_n1 i ON a.key = i.key LEFT OUTER JOIN test_table5 j ON a.key = j.key LEFT OUTER JOIN test_table6 k ON a.key = k.key LEFT OUTER JOIN test_table7 l ON a.key = l.key LEFT OUTER JOIN test_table8 m ON a.key = m.key LEFT OUTER JOIN test_table7 n ON a.key = n.key LEFT OUTER JOIN test_table8 o ON a.key = o.key -LEFT OUTER JOIN test_table4 p ON a.key = p.key +LEFT OUTER JOIN test_table4_n1 p ON a.key = p.key LEFT OUTER JOIN test_table5 q ON a.key = q.key LEFT OUTER JOIN test_table6 r ON a.key = r.key LEFT OUTER JOIN test_table7 s ON a.key = s.key diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_18.q b/ql/src/test/queries/clientpositive/smb_mapjoin_18.q index 71d99737c1..64e4b29755 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_18.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_18.q @@ -8,62 +8,62 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table1_n7 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n7 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *; +INSERT OVERWRITE TABLE 
test_table1_n7 PARTITION (ds = '1') SELECT *; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-only operation EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; +INSERT OVERWRITE TABLE test_table2_n7 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n7 a WHERE a.ds = '1'; -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; +INSERT OVERWRITE TABLE test_table2_n7 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n7 a WHERE a.ds = '1'; -select count(*) from test_table1 where ds = '1'; -select count(*) from test_table1 where ds = '1' and hash(key) % 2 = 0; -select count(*) from test_table1 where ds = '1' and hash(key) % 2 = 1; -select count(*) from test_table1 tablesample (bucket 1 out of 2) s where ds = '1'; -select count(*) from test_table1 tablesample (bucket 2 out of 2) s where ds = '1'; +select count(*) from test_table1_n7 where ds = '1'; +select count(*) from test_table1_n7 where ds = '1' and hash(key) % 2 = 0; +select count(*) from test_table1_n7 where ds = '1' and hash(key) % 2 = 1; +select count(*) from test_table1_n7 tablesample (bucket 1 out of 2) s where ds = '1'; +select count(*) from test_table1_n7 tablesample (bucket 2 out of 2) s where ds = '1'; -select count(*) from test_table2 where ds = '1'; -select count(*) from test_table2 where ds = '1' and hash(key) % 2 = 0; -select count(*) from test_table2 where ds = '1' and hash(key) % 2 = 1; -select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'; -select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'; +select count(*) from test_table2_n7 where ds = '1'; +select count(*) from test_table2_n7 where ds = '1' and hash(key) % 2 = 0; +select count(*) from test_table2_n7 where ds = '1' and hash(key) % 2 = 1; +select count(*) from test_table2_n7 tablesample (bucket 1 out of 2) s where ds = '1'; +select count(*) from test_table2_n7 tablesample (bucket 2 out of 2) s where ds = '1'; set hive.optimize.constant.propagation=false; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-only operation, one of the buckets should be empty EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238; +INSERT OVERWRITE TABLE test_table2_n7 PARTITION (ds = '2') +SELECT a.key, a.value FROM test_table1_n7 a WHERE a.ds = '1' and a.key = 238; -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238; +INSERT OVERWRITE TABLE test_table2_n7 PARTITION (ds = '2') +SELECT a.key, a.value FROM test_table1_n7 a WHERE a.ds = '1' and a.key = 238; set hive.optimize.constant.propagation=true; -select count(*) from test_table2 where ds = '2'; -select count(*) from test_table2 where ds = '2' and hash(key) % 2 = 0; -select count(*) from test_table2 where ds = '2' and hash(key) % 2 = 1; -select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '2'; -select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '2'; +select count(*) from test_table2_n7 where ds = '2'; +select count(*) from test_table2_n7 where ds = '2' and hash(key) % 2 = 0; +select count(*) from test_table2_n7 where ds = '2' and hash(key) % 2 = 1; +select count(*) from test_table2_n7 tablesample (bucket 1 
out of 2) s where ds = '2'; +select count(*) from test_table2_n7 tablesample (bucket 2 out of 2) s where ds = '2'; EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') -SELECT a.key, a.value FROM test_table2 a WHERE a.ds = '2'; +INSERT OVERWRITE TABLE test_table2_n7 PARTITION (ds = '3') +SELECT a.key, a.value FROM test_table2_n7 a WHERE a.ds = '2'; -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') -SELECT a.key, a.value FROM test_table2 a WHERE a.ds = '2'; +INSERT OVERWRITE TABLE test_table2_n7 PARTITION (ds = '2') +SELECT a.key, a.value FROM test_table2_n7 a WHERE a.ds = '2'; -select count(*) from test_table2 where ds = '3'; -select count(*) from test_table2 where ds = '3' and hash(key) % 2 = 0; -select count(*) from test_table2 where ds = '3' and hash(key) % 2 = 1; -select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '3'; -select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '3'; +select count(*) from test_table2_n7 where ds = '3'; +select count(*) from test_table2_n7 where ds = '3' and hash(key) % 2 = 0; +select count(*) from test_table2_n7 where ds = '3' and hash(key) % 2 = 1; +select count(*) from test_table2_n7 tablesample (bucket 1 out of 2) s where ds = '3'; +select count(*) from test_table2_n7 tablesample (bucket 2 out of 2) s where ds = '3'; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_19.q b/ql/src/test/queries/clientpositive/smb_mapjoin_19.q index 1026cf137a..8b9c3982e2 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_19.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_19.q @@ -10,35 +10,35 @@ set hive.merge.mapredfiles=false; set hive.metastore.aggregate.stats.cache.enabled=false; -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table1_n11 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n11 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *; +INSERT OVERWRITE TABLE test_table1_n11 PARTITION (ds = '1') SELECT *; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-only operation EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; - -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; - -select count(*) from test_table1 where ds = '1'; -select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 0; -select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 5; -select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 12; -select count(*) from test_table1 tablesample (bucket 1 out of 16) s where ds = '1'; -select count(*) from test_table1 tablesample (bucket 6 out of 16) s where ds = '1'; -select count(*) from test_table1 tablesample (bucket 13 out of 16) s where ds = '1'; - -select count(*) from test_table2 where ds = '1'; -select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 0; -select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 5; -select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 12; -select count(*) from test_table2 tablesample (bucket 1 out 
of 16) s where ds = '1'; -select count(*) from test_table2 tablesample (bucket 6 out of 16) s where ds = '1'; -select count(*) from test_table2 tablesample (bucket 13 out of 16) s where ds = '1'; +INSERT OVERWRITE TABLE test_table2_n11 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n11 a WHERE a.ds = '1'; + +INSERT OVERWRITE TABLE test_table2_n11 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n11 a WHERE a.ds = '1'; + +select count(*) from test_table1_n11 where ds = '1'; +select count(*) from test_table1_n11 where ds = '1' and hash(key) % 16 = 0; +select count(*) from test_table1_n11 where ds = '1' and hash(key) % 16 = 5; +select count(*) from test_table1_n11 where ds = '1' and hash(key) % 16 = 12; +select count(*) from test_table1_n11 tablesample (bucket 1 out of 16) s where ds = '1'; +select count(*) from test_table1_n11 tablesample (bucket 6 out of 16) s where ds = '1'; +select count(*) from test_table1_n11 tablesample (bucket 13 out of 16) s where ds = '1'; + +select count(*) from test_table2_n11 where ds = '1'; +select count(*) from test_table2_n11 where ds = '1' and hash(key) % 16 = 0; +select count(*) from test_table2_n11 where ds = '1' and hash(key) % 16 = 5; +select count(*) from test_table2_n11 where ds = '1' and hash(key) % 16 = 12; +select count(*) from test_table2_n11 tablesample (bucket 1 out of 16) s where ds = '1'; +select count(*) from test_table2_n11 tablesample (bucket 6 out of 16) s where ds = '1'; +select count(*) from test_table2_n11 tablesample (bucket 13 out of 16) s where ds = '1'; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_2.q b/ql/src/test/queries/clientpositive/smb_mapjoin_2.q index 43e51ed70c..9fe1fc332c 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_2.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_2.q @@ -4,13 +4,13 @@ set hive.strict.checks.bucketing=false; -create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_1_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_2_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_3_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1; -load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2; -load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3; +load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n1; +load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n1; +load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n1; set hive.optimize.bucketmapjoin = true; set hive.optimize.bucketmapjoin.sortedmerge = true; @@ -19,37 +19,37 @@ set hive.cbo.enable=false; -- SORT_QUERY_RESULTS explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from 
smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_21.q b/ql/src/test/queries/clientpositive/smb_mapjoin_21.q index 7d7dedea50..b1a5a01f72 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_21.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_21.q @@ -8,71 +8,71 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table1_n17 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table2 (key INT, 
value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *; +INSERT OVERWRITE TABLE test_table1_n17 PARTITION (ds = '1') SELECT *; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-only operation EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; +INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'; -drop table test_table2; +drop table test_table2_n16; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key desc) INTO 2 BUCKETS; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-reduce operation since the sort orders does not match EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; +INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'; -drop table test_table2; +drop table test_table2_n16; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key, value) INTO 2 BUCKETS; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-reduce operation since the sort columns do not match EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; +INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'; -drop table test_table2; +drop table test_table2_n16; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (value) INTO 2 BUCKETS; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-reduce operation since the sort columns do not match EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; +INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'; -drop table test_table2; +drop table test_table2_n16; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-reduce operation since the number of buckets do not match EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; +INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'; -drop table test_table2; +drop table test_table2_n16; -CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +CREATE TABLE test_table2_n16 (key 
INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-reduce operation since sort columns do not match EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'; +INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1') +SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_22.q b/ql/src/test/queries/clientpositive/smb_mapjoin_22.q index 4c5fcf738c..c7fc2d3743 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_22.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_22.q @@ -8,49 +8,49 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) +CREATE TABLE test_table1_n10 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) +CREATE TABLE test_table2_n10 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 SELECT *; +INSERT OVERWRITE TABLE test_table1_n10 SELECT *; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-only operation -EXPLAIN INSERT OVERWRITE TABLE test_table2 -SELECT * FROM test_table1; +EXPLAIN INSERT OVERWRITE TABLE test_table2_n10 +SELECT * FROM test_table1_n10; -INSERT OVERWRITE TABLE test_table2 -SELECT * FROM test_table1; +INSERT OVERWRITE TABLE test_table2_n10 +SELECT * FROM test_table1_n10; -select count(*) from test_table1; -select count(*) from test_table1 tablesample (bucket 2 out of 2) s; +select count(*) from test_table1_n10; +select count(*) from test_table1_n10 tablesample (bucket 2 out of 2) s; -select count(*) from test_table2; -select count(*) from test_table2 tablesample (bucket 2 out of 2) s; +select count(*) from test_table2_n10; +select count(*) from test_table2_n10 tablesample (bucket 2 out of 2) s; -drop table test_table1; -drop table test_table2; +drop table test_table1_n10; +drop table test_table2_n10; -CREATE TABLE test_table1 (key INT, value STRING) +CREATE TABLE test_table1_n10 (key INT, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS; -CREATE TABLE test_table2 (key INT, value STRING) +CREATE TABLE test_table2_n10 (key INT, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS; FROM src -INSERT OVERWRITE TABLE test_table1 SELECT *; +INSERT OVERWRITE TABLE test_table1_n10 SELECT *; -- Insert data into the bucketed table by selecting from another bucketed table -- This should be a map-only operation -EXPLAIN INSERT OVERWRITE TABLE test_table2 -SELECT * FROM test_table1; +EXPLAIN INSERT OVERWRITE TABLE test_table2_n10 +SELECT * FROM test_table1_n10; -INSERT OVERWRITE TABLE test_table2 -SELECT * FROM test_table1; +INSERT OVERWRITE TABLE test_table2_n10 +SELECT * FROM test_table1_n10; -select count(*) from test_table1; -select count(*) from test_table1 tablesample (bucket 2 out of 2) s; +select count(*) from test_table1_n10; +select count(*) from test_table1_n10 tablesample (bucket 2 out of 2) s; -select count(*) from test_table2; -select count(*) from test_table2 tablesample (bucket 2 out of 2) s; +select count(*) from test_table2_n10; +select count(*) from test_table2_n10 tablesample (bucket 2 out of 2) s; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_25.q 
b/ql/src/test/queries/clientpositive/smb_mapjoin_25.q index a6214c2ca3..56aa331ead 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_25.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_25.q @@ -10,16 +10,16 @@ set hive.exec.max.dynamic.partitions=1000000; set hive.exec.max.created.files=1000000; set hive.map.aggr=true; -create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_1_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_2_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_3_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1; -load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2; -load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3; +load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n4; +load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n4; +load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n4; explain -select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5; +select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5; set hive.optimize.bucketmapjoin=true; set hive.optimize.bucketmapjoin.sortedmerge=true; @@ -32,12 +32,12 @@ set hive.optimize.reducededuplication.min.reducer=1; set hive.auto.convert.sortmerge.join.bigtable.selection.policy=org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSelectorForAutoSMJ; -- explain --- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value +-- select * from smb_bucket_1_n4 a left outer join smb_bucket_2_n4 b on a.key = b.key left outer join src c on a.key=c.value --- select a.key from smb_bucket_1 a +-- select a.key from smb_bucket_1_n4 a explain -select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5; +select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5; -select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5; 
+select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_3.q b/ql/src/test/queries/clientpositive/smb_mapjoin_3.q index 4c3bcc94d4..255b8840ef 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_3.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_3.q @@ -5,50 +5,50 @@ set hive.strict.checks.bucketing=false; -create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_1_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_2_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; +create table smb_bucket_3_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE; -load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1; -load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2; -load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3; +load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n0; +load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n0; +load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n0; set hive.optimize.bucketmapjoin = true; set hive.optimize.bucketmapjoin.sortedmerge = true; set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key; explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key; +select /*+mapjoin(a)*/ * from 
smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key; explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key; -select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key; +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_46.q b/ql/src/test/queries/clientpositive/smb_mapjoin_46.q index ad2941443f..df52226d7e 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_46.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_46.q @@ -13,228 +13,228 @@ CREATE TABLE aux1 (key INT, value INT, col_1 STRING); INSERT INTO aux1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car'); -CREATE TABLE test1 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -INSERT OVERWRITE TABLE test1 +CREATE TABLE test1_n5 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +INSERT OVERWRITE TABLE test1_n5 SELECT * FROM aux1; CREATE TABLE aux2 (key INT, value INT, col_2 STRING); INSERT INTO aux2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None'); -CREATE TABLE test2 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -INSERT OVERWRITE TABLE test2 +CREATE TABLE test2_n3 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +INSERT OVERWRITE TABLE test2_n3 SELECT * FROM aux2; -- Basic outer join EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value); -- Conjunction with pred on multiple inputs and single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND test1_n5.key between 100 and 102 + 
AND test2_n3.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102); -- Conjunction with pred on single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102); -- Conjunction with pred on multiple inputs and none (left outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value AND true); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value AND true); -- Condition on one input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102); -- Disjunction with pred on multiple inputs and single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102); -- Keys plus residual (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND 
(test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)); -- Disjunction with pred on multiple inputs and single inputs (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102); -- Keys plus residual (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)); -- Disjunction with pred on multiple inputs and single inputs (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value 
+ OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102); -- Keys plus residual (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)); diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_47.q b/ql/src/test/queries/clientpositive/smb_mapjoin_47.q index f7d291efdb..a8c02f3b3c 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_47.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_47.q @@ -10,202 +10,202 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; set hive.join.inner.residual=true; -CREATE TABLE aux1 (key INT, value INT, col_1 STRING); -INSERT INTO aux1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +CREATE TABLE aux1_n0 (key INT, value INT, col_1 STRING); +INSERT INTO aux1_n0 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car'); -CREATE TABLE test1 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -INSERT OVERWRITE TABLE test1 -SELECT * FROM aux1; +CREATE TABLE test1_n8 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +INSERT OVERWRITE TABLE test1_n8 +SELECT * FROM aux1_n0; -CREATE TABLE aux2 (key INT, value INT, col_2 STRING); -INSERT INTO aux2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), +CREATE TABLE aux2_n0 (key INT, value INT, col_2 STRING); +INSERT INTO aux2_n0 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None'); -CREATE TABLE test2 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -INSERT OVERWRITE TABLE test2 -SELECT * FROM aux2; +CREATE TABLE test2_n5 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +INSERT OVERWRITE TABLE test2_n5 +SELECT * FROM aux2_n0; -- Conjunction with pred on multiple inputs and single inputs EXPLAIN SELECT * -FROM test1 JOIN test2 -ON 
(test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.value=test2_n5.value + AND test1_n8.key between 100 and 102 + AND test2_n5.key between 100 and 102) LIMIT 10; SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.value=test2_n5.value + AND test1_n8.key between 100 and 102 + AND test2_n5.key between 100 and 102) LIMIT 10; -- Conjunction with pred on multiple inputs and none EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.value=test2_n5.value AND true) LIMIT 10; SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.value=test2_n5.value AND true) LIMIT 10; -- Conjunction with pred on single inputs and none EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102 +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.key between 100 and 102 + AND test2_n5.key between 100 and 102 AND true) LIMIT 10; SELECT * -FROM test1 JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102 +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.key between 100 and 102 + AND test2_n5.key between 100 and 102 AND true) LIMIT 10; -- Disjunction with pred on multiple inputs and single inputs EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.value=test2_n5.value + OR test1_n8.key between 100 and 102 + OR test2_n5.key between 100 and 102) LIMIT 10; SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.value=test2_n5.value + OR test1_n8.key between 100 and 102 + OR test2_n5.key between 100 and 102) LIMIT 10; -- Conjunction with multiple inputs on one side EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - AND test1.key+test2.key <= 102) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.key+test2_n5.key >= 100 + AND test1_n8.key+test2_n5.key <= 102) LIMIT 10; SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - AND test1.key+test2.key <= 102) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.key+test2_n5.key >= 100 + AND test1_n8.key+test2_n5.key <= 102) LIMIT 10; -- Disjunction with multiple inputs on one side EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - OR test1.key+test2.key <= 102) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.key+test2_n5.key >= 100 + OR test1_n8.key+test2_n5.key <= 102) LIMIT 10; SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - OR test1.key+test2.key <= 102) +FROM test1_n8 JOIN test2_n5 +ON (test1_n8.key+test2_n5.key >= 100 + OR test1_n8.key+test2_n5.key <= 102) LIMIT 10; -- Function with multiple inputs on one side EXPLAIN SELECT * -FROM test1 JOIN test2 -ON ((test1.key,test2.key) IN ((100,100),(101,101),(102,102))) +FROM test1_n8 JOIN test2_n5 +ON ((test1_n8.key,test2_n5.key) IN ((100,100),(101,101),(102,102))) LIMIT 10; SELECT * -FROM test1 JOIN test2 -ON ((test1.key,test2.key) IN ((100,100),(101,101),(102,102))) +FROM test1_n8 JOIN test2_n5 +ON ((test1_n8.key,test2_n5.key) IN ((100,100),(101,101),(102,102))) LIMIT 10; -- Chained 1 EXPLAIN SELECT * -FROM test2 -JOIN test1 a ON 
(a.key+test2.key >= 100)
-LEFT OUTER JOIN test1 b ON (b.value = test2.value)
+FROM test2_n5
+JOIN test1_n8 a ON (a.key+test2_n5.key >= 100)
+LEFT OUTER JOIN test1_n8 b ON (b.value = test2_n5.value)
 LIMIT 10;
 SELECT *
-FROM test2
-JOIN test1 a ON (a.key+test2.key >= 100)
-LEFT OUTER JOIN test1 b ON (b.value = test2.value)
+FROM test2_n5
+JOIN test1_n8 a ON (a.key+test2_n5.key >= 100)
+LEFT OUTER JOIN test1_n8 b ON (b.value = test2_n5.value)
 LIMIT 10;
 -- Chained 2
 EXPLAIN
 SELECT *
-FROM test2
-LEFT OUTER JOIN test1 a ON (a.value = test2.value)
-JOIN test1 b ON (b.key+test2.key<= 102)
+FROM test2_n5
+LEFT OUTER JOIN test1_n8 a ON (a.value = test2_n5.value)
+JOIN test1_n8 b ON (b.key+test2_n5.key<= 102)
 LIMIT 10;
 SELECT *
-FROM test2
-LEFT OUTER JOIN test1 a ON (a.value = test2.value)
-JOIN test1 b ON (b.key+test2.key<= 102)
+FROM test2_n5
+LEFT OUTER JOIN test1_n8 a ON (a.value = test2_n5.value)
+JOIN test1_n8 b ON (b.key+test2_n5.key<= 102)
 LIMIT 10;
 -- Chained 3
 EXPLAIN
 SELECT *
-FROM test2
-JOIN test1 a ON (a.key+test2.key >= 100)
-RIGHT OUTER JOIN test1 b ON (b.value = test2.value)
+FROM test2_n5
+JOIN test1_n8 a ON (a.key+test2_n5.key >= 100)
+RIGHT OUTER JOIN test1_n8 b ON (b.value = test2_n5.value)
 LIMIT 10;
 SELECT *
-FROM test2
-JOIN test1 a ON (a.key+test2.key >= 100)
-RIGHT OUTER JOIN test1 b ON (b.value = test2.value)
+FROM test2_n5
+JOIN test1_n8 a ON (a.key+test2_n5.key >= 100)
+RIGHT OUTER JOIN test1_n8 b ON (b.value = test2_n5.value)
 LIMIT 10;
 -- Chained 4
 EXPLAIN
 SELECT *
-FROM test2
-RIGHT OUTER JOIN test1 a ON (a.value = test2.value)
-JOIN test1 b ON (b.key+test2.key<= 102)
+FROM test2_n5
+RIGHT OUTER JOIN test1_n8 a ON (a.value = test2_n5.value)
+JOIN test1_n8 b ON (b.key+test2_n5.key<= 102)
 LIMIT 10;
 SELECT *
-FROM test2
-RIGHT OUTER JOIN test1 a ON (a.value = test2.value)
-JOIN test1 b ON (b.key+test2.key<= 102)
+FROM test2_n5
+RIGHT OUTER JOIN test1_n8 a ON (a.value = test2_n5.value)
+JOIN test1_n8 b ON (b.key+test2_n5.key<= 102)
 LIMIT 10;
 -- Chained 5
 EXPLAIN
 SELECT *
-FROM test2
-JOIN test1 a ON (a.key+test2.key >= 100)
-FULL OUTER JOIN test1 b ON (b.value = test2.value)
+FROM test2_n5
+JOIN test1_n8 a ON (a.key+test2_n5.key >= 100)
+FULL OUTER JOIN test1_n8 b ON (b.value = test2_n5.value)
 LIMIT 10;
 SELECT *
-FROM test2
-JOIN test1 a ON (a.key+test2.key >= 100)
-FULL OUTER JOIN test1 b ON (b.value = test2.value)
+FROM test2_n5
+JOIN test1_n8 a ON (a.key+test2_n5.key >= 100)
+FULL OUTER JOIN test1_n8 b ON (b.value = test2_n5.value)
 LIMIT 10;
 -- Chained 6
 EXPLAIN
 SELECT *
-FROM test2
-FULL OUTER JOIN test1 a ON (a.value = test2.value)
-JOIN test1 b ON (b.key+test2.key<= 102)
+FROM test2_n5
+FULL OUTER JOIN test1_n8 a ON (a.value = test2_n5.value)
+JOIN test1_n8 b ON (b.key+test2_n5.key<= 102)
 LIMIT 10;
 SELECT *
-FROM test2
-FULL OUTER JOIN test1 a ON (a.value = test2.value)
-JOIN test1 b ON (b.key+test2.key<= 102)
+FROM test2_n5
+FULL OUTER JOIN test1_n8 a ON (a.value = test2_n5.value)
+JOIN test1_n8 b ON (b.key+test2_n5.key<= 102)
 LIMIT 10;
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_5.q b/ql/src/test/queries/clientpositive/smb_mapjoin_5.q
index 46acbf9b9d..3ae678e9e6 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_5.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_5.q
@@ -4,13 +4,13 @@ set hive.strict.checks.bucketing=false;
-create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
+create table smb_bucket_1_n2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
+create table smb_bucket_2_n2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
+create table smb_bucket_3_n2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1;
-load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2;
-load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3;
+load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n2;
+load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n2;
+load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n2;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -19,56 +19,56 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 -- SORT_QUERY_RESULTS
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a join smb_bucket_2_n2 b on a.key = b.key join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a join smb_bucket_2_n2 b on a.key = b.key join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a left outer join smb_bucket_2_n2 b on a.key = b.key join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a left outer join smb_bucket_2_n2 b on a.key = b.key join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a left outer join smb_bucket_2_n2 b on a.key = b.key left outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a left outer join smb_bucket_2_n2 b on a.key = b.key left outer join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a left outer join smb_bucket_2_n2 b on a.key = b.key right outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a left outer join smb_bucket_2_n2 b on a.key = b.key right outer join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a left outer join smb_bucket_2_n2 b on a.key = b.key full outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a left outer join smb_bucket_2_n2 b on a.key = b.key full outer join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a right outer join smb_bucket_2_n2 b on a.key = b.key join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a right outer join smb_bucket_2_n2 b on a.key = b.key join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a right outer join smb_bucket_2_n2 b on a.key = b.key left outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a right outer join smb_bucket_2_n2 b on a.key = b.key left outer join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a right outer join smb_bucket_2_n2 b on a.key = b.key right outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a right outer join smb_bucket_2_n2 b on a.key = b.key right outer join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a right outer join smb_bucket_2_n2 b on a.key = b.key full outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a right outer join smb_bucket_2_n2 b on a.key = b.key full outer join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a full outer join smb_bucket_2_n2 b on a.key = b.key join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a full outer join smb_bucket_2_n2 b on a.key = b.key join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a full outer join smb_bucket_2_n2 b on a.key = b.key left outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a full outer join smb_bucket_2_n2 b on a.key = b.key left outer join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a full outer join smb_bucket_2_n2 b on a.key = b.key right outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a full outer join smb_bucket_2_n2 b on a.key = b.key right outer join smb_bucket_3_n2 c on b.key=c.key;
 explain
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key;
-select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a full outer join smb_bucket_2_n2 b on a.key = b.key full outer join smb_bucket_3_n2 c on b.key=c.key;
+select /*+mapjoin(a,c)*/ * from smb_bucket_1_n2 a full outer join smb_bucket_2_n2 b on a.key = b.key full outer join smb_bucket_3_n2 c on b.key=c.key;
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_6.q b/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
index cb7e540e25..1f049dbc7a 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
@@ -3,18 +3,18 @@ set hive.mapred.mode=nonstrict;
 set hive.exec.reducers.max = 1;
-CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE;
+CREATE TABLE smb_bucket4_1_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE;
-CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE;
+CREATE TABLE smb_bucket4_2_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE;
-create table smb_join_results(k1 int, v1 string, k2 int, v2 string);
-create table normal_join_results(k1 int, v1 string, k2 int, v2 string);
+create table smb_join_results_n0(k1 int, v1 string, k2 int, v2 string);
+create table normal_join_results_n0(k1 int, v1 string, k2 int, v2 string);
-insert overwrite table smb_bucket4_1
+insert overwrite table smb_bucket4_1_n0
 select * from src;
-insert overwrite table smb_bucket4_2
+insert overwrite table smb_bucket4_2_n0
 select * from src;
 set hive.optimize.bucketmapjoin = true;
@@ -22,55 +22,55 @@ set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 explain
-insert overwrite table smb_join_results
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key;
-insert overwrite table smb_join_results
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key;
-select * from smb_join_results order by k1;
+select * from smb_join_results_n0 order by k1;
-insert overwrite table normal_join_results select * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key;
+insert overwrite table normal_join_results_n0 select * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key;
-select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results;
-select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results;
+select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results_n0;
+select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results_n0;
 explain
-insert overwrite table smb_join_results
-select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key;
-insert overwrite table smb_join_results
-select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(b)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(b)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key;
-insert overwrite table smb_join_results
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key;
-select * from smb_join_results order by k1;
+select * from smb_join_results_n0 order by k1;
-insert overwrite table normal_join_results select * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key;
+insert overwrite table normal_join_results_n0 select * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key;
-select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results;
-select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results;
+select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results_n0;
+select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results_n0;
 explain
-insert overwrite table smb_join_results
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000;
-insert overwrite table smb_join_results
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key where a.key>1000;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key where a.key>1000;
 explain
-insert overwrite table smb_join_results
-select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000;
-insert overwrite table smb_join_results
-select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(b)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key where a.key>1000;
+insert overwrite table smb_join_results_n0
+select /*+mapjoin(b)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key where a.key>1000;
 explain
-select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key join smb_bucket4_2 c on b.key = c.key where a.key>1000;
-select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key join smb_bucket4_2 c on b.key = c.key where a.key>1000;
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key join smb_bucket4_2_n0 c on b.key = c.key where a.key>1000;
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n0 a join smb_bucket4_2_n0 b on a.key = b.key join smb_bucket4_2_n0 c on b.key = c.key where a.key>1000;
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_8.q b/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
index d2624bba8e..ad9dcaba5c 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
@@ -11,81 +11,81 @@ create table smb_bucket_input (key int, value string) stored as rcfile;
 load data local inpath '../../data/files/smb_bucket_input.rc' into table smb_bucket_input;
-CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS;
+CREATE TABLE smb_bucket4_1_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS;
-CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS;
+CREATE TABLE smb_bucket4_2_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS;
 CREATE TABLE smb_bucket4_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4 or key=2000 or key=4000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=484 or key=3000 or key=5000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4 or key=2000 or key=4000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=484 or key=3000 or key=5000;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
-select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key;
+select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=2000 or key=4000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=3000 or key=5000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=2000 or key=4000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=3000 or key=5000;
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
-select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key;
+select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=5000;
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
-select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key;
+select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000;
-select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
-select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key;
+select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key;
+select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000;
 insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=1000 or key=5000;
-select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
 full outer join smb_bucket4_3 c on a.key=c.key;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000;
 insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=1000 or key=4000;
-select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
 full outer join smb_bucket4_3 c on a.key=c.key;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=5000;
 insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=4000;
-select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
 full outer join smb_bucket4_3 c on a.key=c.key;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=00000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=00000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=4000;
 insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=5000;
-select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
 full outer join smb_bucket4_3 c on a.key=c.key;
-insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000;
-insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000;
+insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000;
+insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=4000;
 insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=5000;
-select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
 full outer join smb_bucket4_3 c on a.key=c.key;
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
index 1547cdff46..08194981e7 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
@@ -1,14 +1,14 @@
 --! qt:dataset:src
-drop table table_desc1;
-drop table table_desc2;
+drop table table_desc1_n3;
+drop table table_desc2_n3;
-create table table_desc1(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;
-create table table_desc2(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;
+create table table_desc1_n3(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;
+create table table_desc2_n3(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;
-insert overwrite table table_desc1 select key, value from src;
-insert overwrite table table_desc2 select key, value from src;
+insert overwrite table table_desc1_n3 select key, value from src;
+insert overwrite table table_desc2_n3 select key, value from src;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -18,7 +18,7 @@ set hive.cbo.enable=false;
 -- So, sort merge join should be performed
 explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10;
+select /*+ mapjoin(b) */ count(*) from table_desc1_n3 a join table_desc2_n3 b on a.key=b.key where a.key < 10;
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10;
+select /*+ mapjoin(b) */ count(*) from table_desc1_n3 a join table_desc2_n3 b on a.key=b.key where a.key < 10;
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
index 9453ed8f05..405c541025 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
@@ -1,16 +1,16 @@
 --! qt:dataset:src
-drop table table_desc1;
-drop table table_desc2;
+drop table table_desc1_n1;
+drop table table_desc2_n1;
-create table table_desc1(key string, value string) clustered by (key, value)
+create table table_desc1_n1(key string, value string) clustered by (key, value)
 sorted by (key DESC, value DESC) into 1 BUCKETS;
-create table table_desc2(key string, value string) clustered by (key, value)
+create table table_desc2_n1(key string, value string) clustered by (key, value)
 sorted by (key DESC, value DESC) into 1 BUCKETS;
-insert overwrite table table_desc1 select key, value from src;
-insert overwrite table table_desc2 select key, value from src;
+insert overwrite table table_desc1_n1 select key, value from src;
+insert overwrite table table_desc2_n1 select key, value from src;
 set hive.cbo.enable=false;
 set hive.optimize.bucketmapjoin = true;
@@ -22,9 +22,9 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 -- So, sort merge join should be performed
 explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n1 a join table_desc2_n1 b
 on a.key=b.key and a.value=b.value where a.key < 10;
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n1 a join table_desc2_n1 b
 on a.key=b.key and a.value=b.value where a.key < 10;
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
index f8cb824eeb..d2f074b6e8 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
@@ -1,16 +1,16 @@
 --! qt:dataset:src
-drop table table_desc1;
-drop table table_desc2;
+drop table table_desc1_n0;
+drop table table_desc2_n0;
-create table table_desc1(key string, value string) clustered by (key, value)
+create table table_desc1_n0(key string, value string) clustered by (key, value)
 sorted by (key DESC, value ASC) into 1 BUCKETS;
-create table table_desc2(key string, value string) clustered by (key, value)
+create table table_desc2_n0(key string, value string) clustered by (key, value)
 sorted by (key DESC, value ASC) into 1 BUCKETS;
-insert overwrite table table_desc1 select key, value from src;
-insert overwrite table table_desc2 select key, value from src;
+insert overwrite table table_desc1_n0 select key, value from src;
+insert overwrite table table_desc2_n0 select key, value from src;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -21,9 +21,9 @@ set hive.cbo.enable=false;
 -- So, sort merge join should be performed
 explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n0 a join table_desc2_n0 b
 on a.key=b.key and a.value=b.value where a.key < 10;
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n0 a join table_desc2_n0 b
 on a.key=b.key and a.value=b.value where a.key < 10;
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
index fbe8e5ba93..d1cfddff91 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
@@ -4,15 +4,15 @@ set hive.cbo.enable=false;
-CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_1_n7 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS;
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src;
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n7 PARTITION (part='1') SELECT * FROM src;
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n17 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS;
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src;
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n17 PARTITION (part='1') SELECT * FROM src;
-ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) SORTED BY (value DESC) INTO 1 BUCKETS;
+ALTER TABLE srcbucket_mapjoin_part_2_n17 CLUSTERED BY (key) SORTED BY (value DESC) INTO 1 BUCKETS;
 set hive.optimize.bucketmapjoin=true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -21,9 +21,9 @@ set hive.optimize.bucketmapjoin.sortedmerge = true;
 EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n7 a JOIN srcbucket_mapjoin_part_2_n17 b
 ON a.key = b.key AND a.part = '1' AND b.part = '1';
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n7 a JOIN srcbucket_mapjoin_part_2_n17 b
 ON a.key = b.key AND a.part = '1' AND b.part = '1';
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
index 7d4dfdf0a6..ab326ccc33 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
@@ -4,15 +4,15 @@ set hive.exec.reducers.max = 1;
-CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_1_n3 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS;
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src;
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n3 PARTITION (part='1') SELECT * FROM src;
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n8 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) SORTED BY (value DESC) INTO 2 BUCKETS;
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src;
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n8 PARTITION (part='1') SELECT * FROM src;
-ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS;
+ALTER TABLE srcbucket_mapjoin_part_2_n8 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS;
 set hive.optimize.bucketmapjoin=true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -21,9 +21,9 @@ set hive.cbo.enable=false;
 EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n3 a JOIN srcbucket_mapjoin_part_2_n8 b
 ON a.key = b.key AND a.part = '1' AND b.part = '1';
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n3 a JOIN srcbucket_mapjoin_part_2_n8 b
 ON a.key = b.key AND a.part = '1' AND b.part = '1';
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
index 8b65854065..c62cf5d6d1 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
@@ -4,21 +4,21 @@ set hive.exec.reducers.max = 1;
-CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_1_n0 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS;
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src;
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n0 PARTITION (part='1') SELECT * FROM src;
-ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS;
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') SELECT * FROM src;
+ALTER TABLE srcbucket_mapjoin_part_1_n0 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS;
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n0 PARTITION (part='2') SELECT * FROM src;
-CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE srcbucket_mapjoin_part_2_n2 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS;
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src;
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n2 PARTITION (part='1') SELECT * FROM src;
-ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS;
-INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='2') SELECT * FROM src;
+ALTER TABLE srcbucket_mapjoin_part_2_n2 CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS;
+INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n2 PARTITION (part='2') SELECT * FROM src;
-ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS;
+ALTER TABLE srcbucket_mapjoin_part_2_n2 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS;
 set hive.optimize.bucketmapjoin=true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
@@ -27,9 +27,9 @@ set hive.cbo.enable=false;
 EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n0 a JOIN srcbucket_mapjoin_part_2_n2 b
 ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n0 a JOIN srcbucket_mapjoin_part_2_n2 b
 ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
index 5903faec2e..fceb2b237b 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
@@ -1,22 +1,22 @@
 --! qt:dataset:src
-drop table table_desc1;
-drop table table_desc2;
+drop table table_desc1_n2;
+drop table table_desc2_n2;
 drop table table_desc3;
 drop table table_desc4;
-create table table_desc1(key string, value string) clustered by (key)
+create table table_desc1_n2(key string, value string) clustered by (key)
 sorted by (key DESC) into 1 BUCKETS;
-create table table_desc2(key string, value string) clustered by (key)
+create table table_desc2_n2(key string, value string) clustered by (key)
 sorted by (key DESC, value DESC) into 1 BUCKETS;
 create table table_desc3(key string, value1 string, value2 string) clustered by (key)
 sorted by (key DESC, value1 DESC,value2 DESC) into 1 BUCKETS;
 create table table_desc4(key string, value2 string) clustered by (key)
 sorted by (key DESC, value2 DESC) into 1 BUCKETS;
-insert overwrite table table_desc1 select key, value from src sort by key DESC;
-insert overwrite table table_desc2 select key, value from src sort by key DESC;
+insert overwrite table table_desc1_n2 select key, value from src sort by key DESC;
+insert overwrite table table_desc2_n2 select key, value from src sort by key DESC;
 insert overwrite table table_desc3 select key, value, concat(value,"_2") as value2 from src sort by key, value, value2 DESC;
 insert overwrite table table_desc4 select key, concat(value,"_2") as value2 from src sort by key, value2 DESC;
@@ -27,10 +27,10 @@ set hive.cbo.enable=false;
 -- columns are sorted by one key in first table, two keys in second table but in same sort order for key. Hence SMB join should pass
 explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n2 a join table_desc2_n2 b
 on a.key=b.key where a.key < 10;
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n2 a join table_desc2_n2 b
 on a.key=b.key where a.key < 10;
 -- columns are sorted by 3 keys(a, b, c) in first table, two keys(a, c) in second table with same sort order. Hence SMB join should not pass
diff --git a/ql/src/test/queries/clientpositive/spark_constprog_dpp.q b/ql/src/test/queries/clientpositive/spark_constprog_dpp.q
index cbe6aa2cca..44c705e849 100644
--- a/ql/src/test/queries/clientpositive/spark_constprog_dpp.q
+++ b/ql/src/test/queries/clientpositive/spark_constprog_dpp.q
@@ -2,16 +2,16 @@ set hive.mapred.mode=nonstrict;
 set hive.optimize.constant.propagation=true;
 set hive.spark.dynamic.partition.pruning=true;
-drop table if exists tb1;
-create table tb1 (id int);
+drop table if exists tb1_n0;
+create table tb1_n0 (id int);
-drop table if exists tb2;
-create table tb2 (id smallint);
+drop table if exists tb2_n0;
+create table tb2_n0 (id smallint);
 explain
-select a.id from tb1 a
+select a.id from tb1_n0 a
 left outer join
-(select id from tb2
+(select id from tb2_n0
 union all
-select 2 as id from tb2 limit 1) b
+select 2 as id from tb2_n0 limit 1) b
 on a.id=b.id;
diff --git a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning.q
index 962d162b31..0b6ecc4e9d 100644
--- a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning.q
+++ b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning.q
@@ -12,82 +12,82 @@ set hive.strict.checks.cartesian.product=false;
 select distinct ds from srcpart;
 select distinct hr from srcpart;
-EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
-create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
-create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr;
-create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr;
-create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr;
+EXPLAIN create table srcpart_date_n4 as select ds as ds, ds as `date` from srcpart group by ds;
+create table srcpart_date_n4 as select ds as ds, ds as `date` from srcpart group by ds;
+create table srcpart_hour_n1 as select hr as hr, hr as hour from srcpart group by hr;
+create table srcpart_date_hour_n1 as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr;
+create table srcpart_double_hour_n1 as select (hr*2) as hr, hr as hour from srcpart group by hr;
 -- single column, single key
 -- join a partitioned table to a non-partitioned table, static filter on the non-partitioned table
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
 set hive.spark.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
 set hive.spark.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = '2008-04-08';
 -- single column, single key, udf with typechange
-EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (day(srcpart.ds) = day(srcpart_date_n4.ds)) where srcpart_date_n4.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n4 on (day(srcpart.ds) = day(srcpart_date_n4.ds)) where srcpart_date_n4.`date` = '2008-04-08';
 set hive.spark.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (day(srcpart.ds) = day(srcpart_date_n4.ds)) where srcpart_date_n4.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n4 on (day(srcpart.ds) = day(srcpart_date_n4.ds)) where srcpart_date_n4.`date` = '2008-04-08';
 set hive.spark.dynamic.partition.pruning=true;
 -- multiple udfs and casts
-EXPLAIN select count(*) from srcpart join srcpart_date on abs(negative(cast(concat(cast(day(srcpart.ds) as string), "0") as bigint)) + 10) = abs(negative(cast(concat(cast(day(srcpart_date.ds) as string), "0") as bigint)) + 10) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on abs(negative(cast(concat(cast(day(srcpart.ds) as string), "0") as bigint)) + 10) = abs(negative(cast(concat(cast(day(srcpart_date.ds) as string), "0") as bigint)) + 10) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on abs(negative(cast(concat(cast(day(srcpart.ds) as string), "0") as bigint)) + 10) = abs(negative(cast(concat(cast(day(srcpart_date_n4.ds) as string), "0") as bigint)) + 10) where srcpart_date_n4.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n4 on abs(negative(cast(concat(cast(day(srcpart.ds) as string), "0") as bigint)) + 10) = abs(negative(cast(concat(cast(day(srcpart_date_n4.ds) as string), "0") as bigint)) + 10) where srcpart_date_n4.`date` = '2008-04-08';
 -- implicit type conversion between join columns
-EXPLAIN select count(*) from srcpart join srcpart_date on cast(day(srcpart.ds) as smallint) = cast(day(srcpart_date.ds) as decimal) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on cast(day(srcpart.ds) as smallint) = cast(day(srcpart_date.ds) as decimal) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on cast(day(srcpart.ds) as smallint) = cast(day(srcpart_date_n4.ds) as decimal) where srcpart_date_n4.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n4 on cast(day(srcpart.ds) as smallint) = cast(day(srcpart_date_n4.ds) as decimal) where srcpart_date_n4.`date` = '2008-04-08';
 -- multiple sources, single key
 -- filter partitioned table on both partitioned columns via join with non-partitioned table
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11;
 set hive.spark.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11;
 set hive.spark.dynamic.partition.pruning=true;
 select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
 -- multiple columns single source
 -- filter partitioned table on both partitioned columns via join with non-partitioned table, filter non-partitioned table
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour_n1 on (srcpart.ds = srcpart_date_hour_n1.ds and srcpart.hr = srcpart_date_hour_n1.hr) where srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_date_hour_n1 on (srcpart.ds = srcpart_date_hour_n1.ds and srcpart.hr = srcpart_date_hour_n1.hr) where srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11;
 set hive.spark.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour_n1 on (srcpart.ds = srcpart_date_hour_n1.ds and srcpart.hr = srcpart_date_hour_n1.hr) where srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_date_hour_n1 on (srcpart.ds = srcpart_date_hour_n1.ds and srcpart.hr = srcpart_date_hour_n1.hr) where srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11;
 set hive.spark.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
 -- empty set
 -- join a partitioned table to a non-partitioned table, static filter on the non-partitioned table that doesn't filter out anything
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = 'I DONT EXIST';
 set hive.spark.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = 'I DONT EXIST';
 set hive.spark.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = 'I DONT EXIST';
 -- expressions
 -- triggers DPP with various expressions - e.g. cast, multiplication, division
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr = cast(srcpart_double_hour_n1.hr/2 as int)) where srcpart_double_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr = cast(srcpart_double_hour_n1.hr/2 as int)) where srcpart_double_hour_n1.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr*2 = srcpart_double_hour_n1.hr) where srcpart_double_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr*2 = srcpart_double_hour_n1.hr) where srcpart_double_hour_n1.hour = 11;
 set hive.spark.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr = cast(srcpart_double_hour_n1.hr/2 as int)) where srcpart_double_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr = cast(srcpart_double_hour_n1.hr/2 as int)) where srcpart_double_hour_n1.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr*2 = srcpart_double_hour_n1.hr) where srcpart_double_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr*2 = srcpart_double_hour_n1.hr) where srcpart_double_hour_n1.hour = 11;
 set hive.spark.dynamic.partition.pruning=true;
 select count(*) from srcpart where hr = 11;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n1 on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour_n1.hr as string)) where srcpart_double_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n1 on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour_n1.hr as string)) where srcpart_double_hour_n1.hour = 11;
 set hive.spark.dynamic.partition.pruning=true;
 select count(*) from srcpart where cast(hr as string) = 11;
@@ -97,29 +97,29 @@ select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart gr
 select count(*) from srcpart where ds = '2008-04-08';
 -- non-equi join
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
-select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+EXPLAIN select count(*) from srcpart, srcpart_date_hour_n1 where (srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11) and (srcpart.ds = srcpart_date_hour_n1.ds or srcpart.hr = srcpart_date_hour_n1.hr);
+select count(*) from srcpart, srcpart_date_hour_n1 where (srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11) and (srcpart.ds = srcpart_date_hour_n1.ds or srcpart.hr = srcpart_date_hour_n1.hr);
 -- old style join syntax
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
-select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+EXPLAIN select count(*) from srcpart, srcpart_date_hour_n1 where srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11 and srcpart.ds = srcpart_date_hour_n1.ds and srcpart.hr = srcpart_date_hour_n1.hr;
+select count(*) from srcpart, srcpart_date_hour_n1 where srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11 and srcpart.ds = srcpart_date_hour_n1.ds and srcpart.hr = srcpart_date_hour_n1.hr;
 -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date_n4 left join srcpart on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
 -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
 -- with static pruning
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11 and srcpart.hr = 11;
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11 and srcpart.hr = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart.hr = 13;
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart.hr = 13;
 -- union + subquery
 EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
@@ -134,35 +134,35 @@ set hive.auto.convert.join.noconditionaltask = true;
 set hive.auto.convert.join.noconditionaltask.size = 10000000;
 -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';
 -- single column, single key, udf with typechange
-EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (day(srcpart.ds) = day(srcpart_date_n4.ds)) where srcpart_date_n4.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date_n4 on (day(srcpart.ds) = day(srcpart_date_n4.ds)) where srcpart_date_n4.`date` = '2008-04-08';
 -- multiple sources, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11;
 select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
 -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour_n1 on (srcpart.ds = srcpart_date_hour_n1.ds and srcpart.hr = srcpart_date_hour_n1.hr) where srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_date_hour_n1 on (srcpart.ds = srcpart_date_hour_n1.ds and srcpart.hr = srcpart_date_hour_n1.hr) where srcpart_date_hour_n1.`date` = '2008-04-08' and srcpart_date_hour_n1.hour = 11;
 select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
 -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = 'I DONT EXIST';
 -- expressions
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
-EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
-select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr = cast(srcpart_double_hour_n1.hr/2 as int)) where srcpart_double_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr = cast(srcpart_double_hour_n1.hr/2 as int)) where srcpart_double_hour_n1.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr*2 = srcpart_double_hour_n1.hr) where srcpart_double_hour_n1.hour = 11;
+select count(*) from srcpart join srcpart_double_hour_n1 on (srcpart.hr*2 = srcpart_double_hour_n1.hr) where srcpart_double_hour_n1.hour = 11;
 select count(*) from srcpart where hr = 11;
 -- parent is reduce tasks
@@ -171,21 +171,21 @@ select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart gr
 select count(*) from srcpart where ds = '2008-04-08';
 -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
-EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date_n4 left join srcpart on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
 -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) where srcpart_date_n4.`date` = '2008-04-08';
 -- with static pruning
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11 and srcpart.hr = 11;
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart_hour_n1.hour = 11 and srcpart.hr = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart.hr = 13;
+select count(*) from srcpart join srcpart_date_n4 on (srcpart.ds = srcpart_date_n4.ds) join srcpart_hour_n1 on (srcpart.hr = srcpart_hour_n1.hr)
+where srcpart_date_n4.`date` = '2008-04-08' and srcpart.hr = 13;
 -- union + subquery
 EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
@@ -199,12 +199,12 @@ set hive.vectorized.execution.enabled=false;
 set hive.exec.max.dynamic.partitions=1000;
 insert into table srcpart_parquet partition (ds, hr) select key, value, ds, hr from srcpart;
-EXPLAIN select count(*) from srcpart_parquet join srcpart_date_hour on (srcpart_parquet.ds = srcpart_date_hour.ds and srcpart_parquet.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
-select count(*) from srcpart_parquet join srcpart_date_hour on (srcpart_parquet.ds = srcpart_date_hour.ds and srcpart_parquet.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
+EXPLAIN select count(*) from srcpart_parquet join srcpart_date_hour_n1 on (srcpart_parquet.ds = srcpart_date_hour_n1.ds and srcpart_parquet.hr = srcpart_date_hour_n1.hr) where srcpart_date_hour_n1.hour = 11 and (srcpart_date_hour_n1.`date` = '2008-04-08' or srcpart_date_hour_n1.`date` = '2008-04-09');
+select count(*) from srcpart_parquet join srcpart_date_hour_n1 on (srcpart_parquet.ds = srcpart_date_hour_n1.ds and srcpart_parquet.hr = srcpart_date_hour_n1.hr) where srcpart_date_hour_n1.hour = 11 and (srcpart_date_hour_n1.`date` = '2008-04-08' or srcpart_date_hour_n1.`date` = '2008-04-09');
 select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11;
 drop table srcpart_parquet;
-drop table srcpart_date;
-drop table srcpart_hour;
-drop table srcpart_date_hour;
-drop table srcpart_double_hour;
+drop table srcpart_date_n4;
+drop table srcpart_hour_n1;
+drop table srcpart_date_hour_n1;
+drop table srcpart_double_hour_n1;
diff --git a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_2.q b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_2.q
index 415af97a9e..3dac85c302 100644
--- a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_2.q
+++ b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_2.q
@@ -11,26 +11,26 @@ set hive.auto.convert.join.noconditionaltask.size = 10000000;
 -- SORT_QUERY_RESULTS
-create table dim_shops (id int, label string) row format delimited fields terminated by ',' stored as textfile;
-load data local inpath '../../data/files/dim_shops.txt' into table dim_shops;
+create table dim_shops_n0 (id int, label string) row format delimited fields terminated by ',' stored as textfile;
+load data local inpath '../../data/files/dim_shops.txt' into table dim_shops_n0;
-create table agg_01 (amount decimal) partitioned by (dim_shops_id int) row format delimited fields terminated by ',' stored as textfile;
-alter table agg_01 add partition (dim_shops_id = 1);
-alter table agg_01 add partition (dim_shops_id = 2);
-alter table agg_01 add partition (dim_shops_id = 3);
+create table agg_01_n0 (amount decimal) partitioned by (dim_shops_id int) row format delimited fields terminated by ',' stored as textfile;
+alter table agg_01_n0 add partition (dim_shops_id = 1);
+alter table agg_01_n0 add partition (dim_shops_id = 2);
+alter table agg_01_n0 add partition (dim_shops_id = 3);
-load data local inpath
'../../data/files/agg_01-p1.txt' into table agg_01 partition (dim_shops_id=1); -load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2); -load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3); +load data local inpath '../../data/files/agg_01-p1.txt' into table agg_01_n0 partition (dim_shops_id=1); +load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01_n0 partition (dim_shops_id=2); +load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01_n0 partition (dim_shops_id=3); -analyze table dim_shops compute statistics; -analyze table agg_01 partition (dim_shops_id) compute statistics; +analyze table dim_shops_n0 compute statistics; +analyze table agg_01_n0 partition (dim_shops_id) compute statistics; -select * from dim_shops; -select * from agg_01; +select * from dim_shops_n0; +select * from agg_01_n0; EXPLAIN SELECT d1.label, count(*), sum(agg.amount) -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id and @@ -39,7 +39,7 @@ GROUP BY d1.label ORDER BY d1.label; SELECT d1.label, count(*), sum(agg.amount) -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id and @@ -50,7 +50,7 @@ ORDER BY d1.label; set hive.spark.dynamic.partition.pruning.max.data.size=1; EXPLAIN SELECT d1.label, count(*), sum(agg.amount) -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id and @@ -59,7 +59,7 @@ GROUP BY d1.label ORDER BY d1.label; SELECT d1.label, count(*), sum(agg.amount) -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id and @@ -68,23 +68,23 @@ GROUP BY d1.label ORDER BY d1.label; EXPLAIN SELECT d1.label -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id; SELECT d1.label -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id; EXPLAIN SELECT agg.amount -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id and agg.dim_shops_id = 1; SELECT agg.amount -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id and agg.dim_shops_id = 1; @@ -92,7 +92,7 @@ and agg.dim_shops_id = 1; set hive.spark.dynamic.partition.pruning.max.data.size=1000000; EXPLAIN SELECT d1.label, count(*), sum(agg.amount) -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id and @@ -101,7 +101,7 @@ GROUP BY d1.label ORDER BY d1.label; SELECT d1.label, count(*), sum(agg.amount) -FROM agg_01 agg, +FROM agg_01_n0 agg, dim_shops d1 WHERE agg.dim_shops_id = d1.id and @@ -111,13 +111,13 @@ ORDER BY d1.label; EXPLAIN -SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' +SELECT amount FROM agg_01_n0, dim_shops_n0 WHERE dim_shops_id = id AND label = 'foo' UNION ALL -SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; +SELECT amount FROM agg_01_n0, dim_shops_n0 WHERE dim_shops_id = id AND label = 'bar'; -SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' +SELECT amount FROM agg_01_n0, dim_shops_n0 WHERE dim_shops_id = id AND label = 'foo' UNION ALL -SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; +SELECT amount FROM agg_01_n0, dim_shops_n0 WHERE dim_shops_id = id AND label = 'bar'; set hive.spark.dynamic.partition.pruning.max.data.size=10000; -- Dynamic partition pruning will be removed as data size exceeds the limit; diff --git 
a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q index ec32c6578f..11ebe6ce6b 100644 --- a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q +++ b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q @@ -14,23 +14,23 @@ insert into table part1 partition (p='1', q='2') values ('3','3'), ('4','4'); insert into table part1 partition (p='2', q='1') values ('5','5'), ('6','6'); insert into table part1 partition (p='2', q='2') values ('7','7'), ('8','8'); -create table part2(key string, value string) partitioned by (p string, q string); -insert into table part2 partition (p='3', q='3') values ('a','a'), ('b','b'); -insert into table part2 partition (p='3', q='4') values ('c','c'), ('d','d'); -insert into table part2 partition (p='4', q='3') values ('e','e'), ('f','f'); -insert into table part2 partition (p='4', q='4') values ('g','g'), ('h','h'); +create table part2_n1(key string, value string) partitioned by (p string, q string); +insert into table part2_n1 partition (p='3', q='3') values ('a','a'), ('b','b'); +insert into table part2_n1 partition (p='3', q='4') values ('c','c'), ('d','d'); +insert into table part2_n1 partition (p='4', q='3') values ('e','e'), ('f','f'); +insert into table part2_n1 partition (p='4', q='4') values ('g','g'), ('h','h'); -- dpp works should be combined explain select * from (select part1.key, part1.value from part1 join src on part1.p=src.key) a union all - (select part2.key, part2.value from part2 join src on part2.p=src.key); + (select part2_n1.key, part2_n1.value from part2_n1 join src on part2_n1.p=src.key); select * from (select part1.key, part1.value from part1 join src on part1.p=src.key) a union all - (select part2.key, part2.value from part2 join src on part2.p=src.key); + (select part2_n1.key, part2_n1.value from part2_n1 join src on part2_n1.p=src.key); -- verify result set hive.spark.dynamic.partition.pruning=false; @@ -38,7 +38,7 @@ set hive.spark.dynamic.partition.pruning=false; select * from (select part1.key, part1.value from part1 join src on part1.p=src.key) a union all - (select part2.key, part2.value from part2 join src on part2.p=src.key); + (select part2_n1.key, part2_n1.value from part2_n1 join src on part2_n1.p=src.key); set hive.spark.dynamic.partition.pruning=true; @@ -47,12 +47,12 @@ explain select * from (select part1.key, part1.value from part1 join src on part1.p=src.key) a union all - (select part2.key, part2.value from part2 join src on part2.q=src.key); + (select part2_n1.key, part2_n1.value from part2_n1 join src on part2_n1.q=src.key); select * from (select part1.key, part1.value from part1 join src on part1.p=src.key) a union all - (select part2.key, part2.value from part2 join src on part2.q=src.key); + (select part2_n1.key, part2_n1.value from part2_n1 join src on part2_n1.q=src.key); -- verify result set hive.spark.dynamic.partition.pruning=false; @@ -60,7 +60,7 @@ set hive.spark.dynamic.partition.pruning=false; select * from (select part1.key, part1.value from part1 join src on part1.p=src.key) a union all - (select part2.key, part2.value from part2 join src on part2.q=src.key); + (select part2_n1.key, part2_n1.value from part2_n1 join src on part2_n1.q=src.key); set hive.spark.dynamic.partition.pruning=true; @@ -81,14 +81,14 @@ explain select * from (select part1.key, part1.value from part1 join src on part1.p=src.key) a union all - (select part2.key, part2.value from part2 join src on 
part2.p=src.value); + (select part2_n1.key, part2_n1.value from part2_n1 join src on part2_n1.p=src.value); -- dpp works shouldn't be combined explain select * from (select part1.key, part1.value from part1 join src on part1.p=upper(src.key)) a union all - (select part2.key, part2.value from part2 join src on part2.p=src.key); + (select part2_n1.key, part2_n1.value from part2_n1 join src on part2_n1.p=src.key); -- dpp works should be combined explain @@ -97,14 +97,14 @@ with top as select * from (select part1.key, part1.value from part1 join top on part1.q=top.key) a union all - (select part2.key, part2.value from part2 join top on part2.q=top.key); + (select part2_n1.key, part2_n1.value from part2_n1 join top on part2_n1.q=top.key); with top as (select key from src order by key limit 200) select * from (select part1.key, part1.value from part1 join top on part1.q=top.key) a union all - (select part2.key, part2.value from part2 join top on part2.q=top.key); + (select part2_n1.key, part2_n1.value from part2_n1 join top on part2_n1.q=top.key); -- verify result set hive.spark.dynamic.partition.pruning=false; @@ -114,7 +114,7 @@ with top as select * from (select part1.key, part1.value from part1 join top on part1.q=top.key) a union all - (select part2.key, part2.value from part2 join top on part2.q=top.key); + (select part2_n1.key, part2_n1.value from part2_n1 join top on part2_n1.q=top.key); set hive.spark.dynamic.partition.pruning=true; @@ -125,14 +125,14 @@ with top as select * from (select part1.key, part1.value from part1 join top on part1.p=top.key and part1.q=top.key) a union all - (select part2.key, part2.value from part2 join top on part2.p=top.key and part2.q=top.key); + (select part2_n1.key, part2_n1.value from part2_n1 join top on part2_n1.p=top.key and part2_n1.q=top.key); with top as (select key, value from src order by key, value limit 200) select * from (select part1.key, part1.value from part1 join top on part1.p=top.key and part1.q=top.key) a union all - (select part2.key, part2.value from part2 join top on part2.p=top.key and part2.q=top.key); + (select part2_n1.key, part2_n1.value from part2_n1 join top on part2_n1.p=top.key and part2_n1.q=top.key); -- verify result set hive.spark.dynamic.partition.pruning=false; @@ -142,7 +142,7 @@ with top as select * from (select part1.key, part1.value from part1 join top on part1.p=top.key and part1.q=top.key) a union all - (select part2.key, part2.value from part2 join top on part2.p=top.key and part2.q=top.key); + (select part2_n1.key, part2_n1.value from part2_n1 join top on part2_n1.p=top.key and part2_n1.q=top.key); set hive.spark.dynamic.partition.pruning=true; @@ -153,26 +153,26 @@ with top as select * from (select part1.key, part1.value from part1 join top on part1.p=top.key and part1.q=top.key) a union all - (select part2.key, part2.value from part2 join top on part2.p=top.key and part2.q=top.value); + (select part2_n1.key, part2_n1.value from part2_n1 join top on part2_n1.p=top.key and part2_n1.q=top.value); -- The following test case makes sure target map works can read from multiple DPP sinks, -- when the DPP sinks have different target lists -- see HIVE-18111 -create table foo(key string); -insert into table foo values ('1'),('2'); +create table foo_n2(key string); +insert into table foo_n2 values ('1'),('2'); set hive.cbo.enable = false; explain -select p from part2 where p in (select max(key) from foo) +select p from part2_n1 where p in (select max(key) from foo_n2) union all -select p from part1 where p in (select 
max(key) from foo union all select min(key) from foo); +select p from part1 where p in (select max(key) from foo_n2 union all select min(key) from foo_n2); -select p from part2 where p in (select max(key) from foo) +select p from part2_n1 where p in (select max(key) from foo_n2) union all -select p from part1 where p in (select max(key) from foo union all select min(key) from foo); +select p from part1 where p in (select max(key) from foo_n2 union all select min(key) from foo_n2); -drop table foo; +drop table foo_n2; drop table part1; -drop table part2; +drop table part2_n1; diff --git a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_5.q b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_5.q index 2b74f3d165..d0814199cc 100644 --- a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_5.q +++ b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_5.q @@ -4,23 +4,23 @@ set hive.spark.dynamic.partition.pruning=true; -- This qfile tests whether we can handle nested DPP sinks -create table part1(key string, value string) partitioned by (p string); -insert into table part1 partition (p='1') select * from src; +create table part1_n0(key string, value string) partitioned by (p string); +insert into table part1_n0 partition (p='1') select * from src; -create table part2(key string, value string) partitioned by (p string); -insert into table part2 partition (p='1') select * from src; +create table part2_n3(key string, value string) partitioned by (p string); +insert into table part2_n3 partition (p='1') select * from src; create table regular1 as select * from src limit 2; -- nested DPP is removed, upper most DPP is w/ common join -explain select * from src join part1 on src.key=part1.p join part2 on src.value=part2.p; +explain select * from src join part1_n0 on src.key=part1_n0.p join part2_n3 on src.value=part2_n3.p; -- nested DPP is removed, upper most DPP is w/ map join set hive.auto.convert.join=true; -- ensure regular1 is treated as small table, and partitioned tables are not set hive.auto.convert.join.noconditionaltask.size=20; -explain select * from regular1 join part1 on regular1.key=part1.p join part2 on regular1.value=part2.p; +explain select * from regular1 join part1_n0 on regular1.key=part1_n0.p join part2_n3 on regular1.value=part2_n3.p; -drop table part1; -drop table part2; +drop table part1_n0; +drop table part2_n3; drop table regular1; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_mapjoin_only.q b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_mapjoin_only.q index dde6d1aa8e..ba44aefbc4 100644 --- a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_mapjoin_only.q +++ b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_mapjoin_only.q @@ -6,12 +6,12 @@ set hive.optimize.metadataonly=false; set hive.optimize.index.filter=true; set hive.strict.checks.cartesian.product=false; --- srcpart_date is the small table that will use map join. srcpart2 is the big table. --- both srcpart_date and srcpart2 will be joined with srcpart -create table srcpart_date as select ds as ds, ds as ds2 from srcpart group by ds; +-- srcpart_date_n3 is the small table that will use map join. srcpart2 is the big table. 
+-- both srcpart_date_n3 and srcpart2 will be joined with srcpart +create table srcpart_date_n3 as select ds as ds, ds as ds2 from srcpart group by ds; create table srcpart2 as select * from srcpart; --- enable map join and set the size to be small so that only join with srcpart_date gets to be a +-- enable map join and set the size to be small so that only join with srcpart_date_n3 gets to be a -- map join set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask.size=100; @@ -20,21 +20,21 @@ set hive.auto.convert.join.noconditionaltask.size=100; -- expectation: 2 spark jobs EXPLAIN select * from srcpart - join srcpart_date on (srcpart.ds = srcpart_date.ds) + join srcpart_date_n3 on (srcpart.ds = srcpart_date_n3.ds) join srcpart2 on (srcpart.hr = srcpart2.hr) - where srcpart_date.ds2 = '2008-04-08' + where srcpart_date_n3.ds2 = '2008-04-08' and srcpart2.hr = 11; -- checking with dpp enabled for all joins --- both join parts of srcpart_date and srcpart2 scans will result in partition pruning sink +-- both join parts of srcpart_date_n3 and srcpart2 scans will result in partition pruning sink -- scan with srcpart2 will get split resulting in additional spark jobs -- expectation: 3 spark jobs set hive.spark.dynamic.partition.pruning=true; EXPLAIN select * from srcpart - join srcpart_date on (srcpart.ds = srcpart_date.ds) + join srcpart_date_n3 on (srcpart.ds = srcpart_date_n3.ds) join srcpart2 on (srcpart.hr = srcpart2.hr) - where srcpart_date.ds2 = '2008-04-08' + where srcpart_date_n3.ds2 = '2008-04-08' and srcpart2.hr = 11; -- Restrict dpp to be enabled only for map joins @@ -43,10 +43,10 @@ set hive.spark.dynamic.partition.pruning.map.join.only=true; set hive.spark.dynamic.partition.pruning=false; EXPLAIN select * from srcpart - join srcpart_date on (srcpart.ds = srcpart_date.ds) + join srcpart_date_n3 on (srcpart.ds = srcpart_date_n3.ds) join srcpart2 on (srcpart.hr = srcpart2.hr) - where srcpart_date.ds2 = '2008-04-08' + where srcpart_date_n3.ds2 = '2008-04-08' and srcpart2.hr = 11; -drop table srcpart_date; +drop table srcpart_date_n3; drop table srcpart2; diff --git a/ql/src/test/queries/clientpositive/spark_explainuser_1.q b/ql/src/test/queries/clientpositive/spark_explainuser_1.q index 7a11665b5b..27183292ad 100644 --- a/ql/src/test/queries/clientpositive/spark_explainuser_1.q +++ b/ql/src/test/queries/clientpositive/spark_explainuser_1.q @@ -13,26 +13,26 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; set hive.spark.explain.user=true; -explain create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc; -create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc; +explain create table src_orc_merge_test_part_n0(key int, value string) partitioned by (ds string, ts string) stored as orc; +create table src_orc_merge_test_part_n0(key int, value string) partitioned by (ds string, ts string) stored as orc; -alter table src_orc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31'); -desc extended src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31'); +alter table src_orc_merge_test_part_n0 add partition (ds='2012-01-03', ts='2012-01-03+14:46:31'); +desc extended src_orc_merge_test_part_n0 partition (ds='2012-01-03', ts='2012-01-03+14:46:31'); -explain insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src; -insert overwrite table 
src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src; -explain insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100; +explain insert overwrite table src_orc_merge_test_part_n0 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src; +insert overwrite table src_orc_merge_test_part_n0 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src; +explain insert into table src_orc_merge_test_part_n0 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100; -explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'; -explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'; +explain select count(1) from src_orc_merge_test_part_n0 where ds='2012-01-03' and ts='2012-01-03+14:46:31'; +explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n0 where ds='2012-01-03' and ts='2012-01-03+14:46:31'; -alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate; +alter table src_orc_merge_test_part_n0 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate; -explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'; -explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'; +explain select count(1) from src_orc_merge_test_part_n0 where ds='2012-01-03' and ts='2012-01-03+14:46:31'; +explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n0 where ds='2012-01-03' and ts='2012-01-03+14:46:31'; -drop table src_orc_merge_test_part; +drop table src_orc_merge_test_part_n0; set hive.auto.convert.join=true; @@ -128,7 +128,7 @@ having not exists ) ; -create view cv1 as +create view cv1_n3 as select * from src_cbo b where exists @@ -137,7 +137,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') ; -explain select * from cv1; +explain select * from cv1_n3; explain select * from (select * @@ -250,15 +250,15 @@ FROM (select x.key AS key, count(1) AS cnt FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key) GROUP BY x.key) tmp; -explain create table abcd (a int, b int, c int, d int); -create table abcd (a int, b int, c int, d int); -LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd; +explain create table abcd_n0 (a int, b int, c int, d int); +create table abcd_n0 (a int, b int, c int, d int); +LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd_n0; set hive.map.aggr=true; -explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a; +explain select a, count(distinct b), count(distinct c), sum(d) from abcd_n0 group by a; set hive.map.aggr=false; -explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a; +explain select a, count(distinct b), count(distinct c), sum(d) from abcd_n0 group by a; explain create table src_rc_merge_test(key int, value string) stored as rcfile; create table src_rc_merge_test(key int, value string) stored as rcfile; @@ -289,20 +289,20 @@ drop table tgt_rc_merge_test; explain select src.key from src cross join src src2; -explain create table nzhang_Tmp(a int, b string); -create table nzhang_Tmp(a int, b string); +explain create table nzhang_Tmp_n0(a int, b string); +create table nzhang_Tmp_n0(a int, b string); 
-explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10; -create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10; +explain create table nzhang_CTAS1_n0 as select key k, value from src sort by k, value limit 10; +create table nzhang_CTAS1_n0 as select key k, value from src sort by k, value limit 10; -explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10; +explain create table nzhang_ctas3_n0 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10; -create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10; +create table nzhang_ctas3_n0 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10; -explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2; +explain create table if not exists nzhang_ctas3_n0 as select key, value from src sort by key, value limit 2; -create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2; +create table if not exists nzhang_ctas3_n0 as select key, value from src sort by key, value limit 2; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; @@ -324,24 +324,24 @@ select src1.key as k1, src1.value as v1, SORT BY k1, v1, k2, v2; -CREATE TABLE myinput1(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1; +CREATE TABLE myinput1_n6(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1_n6; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value; +explain select * from myinput1_n6 a join myinput1_n6 b on a.key<=>b.value; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key; +explain select * from myinput1_n6 a join myinput1_n6 b on a.key<=>b.value join myinput1_n6 c on a.key=c.key; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key; +explain select * from myinput1_n6 a join myinput1_n6 b on a.key<=>b.value join myinput1_n6 c on a.key<=>c.key; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value; +explain select * from myinput1_n6 a join myinput1_n6 b on a.key<=>b.value AND a.value=b.key join myinput1_n6 c on a.key<=>c.key AND a.value=c.value; -explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value; +explain select * from myinput1_n6 a join myinput1_n6 b on a.key<=>b.value AND a.value<=>b.key join myinput1_n6 c on a.key<=>c.key AND a.value<=>c.value; -explain select * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value; -explain select * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value; -explain select * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value; +explain select * FROM myinput1_n6 a LEFT OUTER JOIN myinput1_n6 b ON 
a.key<=>b.value; +explain select * FROM myinput1_n6 a RIGHT OUTER JOIN myinput1_n6 b ON a.key<=>b.value; +explain select * FROM myinput1_n6 a FULL OUTER JOIN myinput1_n6 b ON a.key<=>b.value; -explain select /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value; +explain select /*+ MAPJOIN(b) */ * FROM myinput1_n6 a JOIN myinput1_n6 b ON a.key<=>b.value; CREATE TABLE smb_input(key int, value int); LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input; @@ -350,24 +350,24 @@ LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input; ; -CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; +CREATE TABLE smb_input1_n1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; +CREATE TABLE smb_input2_n1(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; from smb_input -insert overwrite table smb_input1 select * -insert overwrite table smb_input2 select *; +insert overwrite table smb_input1_n1 select * +insert overwrite table smb_input2_n1 select *; SET hive.optimize.bucketmapjoin = true; SET hive.optimize.bucketmapjoin.sortedmerge = true; SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; -analyze table smb_input1 compute statistics; +analyze table smb_input1_n1 compute statistics; -explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key; -explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value; -explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key; -explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key; -explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key; +explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n1 a JOIN smb_input1_n1 b ON a.key <=> b.key; +explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n1 a JOIN smb_input1_n1 b ON a.key <=> b.key AND a.value <=> b.value; +explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n1 a RIGHT OUTER JOIN smb_input1_n1 b ON a.key <=> b.key; +explain select /*+ MAPJOIN(b) */ * FROM smb_input1_n1 a JOIN smb_input1_n1 b ON a.key <=> b.key; +explain select /*+ MAPJOIN(b) */ * FROM smb_input1_n1 a LEFT OUTER JOIN smb_input1_n1 b ON a.key <=> b.key; drop table sales; drop table things; @@ -525,13 +525,13 @@ order by p_name ; -explain create view IF NOT EXISTS mfgr_price_view as +explain create view IF NOT EXISTS mfgr_price_view_n1 as select p_mfgr, p_brand, sum(p_retailprice) as s from part group by p_mfgr, p_brand; -CREATE TABLE part_4( +CREATE TABLE part_4_n0( p_mfgr STRING, p_name STRING, p_size INT, @@ -539,7 +539,7 @@ r INT, dr INT, s DOUBLE); -CREATE TABLE part_5( +CREATE TABLE part_5_n0( p_mfgr STRING, p_name STRING, p_size INT, @@ -553,11 +553,11 @@ explain from noop(on part partition by p_mfgr order by p_name) -INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, +INSERT OVERWRITE TABLE part_4_n0 select p_mfgr, p_name, p_size, rank() over (distribute by p_mfgr sort by p_name) as r, dense_rank() over (distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s -INSERT OVERWRITE TABLE part_5 select p_mfgr,p_name, p_size, +INSERT OVERWRITE TABLE part_5_n0 select p_mfgr,p_name, 
p_size, round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as r, dense_rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as dr, @@ -622,41 +622,41 @@ explain select explode(array('a', 'b')); set hive.optimize.skewjoin = true; set hive.skewjoin.key = 2; -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n116(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n68(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n24(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T4_n13(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE dest_j1_n14(key INT, value STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n116; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n68; +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n24; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n13; explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n14 select src1.key, src2.value; FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value; +INSERT OVERWRITE TABLE dest_j1_n14 select src1.key, src2.value; explain select /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key; +FROM T1_n116 a JOIN T2_n68 b ON a.key = b.key + JOIN T3_n24 c ON b.key = c.key + JOIN T4_n13 d ON c.key = d.key; explain select /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key; +FROM T1_n116 a JOIN T2_n68 b ON a.key = b.key + JOIN T3_n24 c ON b.key = c.key + JOIN T4_n13 d ON c.key = d.key; -explain FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); -FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +explain FROM T1_n116 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +FROM T1_n116 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); explain select * FROM @@ -666,16 +666,16 @@ JOIN ON (x.key = Y.key); -explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val; +explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n116 k join T1_n116 v on k.key=v.val; -explain select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key; +explain select sum(hash(k.key)), sum(hash(v.val)) from T1_n116 k join T1_n116 v on k.key=v.key; -explain select count(1) from T1 a join T1 b on a.key = b.key; +explain select count(1) from T1_n116 a join T1_n116 b on a.key = b.key; -explain 
FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +explain FROM T1_n116 a LEFT OUTER JOIN T2_n68 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); -explain FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +explain FROM T1_n116 a RIGHT OUTER JOIN T2_n68 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); -explain FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); +explain FROM T1_n116 a FULL OUTER JOIN T2_n68 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)); -explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key; +explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n116 k left outer join T1_n116 v on k.key+1=v.key; diff --git a/ql/src/test/queries/clientpositive/spark_multi_insert_parallel_orderby.q b/ql/src/test/queries/clientpositive/spark_multi_insert_parallel_orderby.q index bef1f853fa..bc6ed43e79 100644 --- a/ql/src/test/queries/clientpositive/spark_multi_insert_parallel_orderby.q +++ b/ql/src/test/queries/clientpositive/spark_multi_insert_parallel_orderby.q @@ -5,8 +5,8 @@ set hive.optimize.sampling.orderby=true; -- SORT_QUERY_RESULTS -create table e1 (key string, value string); -create table e2 (key string); +create table e1_n3 (key string, value string); +create table e2_n4 (key string); --test orderby+limit case explain @@ -16,53 +16,53 @@ select key,value from src order by key limit 10; --test orderby+limit+multi_insert case explain FROM (select key,value from src order by key limit 10) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n3 SELECT key, value -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n4 SELECT key; FROM (select key,value from src order by key limit 10) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n3 SELECT key, value -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n4 SELECT key; -select * from e1; -select * from e2; +select * from e1_n3; +select * from e2_n4; --test orderby in multi_insert case explain FROM (select key,value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n3 select key,value -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n4 select key; FROM (select key,value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n3 select key,value -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n4 select key; -select * from e1; -select * from e2; +select * from e1_n3; +select * from e2_n4; --test limit in subquery of multi_insert case explain FROM (select key,value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n3 select key,value limit 10 -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n4 select key; FROM (select key,value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n3 select key,value limit 10 -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n4 select key; --- the result of e1 is not the top 10, just randomly get 10 elements,so count the number of e1 ---select * from e1; -select count(*) from e1; -select * from e2; +-- the result of e1_n3 is not the top 10, just randomly get 10 elements,so count the number of e1_n3 +--select * from e1_n3; +select count(*) 
from e1_n3; +select * from e2_n4; -drop table e1; -drop table e2; +drop table e1_n3; +drop table e2_n4; diff --git a/ql/src/test/queries/clientpositive/spark_vectorized_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/spark_vectorized_dynamic_partition_pruning.q index 47c826adba..22bb502129 100644 --- a/ql/src/test/queries/clientpositive/spark_vectorized_dynamic_partition_pruning.q +++ b/ql/src/test/queries/clientpositive/spark_vectorized_dynamic_partition_pruning.q @@ -11,47 +11,47 @@ set hive.strict.checks.cartesian.product=false; select distinct ds from srcpart; select distinct hr from srcpart; -EXPLAIN VECTORIZATION DETAIL create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds; -create table srcpart_date stored as orc as select ds as ds, ds as `date` from srcpart group by ds; +EXPLAIN VECTORIZATION DETAIL create table srcpart_date_n0 as select ds as ds, ds as `date` from srcpart group by ds; +create table srcpart_date_n0 stored as orc as select ds as ds, ds as `date` from srcpart group by ds; create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr; create table srcpart_date_hour stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr; create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr; -- single column, single key -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; set hive.spark.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; set hive.spark.dynamic.partition.pruning=true; select count(*) from srcpart where ds = '2008-04-08'; -- single column, single key, udf with typechange -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (day(srcpart.ds) = day(srcpart_date_n0.ds)) where srcpart_date_n0.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n0 on (day(srcpart.ds) = day(srcpart_date_n0.ds)) where srcpart_date_n0.`date` = '2008-04-08'; set hive.spark.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where 
srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (day(srcpart.ds) = day(srcpart_date_n0.ds)) where srcpart_date_n0.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n0 on (day(srcpart.ds) = day(srcpart_date_n0.ds)) where srcpart_date_n0.`date` = '2008-04-08'; set hive.spark.dynamic.partition.pruning=true; -- multiple udfs and casts -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on abs(negative(cast(concat(cast(day(srcpart.ds) as string), "0") as bigint)) + 10) = abs(negative(cast(concat(cast(day(srcpart_date.ds) as string), "0") as bigint)) + 10) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on abs(negative(cast(concat(cast(day(srcpart.ds) as string), "0") as bigint)) + 10) = abs(negative(cast(concat(cast(day(srcpart_date.ds) as string), "0") as bigint)) + 10) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on abs(negative(cast(concat(cast(day(srcpart.ds) as string), "0") as bigint)) + 10) = abs(negative(cast(concat(cast(day(srcpart_date_n0.ds) as string), "0") as bigint)) + 10) where srcpart_date_n0.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n0 on abs(negative(cast(concat(cast(day(srcpart.ds) as string), "0") as bigint)) + 10) = abs(negative(cast(concat(cast(day(srcpart_date_n0.ds) as string), "0") as bigint)) + 10) where srcpart_date_n0.`date` = '2008-04-08'; -- implicit type conversion between join columns -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on cast(day(srcpart.ds) as smallint) = cast(day(srcpart_date.ds) as decimal) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on cast(day(srcpart.ds) as smallint) = cast(day(srcpart_date.ds) as decimal) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on cast(day(srcpart.ds) as smallint) = cast(day(srcpart_date_n0.ds) as decimal) where srcpart_date_n0.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n0 on cast(day(srcpart.ds) as smallint) = cast(day(srcpart_date_n0.ds) as decimal) where srcpart_date_n0.`date` = '2008-04-08'; -- multiple sources, single key -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11; set hive.spark.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = 
srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11; set hive.spark.dynamic.partition.pruning=true; select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; @@ -65,11 +65,11 @@ set hive.spark.dynamic.partition.pruning=true; select count(*) from srcpart where ds = '2008-04-08' and hr = 11; -- empty set -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = 'I DONT EXIST'; set hive.spark.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = 'I DONT EXIST'; set hive.spark.dynamic.partition.pruning=true; select count(*) from srcpart where ds = 'I DONT EXIST'; @@ -105,21 +105,21 @@ EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart, srcpart_date_hour whe select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; -- left join -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart left join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart_date_n0 left join srcpart on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; -- full outer -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select 
count(*) from srcpart full outer join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; -- with static pruning -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart.hr = 13; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart.hr = 13; -- union + subquery EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); @@ -134,19 +134,19 @@ set hive.auto.convert.join.noconditionaltask = true; set hive.auto.convert.join.noconditionaltask.size = 10000000; -- single column, single key -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; select count(*) from srcpart where ds = '2008-04-08'; -- single column, single key, udf with typechange -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (day(srcpart.ds) = day(srcpart_date_n0.ds)) where srcpart_date_n0.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n0 on 
(day(srcpart.ds) = day(srcpart_date_n0.ds)) where srcpart_date_n0.`date` = '2008-04-08'; -- multiple sources, single key -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11; select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; -- multiple columns single source @@ -155,8 +155,8 @@ select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_dat select count(*) from srcpart where ds = '2008-04-08' and hr = 11; -- empty set -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = 'I DONT EXIST'; -- expressions EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; @@ -171,27 +171,27 @@ select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart gr select count(*) from srcpart where ds = '2008-04-08'; -- left join -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart left join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart_date_n0 left join srcpart on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; -- full outer -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart full outer join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) where srcpart_date_n0.`date` = '2008-04-08'; -- with static pruning -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; -select count(*) from srcpart join 
srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; -EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +EXPLAIN VECTORIZATION DETAIL select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart.hr = 13; +select count(*) from srcpart join srcpart_date_n0 on (srcpart.ds = srcpart_date_n0.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date_n0.`date` = '2008-04-08' and srcpart.hr = 13; -- union + subquery EXPLAIN VECTORIZATION DETAIL select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); -drop table srcpart_date; +drop table srcpart_date_n0; drop table srcpart_hour; drop table srcpart_date_hour; drop table srcpart_double_hour; diff --git a/ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q b/ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q index 67ba28db8f..08df0d803c 100644 --- a/ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q +++ b/ql/src/test/queries/clientpositive/special_character_in_tabnames_1.q @@ -537,7 +537,7 @@ having not exists -- view test -create view cv1 as +create view cv1_n0 as select * @@ -555,7 +555,7 @@ where exists -select * from cv1 +select * from cv1_n0 ; @@ -957,33 +957,33 @@ set hive.auto.convert.join=false; -- 10. 
Test views -create view v1 as select c_int, value, c_boolean, dt from `c/b/o_t1`; +create view v1_n7 as select c_int, value, c_boolean, dt from `c/b/o_t1`; -create view v2 as select c_int, value from `//cbo_t2`; +create view v2_n2 as select c_int, value from `//cbo_t2`; -select value from v1 where c_boolean=false; +select value from v1_n7 where c_boolean=false; -select max(c_int) from v1 group by (c_boolean); +select max(c_int) from v1_n7 group by (c_boolean); -select count(v1.c_int) from v1 join `//cbo_t2` on v1.c_int = `//cbo_t2`.c_int; +select count(v1_n7.c_int) from v1_n7 join `//cbo_t2` on v1_n7.c_int = `//cbo_t2`.c_int; -select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int; +select count(v1_n7.c_int) from v1_n7 join v2_n2 on v1_n7.c_int = v2_n2.c_int; -select count(*) from v1 a join v1 b on a.value = b.value; +select count(*) from v1_n7 a join v1_n7 b on a.value = b.value; -create view v3 as select v1.value val from v1 join `c/b/o_t1` on v1.c_boolean = `c/b/o_t1`.c_boolean; +create view v3_n0 as select v1_n7.value val from v1_n7 join `c/b/o_t1` on v1_n7.c_boolean = `c/b/o_t1`.c_boolean; -select count(val) from v3 where val != '1'; +select count(val) from v3_n0 where val != '1'; with q1 as ( select key from `c/b/o_t1` where key = '1') @@ -991,13 +991,13 @@ select count(*) from q1; -with q1 as ( select value from v1 where c_boolean = false) +with q1 as ( select value from v1_n7 where c_boolean = false) select count(value) from q1 ; -create view v4 as +create view v4_n0 as with q1 as ( select key,c_int from `c/b/o_t1` where key = '1') @@ -1009,7 +1009,7 @@ select * from q1 with q1 as ( select c_int from q2 where c_boolean = false), -q2 as ( select c_int,c_boolean from v1 where value = '1') +q2 as ( select c_int,c_boolean from v1_n7 where value = '1') select sum(c_int) from (select c_int from q1) a; @@ -1017,21 +1017,21 @@ select sum(c_int) from (select c_int from q1) a; with q1 as ( select `c/b/o_t1`.c_int c_int from q2 join `c/b/o_t1` where q2.c_int = `c/b/o_t1`.c_int and `c/b/o_t1`.dt='2014'), -q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +q2 as ( select c_int,c_boolean from v1_n7 where value = '1' or dt = '14') -select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int; +select count(*) from q1 join q2 join v4_n0 on q1.c_int = q2.c_int and v4_n0.c_int = q2.c_int; -drop view v1; +drop view v1_n7; -drop view v2; +drop view v2_n2; -drop view v3; +drop view v3_n0; -drop view v4; +drop view v4_n0; set hive.cbo.enable=false;
diff --git a/ql/src/test/queries/clientpositive/sqlmerge.q b/ql/src/test/queries/clientpositive/sqlmerge.q index deaf91e37f..b35e3c8d48 100644 --- a/ql/src/test/queries/clientpositive/sqlmerge.q +++ b/ql/src/test/queries/clientpositive/sqlmerge.q @@ -4,15 +4,15 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.explain.user=false; set hive.merge.cardinality.check=true; -create table acidTbl(a int, b int) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -create table nonAcidOrcTbl(a int, b int) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='false'); +create table acidTbl_n0(a int, b int) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); +create table nonAcidOrcTbl_n0(a int, b int) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='false'); --expect a cardinality check because there is an update clause and hive.merge.cardinality.check=true by default
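-- A minimal sketch for illustration (hypothetical values, not part of sqlmerge.q):
-- the cardinality check enforces the SQL rule that a MERGE may match each target
-- row at most once, so a duplicated source key makes the statement fail at runtime
-- instead of silently updating the same row twice.
-- insert into nonAcidOrcTbl_n0 values (1, 10), (1, 20);  -- two source rows with a = 1
-- insert into acidTbl_n0 values (1, 1);
-- merge into acidTbl_n0 as t using nonAcidOrcTbl_n0 s ON t.a = s.a
-- WHEN MATCHED THEN UPDATE SET b = s.b;  -- target row a = 1 matches twice, so the check aborts the merge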
-explain merge into acidTbl as t using nonAcidOrcTbl s ON t.a = s.a +explain merge into acidTbl_n0 as t using nonAcidOrcTbl_n0 s ON t.a = s.a WHEN MATCHED AND s.a > 8 THEN DELETE WHEN MATCHED THEN UPDATE SET b = 7 WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b); --now we expect no cardinality check since we only have an insert clause -explain merge into acidTbl as t using nonAcidOrcTbl s ON t.a = s.a +explain merge into acidTbl_n0 as t using nonAcidOrcTbl_n0 s ON t.a = s.a WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b);
diff --git a/ql/src/test/queries/clientpositive/stats1.q b/ql/src/test/queries/clientpositive/stats1.q index 89fe14c22c..da5d9de138 100644 --- a/ql/src/test/queries/clientpositive/stats1.q +++ b/ql/src/test/queries/clientpositive/stats1.q @@ -11,27 +11,27 @@ set hive.map.aggr=true; -- SORT_QUERY_RESULTS -create table tmptable(key string, value string); +create table tmptable_n4(key string, value string); EXPLAIN -INSERT OVERWRITE TABLE tmptable +INSERT OVERWRITE TABLE tmptable_n4 SELECT unionsrc.key, unionsrc.value FROM (SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1 UNION ALL SELECT s2.key AS key, s2.value AS value FROM src1 s2) unionsrc; -INSERT OVERWRITE TABLE tmptable +INSERT OVERWRITE TABLE tmptable_n4 SELECT unionsrc.key, unionsrc.value FROM (SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1 UNION ALL SELECT s2.key AS key, s2.value AS value FROM src1 s2) unionsrc; -SELECT * FROM tmptable x SORT BY x.key, x.value; +SELECT * FROM tmptable_n4 x SORT BY x.key, x.value; -DESCRIBE FORMATTED tmptable; +DESCRIBE FORMATTED tmptable_n4; -- Load a file into an existing table -- Some stats (numFiles, totalSize) should be updated correctly -- Some other stats (numRows, rawDataSize) should be cleared -load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable; -DESCRIBE FORMATTED tmptable; +load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable_n4; +DESCRIBE FORMATTED tmptable_n4;
diff --git a/ql/src/test/queries/clientpositive/stats10.q b/ql/src/test/queries/clientpositive/stats10.q index 0516b7d3cd..0fd7054e4b 100644 --- a/ql/src/test/queries/clientpositive/stats10.q +++ b/ql/src/test/queries/clientpositive/stats10.q @@ -5,26 +5,26 @@ set hive.stats.autogather=true; ; set hive.exec.reducers.max = 1; -CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS; +CREATE TABLE bucket3_1_n0(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS; explain -insert overwrite table bucket3_1 partition (ds='1') +insert overwrite table bucket3_1_n0 partition (ds='1') select * from src; -insert overwrite table bucket3_1 partition (ds='1') +insert overwrite table bucket3_1_n0 partition (ds='1') select * from src; -insert overwrite table bucket3_1 partition (ds='1') +insert overwrite table bucket3_1_n0 partition (ds='1') select * from src; -insert overwrite table bucket3_1 partition (ds='2') +insert overwrite table bucket3_1_n0 partition (ds='2') select * from src; -select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key; +select * from bucket3_1_n0 tablesample (bucket 1 out of 2) s where ds = '1' order by key; -explain analyze table bucket3_1 partition (ds) compute statistics; -analyze table bucket3_1 partition (ds) compute statistics; +explain analyze table bucket3_1_n0 partition (ds) compute statistics; +analyze table bucket3_1_n0 partition (ds) compute statistics; -describe formatted bucket3_1 partition (ds='1'); -describe 
formatted bucket3_1 partition (ds='2'); -describe formatted bucket3_1; +describe formatted bucket3_1_n0 partition (ds='1'); +describe formatted bucket3_1_n0 partition (ds='2'); +describe formatted bucket3_1_n0; diff --git a/ql/src/test/queries/clientpositive/stats11.q b/ql/src/test/queries/clientpositive/stats11.q index 9ff8d8328a..c590b2ad7f 100644 --- a/ql/src/test/queries/clientpositive/stats11.q +++ b/ql/src/test/queries/clientpositive/stats11.q @@ -3,93 +3,93 @@ set hive.strict.checks.bucketing=false; set datanucleus.cache.collections=false; set hive.stats.autogather=true; -CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin; -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin; +CREATE TABLE srcbucket_mapjoin_n15(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n15; +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n15; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_part_n16 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; explain -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); -desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08'); +desc formatted srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); +desc formatted srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); +desc formatted srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); +desc formatted srcbucket_mapjoin_part_n16 partition(ds='2008-04-08'); -CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE 
srcbucket_mapjoin_part_2 partition(ds='2008-04-08'); +CREATE TABLE srcbucket_mapjoin_part_2_n14 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n14 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n14 partition(ds='2008-04-08'); -create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint); -create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint); +create table bucketmapjoin_hash_result_1_n5 (key bigint , value1 bigint, value2 bigint); +create table bucketmapjoin_hash_result_2_n5 (key bigint , value1 bigint, value2 bigint); set hive.optimize.bucketmapjoin = true; -create table bucketmapjoin_tmp_result (key string , value1 string, value2 string); +create table bucketmapjoin_tmp_result_n7 (key string , value1 string, value2 string); explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n7 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n15 a join srcbucket_mapjoin_part_n16 b on a.key=b.key where b.ds="2008-04-08"; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n7 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n15 a join srcbucket_mapjoin_part_n16 b on a.key=b.key where b.ds="2008-04-08"; -select count(1) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n7; -insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +insert overwrite table bucketmapjoin_hash_result_1_n5 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n7; set hive.optimize.bucketmapjoin = false; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n7 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n15 a join srcbucket_mapjoin_part_n16 b on a.key=b.key where b.ds="2008-04-08"; -select count(1) from bucketmapjoin_tmp_result; -insert overwrite table bucketmapjoin_hash_result_2 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n7; +insert overwrite table bucketmapjoin_hash_result_2_n5 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n7; select a.key-b.key, a.value1-b.value1, a.value2-b.value2 -from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b +from bucketmapjoin_hash_result_1_n5 a left outer join bucketmapjoin_hash_result_2_n5 b on a.key = b.key; set hive.optimize.bucketmapjoin = true; explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n7 select /*+mapjoin(a)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n15 a join srcbucket_mapjoin_part_n16 b on a.key=b.key where b.ds="2008-04-08"; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n7 select /*+mapjoin(a)*/ a.key, a.value, b.value -from 
srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n15 a join srcbucket_mapjoin_part_n16 b on a.key=b.key where b.ds="2008-04-08"; -select count(1) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n7; -insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +insert overwrite table bucketmapjoin_hash_result_1_n5 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n7; set hive.optimize.bucketmapjoin = false; -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n7 select /*+mapjoin(a)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n15 a join srcbucket_mapjoin_part_n16 b on a.key=b.key where b.ds="2008-04-08"; -select count(1) from bucketmapjoin_tmp_result; -insert overwrite table bucketmapjoin_hash_result_2 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result; +select count(1) from bucketmapjoin_tmp_result_n7; +insert overwrite table bucketmapjoin_hash_result_2_n5 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n7; select a.key-b.key, a.value1-b.value1, a.value2-b.value2 -from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b +from bucketmapjoin_hash_result_1_n5 a left outer join bucketmapjoin_hash_result_2_n5 b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/stats12.q b/ql/src/test/queries/clientpositive/stats12.q index 9231d6f4df..2da94b49cf 100644 --- a/ql/src/test/queries/clientpositive/stats12.q +++ b/ql/src/test/queries/clientpositive/stats12.q @@ -4,17 +4,17 @@ set hive.stats.autogather=false; set hive.exec.dynamic.partition=true; set hive.exec.dynamic.partition.mode=nonstrict; -create table analyze_srcpart like srcpart; -insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null; +create table analyze_srcpart_n3 like srcpart; +insert overwrite table analyze_srcpart_n3 partition (ds, hr) select * from srcpart where ds is not null; explain extended -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics; +analyze table analyze_srcpart_n3 PARTITION(ds='2008-04-08',hr) compute statistics; -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics; +analyze table analyze_srcpart_n3 PARTITION(ds='2008-04-08',hr) compute statistics; -desc formatted analyze_srcpart; -desc formatted analyze_srcpart partition (ds='2008-04-08', hr=11); -desc formatted analyze_srcpart partition (ds='2008-04-08', hr=12); -desc formatted analyze_srcpart partition (ds='2008-04-09', hr=11); -desc formatted analyze_srcpart partition (ds='2008-04-09', hr=12); +desc formatted analyze_srcpart_n3; +desc formatted analyze_srcpart_n3 partition (ds='2008-04-08', hr=11); +desc formatted analyze_srcpart_n3 partition (ds='2008-04-08', hr=12); +desc formatted analyze_srcpart_n3 partition (ds='2008-04-09', hr=11); +desc formatted analyze_srcpart_n3 partition (ds='2008-04-09', hr=12); diff --git a/ql/src/test/queries/clientpositive/stats15.q b/ql/src/test/queries/clientpositive/stats15.q index 5fbd4ea4d7..2745c7460e 100644 --- a/ql/src/test/queries/clientpositive/stats15.q +++ b/ql/src/test/queries/clientpositive/stats15.q @@ -2,27 +2,27 @@ --! 
qt:dataset:src set datanucleus.cache.collections=false; -create table stats_src like src; -insert overwrite table stats_src select * from src; -analyze table stats_src compute statistics; -desc formatted stats_src; +create table stats_src_n0 like src; +insert overwrite table stats_src_n0 select * from src; +analyze table stats_src_n0 compute statistics; +desc formatted stats_src_n0; -create table stats_part like srcpart; +create table stats_part_n0 like srcpart; -insert overwrite table stats_part partition (ds='2010-04-08', hr = '11') select key, value from src; -insert overwrite table stats_part partition (ds='2010-04-08', hr = '12') select key, value from src; +insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '11') select key, value from src; +insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '12') select key, value from src; -analyze table stats_part partition(ds='2010-04-08', hr='11') compute statistics; -analyze table stats_part partition(ds='2010-04-08', hr='12') compute statistics; +analyze table stats_part_n0 partition(ds='2010-04-08', hr='11') compute statistics; +analyze table stats_part_n0 partition(ds='2010-04-08', hr='12') compute statistics; -insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src; +insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '13') select key, value from src; -desc formatted stats_part; -desc formatted stats_part partition (ds='2010-04-08', hr = '11'); -desc formatted stats_part partition (ds='2010-04-08', hr = '12'); +desc formatted stats_part_n0; +desc formatted stats_part_n0 partition (ds='2010-04-08', hr = '11'); +desc formatted stats_part_n0 partition (ds='2010-04-08', hr = '12'); -analyze table stats_part partition(ds, hr) compute statistics; -desc formatted stats_part; +analyze table stats_part_n0 partition(ds, hr) compute statistics; +desc formatted stats_part_n0; -drop table stats_src; -drop table stats_part; +drop table stats_src_n0; +drop table stats_part_n0;
diff --git a/ql/src/test/queries/clientpositive/stats18.q b/ql/src/test/queries/clientpositive/stats18.q index 9d308075bf..a9835e415e 100644 --- a/ql/src/test/queries/clientpositive/stats18.q +++ b/ql/src/test/queries/clientpositive/stats18.q @@ -8,18 +8,18 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; set hive.map.aggr=true; -create table stats_part like srcpart; +create table stats_part_n1 like srcpart; -insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src; +insert overwrite table stats_part_n1 partition (ds='2010-04-08', hr = '13') select key, value from src; -- Load a file into an existing partition -- Some stats (numFiles, totalSize) should be updated correctly -- Some other stats (numRows, rawDataSize) should be cleared -desc formatted stats_part partition (ds='2010-04-08', hr='13'); +desc formatted stats_part_n1 partition (ds='2010-04-08', hr='13'); -load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE stats_part partition (ds='2010-04-08', hr='13'); +load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE stats_part_n1 partition (ds='2010-04-08', hr='13'); -desc formatted stats_part partition (ds='2010-04-08', hr='13'); +desc formatted stats_part_n1 partition (ds='2010-04-08', hr='13'); drop table stats_src; -drop table stats_part; +drop table stats_part_n1;
diff --git a/ql/src/test/queries/clientpositive/stats3.q b/ql/src/test/queries/clientpositive/stats3.q index 
93162deeb4..522dd9924f 100644 --- a/ql/src/test/queries/clientpositive/stats3.q +++ b/ql/src/test/queries/clientpositive/stats3.q @@ -1,33 +1,33 @@ set hive.mapred.mode=nonstrict; set datanucleus.cache.collections=false; set hive.stats.autogather=true; -drop table hive_test_src; -drop table hive_test_dst; +drop table hive_test_src_n3; +drop table hive_test_dst_n0; -create table hive_test_src ( col1 string ) stored as textfile ; +create table hive_test_src_n3 ( col1 string ) stored as textfile ; explain extended -load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ; +load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n3 ; -load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ; +load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n3 ; -desc formatted hive_test_src; +desc formatted hive_test_src_n3; -create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile; -insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src ; -select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part'; +create table hive_test_dst_n0 ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile; +insert overwrite table hive_test_dst_n0 partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src_n3 ; +select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_Part'; -select count(1) from hive_test_dst; +select count(1) from hive_test_dst_n0; -insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src ; -select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'; +insert overwrite table hive_test_dst_n0 partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src_n3 ; +select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_part'; -select count(1) from hive_test_dst; +select count(1) from hive_test_dst_n0; -select * from hive_test_dst where pcol1='test_part'; -select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'; -select * from hive_test_dst where pcol1='test_Part'; +select * from hive_test_dst_n0 where pcol1='test_part'; +select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_part'; +select * from hive_test_dst_n0 where pcol1='test_Part'; -describe formatted hive_test_dst; +describe formatted hive_test_dst_n0; -drop table hive_test_src; -drop table hive_test_dst; +drop table hive_test_src_n3; +drop table hive_test_dst_n0; diff --git a/ql/src/test/queries/clientpositive/stats6.q b/ql/src/test/queries/clientpositive/stats6.q index 82a3b3e4b6..7eb410bcf9 100644 --- a/ql/src/test/queries/clientpositive/stats6.q +++ b/ql/src/test/queries/clientpositive/stats6.q @@ -4,15 +4,15 @@ set hive.stats.autogather=false; set hive.exec.dynamic.partition=true; set hive.exec.dynamic.partition.mode=nonstrict; -create table analyze_srcpart like srcpart; -insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null; +create table analyze_srcpart_n2 like srcpart; +insert overwrite table analyze_srcpart_n2 partition (ds, hr) select * from srcpart where ds is not null; -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics; -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics; +analyze 
table analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=11) compute statistics; +analyze table analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=12) compute statistics; -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11); -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12); -describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11); -describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12); +describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=11); +describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=12); +describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-09',hr=11); +describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-09',hr=12); -describe formatted analyze_srcpart; +describe formatted analyze_srcpart_n2; diff --git a/ql/src/test/queries/clientpositive/stats7.q b/ql/src/test/queries/clientpositive/stats7.q index 8ca719824b..4bb5b993a5 100644 --- a/ql/src/test/queries/clientpositive/stats7.q +++ b/ql/src/test/queries/clientpositive/stats7.q @@ -4,14 +4,14 @@ set hive.stats.autogather=false; set hive.exec.dynamic.partition=true; set hive.exec.dynamic.partition.mode=nonstrict; -create table analyze_srcpart like srcpart; -insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null; +create table analyze_srcpart_n4 like srcpart; +insert overwrite table analyze_srcpart_n4 partition (ds, hr) select * from srcpart where ds is not null; -explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics; +explain analyze table analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr) compute statistics; -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics; +analyze table analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr) compute statistics; -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11); -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12); +describe formatted analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr=11); +describe formatted analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr=12); -describe formatted analyze_srcpart; +describe formatted analyze_srcpart_n4; diff --git a/ql/src/test/queries/clientpositive/stats8.q b/ql/src/test/queries/clientpositive/stats8.q index 70cff1daf7..e0a71f79e9 100644 --- a/ql/src/test/queries/clientpositive/stats8.q +++ b/ql/src/test/queries/clientpositive/stats8.q @@ -4,31 +4,31 @@ set hive.stats.autogather=false; set hive.exec.dynamic.partition=true; set hive.exec.dynamic.partition.mode=nonstrict; -create table analyze_srcpart like srcpart; -insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null; +create table analyze_srcpart_n1 like srcpart; +insert overwrite table analyze_srcpart_n1 partition (ds, hr) select * from srcpart where ds is not null; -explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics; -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics; -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11); -describe formatted analyze_srcpart; +explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) compute statistics; +analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) compute statistics; +describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11); +describe formatted analyze_srcpart_n1; -explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics; -analyze 
table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics; -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12); +explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) compute statistics; +analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) compute statistics; +describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12); -explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics; -analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics; -describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11); +explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) compute statistics; +analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) compute statistics; +describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11); -explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics; -analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics; -describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12); +explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) compute statistics; +analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) compute statistics; +describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12); -explain analyze table analyze_srcpart PARTITION(ds, hr) compute statistics; -analyze table analyze_srcpart PARTITION(ds, hr) compute statistics; +explain analyze table analyze_srcpart_n1 PARTITION(ds, hr) compute statistics; +analyze table analyze_srcpart_n1 PARTITION(ds, hr) compute statistics; -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11); -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12); -describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11); -describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12); -describe formatted analyze_srcpart; +describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11); +describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12); +describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11); +describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12); +describe formatted analyze_srcpart_n1; diff --git a/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q b/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q index f6b8b4ef97..6f8700f537 100644 --- a/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q +++ b/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q @@ -5,7 +5,7 @@ -- insert statements succeed. The insert statement succeeds even if the stats aggregator -- is set to null, since stats need not be reliable. 
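-- A minimal sketch for illustration (not part of the test): with best-effort stats
-- (hive.stats.reliable defaults to false), a failing publisher or aggregator only
-- degrades table metadata; the write itself commits, which is what the checks below
-- verify for the table created next.
-- INSERT OVERWRITE TABLE tmptable_n6 select * from src;  -- succeeds even if stats publishing fails
-- DESCRIBE FORMATTED tmptable_n6;  -- numRows/rawDataSize may be missing or stale
-- select count(1) from tmptable_n6;  -- scanning the data still returns the true count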
-create table tmptable(key string, value string); +create table tmptable_n6(key string, value string); set hive.stats.dbclass=custom; set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher; @@ -15,17 +15,17 @@ set hive.compute.query.using.stats=false; set hive.test.dummystats.aggregator=connect; -INSERT OVERWRITE TABLE tmptable select * from src; -select count(1) from tmptable; +INSERT OVERWRITE TABLE tmptable_n6 select * from src; +select count(1) from tmptable_n6; set hive.test.dummystats.aggregator=closeConnection; -INSERT OVERWRITE TABLE tmptable select * from src; -select count(1) from tmptable; +INSERT OVERWRITE TABLE tmptable_n6 select * from src; +select count(1) from tmptable_n6; set hive.test.dummystats.aggregator=cleanUp; -INSERT OVERWRITE TABLE tmptable select * from src; -select count(1) from tmptable; +INSERT OVERWRITE TABLE tmptable_n6 select * from src; +select count(1) from tmptable_n6; set hive.stats.default.aggregator=""; -INSERT OVERWRITE TABLE tmptable select * from src; -select count(1) from tmptable; +INSERT OVERWRITE TABLE tmptable_n6 select * from src; +select count(1) from tmptable_n6; diff --git a/ql/src/test/queries/clientpositive/stats_date.q b/ql/src/test/queries/clientpositive/stats_date.q index da1ef58541..3618b3767f 100644 --- a/ql/src/test/queries/clientpositive/stats_date.q +++ b/ql/src/test/queries/clientpositive/stats_date.q @@ -1,18 +1,18 @@ -create table foo(x date, y timestamp) stored as orc; +create table foo_n9(x date, y timestamp) stored as orc; -insert into foo values('1999-01-01', '1999-01-01 00:00:01'), ('2018-01-01', '2018-01-01 23:23:59'); +insert into foo_n9 values('1999-01-01', '1999-01-01 00:00:01'), ('2018-01-01', '2018-01-01 23:23:59'); -analyze table foo compute statistics for columns; +analyze table foo_n9 compute statistics for columns; set hive.compute.query.using.stats=true; set test.comment=All queries need to be just metadata fetch tasks -explain select min(x) from foo; -explain select max(x) from foo; -explain select count(x) from foo; +explain select min(x) from foo_n9; +explain select max(x) from foo_n9; +explain select count(x) from foo_n9; -explain select count(x), max(x), min(x) from foo; +explain select count(x), max(x), min(x) from foo_n9; -select count(x), max(x), min(x) from foo; +select count(x), max(x), min(x) from foo_n9; diff --git a/ql/src/test/queries/clientpositive/stats_empty_dyn_part.q b/ql/src/test/queries/clientpositive/stats_empty_dyn_part.q index bcb657acbd..f11a706b0b 100644 --- a/ql/src/test/queries/clientpositive/stats_empty_dyn_part.q +++ b/ql/src/test/queries/clientpositive/stats_empty_dyn_part.q @@ -4,12 +4,12 @@ -- which results in no partitions actually being created with -- hive.stats.reliable set to true -create table tmptable(key string) partitioned by (part string); +create table tmptable_n7(key string) partitioned by (part string); set hive.stats.autogather=true; set hive.stats.reliable=true; set hive.exec.dynamic.partition.mode=nonstrict; -explain insert overwrite table tmptable partition (part) select key, value from src where key = 'no_such_value'; +explain insert overwrite table tmptable_n7 partition (part) select key, value from src where key = 'no_such_value'; -insert overwrite table tmptable partition (part) select key, value from src where key = 'no_such_value'; +insert overwrite table tmptable_n7 partition (part) select key, value from src where key = 'no_such_value'; diff --git a/ql/src/test/queries/clientpositive/stats_empty_partition.q 
b/ql/src/test/queries/clientpositive/stats_empty_partition.q index 8fb941e354..d47d9a5924 100644 --- a/ql/src/test/queries/clientpositive/stats_empty_partition.q +++ b/ql/src/test/queries/clientpositive/stats_empty_partition.q @@ -3,11 +3,11 @@ -- This test verifies that writing an empty partition succeeds when -- hive.stats.reliable is set to true. -create table tmptable(key string, value string) partitioned by (part string); +create table tmptable_n11(key string, value string) partitioned by (part string); set hive.stats.autogather=true; set hive.stats.reliable=true; -insert overwrite table tmptable partition (part = '1') select * from src where key = 'no_such_value'; +insert overwrite table tmptable_n11 partition (part = '1') select * from src where key = 'no_such_value'; -describe formatted tmptable partition (part = '1'); +describe formatted tmptable_n11 partition (part = '1'); diff --git a/ql/src/test/queries/clientpositive/stats_empty_partition2.q b/ql/src/test/queries/clientpositive/stats_empty_partition2.q index 5afab579d2..22abc958e5 100644 --- a/ql/src/test/queries/clientpositive/stats_empty_partition2.q +++ b/ql/src/test/queries/clientpositive/stats_empty_partition2.q @@ -1,22 +1,22 @@ set hive.explain.user=false; -drop table if exists p1; -drop table if exists t; +drop table if exists p1_n0; +drop table if exists t_n32; -create table t (a int); -insert into t values (1); +create table t_n32 (a int); +insert into t_n32 values (1); -create table p1 (a int) partitioned by (p int); +create table p1_n0 (a int) partitioned by (p int); -insert into p1 partition (p=1) values (1); -insert into p1 partition (p=2) values (1); +insert into p1_n0 partition (p=1) values (1); +insert into p1_n0 partition (p=2) values (1); -truncate table p1; +truncate table p1_n0; -insert into p1 partition (p=1) values (1); +insert into p1_n0 partition (p=1) values (1); explain -select * from p1 join t on (t.a=p1.a); +select * from p1_n0 join t_n32 on (t_n32.a=p1_n0.a); -describe formatted p1; +describe formatted p1_n0; diff --git a/ql/src/test/queries/clientpositive/stats_noscan_1.q b/ql/src/test/queries/clientpositive/stats_noscan_1.q index e23a376d4c..fe446076cf 100644 --- a/ql/src/test/queries/clientpositive/stats_noscan_1.q +++ b/ql/src/test/queries/clientpositive/stats_noscan_1.q @@ -8,19 +8,19 @@ set hive.exec.dynamic.partition.mode=nonstrict; -- test analyze table ... compute statistics noscan -- 1. 
test full spec -create table analyze_srcpart like srcpart; -insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null; +create table analyze_srcpart_n0 like srcpart; +insert overwrite table analyze_srcpart_n0 partition (ds, hr) select * from srcpart where ds is not null; explain -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan; -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan; -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics noscan; +analyze table analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11) compute statistics noscan; +analyze table analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11) compute statistics noscan; +analyze table analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=12) compute statistics noscan; -- confirm result -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11); -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12); -describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11); -describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12); -describe formatted analyze_srcpart; -drop table analyze_srcpart; +describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11); +describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=12); +describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-09',hr=11); +describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-09',hr=12); +describe formatted analyze_srcpart_n0; +drop table analyze_srcpart_n0; -- 2. test partial spec create table analyze_srcpart_partial like srcpart; diff --git a/ql/src/test/queries/clientpositive/statsfs.q b/ql/src/test/queries/clientpositive/statsfs.q index 166ba360c1..cfd674e807 100644 --- a/ql/src/test/queries/clientpositive/statsfs.q +++ b/ql/src/test/queries/clientpositive/statsfs.q @@ -4,60 +4,60 @@ set hive.stats.dbclass=fs; -- stats computation on partitioned table with analyze command -create table t1 (key string, value string) partitioned by (ds string); -load data local inpath '../../data/files/kv1.txt' into table t1 partition (ds = '2010'); -load data local inpath '../../data/files/kv1.txt' into table t1 partition (ds = '2011'); +create table t1_n120 (key string, value string) partitioned by (ds string); +load data local inpath '../../data/files/kv1.txt' into table t1_n120 partition (ds = '2010'); +load data local inpath '../../data/files/kv1.txt' into table t1_n120 partition (ds = '2011'); -analyze table t1 partition (ds) compute statistics; +analyze table t1_n120 partition (ds) compute statistics; -describe formatted t1 partition (ds='2010'); -describe formatted t1 partition (ds='2011'); +describe formatted t1_n120 partition (ds='2010'); +describe formatted t1_n120 partition (ds='2011'); -drop table t1; +drop table t1_n120; -- stats computation on partitioned table with autogather on insert query -create table t1 (key string, value string) partitioned by (ds string); +create table t1_n120 (key string, value string) partitioned by (ds string); -insert into table t1 partition (ds='2010') select * from src; -insert into table t1 partition (ds='2011') select * from src; +insert into table t1_n120 partition (ds='2010') select * from src; +insert into table t1_n120 partition (ds='2011') select * from src; -describe formatted t1 partition (ds='2010'); -describe formatted t1 partition (ds='2011'); +describe formatted t1_n120 partition (ds='2010'); +describe formatted t1_n120 
partition (ds='2011'); -drop table t1; +drop table t1_n120; -- analyze stmt on unpartitioned table -create table t1 (key string, value string); -load data local inpath '../../data/files/kv1.txt' into table t1; +create table t1_n120 (key string, value string); +load data local inpath '../../data/files/kv1.txt' into table t1_n120; -analyze table t1 compute statistics; +analyze table t1_n120 compute statistics; -describe formatted t1 ; +describe formatted t1_n120 ; -drop table t1; +drop table t1_n120; -- stats computation on unpartitioned table with autogather on insert query -create table t1 (key string, value string); +create table t1_n120 (key string, value string); -insert into table t1 select * from src; +insert into table t1_n120 select * from src; -describe formatted t1 ; +describe formatted t1_n120 ; -drop table t1; +drop table t1_n120; -- stats computation on partitioned table with autogather on insert query with dynamic partitioning -create table t1 (key string, value string) partitioned by (ds string, hr string); +create table t1_n120 (key string, value string) partitioned by (ds string, hr string); set hive.exec.dynamic.partition.mode=nonstrict; -insert into table t1 partition (ds,hr) select * from srcpart; +insert into table t1_n120 partition (ds,hr) select * from srcpart; -describe formatted t1 partition (ds='2008-04-08',hr='11'); -describe formatted t1 partition (ds='2008-04-09',hr='12'); +describe formatted t1_n120 partition (ds='2008-04-08',hr='11'); +describe formatted t1_n120 partition (ds='2008-04-09',hr='12'); -drop table t1; +drop table t1_n120; set hive.exec.dynamic.partition.mode=strict;
diff --git a/ql/src/test/queries/clientpositive/struct_in_view.q b/ql/src/test/queries/clientpositive/struct_in_view.q index 2cff74d0ea..e2d62e61f7 100644 --- a/ql/src/test/queries/clientpositive/struct_in_view.q +++ b/ql/src/test/queries/clientpositive/struct_in_view.q @@ -13,13 +13,13 @@ drop view testreservedview; drop table testreserved; -create table s (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>); +create table s_n1 (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>); -create view vs1 as select default.src.`end`.key from s; +create view vs1 as select default.src.`end`.key from s_n1; describe extended vs1; -create view vs2 as select default.src.`end` from s; +create view vs2 as select default.src.`end` from s_n1; describe extended vs2; @@ -27,23 +27,23 @@ drop view vs1; drop view vs2; -create view v as select named_struct('key', 1).key from src limit 1; +create view v_n3 as select named_struct('key', 1).key from src limit 1; -desc extended v; +desc extended v_n3; -select * from v; +select * from v_n3; set hive.cbo.returnpath.hiveop=true; -select * from v; +select * from v_n3; set hive.cbo.returnpath.hiveop=false; -drop view v; +drop view v_n3; -create view v as select named_struct('end', 1).`end` from src limit 1; +create view v_n3 as select named_struct('end', 1).`end` from src limit 1; -desc extended v; +desc extended v_n3; -select * from v; +select * from v_n3; -drop view v; +drop view v_n3;
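-- A minimal sketch for illustration (the scratch table name s_demo is hypothetical):
-- the column type of s_n1 above nests structs three levels deep and reuses the
-- reserved words default, end and key as identifiers, which is why `end` must be
-- backtick-quoted; the select path walks column -> field -> field -> field.
-- create table s_demo (default struct<src:struct<`end`:struct<key:string>, id: string>, id: string>);
-- select default.src.`end`.key from s_demo;  -- resolves the column named default, not the default database
-- drop table s_demo;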
diff --git a/ql/src/test/queries/clientpositive/structin.q b/ql/src/test/queries/clientpositive/structin.q index 35498bf1b7..962119574f 100644 --- a/ql/src/test/queries/clientpositive/structin.q +++ b/ql/src/test/queries/clientpositive/structin.q @@ -1,9 +1,9 @@ -create table t11 (`id` string, `lineid` string); +create table t11_n1 (`id` string, `lineid` string); set hive.cbo.enable=false; set hive.tez.dynamic.partition.pruning=false; set hive.vectorized.execution.enabled=true; -explain select * from t11 where struct(`id`, `lineid`) +explain select * from t11_n1 where struct(`id`, `lineid`) IN ( struct('1234-1111-0074578664','3'), struct('1234-1111-0074578695','1'), @@ -16,7 +16,7 @@ struct('1234-1111-0074019610','1'), struct('1234-1111-0074022106','1') ); -explain select * from t11 where struct(`id`, `lineid`) +explain select * from t11_n1 where struct(`id`, `lineid`) IN ( struct('1234-1111-0074578664','3'), struct('1234-1111-0074578695',1) );
diff --git a/ql/src/test/queries/clientpositive/subquery_exists.q b/ql/src/test/queries/clientpositive/subquery_exists.q index 40106f98cc..17d0a98426 100644 --- a/ql/src/test/queries/clientpositive/subquery_exists.q +++ b/ql/src/test/queries/clientpositive/subquery_exists.q @@ -25,7 +25,7 @@ where exists ; -- view test -create view cv1 as +create view cv1_n1 as select * from src b where exists @@ -34,7 +34,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') ; -select * from cv1 +select * from cv1_n1 ; -- sq in from @@ -78,35 +78,35 @@ where exists ); -- uncorr, aggregate in sub which produces a result irrespective of zero rows -create table t(i int); -insert into t values(1); -insert into t values(0); +create table t_n12(i int); +insert into t_n12 values(1); +insert into t_n12 values(0); -explain select * from t where exists (select count(*) from src where 1=2); -select * from t where exists (select count(*) from src where 1=2); +explain select * from t_n12 where exists (select count(*) from src where 1=2); +select * from t_n12 where exists (select count(*) from src where 1=2); -drop table t; +drop table t_n12; -drop table if exists tx1; -create table tx1 (a integer,b integer); -insert into tx1 values (1, 1), +drop table if exists tx1_n0; +create table tx1_n0 (a integer,b integer); +insert into tx1_n0 values (1, 1), (1, 2), (1, 3); -select count(*) as result,3 as expected from tx1 u - where exists (select * from tx1 v where u.a=v.a and u.b <> v.b); -explain select count(*) as result,3 as expected from tx1 u - where exists (select * from tx1 v where u.a=v.a and u.b <> v.b); +select count(*) as result,3 as expected from tx1_n0 u + where exists (select * from tx1_n0 v where u.a=v.a and u.b <> v.b); +explain select count(*) as result,3 as expected from tx1_n0 u + where exists (select * from tx1_n0 v where u.a=v.a and u.b <> v.b); -drop table tx1; +drop table tx1_n0; -create table t1(i int, j int); -insert into t1 values(4,1); +create table t1_n68(i int, j int); +insert into t1_n68 values(4,1); -create table t2(i int, j int); -insert into t2 values(4,2),(4,3),(4,5); +create table t2_n41(i int, j int); +insert into t2_n41 values(4,2),(4,3),(4,5); -explain select * from t1 where t1.i in (select t2.i from t2 where t2.j <> t1.j); -select * from t1 where t1.i in (select t2.i from t2 where t2.j <> t1.j); -drop table t1; -drop table t2; \ No newline at end of file +explain select * from t1_n68 where t1_n68.i in (select t2_n41.i from t2_n41 where t2_n41.j <> t1_n68.j); +select * from t1_n68 where t1_n68.i in (select t2_n41.i from t2_n41 where t2_n41.j <> t1_n68.j); +drop table t1_n68; +drop table t2_n41; \ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/subquery_exists_having.q b/ql/src/test/queries/clientpositive/subquery_exists_having.q index af0d71cf8b..f72fc09c13 100644 --- a/ql/src/test/queries/clientpositive/subquery_exists_having.q +++ b/ql/src/test/queries/clientpositive/subquery_exists_having.q @@ -49,7 +49,7 @@ having exists ;
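-- A minimal sketch for illustration (not part of the test): EXISTS in a HAVING
-- clause filters whole groups, so the correlated subquery may reference the outer
-- grouping column, as both the cases above and the view below do.
-- select b.key, count(*) as c from src b group by b.key
-- having exists (select a.key from src a where a.key = b.key and a.value > 'val_9');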
count(*) as c from src b group by b.key @@ -60,7 +60,7 @@ having exists ) ; -select * from cv1; +select * from cv1_n6; -- sq in from select * diff --git a/ql/src/test/queries/clientpositive/subquery_in.q b/ql/src/test/queries/clientpositive/subquery_in.q index 0fdcd0048e..62542a7b82 100644 --- a/ql/src/test/queries/clientpositive/subquery_in.q +++ b/ql/src/test/queries/clientpositive/subquery_in.q @@ -1,38 +1,38 @@ ---! qt:dataset:src ---! qt:dataset:part ---! qt:dataset:lineitem -set hive.mapred.mode=nonstrict; -set hive.explain.user=false; +--! qt_n22:dataset_n22:src +--! qt_n22:dataset_n22:part_n22 +--! qt_n22:dataset_n22:lineitem +set_n22 hive.mapred.mode=nonstrict_n22; +set_n22 hive.explain.user=false; -- SORT_QUERY_RESULTS -- non agg, non corr explain - select * + select_n22 * from src -where src.key in (select key from src s1 where s1.key > '9') +where src.key in (select_n22 key from src s1 where s1.key > '9') ; -select * +select_n22 * from src -where src.key in (select key from src s1 where s1.key > '9') +where src.key in (select_n22 key from src s1 where s1.key > '9') ; -- non agg, corr explain -select * +select_n22 * from src b where b.key in - (select a.key + (select_n22 a.key from src a where b.value = a.value and a.key > '9' ) ; -select * +select_n22 * from src b where b.key in - (select a.key + (select_n22 a.key from src a where b.value = a.value and a.key > '9' ) @@ -41,268 +41,268 @@ where b.key in -- agg, non corr explain -select p_name, p_size +select_n22 p_name, p_size from -part where part.p_size in - (select avg(p_size) - from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a +part_n22 where part_n22.p_size in + (select_n22 avg(p_size) + from (select_n22 p_size, rank() over(partition by p_mfgr order by p_size) as r from part_n22) a where r <= 2 ) ; -select p_name, p_size +select_n22 p_name, p_size from -part where part.p_size in - (select avg(p_size) - from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a +part_n22 where part_n22.p_size in + (select_n22 avg(p_size) + from (select_n22 p_size, rank() over(partition by p_mfgr order by p_size) as r from part_n22) a where r <= 2 ) ; -- agg, corr explain -select p_mfgr, p_name, p_size -from part b where b.p_size in - (select min(p_size) - from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a +select_n22 p_mfgr, p_name, p_size +from part_n22 b where b.p_size in + (select_n22 min(p_size) + from (select_n22 p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part_n22) a where r <= 2 and b.p_mfgr = a.p_mfgr ) ; -select p_mfgr, p_name, p_size -from part b where b.p_size in - (select min(p_size) - from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a +select_n22 p_mfgr, p_name, p_size +from part_n22 b where b.p_size in + (select_n22 min(p_size) + from (select_n22 p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part_n22) a where r <= 2 and b.p_mfgr = a.p_mfgr ) ; --- distinct, corr +-- distinct_n22, corr explain -select * +select_n22 * from src b where b.key in - (select distinct a.key + (select_n22 distinct_n22 a.key from src a where b.value = a.value and a.key > '9' ) ; -select * +select_n22 * from src b where b.key in - (select distinct a.key + (select_n22 distinct_n22 a.key from src a where b.value = a.value and a.key > '9' ) ; --- corr, non equi predicate, should not have a join with outer to generate +-- corr, non equi 
predicate, should not_n22 have a join with outer to generate -- corr values explain -select * +select_n22 * from src b where b.key in - (select distinct a.key + (select_n22 distinct_n22 a.key from src a where b.value <> a.key and a.key > '9' ) ; -select * +select_n22 * from src b where b.key in - (select distinct a.key + (select_n22 distinct_n22 a.key from src a where b.value <> a.key and a.key > '9' ) ; --- Right side shouldn't have aggregate -explain select * from src b where b.key in (select distinct key from src a where a.value > b.value); -select * from src b where b.key in (select distinct key from src a where a.value > b.value); +-- Right_n22 side shouldn't_n22 have aggregate +explain select_n22 * from src b where b.key in (select_n22 distinct_n22 key from src a where a.value > b.value); +select_n22 * from src b where b.key in (select_n22 distinct_n22 key from src a where a.value > b.value); -- non agg, non corr, windowing -select p_mfgr, p_name, p_size -from part -where part.p_size in - (select first_value(p_size) over(partition by p_mfgr order by p_size) from part) +select_n22 p_mfgr, p_name, p_size +from part_n22 +where part_n22.p_size in + (select_n22 first_value(p_size) over(partition by p_mfgr order by p_size) from part_n22) ; --- non agg, non corr, with join in Parent Query +-- non agg, non corr, with join in Parent_n22 Query explain -select p.p_partkey, li.l_suppkey -from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +select_n22 p.p_partkey, li.l_suppkey +from (select_n22 distinct_n22 l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and - li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') + li.l_orderkey in (select_n22 l_orderkey from lineitem where l_shipmode = 'AIR') ; -select p.p_partkey, li.l_suppkey -from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +select_n22 p.p_partkey, li.l_suppkey +from (select_n22 distinct_n22 l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and - li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') + li.l_orderkey in (select_n22 l_orderkey from lineitem where l_shipmode = 'AIR') ; --- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey -from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +-- non agg, corr, with join in Parent_n22 Query +select_n22 p.p_partkey, li.l_suppkey +from (select_n22 distinct_n22 l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and - li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) + li.l_orderkey in (select_n22 l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) ; -- corr, agg in outer and inner -explain select sum(l_extendedprice) from lineitem, part where p_partkey = l_partkey and l_quantity IN (select avg(l_quantity) from lineitem where l_partkey = p_partkey); -select sum(l_extendedprice) from lineitem, part where p_partkey = l_partkey and l_quantity IN (select avg(l_quantity) from lineitem where l_partkey = p_partkey); +explain select_n22 sum(l_extendedprice) from lineitem, part_n22 where p_partkey = l_partkey and l_quantity IN (select_n22 avg(l_quantity) from lineitem where l_partkey = 
p_partkey); --where has multiple conjuction explain select * from part where p_brand <> 'Brand#14' AND p_size IN (select (p_size) from part p where p.p_type = part.p_type group by p_size) AND p_size <> 340; select * from part where p_brand <> 'Brand#14' AND p_size IN (select (p_size) from part p where p.p_type = part.p_type group by p_size) AND p_size <> 340; --lhs contains non-simple expression explain select * from part where (p_size-1) IN (select min(p_size) from part group by p_type); select * from part where (p_size-1) IN (select min(p_size) from part group by p_type); explain select * from part where (p_partkey*p_size) IN (select min(p_partkey) from part group by p_type); select * from part where (p_partkey*p_size) IN (select min(p_partkey) from part group by p_type); --lhs contains non-simple expression, corr explain select count(*) as c from part as e where p_size + 100 IN (select p_partkey from part where p_name = e.p_name); select count(*) as c from part as e where p_size + 100 IN (select p_partkey from part where p_name = e.p_name); -- lhs contains udf expression explain select * from part where floor(p_retailprice) IN (select floor(min(p_retailprice)) from part group by p_type); select * from part where floor(p_retailprice) IN (select floor(min(p_retailprice)) from part group by p_type); explain select * from part where p_name IN (select p_name from part p where p.p_size = part.p_size AND part.p_size + 121150 = p.p_partkey ); select * from part where p_name IN (select p_name from part p where p.p_size = part.p_size AND part.p_size + 121150 = p.p_partkey ); -- correlated query, multiple correlated variables referring to different outer var explain
select * from part where p_name IN (select p_name from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey ); select * from part where p_name IN (select p_name from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey ); -- correlated var refers to outer table alias explain select p_name from (select p_name, p_type, p_brand as brand from part) fpart where fpart.p_type IN (select p_type from part where part.p_brand = fpart.brand); select p_name from (select p_name, p_type, p_brand as brand from part) fpart where fpart.p_type IN (select p_type from part where part.p_brand = fpart.brand); -- correlated var refers to outer table alias which is an expression explain select p_name from (select p_name, p_type, p_size+1 as size from part) fpart where fpart.p_type IN (select p_type from part where (part.p_size+1) = fpart.size); select p_name from (select p_name, p_type, p_size+1 as size from part) fpart where fpart.p_type IN (select p_type from part where (part.p_size+1) = fpart.size); -- where plus having explain select key, count(*) from src where value IN (select value from src) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key ); select key, count(*) from src where value IN (select value from src) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key ); -- where with having, correlated explain select key, count(*) from src where value IN (select value from src sc where sc.key = src.key ) group
by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key ); select key, count(*) from src where value IN (select value from src sc where sc.key = src.key ) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key ); -- subquery with order by explain select * from part where (p_size-1) IN (select min(p_size) from part group by p_type) order by p_brand; select * from part where (p_size-1) IN (select min(p_size) from part group by p_type) order by p_brand; --order by with limit explain select * from part where (p_size-1) IN (select min(p_size) from part group by p_type) order by p_brand limit 4; select * from part where (p_size-1) IN (select min(p_size) from part group by p_type) order by p_brand limit 4; -- union, uncorr explain select * from src where key IN (select p_name from part UNION ALL select p_brand from part); select * from src where key IN (select p_name from part UNION ALL select p_brand from part); -- corr, subquery has another subquery in from explain select p_mfgr, b.p_name, p_size from part b where b.p_name in (select p_name from (select p_mfgr, p_name, p_size as r from part) a where r < 10 and b.p_mfgr = a.p_mfgr ) order by p_mfgr,p_size; select p_mfgr, b.p_name, p_size from part b where b.p_name in (select p_name from (select p_mfgr, p_name, p_size as r from part) a where r < 10 and b.p_mfgr = a.p_mfgr ) order by p_mfgr,p_size; -- join in subquery, correlated predicate with only one table explain select p_partkey from part where p_name in (select p.p_name from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size); select p_partkey from part where p_name in (select p.p_name from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size); -- join in subquery, correlated predicate with both inner tables, same outer var explain
select p_partkey from part where p_name in (select p.p_name from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size and p.p_size=part.p_size); select p_partkey from part where p_name in (select p.p_name from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size and p.p_size=part.p_size); -- join in subquery, correlated predicate with both inner tables, different outer var explain select p_partkey from part where p_name in (select p.p_name from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size and p.p_type=part.p_type); -- subquery within from explain select p_partkey from (select p_size, p_partkey from part where p_name in (select p.p_name from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size)) subq; select p_partkey from (select p_size, p_partkey from part where p_name in (select p.p_name from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size)) subq; -- corr IN with COUNT aggregate explain select * from part where p_size IN (select count(*) from part pp where pp.p_type = part.p_type); select * from part where p_size IN (select count(*) from part pp where pp.p_type = part.p_type); -- corr IN with aggregate other than COUNT explain select * from part where p_size in (select avg(pp.p_size) from part pp where pp.p_partkey = part.p_partkey); select * from part where p_size in (select avg(pp.p_size) from part pp where pp.p_partkey = part.p_partkey); -- corr IN with aggregate other than COUNT (MIN) with non-equi join explain select * from part where p_size in (select min(pp.p_size) from part pp where pp.p_partkey > part.p_partkey); select * from part where p_size in (select min(pp.p_size) from part pp where pp.p_partkey >
part.p_partkey); -- corr IN with COUNT aggregate explain select * from part where p_size NOT IN (select count(*) from part pp where pp.p_type = part.p_type); select * from part where p_size NOT IN (select count(*) from part pp where pp.p_type = part.p_type); -- corr IN with aggregate other than COUNT explain select * from part where p_size not in (select avg(pp.p_size) from part pp where pp.p_partkey = part.p_partkey); select * from part where p_size not in (select avg(pp.p_size) from part pp where pp.p_partkey = part.p_partkey); -create table t(i int); -insert into t values(1); -insert into t values(0); +create table t_n22(i int); +insert into t_n22 values(1); +insert into t_n22 values(0); -create table tempty(i int); +create table tempty_n2(i int); -- uncorr sub with aggregate which produces result irrespective of zero rows -explain select * from t where i IN (select count(*) from tempty); -select * from t where i IN (select count(*) from tempty); +explain select * from t_n22 where i IN (select count(*) from tempty_n2); +select * from t_n22 where i IN (select count(*) from tempty_n2); -drop table t; +drop table t_n22; -create table tnull(i int); -insert into tnull values(NULL) , (NULL); +create table tnull_n2(i int); +insert into tnull_n2 values(NULL) , (NULL); -- empty inner table, non-null sq key, expected empty result -select * from part where p_size IN (select i from tempty); +select * from part where p_size IN (select i from tempty_n2); -- empty inner table, null sq key, expected empty result -select * from tnull where i IN (select i from tempty); +select * from tnull_n2 where i IN (select i from tempty_n2); -- null inner table, non-null sq key -select * from part where p_size IN (select i from tnull); +select * from part where p_size IN (select i from tnull_n2); -- null inner table, null sq key -select * from tnull where i IN (select i from tnull); +select * from tnull_n2 where i IN (select i from tnull_n2); -drop table tempty; +drop table tempty_n2; -create table t(i int, j int); -insert into t values(0,1), (0,2); +create table t_n22(i int, j int); +insert into t_n22 values(0,1), (0,2); -create table tt(i int, j int); -insert into tt values(0,3); +create table tt_n2(i int, j int); +insert into tt_n2 values(0,3); -- corr IN with aggregate other than COUNT return zero rows -explain select * from t where i IN (select sum(i) from tt where tt.j = t.j); -select * from
t where i IN (select sum(i) from tt where tt.j = t.j); +explain select * from t_n22 where i IN (select sum(i) from tt_n2 where tt_n2.j = t_n22.j); +select * from t_n22 where i IN (select sum(i) from tt_n2 where tt_n2.j = t_n22.j); -drop table t; -drop table tt; +drop table t_n22; +drop table tt_n2; -- since inner query has aggregate it will be joined with outer to get all possible corrrelated values explain select * from part where p_size IN (select max(p_size) from part p where p.p_type <> part.p_name); select * from part where p_size IN (select max(p_size) from part p where p.p_type <> part.p_name); -- inner query has join so should have a join with outer query to fetch all corr values explain select * from part where p_size IN (select pp.p_size from part p join part pp on pp.p_type = p.p_type where part.p_type <> p.p_name); select * from part where p_size IN (select pp.p_size from part p join part pp on pp.p_type = p.p_type where part.p_type <> p.p_name); diff --git a/ql/src/test/queries/clientpositive/subquery_in_having.q b/ql/src/test/queries/clientpositive/subquery_in_having.q index 67bb6a95ed..ec6981b70c 100644 --- a/ql/src/test/queries/clientpositive/subquery_in_having.q +++ b/ql/src/test/queries/clientpositive/subquery_in_having.q @@ -136,22 +136,22 @@ having p_name in (select first_value(p_name) over(partition by p_mfgr order by p_size) from part_subq) ; -CREATE TABLE src_null (key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH "../../data/files/kv1.txt" INTO TABLE src_null; -INSERT INTO src_null values('5444', null); +CREATE TABLE src_null_n4 (key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH "../../data/files/kv1.txt" INTO TABLE src_null_n4; +INSERT INTO src_null_n4 values('5444', null); explain select key, value, count(*) -from src_null b -where NOT EXISTS (select key from src_null where src_null.value <> b.value) +from src_null_n4 b +where NOT EXISTS (select key from src_null_n4 where src_null_n4.value <> b.value) group by key, value -having count(*) not in (select count(*) from src_null s1 where s1.key > '9' and s1.value <> b.value group by s1.key ); +having count(*) not in (select count(*) from src_null_n4 s1 where s1.key > '9' and s1.value <> b.value group by s1.key ); select key, value, count(*) -from src_null b -where NOT EXISTS (select key from src_null where src_null.value <> b.value) +from src_null_n4 b +where NOT EXISTS (select key from src_null_n4 where src_null_n4.value <> b.value) group by key, value -having count(*) not in (select count(*) from src_null s1 where s1.key > '9' and s1.value <> b.value group by s1.key ); +having count(*) not in (select count(*) from src_null_n4 s1 where s1.key > '9' and s1.value <> b.value group by s1.key ); -DROP TABLE src_null;
+DROP TABLE src_null_n4; DROP TABLE part_subq; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/subquery_notin.q b/ql/src/test/queries/clientpositive/subquery_notin.q index 6fcee21727..82508361fa 100644 --- a/ql/src/test/queries/clientpositive/subquery_notin.q +++ b/ql/src/test/queries/clientpositive/subquery_notin.q @@ -1,40 +1,40 @@ --! qt:dataset:src --! qt:dataset:part --! qt:dataset:lineitem set hive.mapred.mode=nonstrict; -- non agg, non corr explain select * from src where src.key not in ( select key from src s1 where s1.key > '2' ) ; select * from src where src.key not in ( select key from src s1 where s1.key > '2') order by key ; -- non agg, corr explain select p_mfgr, b.p_name, p_size from part b where b.p_name not in (select p_name from (select p_mfgr, p_name, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a where r <= 2 and b.p_mfgr = a.p_mfgr ) ; select p_mfgr, b.p_name, p_size from part b where b.p_name not in (select p_name from (select p_mfgr, p_name, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a where r <= 2 and b.p_mfgr = a.p_mfgr ) order by p_mfgr, b.p_name @@ -42,19 +42,19 @@ order by p_mfgr, b.p_name -- agg, non corr explain select p_name, p_size from part where part.p_size not in (select avg(p_size) from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a where r <= 2 ) ; select p_name, p_size from part where part.p_size not in (select avg(p_size) from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a where r <= 2 ) order by p_name, p_size @@ -62,187 +62,187 @@ order by p_name, p_size -- agg, corr explain select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a where r <= 2 and b.p_mfgr = a.p_mfgr ) ; select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) from (select p_mfgr,
p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a where r <= 2 and b.p_mfgr = a.p_mfgr ) ; -- non agg, non corr, Group By in Parent Query select li.l_partkey, count(*) from lineitem li where li.l_linenumber = 1 and li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') group by li.l_partkey ; -- alternate not in syntax select * from src where not src.key in ( select key from src s1 where s1.key > '2') order by key ; -- null check create view T1_v as select key from src where key <'11'; create view T2_v as select case when key > '104' then null else key end as key from T1_v; explain select * from T1_v where T1_v.key not in (select T2_v.key from T2_v); select * from T1_v where T1_v.key not in (select T2_v.key from T2_v); --where has multiple conjuction explain select * from part where p_brand <> 'Brand#14' AND p_size NOT IN (select (p_size*p_size) from part p where p.p_type = part.p_type ) AND p_size <> 340; select * from part where p_brand <> 'Brand#14' AND p_size NOT IN (select (p_size*p_size) from part p where p.p_type = part.p_type ) AND p_size <> 340; --lhs contains non-simple expression explain select * from part where (p_size-1) NOT IN (select min(p_size) from part group by p_type) order by p_partkey; select * from part where (p_size-1) NOT IN (select min(p_size) from part group by p_type) order by p_partkey; explain select * from part where (p_partkey*p_size) NOT IN (select min(p_partkey) from part group by p_type); select * from part where (p_partkey*p_size) NOT IN (select min(p_partkey) from part group by p_type); --lhs contains non-simple expression, corr explain select count(*) as c from part as e where p_size + 100 NOT IN (select p_partkey from part where p_name = e.p_name); select count(*) as c from part as e where p_size + 100 NOT IN (select p_partkey from part
where p_name = e.p_name); -- lhs contains udf expression explain select * from part where floor(p_retailprice) NOT IN (select floor(min(p_retailprice)) from part group by p_type); select * from part where floor(p_retailprice) NOT IN (select floor(min(p_retailprice)) from part group by p_type); explain select * from part where p_name NOT IN (select p_name from part p where p.p_size = part.p_size AND part.p_size + 121150 = p.p_partkey ); select * from part where p_name NOT IN (select p_name from part p where p.p_size = part.p_size AND part.p_size + 121150 = p.p_partkey ); -- correlated query, multiple correlated variables referring to different outer var explain select * from part where p_name NOT IN (select p_name from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey ); select * from part where p_name NOT IN (select p_name from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey ); -- correlated var refers to outer table alias explain select p_name from (select p_name, p_type, p_brand as brand from part) fpart where fpart.p_type NOT IN (select p_type+2 from part where part.p_brand = fpart.brand); select p_name from (select p_name, p_type, p_brand as brand from part) fpart where fpart.p_type NOT IN (select p_type+2 from part where part.p_brand = fpart.brand); -- correlated var refers to outer table alias which is an expression explain select p_name from (select p_name, p_type, p_size+1 as size from part) fpart where fpart.p_type NOT IN (select p_type from part where (part.p_size+1) = fpart.size); select p_name from (select p_name, p_type, p_size+1 as size from part) fpart where fpart.p_type NOT IN (select p_type from part where (part.p_size+1) = fpart.size+1); -- where plus having explain select key, count(*)
from src where value NOT IN (select key from src) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key ); select key, count(*) from src where value NOT IN (select key from src) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key ); -- where with having, correlated explain select key, count(*) from src where value NOT IN (select concat('v', value) from src sc where sc.key = src.key ) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key ); select key, count(*) from src where value NOT IN (select concat('v', value) from src sc where sc.key = src.key ) group by key having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key ); -- subquery with order by explain select * from part where (p_size-1) NOT IN (select min(p_size) from part group by p_type) order by p_brand; select * from part where (p_size-1) NOT IN (select min(p_size) from part group by p_type) order by p_brand; --order by with limit explain select * from part where (p_size-1) NOT IN (select min(p_size) from part group by p_type) order by p_brand, p_partkey limit 4; select * from part where (p_size-1) NOT IN (select min(p_size) from part group by p_type) order by p_brand, p_partkey limit 4; -- union, uncorr explain select * from src where key NOT IN (select p_name from part UNION ALL select p_brand from part); select * from src where key NOT IN (select p_name from part UNION ALL select p_brand from part); explain select count(*) as c from part as e where p_size + 100 not in (
select p_type from part where p_brand = e.p_brand); select count(*) as c from part as e where p_size + 100 not in ( select p_type from part where p_brand = e.p_brand); --nullability tests -CREATE TABLE t1 (c1 INT, c2 CHAR(100)); -INSERT INTO t1 VALUES (null,null), (1,''), (2,'abcde'), (100,'abcdefghij'); +CREATE TABLE t1_n0 (c1 INT, c2 CHAR(100)); +INSERT INTO t1_n0 VALUES (null,null), (1,''), (2,'abcde'), (100,'abcdefghij'); -CREATE TABLE t2 (c1 INT); -INSERT INTO t2 VALUES (null), (2), (100); +CREATE TABLE t2_n0 (c1 INT); +INSERT INTO t2_n0 VALUES (null), (2), (100); -- uncorr -explain SELECT c1 FROM t1 WHERE c1 NOT IN (SELECT c1 FROM t2); -SELECT c1 FROM t1 WHERE c1 NOT IN (SELECT c1 FROM t2); +explain SELECT c1 FROM t1_n0 WHERE c1 NOT IN (SELECT c1 FROM t2_n0); +SELECT c1 FROM t1_n0 WHERE c1 NOT IN (SELECT c1 FROM t2_n0); -- corr -explain SELECT c1 FROM t1 WHERE c1 NOT IN (SELECT c1 FROM t2 where t1.c2=t2.c1); -SELECT c1 FROM t1 WHERE c1 NOT IN (SELECT c1 FROM t2 where t1.c1=t2.c1); +explain SELECT c1 FROM t1_n0 WHERE c1 NOT IN (SELECT c1 FROM t2_n0 where t1_n0.c2=t2_n0.c1); +SELECT c1 FROM t1_n0 WHERE c1 NOT IN (SELECT c1 FROM t2_n0 where t1_n0.c1=t2_n0.c1); -DROP TABLE t1; -DROP TABLE t2; +DROP TABLE t1_n0; +DROP TABLE t2_n0; -- corr, nullability, should not produce any result -create table t1(a int, b int); -insert into t1 values(1,0), (1,0),(1,0); +create table t1_n0(a int, b int); +insert into t1_n0 values(1,0), (1,0),(1,0); -create table t2(a int, b int); -insert into t2 values(2,1), (3,1), (NULL,1); +create table t2_n0(a int, b int); +insert into t2_n0 values(2,1), (3,1), (NULL,1); -explain select t1.a from t1 where t1.b NOT IN (select t2.a from t2 where t2.b=t1.a); -select t1.a from t1 where t1.b NOT IN (select t2.a from t2 where t2.b=t1.a); -drop table t1; -drop table t2; +explain select t1_n0.a from t1_n0 where t1_n0.b NOT IN (select t2_n0.a from t2_n0 where t2_n0.b=t1_n0.a); +select t1_n0.a from t1_n0 where t1_n0.b NOT IN (select t2_n0.a from t2_n0 where t2_n0.b=t1_n0.a); +drop table t1_n0; +drop table t2_n0; -- coor, nullability, should produce result create table t7(i int, j int); insert into t7 values(null, 5), (4, 15); create table fixOb(i int, j int); insert into fixOb values(-1, 5), (-1, 15); explain select * from fixOb where j NOT IN (select i from t7 where t7.j=fixOb.j); select * from fixOb where j NOT IN (select i from t7 where t7.j=fixOb.j); drop table t7; drop table fixOb; -create table t(i int, j int); -insert into t values(1,2), (4,5), (7, NULL); +create table t_n0(i int, j int); +insert into t_n0 values(1,2), (4,5), (7, NULL); --- case with empty inner result (t1.j=t.j=NULL) and null subquery key(t.j = NULL) -explain select t.i from t where t.j NOT IN (select t1.i from t t1 where
t1.j=t.j); -select t.i from t where t.j NOT IN (select t1.i from t t1 where t1.j=t.j); +-- case with empty inner result (t1_n0.j=t_n0.j=NULL) and null subquery key(t_n0.j = NULL) +explain select t_n0.i from t_n0 where t_n0.j NOT IN (select t1_n0.i from t_n0 t1_n0 where t1_n0.j=t_n0.j); +select t_n0.i from t_n0 where t_n0.j NOT IN (select t1_n0.i from t_n0 t1_n0 where t1_n0.j=t_n0.j); --- case with empty inner result (t1.j=t.j=NULL) and non-null subquery key(t.i is never null) -explain select t.i from t where t.i NOT IN (select t1.i from t t1 where t1.j=t.j); -select t.i from t where t.i NOT IN (select t1.i from t t1 where t1.j=t.j); +-- case with empty inner result (t1_n0.j=t_n0.j=NULL) and non-null subquery key(t_n0.i is never null) +explain select t_n0.i from t_n0 where t_n0.i NOT IN (select t1_n0.i from t_n0 t1_n0 where t1_n0.j=t_n0.j); +select t_n0.i from t_n0 where t_n0.i NOT IN (select t1_n0.i from t_n0 t1_n0 where t1_n0.j=t_n0.j); --- case with non-empty inner result and null subquery key(t.j is null) -explain select t.i from t where t.j NOT IN (select t1.i from t t1 ); -select t.i from t where t.j NOT IN (select t1.i from t t1 ); +-- case with non-empty inner result and null subquery key(t_n0.j is null) +explain select t_n0.i from t_n0 where t_n0.j NOT IN (select t1_n0.i from t_n0 t1_n0 ); +select t_n0.i from t_n0 where t_n0.j NOT IN (select t1_n0.i from t_n0 t1_n0 ); --- case with non-empty inner result and non-null subquery key(t.i is never null) -explain select t.i from t where t.i NOT IN (select t1.i from t t1 ); -select t.i from t where t.i NOT IN (select t1.i from t t1 ); +-- case with non-empty inner result and non-null subquery key(t_n0.i is never null) +explain select t_n0.i from t_n0 where t_n0.i NOT IN (select t1_n0.i from t_n0 t1_n0 ); +select t_n0.i from t_n0 where t_n0.i NOT IN (select t1_n0.i from t_n0 t1_n0 ); -drop table t1; +drop table t1_n0; -- corr predicate is not equi explain select * from src b where b.key not in (select a.key from src a where b.value > a.value and a.key > '9' ) ; select * from src b where b.key not in (select a.key from src a where b.value > a.value and a.key > '9' ); diff --git a/ql/src/test/queries/clientpositive/subquery_notin_having.q b/ql/src/test/queries/clientpositive/subquery_notin_having.q index bc328bb42b..d19e4d8ab7 100644 --- a/ql/src/test/queries/clientpositive/subquery_notin_having.q +++ b/ql/src/test/queries/clientpositive/subquery_notin_having.q @@ -60,17 +60,17 @@ having b.p_mfgr not in ; --nullability tests -CREATE TABLE t1 (c1 INT, c2 CHAR(100)); -INSERT INTO t1 VALUES (null,null), (1,''), (2,'abcde'), (100,'abcdefghij'); +CREATE TABLE t1_n145 (c1 INT, c2 CHAR(100)); +INSERT INTO t1_n145 VALUES (null,null), (1,''), (2,'abcde'), (100,'abcdefghij'); -CREATE TABLE t2 (c1 INT); -INSERT INTO t2 VALUES (null), (2), (100); +CREATE TABLE t2_n85 (c1 INT); +INSERT INTO t2_n85 VALUES (null), (2), (100); -explain SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2); -SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2); +explain SELECT c1 FROM t1_n145 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n85); +SELECT c1 FROM t1_n145 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n85); -explain SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2 where t1.c1=t2.c1); -SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2 where t1.c1=t2.c1); +explain SELECT c1 FROM t1_n145 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n85 where t1_n145.c1=t2_n85.c1); +SELECT c1 FROM t1_n145 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n85 where t1_n145.c1=t2_n85.c1); -DROP TABLE
t1; -DROP TABLE t2; +DROP TABLE t1_n145; +DROP TABLE t2_n85; diff --git a/ql/src/test/queries/clientpositive/subquery_scalar.q b/ql/src/test/queries/clientpositive/subquery_scalar.q index 781bb2980a..8d494f29cd 100644 --- a/ql/src/test/queries/clientpositive/subquery_scalar.q +++ b/ql/src/test/queries/clientpositive/subquery_scalar.q @@ -1,16 +1,16 @@ --! qt:dataset:src --! qt:dataset:part --! qt:dataset:lineitem set hive.mapred.mode=nonstrict; set hive.explain.user=false; -- SORT_QUERY_RESULTS -create table tnull(i int, c char(2)); -insert into tnull values(NULL, NULL), (NULL, NULL); +create table tnull_n0(i int, c char(2)); +insert into tnull_n0 values(NULL, NULL), (NULL, NULL); -create table tempty(c char(2)); +create table tempty_n0(c char(2)); -CREATE TABLE part_null( +CREATE TABLE part_null_n0( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -19,245 +19,245 @@ CREATE TABLE part_null( p_size INT, p_container STRING, p_retailprice DOUBLE, p_comment STRING ) ROW FORMAT DELIMITED FIELDS TERMINATED BY "," ; -LOAD DATA LOCAL INPATH '../../data/files/part_tiny_nulls.txt' overwrite into table part_null; +LOAD DATA LOCAL INPATH '../../data/files/part_tiny_nulls.txt' overwrite into table part_null_n0; -insert into part_null values(78487,NULL,'Manufacturer#6','Brand#52','LARGE BRUSHED BRASS', 23, 'MED BAG',1464.48,'hely blith'); +insert into part_null_n0 values(78487,NULL,'Manufacturer#6','Brand#52','LARGE BRUSHED BRASS', 23, 'MED BAG',1464.48,'hely blith'); -- non corr, simple less than -explain select * from part where p_size > (select avg(p_size) from part_null); -select * from part where p_size > (select avg(p_size) from part_null); +explain select * from part where p_size > (select avg(p_size) from part_null_n0); +select * from part where p_size > (select avg(p_size) from part_null_n0); -- non corr, empty -select * from part where p_size > (select * from tempty); -explain select * from part where p_size > (select * from tempty); +select * from part where p_size > (select * from tempty_n0); +explain select * from part where p_size > (select * from tempty_n0); -- non corr, null comparison -explain select * from part where p_name = (select p_name from part_null where p_name is null); -select * from part where p_name = (select p_name from part_null where p_name is null); +explain select * from part where p_name = (select p_name from part_null_n0 where p_name is null); +select * from part where p_name = (select p_name from part_null_n0 where p_name is null); -- non corr, is null -explain select * from part where (select i from tnull limit 1) is null; -select * from part where (select i from tnull limit 1) is null; +explain select * from part where (select i from tnull_n0 limit 1) is null; +select * from part where (select i from tnull_n0 limit 1) is null; -- non corr, is not null -explain select * from part where (select max(p_name) from part_null) is not null; -select * from part where (select max(p_name) from part_null) is not null; +explain select * from part where (select max(p_name) from part_null_n0) is not null; +select * from part where (select max(p_name) from part_null_n0) is not null; -- non corr, between
explain select * from part where p_size between (select min(p_size) from part) and (select avg(p_size) from part); select * from part where p_size between (select min(p_size) from part) and (select avg(p_size) from part); -- non corr, windowing explain select p_mfgr, p_name, p_size from part where part.p_size > (select first_value(p_size) over(partition by p_mfgr order by p_size) as fv from part order by fv limit 1); select p_mfgr, p_name, p_size from part where part.p_size > (select first_value(p_size) over(partition by p_mfgr order by p_size) as fv from part order by fv limit 1); -- lhs contain complex expressions explain select * from part where (p_partkey*p_size) <> (select min(p_partkey) from part); select * from part where (p_partkey*p_size) <> (select min(p_partkey) from part); -- corr, lhs contain complex expressions explain select count(*) as c from part as e where p_size + 100 < (select max(p_partkey) from part where p_name = e.p_name); select count(*) as c from part as e where p_size + 100 < (select max(p_partkey) from part where p_name = e.p_name); -- corr, lhs contain constant expressions (HIVE-16689) explain select count(*) as c from part as e where 100 < (select max(p_partkey) from part where p_name = e.p_name); select count(*) as c from part as e where 100 < (select max(p_partkey) from part where p_name = e.p_name); -- corr, equi-join predicate -explain select * from part where p_size > (select avg(p_size) from part_null where part_null.p_type = part.p_type); -select * from part where p_size > (select avg(p_size) from part_null where part_null.p_type = part.p_type); +explain select * from part where p_size > (select avg(p_size) from part_null_n0 where part_null_n0.p_type = part.p_type); +select * from part where p_size > (select avg(p_size) from part_null_n0 where part_null_n0.p_type = part.p_type); -- mix of corr and uncorr -explain select * from part where p_size BETWEEN (select min(p_size) from part_null where part_null.p_type = part.p_type) AND (select max(p_size)
from part_null); -select * from part where p_size BETWEEN (select min(p_size) from part_null where part_null.p_type = part.p_type) AND (select max(p_size) from part_null); +explain select * from part where p_size BETWEEN (select min(p_size) from part_null_n0 where part_null_n0.p_type = part.p_type) AND (select max(p_size) from part_null_n0); +select * from part where p_size BETWEEN (select min(p_size) from part_null_n0 where part_null_n0.p_type = part.p_type) AND (select max(p_size) from part_null_n0); -- mix of corr and uncorr -explain select * from part where p_size >= (select min(p_size) from part_null where part_null.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null); -select * from part where p_size >= (select min(p_size) from part_null where part_null.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null); +explain select * from part where p_size >= (select min(p_size) from part_null_n0 where part_null_n0.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null_n0); +select * from part where p_size >= (select min(p_size) from part_null_n0 where part_null_n0.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null_n0); -- mix of scalar and IN corr explain select * from part where p_brand <> (select min(p_brand) from part ) AND p_size IN (select (p_size) from part p where p.p_type = part.p_type ) AND p_size <> 340; select * from part where p_brand <> (select min(p_brand) from part ) AND p_size IN (select (p_size) from part p where p.p_type = part.p_type ) AND p_size <> 340; -- multiple corr var with scalar query explain select * from part where p_size <> (select count(p_name) from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey ); select * from part where p_size <> (select count(p_name) from part p where p.p_size = part.p_size AND part.p_partkey= p.p_partkey ); -- where + having explain select key, count(*) from src where value <> (select max(value) from src) group by key having count(*) > (select count(*) from src s1 where s1.key = '90' group by s1.key ); select key, count(*) from src where value <> (select max(value) from src) group by key having count(*) > (select count(*) from src s1 where s1.key = '90' group by s1.key ); explain select
sum(p_retailprice) from part group by p_type having sum(p_retailprice) > (select max(pp.p_retailprice) from part pp); select sum(p_retailprice) from part group by p_type having sum(p_retailprice) > (select max(pp.p_retailprice) from part pp); -- scalar subquery with INTERSECT explain select * from part where p_size > (select count(p_name) from part INTERSECT select count(p_brand) from part); select * from part where p_size > (select count(p_name) from part INTERSECT select count(p_brand) from part); -- join in subquery explain select p_partkey from part where p_name like (select max(p.p_name) from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size); select p_partkey from part where p_name like (select max(p.p_name) from part p left outer join part pp on p.p_type = pp.p_type where pp.p_size = part.p_size); -- mix of NOT IN and scalar -explain select * from part_null where p_name NOT LIKE (select min(p_name) from part_null) AND p_brand NOT IN (select p_name from part); -select * from part_null where p_name NOT LIKE (select min(p_name) from part_null) AND p_brand NOT IN (select p_name from part); +explain select * from part_null_n0 where p_name NOT LIKE (select min(p_name) from part_null_n0) AND p_brand NOT IN (select p_name from part); +select * from part_null_n0 where p_name NOT LIKE (select min(p_name) from part_null_n0) AND p_brand NOT IN (select p_name from part); -- mix of NOT IN and corr scalar -explain select * from part_null where p_brand NOT IN (select p_name from part) AND p_name NOT LIKE (select min(p_name) from part_null pp where part_null.p_type = pp.p_type); -select * from part_null where p_brand NOT IN (select p_name from part) AND p_name NOT LIKE (select min(p_name) from part_null pp where part_null.p_type = pp.p_type); +explain select * from part_null_n0 where p_brand NOT IN (select p_name from part) AND p_name NOT LIKE (select min(p_name) from part_null_n0 pp where part_null_n0.p_type = pp.p_type); +select * from part_null_n0 where p_brand NOT IN (select p_name from part) AND p_name NOT LIKE (select min(p_name) from part_null_n0 pp where part_null_n0.p_type = pp.p_type); -- non corr, with join in parent query explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join
lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey <> (select min(l_orderkey) from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) ; select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey <> (select min(l_orderkey) from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) ; -- corr, with join in outer query explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey <> (select min(l_orderkey) from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber); select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey <> (select min(l_orderkey) from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber); -- corr, aggregate in outer explain select sum(l_extendedprice) from lineitem, part where p_partkey = l_partkey and l_quantity > (select avg(l_quantity) from lineitem where l_partkey = p_partkey); select sum(l_extendedprice) from lineitem, part where p_partkey = l_partkey and l_quantity > (select avg(l_quantity) from lineitem where l_partkey = p_partkey); -- nested with scalar -explain select * from part_null where p_name IN (select p_name from part where part.p_type = part_null.p_type AND p_brand NOT LIKE (select min(p_brand) from part pp where part.p_type = pp.p_type)); -select * from part_null where p_name IN (select p_name from part where part.p_type = part_null.p_type AND p_brand NOT LIKE (select min(p_brand) from part pp where part.p_type = pp.p_type)); +explain select * from part_null_n0 where p_name IN (select p_name from part where part.p_type =
part_null_n0.p_type AND p_brand NOT LIKE (select min(p_brand) from part_n11 pp where part_n11.p_type = pp.p_type)); -drop table tnull; -drop table part_null; -drop table tempty; +drop table tnull_n0; +drop table part_null_n0; +drop table tempty_n0; -create table EMPS(EMPNO int,NAME string,DEPTNO int,GENDER string,CITY string,EMPID int,AGE int,SLACKER boolean,MANAGER boolean,JOINEDAT date); +create table EMPS_n4(EMPNO int,NAME string,DEPTNO int,GENDER string,CITY string,EMPID int,AGE int,SLACKER boolean,MANAGER boolean,JOINEDAT date); -insert into EMPS values (100,'Fred',10,NULL,NULL,30,25,true,false,'1996-08-03'); -insert into EMPS values (110,'Eric',20,'M','San Francisco',3,80,NULL,false,'2001-01-01') ; -insert into EMPS values (110,'John',40,'M','Vancouver',2,NULL,false,true,'2002-05-03'); -insert into EMPS values (120,'Wilma',20,'F',NULL,1,5,NULL,true,'2005-09-07'); -insert into EMPS values (130,'Alice',40,'F','Vancouver',2,NULL,false,true,'2007-01-01'); +insert into EMPS_n4 values (100,'Fred',10,NULL,NULL,30,25,true,false,'1996-08-03'); +insert into EMPS_n4 values (110,'Eric',20,'M','San Francisco',3,80,NULL,false,'2001-01-01') ; +insert into EMPS_n4 values (110,'John',40,'M','Vancouver',2,NULL,false,true,'2002-05-03'); +insert into EMPS_n4 values (120,'Wilma',20,'F',NULL,1,5,NULL,true,'2005-09-07'); +insert into EMPS_n4 values (130,'Alice',40,'F','Vancouver',2,NULL,false,true,'2007-01-01'); -create table DEPTS(deptno int, name string); -insert into DEPTS values( 10,'Sales'); -insert into DEPTS values( 20,'Marketing'); -insert into DEPTS values( 30,'Accounts'); +create table DEPTS_n3(deptno int, name string); +insert into DEPTS_n3 values( 10,'Sales'); +insert into DEPTS_n3 values( 20,'Marketing'); +insert into DEPTS_n3 values( 30,'Accounts'); -- corr, scalar, with count aggregate -explain select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name); -select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name); +explain select * from emps_n4 where deptno <> (select count(deptno) from depts_n3 where depts_n3.name = emps_n4.name); +select * from emps_n4 where deptno <> (select count(deptno) from depts_n3 where depts_n3.name = emps_n4.name); -explain select * from emps where name > (select min(name) from depts where depts.deptno=emps.deptno); -select * from emps where name > (select min(name) from depts where depts.deptno=emps.deptno); +explain select * from emps_n4 where name > (select min(name) from depts_n3 where depts_n3.deptno=emps_n4.deptno); +select * from emps_n4 where name > (select min(name) from depts_n3 where depts_n3.deptno=emps_n4.deptno); -- corr, scalar multiple subq with count aggregate -explain select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts where depts.deptno = emps.deptno); -select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts where depts.deptno = emps.deptno); +explain select * from emps_n4 where deptno <> (select count(deptno) from depts_n3 where depts_n3.name = emps_n4.name) and empno > (select count(name) from depts_n3 where depts_n3.deptno = emps_n4.deptno); +select * from emps_n4 where deptno <> (select count(deptno) from depts_n3 where depts_n3.name = emps_n4.name) and empno > (select count(name) from depts_n3 where depts_n3.deptno = emps_n4.deptno); -- mix of corr, uncorr with aggregate -explain select * from emps where deptno <> (select sum(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts); -select * from emps where deptno <> (select count(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts); +explain select * from emps_n4 where deptno <> (select sum(deptno) from depts_n3 where depts_n3.name = emps_n4.name) and empno > (select count(name) from depts_n3); +select * from emps_n4 where deptno <> (select count(deptno) from depts_n3 where depts_n3.name = emps_n4.name) and empno > (select count(name) from depts_n3); -drop table DEPTS; -drop table EMPS; +drop table DEPTS_n3; +drop table EMPS_n4; -- having explain select key, count(*) from src group by key having count(*) > (select count(*) from src s1 where s1.key > '9' ) ; select key, count(*) from src group by key having count(*) > (select count(*) from src s1 where s1.key = '90') ; explain select key, value, count(*) from src b where b.key in (select key from src where src.key > '8') group by key, value having count(*) > (select count(*) from src s1 where s1.key > '9' ) ; select key, value, count(*) from src b where b.key in (select key from src where src.key > '8') group by key, value having count(*) > (select count(*) from src s1 where s1.key > '9' ) ; -- since subquery has implicit group by this should have sq_count_check (HIVE-16793) -explain select * from part where p_size > (select max(p_size) from part group by p_type); +explain select * from part_n11 where p_size > (select max(p_size) from part_n11 group by p_type); -- same as above, for correlated columns -explain select * from part where p_size > (select max(p_size) from part p where p.p_type = part.p_type group by p_type); +explain select * from part_n11 where p_size > (select max(p_size) from part_n11 p where p.p_type = part_n11.p_type group by p_type); -- corr scalar subquery with aggregate, having non-equi corr predicate -explain select * from part where p_size <> - (select count(p_size) from part pp where part.p_type <> pp.p_type); -select * from part where p_size <> - (select count(p_size) from part pp where part.p_type <> pp.p_type); +explain select * from part_n11 where p_size <> + (select count(p_size) from part_n11 pp where part_n11.p_type <> pp.p_type); +select * from part_n11 where p_size <> + (select count(p_size) from part_n11 pp where part_n11.p_type <> pp.p_type); -create table t(i int, j int); -insert into t values(3,1), (1,1); +create table t_n11(i int, j int); +insert into t_n11 values(3,1), (1,1); --- for t.i=1 inner query will result empty result, making count(*) = 0 +-- for t_n11.i=1 inner query will result empty result, making count(*) = 0 -- therefore where predicate will be true -explain select * from t where 0 = (select count(*) from t tt where tt.j <> t.i); -select * from t where 0 = (select count(*) from t tt where tt.j <> t.i); +explain select * from t_n11 where 0 = (select count(*) from t_n11 tt where tt.j <> t_n11.i); +select * from t_n11 where 0 = (select count(*) from t_n11 tt where tt.j <> t_n11.i); -- same as above but with avg aggregate, avg(tt.i) will be null therefore -- empty result set -explain select * from t where 0 = (select avg(tt.i) from t tt where tt.j <> t.i); -select * from t where 0 = (select avg(tt.i) from t tt where tt.j <> t.i); +explain select * from t_n11 where 0 = (select avg(tt.i) from t_n11 tt where tt.j <> t_n11.i); +select * from t_n11 where 0 = (select avg(tt.i) from t_n11 tt where tt.j <> t_n11.i); -create table tempty(i int, j int); +create table tempty_n0(i int, j int); -- following query has subquery on empty making count(*) to zero and where predicate -- to true for all rows in outer query -explain select * from t where 0 = (select count(*) from tempty tt where t.i=tt.i); -select * from t where 0 = (select count(*) from tempty tt where t.i=tt.i); +explain select * from t_n11 where 0 = (select count(*) from tempty_n0 tt where t_n11.i=tt.i); +select * from t_n11 where 0 = (select count(*) from tempty_n0 tt where t_n11.i=tt.i); -- same as above but with min aggregate, since min on empty will return null -- making where predicate false for all -explain select * from t where 0 = (select min(tt.j) from tempty tt where t.i=tt.i); -select * from t where 0 = (select min(tt.j) from tempty tt where t.i=tt.i); +explain select * from t_n11 where 0 = (select min(tt.j) from tempty_n0 tt where t_n11.i=tt.i); +select * from t_n11 where 0 = (select min(tt.j) from tempty_n0 tt where t_n11.i=tt.i); -drop table t; -drop table tempty; +drop table t_n11; +drop table tempty_n0; -- following queries shouldn't have a join with sq_count_check set hive.optimize.remove.sq_count_check = true; explain select key, count(*) from src group by key having count(*) > (select count(*) from src s1 group by 4); explain select key, count(*) from src group by key having count(*) > (select count(*) from src s1 where s1.key = '90' group by s1.key ); set hive.optimize.remove.sq_count_check = false; diff --git a/ql/src/test/queries/clientpositive/subquery_select.q b/ql/src/test/queries/clientpositive/subquery_select.q index 12b2f1f3ea..80618c777b 100644 --- a/ql/src/test/queries/clientpositive/subquery_select.q +++ b/ql/src/test/queries/clientpositive/subquery_select.q @@
-117,23 +117,23 @@ where b.key in from src a where b.value = a.value and a.key > '9' ); -CREATE table tnull(i int); -insert into tnull values(null); +CREATE table tnull_n3(i int); +insert into tnull_n3 values(null); -- IN query returns unknown/NULL instead of true/false -explain select p_size, p_size IN (select i from tnull) from part; -select p_size, p_size IN (select i from tnull) from part; +explain select p_size, p_size IN (select i from tnull_n3) from part; +select p_size, p_size IN (select i from tnull_n3) from part; -CREATE TABLE tempty(i int); +CREATE TABLE tempty_n3(i int); -explain select p_size, (select count(*) from tempty) from part; -select p_size, (select count(*) from tempty) from part; +explain select p_size, (select count(*) from tempty_n3) from part; +select p_size, (select count(*) from tempty_n3) from part; -explain select p_size, (select max(i) from tempty) from part; -select p_size, (select max(i) from tempty) from part; +explain select p_size, (select max(i) from tempty_n3) from part; +select p_size, (select max(i) from tempty_n3) from part; -DROP table tempty; -DROP table tnull; +DROP table tempty_n3; +DROP table tnull_n3; -- following tests test subquery in all kind of expressions (except UDAF, UDA and UDTF) diff --git a/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q b/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q index 753032d9de..a3a8ab0443 100644 --- a/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q +++ b/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q @@ -1,9 +1,9 @@ --! qt:dataset:src --! qt:dataset:part set hive.mapred.mode=nonstrict; -create table src11 (key1 string, value1 string); +create table src11_n0 (key1 string, value1 string); -create table part2( +create table part2_n2( p2_partkey INT, p2_name STRING, p2_mfgr STRING, @@ -16,7 +16,7 @@ create table part2( ); -- non agg, corr -explain select * from src11 where src11.key1 in (select key from src where src11.value1 = value and key > '9'); +explain select * from src11_n0 where src11_n0.key1 in (select key from src where src11_n0.value1 = value and key > '9'); explain select * from src a where a.key in (select key from src where a.value = value and key > '9'); diff --git a/ql/src/test/queries/clientpositive/table_access_keys_stats.q b/ql/src/test/queries/clientpositive/table_access_keys_stats.q index f955acd46d..90254d78b0 100644 --- a/ql/src/test/queries/clientpositive/table_access_keys_stats.q +++ b/ql/src/test/queries/clientpositive/table_access_keys_stats.q @@ -5,56 +5,56 @@ SET hive.stats.collect.tablekeys=true; -- SORT_QUERY_RESULTS -- This test is used for testing the TableAccessAnalyzer -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +CREATE TABLE T1_n13(key STRING, val STRING) STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n13; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n8(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n4(key STRING, val STRING) STORED AS TEXTFILE; -- Simple group-by queries -SELECT key, count(1) FROM T1 GROUP BY key; -SELECT key, val, count(1) FROM T1 GROUP BY key, val; +SELECT key, count(1) FROM T1_n13 GROUP BY key; +SELECT key, val, count(1) FROM T1_n13 GROUP BY key, val; -- With subqueries and column aliases -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key; -SELECT k, 
count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k; +SELECT key, count(1) FROM (SELECT key, val FROM T1_n13) subq1 GROUP BY key; +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n13) subq1 GROUP BY k; -- With constants -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key; -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val; -SELECT key, 1, val, 2, count(1) FROM T1 GROUP BY key, 1, val, 2; +SELECT 1, key, count(1) FROM T1_n13 GROUP BY 1, key; +SELECT key, 1, val, count(1) FROM T1_n13 GROUP BY key, 1, val; +SELECT key, 1, val, 2, count(1) FROM T1_n13 GROUP BY key, 1, val, 2; -- no mapping with functions -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1; +SELECT key, key + 1, count(1) FROM T1_n13 GROUP BY key, key + 1; SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n13 GROUP BY key) subq1 group by key + key; -- group by followed by union SELECT * FROM ( -SELECT key, count(1) as c FROM T1 GROUP BY key +SELECT key, count(1) as c FROM T1_n13 GROUP BY key UNION ALL -SELECT key, count(1) as c FROM T1 GROUP BY key +SELECT key, count(1) as c FROM T1_n13 GROUP BY key ) subq1; -- group by followed by a join SELECT * FROM -(SELECT key, count(1) as c FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as c FROM T1_n13 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as c FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as c FROM T1_n13 GROUP BY key) subq2 ON subq1.key = subq2.key; SELECT * FROM -(SELECT key, count(1) as c FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as c FROM T1_n13 GROUP BY key) subq1 JOIN -(SELECT key, val, count(1) as c FROM T1 GROUP BY key, val) subq2 +(SELECT key, val, count(1) as c FROM T1_n13 GROUP BY key, val) subq2 ON subq1.key = subq2.key ORDER BY subq1.key ASC, subq1.c ASC, subq2.key ASC, subq2.val ASC, subq2.c ASC; -- constants from sub-queries should work fine SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T1) subq1 +(SELECT key, 1 as constant, val from T1_n13) subq1 group by key, constant, val; -- multiple levels of constants from sub-queries should work fine @@ -64,66 +64,66 @@ SELECT key, constant3, val, count(1) FROM FROM ( SELECT key, 1 AS constant, val - FROM T1 + FROM T1_n13 ) subq ) subq2 GROUP BY key, constant3, val; -- work with insert overwrite -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key, 1 -INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key; +FROM T1_n13 +INSERT OVERWRITE TABLE T2_n8 SELECT key, count(1) GROUP BY key, 1 +INSERT OVERWRITE TABLE T3_n4 SELECT key, sum(val) GROUP BY key; -- simple joins SELECT * -FROM T1 JOIN T2 -ON T1.key = t2.key -ORDER BY T1.key ASC, T1.val ASC; +FROM T1_n13 JOIN T2_n8 +ON T1_n13.key = t2_n8.key +ORDER BY T1_n13.key ASC, T1_n13.val ASC; SELECT * -FROM T1 JOIN T2 -ON T1.key = T2.key AND T1.val = T2.val; +FROM T1_n13 JOIN T2_n8 +ON T1_n13.key = T2_n8.key AND T1_n13.val = T2_n8.val; -- map join SELECT /*+ MAPJOIN(a) */ * -FROM T1 a JOIN T2 b +FROM T1_n13 a JOIN T2_n8 b ON a.key = b.key; -- with constant in join condition SELECT * -FROM T1 JOIN T2 -ON T1.key = T2.key AND T1.val = 3 and T2.val = 3; +FROM T1_n13 JOIN T2_n8 +ON T1_n13.key = T2_n8.key AND T1_n13.val = 3 and T2_n8.val = 3; -- subqueries SELECT * FROM ( - SELECT val FROM T1 WHERE key = 5 + SELECT val FROM T1_n13 WHERE key = 5 ) subq1 JOIN ( - SELECT val FROM T2 WHERE key = 6 + SELECT val FROM T2_n8 WHERE key = 6 ) subq2 ON subq1.val = subq2.val; SELECT * FROM ( - SELECT val FROM T1
WHERE key = 5 + SELECT val FROM T1_n13 WHERE key = 5 ) subq1 -JOIN T2 -ON subq1.val = T2.val; +JOIN T2_n8 +ON subq1.val = T2_n8.val; -- with column aliases in subqueries SELECT * FROM ( - SELECT val as v FROM T1 WHERE key = 5 + SELECT val as v FROM T1_n13 WHERE key = 5 ) subq1 JOIN ( - SELECT val FROM T2 WHERE key = 6 + SELECT val FROM T2_n8 WHERE key = 6 ) subq2 ON subq1.v = subq2.val; @@ -131,11 +131,11 @@ ON subq1.v = subq2.val; SELECT * FROM ( - SELECT key, val FROM T1 + SELECT key, val FROM T1_n13 ) subq1 JOIN ( - SELECT key, 'teststring' as val FROM T2 + SELECT key, 'teststring' as val FROM T2_n8 ) subq2 ON subq1.val = subq2.val AND subq1.key = subq2.key; @@ -145,12 +145,12 @@ FROM ( SELECT key, val from ( - SELECT key, 'teststring' as val from T1 + SELECT key, 'teststring' as val from T1_n13 ) subq1 ) subq2 JOIN ( - SELECT key, val FROM T2 + SELECT key, val FROM T2_n8 ) subq3 ON subq3.val = subq2.val AND subq3.key = subq2.key; @@ -158,11 +158,11 @@ ON subq3.val = subq2.val AND subq3.key = subq2.key; SELECT * FROM ( - SELECT key, val from T1 + SELECT key, val from T1_n13 ) subq1 JOIN ( - SELECT key, val FROM T2 + SELECT key, val FROM T2_n8 ) subq2 ON subq1.val = subq2.val AND subq1.key + 1 = subq2.key; @@ -170,11 +170,11 @@ ON subq1.val = subq2.val AND subq1.key + 1 = subq2.key; SELECT subq1.val, COUNT(*) FROM ( - SELECT key, val FROM T1 + SELECT key, val FROM T1_n13 ) subq1 JOIN ( - SELECT key, 'teststring' as val FROM T2 + SELECT key, 'teststring' as val FROM T2_n8 ) subq2 ON subq1.val = subq2.val AND subq1.key = subq2.key GROUP BY subq1.val; @@ -186,17 +186,17 @@ FROM SELECT subq1.val, COUNT(*) FROM ( - SELECT key, val FROM T1 + SELECT key, val FROM T1_n13 ) subq1 JOIN ( - SELECT key, 'teststring' as val FROM T2 + SELECT key, 'teststring' as val FROM T2_n8 ) subq2 ON subq1.val = subq2.val AND subq1.key = subq2.key GROUP BY subq1.val UNION ALL SELECT val, COUNT(*) - FROM T3 + FROM T3_n4 GROUP BY val ) subq4; @@ -207,36 +207,36 @@ FROM SELECT subq1.val as val, COUNT(*) FROM ( - SELECT key, val FROM T1 + SELECT key, val FROM T1_n13 ) subq1 JOIN ( - SELECT key, 'teststring' as val FROM T2 + SELECT key, 'teststring' as val FROM T2_n8 ) subq2 ON subq1.val = subq2.val AND subq1.key = subq2.key GROUP by subq1.val ) T4 -JOIN T3 -ON T3.val = T4.val; +JOIN T3_n4 +ON T3_n4.val = T4.val; set hive.cbo.returnpath.hiveop=true; -- simple joins SELECT * -FROM T1 JOIN T2 -ON T1.key = t2.key -ORDER BY T1.key ASC, T1.val ASC; +FROM T1_n13 JOIN T2_n8 +ON T1_n13.key = t2_n8.key +ORDER BY T1_n13.key ASC, T1_n13.val ASC; SELECT * -FROM T1 JOIN T2 -ON T1.key = T2.key AND T1.val = T2.val; +FROM T1_n13 JOIN T2_n8 +ON T1_n13.key = T2_n8.key AND T1_n13.val = T2_n8.val; -- group by followed by a join SELECT * FROM -(SELECT key, count(1) as c FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as c FROM T1_n13 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as c FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as c FROM T1_n13 GROUP BY key) subq2 ON subq1.key = subq2.key; diff --git a/ql/src/test/queries/clientpositive/tablevalues.q b/ql/src/test/queries/clientpositive/tablevalues.q index eade2ffd14..a557cb2e0d 100644 --- a/ql/src/test/queries/clientpositive/tablevalues.q +++ b/ql/src/test/queries/clientpositive/tablevalues.q @@ -2,31 +2,31 @@ -- VALUES -> array(struct(),struct()) -- TABLE -> LATERAL VIEW INLINE -CREATE TABLE mytbl AS +CREATE TABLE mytbl_n1 AS SELECT key, value FROM src ORDER BY key LIMIT 5; EXPLAIN -INSERT INTO mytbl(key,value) +INSERT INTO mytbl_n1(key,value) SELECT a,b as c FROM TABLE(VALUES(1,2),(3,4)) AS
vc(a,b) WHERE b = 9; -INSERT INTO mytbl(key,value) +INSERT INTO mytbl_n1(key,value) SELECT a,b as c FROM TABLE(VALUES(1,2),(3,4)) AS vc(a,b) WHERE b = 9; EXPLAIN SELECT tf.col1, tf.col2, tf.col3 FROM - (SELECT key, value FROM mytbl) t + (SELECT key, value FROM mytbl_n1) t LATERAL VIEW INLINE(array(struct('A', 10, t.key),struct('B', 20, t.key))) tf AS col1, col2, col3; SELECT tf.col1, tf.col2, tf.col3 FROM - (SELECT key, value FROM mytbl) t + (SELECT key, value FROM mytbl_n1) t LATERAL VIEW INLINE(array(struct('A', 10, t.key),struct('B', 20, t.key))) tf AS col1, col2, col3; @@ -49,12 +49,12 @@ EXPLAIN SELECT tf.col1, tf.col2, tf.col3 FROM TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3), - (SELECT key, value FROM mytbl) t; + (SELECT key, value FROM mytbl_n1) t; SELECT tf.col1, tf.col2, tf.col3 FROM TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3), - (SELECT key, value FROM mytbl) t; + (SELECT key, value FROM mytbl_n1) t; -- CROSS PRODUCT (FIRST CANNOT BE EXPRESSED WITH LVJ, SECOND CAN -- BUT IT IS NOT NEEDED) @@ -73,12 +73,12 @@ FROM EXPLAIN SELECT tf.col1, tf.col2, tf.col3 FROM - (SELECT key, value FROM mytbl) t, + (SELECT key, value FROM mytbl_n1) t, TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3); SELECT tf.col1, tf.col2, tf.col3 FROM - (SELECT key, value FROM mytbl) t, + (SELECT key, value FROM mytbl_n1) t, TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3); -- LVJ (CORRELATED). LATERAL COULD BE OPTIONAL, BUT IF WE MAKE IT @@ -86,43 +86,43 @@ FROM EXPLAIN SELECT tf.col1, tf.col2, tf.col3 FROM - (SELECT key, value FROM mytbl) t, + (SELECT key, value FROM mytbl_n1) t, LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3); SELECT tf.col1, tf.col2, tf.col3 FROM - (SELECT key, value FROM mytbl) t, + (SELECT key, value FROM mytbl_n1) t, LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3); EXPLAIN SELECT t.key FROM - (SELECT key, value FROM mytbl) t, + (SELECT key, value FROM mytbl_n1) t, LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf; SELECT t.key FROM - (SELECT key, value FROM mytbl) t, + (SELECT key, value FROM mytbl_n1) t, LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf; EXPLAIN SELECT tf.col3 FROM - (SELECT key, value FROM mytbl) t, + (SELECT key, value FROM mytbl_n1) t, LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3); SELECT tf.col3 FROM - (SELECT key, value FROM mytbl) t, + (SELECT key, value FROM mytbl_n1) t, LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3); EXPLAIN SELECT tf.col3 FROM - (SELECT row_number() over (order by key desc) as r FROM mytbl) t, + (SELECT row_number() over (order by key desc) as r FROM mytbl_n1) t, LATERAL TABLE(VALUES('A', 10, t.r),('B', 20, t.r)) AS tf(col1, col2, col3); SELECT tf.col3 FROM - (SELECT row_number() over (order by key desc) as r FROM mytbl) t, + (SELECT row_number() over (order by key desc) as r FROM mytbl_n1) t, LATERAL TABLE(VALUES('A', 10, t.r),('B', 20, t.r)) AS tf(col1, col2, col3); diff --git a/ql/src/test/queries/clientpositive/temp_table.q b/ql/src/test/queries/clientpositive/temp_table.q index 83049592ab..2e567eda38 100644 --- a/ql/src/test/queries/clientpositive/temp_table.q +++ b/ql/src/test/queries/clientpositive/temp_table.q @@ -43,28 +43,28 @@ DROP DATABASE two CASCADE; DROP TABLE bay; -create table s as select * from src limit 10; +create table s_n4 as select * from src limit 10; -select count(*) from s; +select count(*) from
s_n4; -create temporary table s as select * from s limit 2; +create temporary table s_n4 as select * from s_n4 limit 2; -select count(*) from s; +select count(*) from s_n4; -with s as ( select * from src limit 1) -select count(*) from s; +with s_n4 as ( select * from src limit 1) +select count(*) from s_n4; -with src as ( select * from s) +with src as ( select * from s_n4) select count(*) from src; -drop table s; +drop table s_n4; -select count(*) from s; +select count(*) from s_n4; -with s as ( select * from src limit 1) -select count(*) from s; +with s_n4 as ( select * from src limit 1) +select count(*) from s_n4; -with src as ( select * from s) +with src as ( select * from s_n4) select count(*) from src; -drop table s; +drop table s_n4; diff --git a/ql/src/test/queries/clientpositive/temp_table_gb1.q b/ql/src/test/queries/clientpositive/temp_table_gb1.q index 4765b4adb6..1cb1824f40 100644 --- a/ql/src/test/queries/clientpositive/temp_table_gb1.q +++ b/ql/src/test/queries/clientpositive/temp_table_gb1.q @@ -7,13 +7,13 @@ set hive.groupby.skewindata=true; -- SORT_QUERY_RESULTS -- Taken from groupby2.q -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; +CREATE TABLE dest_g2_n0(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE; CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src; FROM src_temp -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1); +INSERT OVERWRITE TABLE dest_g2_n0 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1); -SELECT dest_g2.* FROM dest_g2; +SELECT dest_g2_n0.* FROM dest_g2_n0; -DROP TABLE dest_g2; +DROP TABLE dest_g2_n0; DROP TABLE src_temp; diff --git a/ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q b/ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q index ccaf25db1b..b40063e188 100644 --- a/ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q +++ b/ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q @@ -41,11 +41,11 @@ select p_mfgr, avg(p_retailprice) over(partition by p_mfgr, p_type order by p_mf select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfgr rows between unbounded preceding and current row) from part; -- multi table insert test -create table t1 (a1 int, b1 string); -create table t2 (a1 int, b1 string); -from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select * ; -select * from t1 limit 3; -select * from t2 limit 3; +create table t1_n144 (a1 int, b1 string); +create table t2_n84 (a1 int, b1 string); +from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1_n144 select * insert overwrite table t2_n84 select * ; +select * from t1_n144 limit 3; +select * from t2_n84 limit 3; select p_mfgr, p_retailprice, p_size, round(sum(p_retailprice) over w1 , 2) + 50.0 = round(sum(lag(p_retailprice,1,50.0)) over w1 + (last_value(p_retailprice) over w1),2) diff --git a/ql/src/test/queries/clientpositive/testSetQueryString.q b/ql/src/test/queries/clientpositive/testSetQueryString.q index f11afb2771..2a6df1ece8 100644 --- a/ql/src/test/queries/clientpositive/testSetQueryString.q +++ b/ql/src/test/queries/clientpositive/testSetQueryString.q
@@ -1,4 +1,4 @@ -create table t1 (c1 int); -insert into t1 values (1); -select * from t1; +create table t1_n162 (c1 int); +insert into t1_n162 values (1); +select * from t1_n162; set hive.query.string; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/tez-tag.q b/ql/src/test/queries/clientpositive/tez-tag.q index b71fd47743..b50eed9293 100644 --- a/ql/src/test/queries/clientpositive/tez-tag.q +++ b/ql/src/test/queries/clientpositive/tez-tag.q @@ -21,29 +21,29 @@ set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000; set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ; -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_n9(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +CREATE TABLE tab_part_n6 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_part_n9 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -CREATE TABLE src2 as select * from src1; -insert into src2 select * from src2; -insert into src2 select * from src2; +CREATE TABLE src2_n0 as select * from src1; +insert into src2_n0 select * from src2_n0; +insert into src2_n0 select * from src2_n0; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n9 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n9 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n9 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n9 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n9 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n9 partition(ds='2008-04-08'); set hive.optimize.bucketingsorting=false; -insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part; +insert overwrite table tab_part_n6 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n9; -CREATE TABLE tab(key int, 
value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE tab_n5(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab_n5 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n9; set hive.convert.join.bucket.mapjoin.tez = true; set hive.auto.convert.sortmerge.join = true; @@ -54,17 +54,17 @@ set hive.mapjoin.hybridgrace.minnumpartitions=4; set hive.llap.memory.oversubscription.max.executors.per.query=3; -CREATE TABLE tab2 (key int, value string, ds string); +CREATE TABLE tab2_n2 (key int, value string, ds string); set hive.exec.dynamic.partition.mode=nonstrict; -insert into tab2select key, value, ds from tab; -analyze table tab2 compute statistics; -analyze table tab2 compute statistics for columns; +insert into tab2_n2 select key, value, ds from tab_n5; +analyze table tab2_n2 compute statistics; +analyze table tab2_n2 compute statistics for columns; -explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value; +explain select count(*) from tab_n5 a join tab_part_n6 b on a.key = b.key join src1 c on a.value = c.value; -select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value; +select count(*) from tab_n5 a join tab_part_n6 b on a.key = b.key join src1 c on a.value = c.value; -explain select count(*) from (select x.key as key, min(x.value) as value from tab2 x group by x.key) a join (select x.key as key, min(x.value) as value from tab2 x group by x.key) b on a.key = b.key join src1 c on a.value = c.value where c.key < 0; +explain select count(*) from (select x.key as key, min(x.value) as value from tab2_n2 x group by x.key) a join (select x.key as key, min(x.value) as value from tab2_n2 x group by x.key) b on a.key = b.key join src1 c on a.value = c.value where c.key < 0; diff --git a/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q b/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q index 7f0f8d3a22..de89c27353 100644 --- a/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q +++ b/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q @@ -6,21 +6,21 @@ set hive.explain.user=false; set hive.optimize.bucketingsorting=false; set hive.auto.convert.join.noconditionaltask.size=10000; -create table test (key int, value string) partitioned by (p int) clustered by (key) into 2 buckets stored as textfile; +create table test_n1 (key int, value string) partitioned by (p int) clustered by (key) into 2 buckets stored as textfile; create table test1 (key int, value string) stored as textfile; -insert into table test partition (p=1) select * from src; +insert into table test_n1 partition (p=1) select * from src; -alter table test set fileformat orc; +alter table test_n1 set fileformat orc; -insert into table test partition (p=2) select * from src; +insert into table test_n1 partition (p=2) select * from src; insert into table test1 select * from src; -describe test; +describe test_n1; set hive.auto.convert.join = true; set hive.convert.join.bucket.mapjoin.tez = true; -explain select test.key, test.value from test join test1 on (test.key = test1.key) order by test.key; +explain select test_n1.key, test_n1.value from test_n1 join test1 on (test_n1.key = test1.key) order by test_n1.key; -select test.key, test.value from test
join test1 on (test.key = test1.key) order by test.key; +select test_n1.key, test_n1.value from test_n1 join test1 on (test_n1.key = test1.key) order by test_n1.key; diff --git a/ql/src/test/queries/clientpositive/tez_fsstat.q b/ql/src/test/queries/clientpositive/tez_fsstat.q index 67ec7a8114..d3fee35f18 100644 --- a/ql/src/test/queries/clientpositive/tez_fsstat.q +++ b/ql/src/test/queries/clientpositive/tez_fsstat.q @@ -1,19 +1,19 @@ set hive.strict.checks.bucketing=false; set hive.mapred.mode=nonstrict; -CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -CREATE TABLE t1 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE tab_part_n0 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE t1_n5 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE t1 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE t1 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE t1 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE t1 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE t1_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE t1_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE t1_n5 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE t1_n5 partition(ds='2008-04-08'); set hive.optimize.bucketingsorting=false; set hive.stats.dbclass=fs; -insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from t1; -describe formatted tab_part partition(ds='2008-04-08'); +insert overwrite table tab_part_n0 partition (ds='2008-04-08') +select key,value from t1_n5; +describe formatted tab_part_n0 partition(ds='2008-04-08'); diff --git a/ql/src/test/queries/clientpositive/tez_join.q b/ql/src/test/queries/clientpositive/tez_join.q index 107080d210..0d80070e87 100644 --- a/ql/src/test/queries/clientpositive/tez_join.q +++ b/ql/src/test/queries/clientpositive/tez_join.q @@ -2,27 +2,27 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; set hive.auto.convert.sortmerge.join = true; -create table t1( +create table t1_n42( id string, od string); -create table t2( +create table t2_n26( id string, od string); explain select vt1.id from (select rt1.id from -(select t1.id, t1.od from t1 order by t1.id, t1.od) rt1) vt1 +(select t1_n42.id, t1_n42.od from t1_n42 order by t1_n42.id, t1_n42.od) rt1) vt1 join (select rt2.id from -(select t2.id, t2.od from t2 order by t2.id, t2.od) rt2) vt2 +(select t2_n26.id, t2_n26.od from t2_n26 order by t2_n26.id, t2_n26.od) rt2) vt2 where vt1.id=vt2.id; select vt1.id from (select rt1.id from -(select t1.id, t1.od from t1 order by t1.id, t1.od) rt1) vt1 +(select t1_n42.id, t1_n42.od from t1_n42 order by t1_n42.id, t1_n42.od) rt1) vt1 join (select rt2.id from -(select t2.id, t2.od from t2 order by t2.id, t2.od) rt2) vt2 +(select t2_n26.id, t2_n26.od from t2_n26 order by t2_n26.id, t2_n26.od) rt2) vt2 where vt1.id=vt2.id; diff --git a/ql/src/test/queries/clientpositive/tez_schema_evolution.q b/ql/src/test/queries/clientpositive/tez_schema_evolution.q index 62d66017eb..4933931947 100644 --- a/ql/src/test/queries/clientpositive/tez_schema_evolution.q +++ b/ql/src/test/queries/clientpositive/tez_schema_evolution.q @@ -3,17 +3,17 @@ SET hive.vectorized.execution.enabled=false; set hive.mapred.mode=nonstrict; -- SORT_QUERY_RESULTS -create table test (key int, value string) partitioned by (p int) stored as textfile; +create table test_n9 (key int, value string) partitioned by (p int) stored as textfile; -insert into table test partition (p=1) select * from src order by key limit 10; +insert into table test_n9 partition (p=1) select * from src order by key limit 10; -alter table test set fileformat orc; +alter table test_n9 set fileformat orc; -insert into table test partition (p=2) select * from src order by key limit 10; +insert into table test_n9 partition (p=2) select * from src order by key limit 10; -describe test; +describe test_n9; -select * from test where p=1 and key > 0 order by key; -select * from test where p=2 and key > 0 order by key; -select * from test where key > 0 order by key; +select * from test_n9 where p=1 and key > 0 order by key; +select * from test_n9 where p=2 and key > 0 order by key; +select * from test_n9 where key > 0 order by key; diff --git a/ql/src/test/queries/clientpositive/tez_smb_empty.q b/ql/src/test/queries/clientpositive/tez_smb_empty.q index eb73f8884f..ffe30cc8f2 100644 --- a/ql/src/test/queries/clientpositive/tez_smb_empty.q +++ b/ql/src/test/queries/clientpositive/tez_smb_empty.q @@ -10,62 +10,62 @@ set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado -- SORT_QUERY_RESULTS -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_n7(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +CREATE TABLE tab_part_n5 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_part_n8 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n7 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n7 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data
local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n8 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n8 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n8 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n8 partition(ds='2008-04-08'); set hive.optimize.bucketingsorting=false; -insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part; +insert overwrite table tab_part_n5 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n8; -CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin; +CREATE TABLE tab_n4(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab_n4 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n7; set hive.auto.convert.sortmerge.join = true; set hive.auto.convert.join.noconditionaltask.size=500; -CREATE TABLE empty(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +CREATE TABLE empty_n0(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; explain -select count(*) from tab s1 join empty s3 on s1.key=s3.key; +select count(*) from tab_n4 s1 join empty_n0 s3 on s1.key=s3.key; -select count(*) from tab s1 join empty s3 on s1.key=s3.key; +select count(*) from tab_n4 s1 join empty_n0 s3 on s1.key=s3.key; explain -select * from tab s1 left outer join empty s3 on s1.key=s3.key; +select * from tab_n4 s1 left outer join empty_n0 s3 on s1.key=s3.key; -select * from tab s1 left outer join empty s3 on s1.key=s3.key; +select * from tab_n4 s1 left outer join empty_n0 s3 on s1.key=s3.key; explain -select count(*) from tab s1 left outer join tab s2 on s1.key=s2.key join empty s3 on s1.key = s3.key; +select count(*) from tab_n4 s1 left outer join tab_n4 s2 on s1.key=s2.key join empty_n0 s3 on s1.key = s3.key; -select count(*) from tab s1 left outer join tab s2 on s1.key=s2.key join empty s3 on s1.key = s3.key; +select count(*) from tab_n4 s1 left outer join tab_n4 s2 on s1.key=s2.key join empty_n0 s3 on s1.key = s3.key; explain -select count(*) from tab s1 left outer join empty s2 on s1.key=s2.key join tab s3 on s1.key = s3.key; +select count(*) from tab_n4 s1 left outer join empty_n0 s2 on s1.key=s2.key join tab_n4 s3 on s1.key = s3.key; -select count(*) from tab s1 left outer join empty s2 on s1.key=s2.key join tab s3 on s1.key = s3.key; +select count(*) from tab_n4 s1 left outer join empty_n0 s2 on s1.key=s2.key join tab_n4 s3 on s1.key = s3.key; explain -select count(*) from empty s1 join empty s3 on s1.key=s3.key; +select count(*) from empty_n0 s1 join empty_n0 s3 on s1.key=s3.key; -select count(*) from empty s1 join empty s3 on s1.key=s3.key; +select count(*) from empty_n0 s1 join empty_n0 s3 on s1.key=s3.key; set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSelectorForAutoSMJ; explain 
-select count(*) from empty s1 join tab s3 on s1.key=s3.key; +select count(*) from empty_n0 s1 join tab_n4 s3 on s1.key=s3.key; -select count(*) from empty s1 join tab s3 on s1.key=s3.key; +select count(*) from empty_n0 s1 join tab_n4 s3 on s1.key=s3.key; diff --git a/ql/src/test/queries/clientpositive/tez_smb_main.q b/ql/src/test/queries/clientpositive/tez_smb_main.q index 43b0a5f280..85a560964c 100644 --- a/ql/src/test/queries/clientpositive/tez_smb_main.q +++ b/ql/src/test/queries/clientpositive/tez_smb_main.q @@ -16,47 +16,47 @@ set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000; set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ; -CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_n19(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +CREATE TABLE tab_part_n12 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; +CREATE TABLE srcbucket_mapjoin_part_n21 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE; -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n19 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n19 partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); -load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n21 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n21 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n21 partition(ds='2008-04-08'); +load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n21 partition(ds='2008-04-08'); set hive.optimize.bucketingsorting=false; -insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part; +insert overwrite table tab_part_n12 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n21; -CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -insert overwrite table tab partition (ds='2008-04-08') -select key,value from 
srcbucket_mapjoin; +CREATE TABLE tab_n11(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; +insert overwrite table tab_n11 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n19; set hive.convert.join.bucket.mapjoin.tez = true; set hive.auto.convert.sortmerge.join = true; explain select count(*) -from tab a join tab_part b on a.key = b.key; +from tab_n11 a join tab_part_n12 b on a.key = b.key; select count(*) -from tab a join tab_part b on a.key = b.key; +from tab_n11 a join tab_part_n12 b on a.key = b.key; set hive.auto.convert.join.noconditionaltask.size=2000; set hive.mapjoin.hybridgrace.minwbsize=500; set hive.mapjoin.hybridgrace.minnumpartitions=4; explain select count (*) -from tab a join tab_part b on a.key = b.key; +from tab_n11 a join tab_part_n12 b on a.key = b.key; select count(*) -from tab a join tab_part b on a.key = b.key; +from tab_n11 a join tab_part_n12 b on a.key = b.key; set hive.stats.fetch.column.stats=false; set hive.auto.convert.join.noconditionaltask.size=1000; @@ -64,67 +64,67 @@ set hive.mapjoin.hybridgrace.minwbsize=250; set hive.mapjoin.hybridgrace.minnumpartitions=4; explain select count (*) -from tab a join tab_part b on a.key = b.key; +from tab_n11 a join tab_part_n12 b on a.key = b.key; select count(*) -from tab a join tab_part b on a.key = b.key; +from tab_n11 a join tab_part_n12 b on a.key = b.key; set hive.auto.convert.join.noconditionaltask.size=500; set hive.mapjoin.hybridgrace.minwbsize=125; set hive.mapjoin.hybridgrace.minnumpartitions=4; set hive.llap.memory.oversubscription.max.executors.per.query=0; -explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value; +explain select count(*) from tab_n11 a join tab_part_n12 b on a.key = b.key join src1 c on a.value = c.value; set hive.llap.memory.oversubscription.max.executors.per.query=3; -explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value; -select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value; +explain select count(*) from tab_n11 a join tab_part_n12 b on a.key = b.key join src1 c on a.value = c.value; +select count(*) from tab_n11 a join tab_part_n12 b on a.key = b.key join src1 c on a.value = c.value; set hive.llap.memory.oversubscription.max.executors.per.query=0; -explain select count(*) from tab a join tab_part b on a.value = b.value; +explain select count(*) from tab_n11 a join tab_part_n12 b on a.value = b.value; set hive.llap.memory.oversubscription.max.executors.per.query=3; -explain select count(*) from tab a join tab_part b on a.value = b.value; -select count(*) from tab a join tab_part b on a.value = b.value; +explain select count(*) from tab_n11 a join tab_part_n12 b on a.value = b.value; +select count(*) from tab_n11 a join tab_part_n12 b on a.value = b.value; explain -select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key +select count(*) from (select s1.key as key, s1.value as value from tab_n11 s1 join tab_n11 s3 on s1.key=s3.key UNION ALL -select s2.key as key, s2.value as value from tab s2 -) a join tab_part b on (a.key = b.key); +select s2.key as key, s2.value as value from tab_n11 s2 +) a join tab_part_n12 b on (a.key = b.key); set hive.auto.convert.join.noconditionaltask.size=10000; set hive.llap.memory.oversubscription.max.executors.per.query=0; -explain select count(*) from tab a join tab_part b on a.value = b.value; 
+explain select count(*) from tab_n11 a join tab_part_n12 b on a.value = b.value; set hive.llap.memory.oversubscription.max.executors.per.query=2; -explain select count(*) from tab a join tab_part b on a.value = b.value; -select count(*) from tab a join tab_part b on a.value = b.value; +explain select count(*) from tab_n11 a join tab_part_n12 b on a.value = b.value; +select count(*) from tab_n11 a join tab_part_n12 b on a.value = b.value; set hive.llap.memory.oversubscription.max.executors.per.query=0; -explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value; +explain select count(*) from tab_n11 a join tab_part_n12 b on a.key = b.key join src1 c on a.value = c.value; set hive.llap.memory.oversubscription.max.executors.per.query=2; -explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value; -select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value; +explain select count(*) from tab_n11 a join tab_part_n12 b on a.key = b.key join src1 c on a.value = c.value; +select count(*) from tab_n11 a join tab_part_n12 b on a.key = b.key join src1 c on a.value = c.value; set hive.stats.fetch.column.stats=true; explain -select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key +select count(*) from (select s1.key as key, s1.value as value from tab_n11 s1 join tab_n11 s3 on s1.key=s3.key UNION ALL -select s2.key as key, s2.value as value from tab s2 -) a join tab_part b on (a.key = b.key); +select s2.key as key, s2.value as value from tab_n11 s2 +) a join tab_part_n12 b on (a.key = b.key); explain select count(*) from (select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 +(select t1.key as id, t1.value as od from tab_n11 t1 order by id, od) rt1) vt1 join (select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 +(select t2.key as id, t2.value as od from tab_part_n12 t2 order by id, od) rt2) vt2 where vt1.id=vt2.id; select count(*) from (select rt1.id from -(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1 +(select t1.key as id, t1.value as od from tab_n11 t1 order by id, od) rt1) vt1 join (select rt2.id from -(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2 +(select t2.key as id, t2.value as od from tab_part_n12 t2 order by id, od) rt2) vt2 where vt1.id=vt2.id; diff --git a/ql/src/test/queries/clientpositive/tez_union.q b/ql/src/test/queries/clientpositive/tez_union.q index da21886d69..51581649eb 100644 --- a/ql/src/test/queries/clientpositive/tez_union.q +++ b/ql/src/test/queries/clientpositive/tez_union.q @@ -99,18 +99,18 @@ drop table ut; set hive.vectorized.execution.enabled=true; -create table TABLE1(EMP_NAME STRING, EMP_ID INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; +create table TABLE1_n3(EMP_NAME STRING, EMP_ID INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; -create table table2 (EMP_NAME STRING) PARTITIONED BY (EMP_ID INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; +create table table2_n1 (EMP_NAME STRING) PARTITIONED BY (EMP_ID INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; -CREATE OR REPLACE VIEW TABLE3 as select EMP_NAME, EMP_ID from TABLE1; +CREATE OR REPLACE VIEW TABLE3 as select EMP_NAME, EMP_ID from TABLE1_n3; explain formatted select count(*) from TABLE3; -drop table table2; +drop table table2_n1; -create table table2 (EMP_NAME STRING) PARTITIONED BY (EMP_ID INT) 
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; +create table table2_n1 (EMP_NAME STRING) PARTITIONED BY (EMP_ID INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; -CREATE OR REPLACE VIEW TABLE3 as select EMP_NAME, EMP_ID from TABLE1 UNION ALL select EMP_NAME,EMP_ID from TABLE2; +CREATE OR REPLACE VIEW TABLE3 as select EMP_NAME, EMP_ID from TABLE1_n3 UNION ALL select EMP_NAME,EMP_ID from TABLE2_n1; explain formatted select count(*) from TABLE3; diff --git a/ql/src/test/queries/clientpositive/tez_union_dynamic_partition.q b/ql/src/test/queries/clientpositive/tez_union_dynamic_partition.q index e9efba4add..ee487c5afd 100644 --- a/ql/src/test/queries/clientpositive/tez_union_dynamic_partition.q +++ b/ql/src/test/queries/clientpositive/tez_union_dynamic_partition.q @@ -1,9 +1,9 @@ SET hive.vectorized.execution.enabled=false; set hive.mapred.mode=nonstrict; set hive.explain.user=false; -create table dummy(i int); -insert into table dummy values (1); -select * from dummy; +create table dummy_n2(i int); +insert into table dummy_n2 values (1); +select * from dummy_n2; create table partunion1(id1 int) partitioned by (part1 string); @@ -11,14 +11,14 @@ set hive.exec.dynamic.partition.mode=nonstrict; explain insert into table partunion1 partition(part1) select temps.* from ( -select 1 as id1, '2014' as part1 from dummy +select 1 as id1, '2014' as part1 from dummy_n2 union all -select 2 as id1, '2014' as part1 from dummy ) temps; +select 2 as id1, '2014' as part1 from dummy_n2 ) temps; insert into table partunion1 partition(part1) select temps.* from ( -select 1 as id1, '2014' as part1 from dummy +select 1 as id1, '2014' as part1 from dummy_n2 union all -select 2 as id1, '2014' as part1 from dummy ) temps; +select 2 as id1, '2014' as part1 from dummy_n2 ) temps; select * from partunion1; diff --git a/ql/src/test/queries/clientpositive/tez_union_dynamic_partition_2.q b/ql/src/test/queries/clientpositive/tez_union_dynamic_partition_2.q index 7107b0b5a6..525dd84748 100644 --- a/ql/src/test/queries/clientpositive/tez_union_dynamic_partition_2.q +++ b/ql/src/test/queries/clientpositive/tez_union_dynamic_partition_2.q @@ -1,28 +1,28 @@ SET hive.vectorized.execution.enabled=false; -drop table if exists dummy; -drop table if exists partunion1; +drop table if exists dummy_n7; +drop table if exists partunion1_n0; -create table dummy(i int); -insert into table dummy values (1); -select * from dummy; +create table dummy_n7(i int); +insert into table dummy_n7 values (1); +select * from dummy_n7; -create table partunion1(id1 int) partitioned by (part1 string) stored as orc; +create table partunion1_n0(id1 int) partitioned by (part1 string) stored as orc; set hive.exec.dynamic.partition.mode=nonstrict; set hive.merge.tezfiles=true; -explain insert into table partunion1 partition(part1) +explain insert into table partunion1_n0 partition(part1) select temps.* from ( -select 1 as id1, '2014' as part1 from dummy +select 1 as id1, '2014' as part1 from dummy_n7 union all -select 2 as id1, '2014' as part1 from dummy ) temps; +select 2 as id1, '2014' as part1 from dummy_n7 ) temps; -insert into table partunion1 partition(part1) -select 1 as id1, '2014' as part1 from dummy +insert into table partunion1_n0 partition(part1) +select 1 as id1, '2014' as part1 from dummy_n7 union all -select 2 as id1, '2014' as part1 from dummy; +select 2 as id1, '2014' as part1 from dummy_n7; -select * from partunion1; +select * from partunion1_n0; -drop table dummy; -drop table partunion1; +drop table dummy_n7; +drop table partunion1_n0; diff
diff --git a/ql/src/test/queries/clientpositive/tez_union_group_by.q b/ql/src/test/queries/clientpositive/tez_union_group_by.q
index 200f38d3bc..5cd7ed7bf1 100644
--- a/ql/src/test/queries/clientpositive/tez_union_group_by.q
+++ b/ql/src/test/queries/clientpositive/tez_union_group_by.q
@@ -1,5 +1,5 @@
set hive.explain.user=false;
-CREATE TABLE x
+CREATE TABLE x_n3
(
u bigint,
t string,
@@ -9,7 +9,7 @@ PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB");

-CREATE TABLE y
+CREATE TABLE y_n1
(
u bigint
)
@@ -17,7 +17,7 @@ PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB");

-CREATE TABLE z
+CREATE TABLE z_n0
(
u bigint
)
@@ -25,7 +25,7 @@ PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB");

-CREATE TABLE v
+CREATE TABLE v_n15
(
t string,
st string,
@@ -41,22 +41,22 @@ FROM
SELECT m.u, Min(`date`) as ft
FROM
(
-SELECT u, `date` FROM x WHERE `date` < '2014-09-02'
+SELECT u, `date` FROM x_n3 WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, `date` FROM y WHERE `date` < '2014-09-02'
+SELECT u, `date` FROM y_n1 WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, `date` FROM z WHERE `date` < '2014-09-02'
+SELECT u, `date` FROM z_n0 WHERE `date` < '2014-09-02'
) m
GROUP BY m.u
) n
LEFT OUTER JOIN
(
-SELECT x.u
-FROM x
-JOIN v
-ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
-GROUP BY x.u
+SELECT x_n3.u
+FROM x_n3
+JOIN v_n15
+ON (x_n3.t = v_n15.t AND x_n3.st <=> v_n15.st)
+WHERE x_n3.`date` >= '2014-03-04' AND x_n3.`date` < '2014-09-03'
+GROUP BY x_n3.u
) o
ON n.u = o.u
WHERE n.u <> 0 AND n.ft <= '2014-09-02';
@@ -67,22 +67,22 @@ FROM
SELECT m.u, Min(`date`) as ft
FROM
(
-SELECT u, `date` FROM x WHERE `date` < '2014-09-02'
+SELECT u, `date` FROM x_n3 WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, `date` FROM y WHERE `date` < '2014-09-02'
+SELECT u, `date` FROM y_n1 WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, `date` FROM z WHERE `date` < '2014-09-02'
+SELECT u, `date` FROM z_n0 WHERE `date` < '2014-09-02'
) m
GROUP BY m.u
) n
LEFT OUTER JOIN
(
-SELECT x.u
-FROM x
-JOIN v
-ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
-GROUP BY x.u
+SELECT x_n3.u
+FROM x_n3
+JOIN v_n15
+ON (x_n3.t = v_n15.t AND x_n3.st <=> v_n15.st)
+WHERE x_n3.`date` >= '2014-03-04' AND x_n3.`date` < '2014-09-03'
+GROUP BY x_n3.u
) o
ON n.u = o.u
WHERE n.u <> 0 AND n.ft <= '2014-09-02';

diff --git a/ql/src/test/queries/clientpositive/tez_union_multiinsert.q b/ql/src/test/queries/clientpositive/tez_union_multiinsert.q
index e0a24bba1f..df84ee11c4 100644
--- a/ql/src/test/queries/clientpositive/tez_union_multiinsert.q
+++ b/ql/src/test/queries/clientpositive/tez_union_multiinsert.q
@@ -2,9 +2,9 @@ set hive.explain.user=false;
-- SORT_QUERY_RESULTS

-CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n150(key STRING, value STRING) STORED AS TEXTFILE;

-CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n39(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;

explain
FROM (
@@ -15,8 +15,8 @@ FROM (
UNION all
select key, value from src s0
) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

FROM (
@@ -27,12 +27,12 @@ FROM (
UNION all
select key, value from src s0
) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n150;
+select * from DEST2_n39;

explain
FROM (
@@ -42,8 +42,8 @@ FROM (
select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1
UNION all
select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

FROM (
@@ -53,12 +53,12 @@ FROM (
select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1
UNION all
select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n150;
+select * from DEST2_n39;

explain
@@ -68,8 +68,8 @@ FROM (
select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION all
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

FROM (
@@ -78,45 +78,45 @@ FROM (
select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION all
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n150;
+select * from DEST2_n39;

explain
FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION all
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION all
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

-select * from DEST1;
-select * from DEST2;
+select * from DEST1_n150;
+select * from DEST2_n39;

explain
FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION distinct
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION distinct
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
+INSERT OVERWRITE TABLE DEST1_n150 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n39 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5))
GROUP BY unionsrc.key, unionsrc.value;

-select * from DEST1;
-select * from DEST2;
\ No newline at end of file
+select * from DEST1_n150;
+select * from DEST2_n39;
\ No newline at end of file

diff --git a/ql/src/test/queries/clientpositive/timestamp_udf.q b/ql/src/test/queries/clientpositive/timestamp_udf.q
index cb38be6fd2..53a36837f1 100644
--- a/ql/src/test/queries/clientpositive/timestamp_udf.q
+++ b/ql/src/test/queries/clientpositive/timestamp_udf.q
@@ -1,13 +1,13 @@
--! qt:dataset:src
set hive.fetch.task.conversion=more;

-drop table timestamp_udf;
+drop table timestamp_udf_n0;
drop table timestamp_udf_string;

-create table timestamp_udf (t timestamp);
+create table timestamp_udf_n0 (t timestamp);
create table timestamp_udf_string (t string);

from (select * from src tablesample (1 rows)) s
-  insert overwrite table timestamp_udf
+  insert overwrite table timestamp_udf_n0
    select '2011-05-06 07:08:09.1234567'
  insert overwrite table timestamp_udf_string
    select '2011-05-06 07:08:09.1234567';

@@ -15,31 +15,31 @@ from (select * from src tablesample (1 rows)) s

-- Test UDFs with Timestamp input
select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t),
  weekofyear(t), hour(t), minute(t), second(t), to_date(t)
-  from timestamp_udf;
+  from timestamp_udf_n0;

select date_add(t, 5), date_sub(t, 10)
-  from timestamp_udf;
+  from timestamp_udf_n0;

select datediff(t, t), datediff(t, '2002-03-21'), datediff('2002-03-21', t)
-  from timestamp_udf;
+  from timestamp_udf_n0;

select from_utc_timestamp(t, 'America/Chicago')
-  from timestamp_udf;
+  from timestamp_udf_n0;

select to_utc_timestamp(t, 'America/Chicago')
-  from timestamp_udf;
+  from timestamp_udf_n0;

select t, from_utc_timestamp(t, 'America/Chicago')
-  from timestamp_udf;
+  from timestamp_udf_n0;

select t, from_utc_timestamp(t, 'America/Chicago'), t, from_utc_timestamp(t, 'America/Chicago')
-  from timestamp_udf;
+  from timestamp_udf_n0;

select t, to_utc_timestamp(t, 'America/Chicago')
-  from timestamp_udf;
+  from timestamp_udf_n0;

select t, to_utc_timestamp(t, 'America/Chicago'), t, to_utc_timestamp(t, 'America/Chicago')
-  from timestamp_udf;
+  from timestamp_udf_n0;

-- Test UDFs with string input
select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t),
@@ -57,5 +57,5 @@ select from_utc_timestamp(t, 'America/Chicago')
select to_utc_timestamp(t, 'America/Chicago')
  from timestamp_udf_string;

-drop table timestamp_udf;
+drop table timestamp_udf_n0;
drop table timestamp_udf_string;

diff --git a/ql/src/test/queries/clientpositive/timestamptz_3.q b/ql/src/test/queries/clientpositive/timestamptz_3.q
index 7c55a50111..6cc3657519 100644
--- a/ql/src/test/queries/clientpositive/timestamptz_3.q
+++ b/ql/src/test/queries/clientpositive/timestamptz_3.q
@@ -1,15 +1,15 @@
set hive.fetch.task.conversion=more;

-drop table tstz1;
+drop table tstz1_n1;

-create table tstz1(t timestamp with local time zone);
+create table tstz1_n1(t timestamp with local time zone);

-insert overwrite table tstz1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone);
+insert overwrite table tstz1_n1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone);

-select cast(t as timestamp) from tstz1;
-select cast(to_epoch_milli(t) as timestamp) from tstz1;
+select cast(t as timestamp) from tstz1_n1;
+select cast(to_epoch_milli(t) as timestamp) from tstz1_n1;

set time zone UTC;

-select cast(t as timestamp) from tstz1;
-select cast(to_epoch_milli(t) as timestamp) from tstz1;
+select cast(t as timestamp) from tstz1_n1;
+select cast(to_epoch_milli(t) as timestamp) from tstz1_n1;

diff --git a/ql/src/test/queries/clientpositive/truncate_column.q b/ql/src/test/queries/clientpositive/truncate_column.q
index 1e29b55001..d15fbf1212 100644
--- a/ql/src/test/queries/clientpositive/truncate_column.q
+++ b/ql/src/test/queries/clientpositive/truncate_column.q
@@ -3,67 +3,67 @@ set hive.mapred.mode=nonstrict;

-- Tests truncating column(s) from a table, also tests that stats are updated

-CREATE TABLE test_tab (key STRING, value STRING)
+CREATE TABLE test_tab_n1 (key STRING, value STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
STORED AS RCFILE;

set hive.stats.autogather=true;

-INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows);
+INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows);

-DESC FORMATTED test_tab;
+DESC FORMATTED test_tab_n1;

-SELECT * FROM test_tab ORDER BY value;
+SELECT * FROM test_tab_n1 ORDER BY value;

-- Truncate 1 column
-TRUNCATE TABLE test_tab COLUMNS (key);
+TRUNCATE TABLE test_tab_n1 COLUMNS (key);

-DESC FORMATTED test_tab;
+DESC FORMATTED test_tab_n1;

-- First column should be null
-SELECT * FROM test_tab ORDER BY value;
+SELECT * FROM test_tab_n1 ORDER BY value;

-- Truncate multiple columns
-INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows);
+INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows);

-TRUNCATE TABLE test_tab COLUMNS (key, value);
+TRUNCATE TABLE test_tab_n1 COLUMNS (key, value);

-DESC FORMATTED test_tab;
+DESC FORMATTED test_tab_n1;

-- Both columns should be null
-SELECT * FROM test_tab ORDER BY value;
+SELECT * FROM test_tab_n1 ORDER BY value;

-- Truncate columns again
-TRUNCATE TABLE test_tab COLUMNS (key, value);
+TRUNCATE TABLE test_tab_n1 COLUMNS (key, value);

-DESC FORMATTED test_tab;
+DESC FORMATTED test_tab_n1;

-- Both columns should be null
-SELECT * FROM test_tab ORDER BY value;
+SELECT * FROM test_tab_n1 ORDER BY value;

-- Test truncating with a binary serde
-ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+ALTER TABLE test_tab_n1 SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';

-INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows);
+INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows);

-DESC FORMATTED test_tab;
+DESC FORMATTED test_tab_n1;

-SELECT * FROM test_tab ORDER BY value;
+SELECT * FROM test_tab_n1 ORDER BY value;

-- Truncate 1 column
-TRUNCATE TABLE test_tab COLUMNS (key);
+TRUNCATE TABLE test_tab_n1 COLUMNS (key);

-DESC FORMATTED test_tab;
+DESC FORMATTED test_tab_n1;

-- First column should be null
-SELECT * FROM test_tab ORDER BY value;
+SELECT * FROM test_tab_n1 ORDER BY value;

-- Truncate 2 columns
-TRUNCATE TABLE test_tab COLUMNS (key, value);
+TRUNCATE TABLE test_tab_n1 COLUMNS (key, value);

-DESC FORMATTED test_tab;
+DESC FORMATTED test_tab_n1;

-- Both columns should be null
-SELECT * FROM test_tab ORDER BY value;
+SELECT * FROM test_tab_n1 ORDER BY value;

-- Test truncating a partition
CREATE TABLE test_tab_part (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;

diff --git a/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q b/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q
index 44f580652f..f7498aaba5 100644
--- a/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q
+++ b/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q
@@ -10,26 +10,26 @@ set mapred.input.dir.recursive=true;

-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)

-CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;
+CREATE TABLE test_tab_n3 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;

-ALTER TABLE test_tab
+ALTER TABLE test_tab_n3
SKEWED BY (key) ON ("484") STORED AS DIRECTORIES;

-INSERT OVERWRITE TABLE test_tab PARTITION (part = '1') SELECT * FROM src;
+INSERT OVERWRITE TABLE test_tab_n3 PARTITION (part = '1') SELECT * FROM src;
set hive.optimize.listbucketing=true;
-SELECT * FROM test_tab WHERE part = '1' AND key = '0';
+SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0';

-TRUNCATE TABLE test_tab PARTITION (part ='1') COLUMNS (value);
+TRUNCATE TABLE test_tab_n3 PARTITION (part ='1') COLUMNS (value);

-- In the following select statements the list bucketing optimization should still be used
-- In both cases value should be null
-EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '484';
+EXPLAIN EXTENDED SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '484';

-SELECT * FROM test_tab WHERE part = '1' AND key = '484';
+SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '484';

-EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '0';
+EXPLAIN EXTENDED SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0';

-SELECT * FROM test_tab WHERE part = '1' AND key = '0';
+SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0';

diff --git a/ql/src/test/queries/clientpositive/truncate_column_merge.q b/ql/src/test/queries/clientpositive/truncate_column_merge.q
index fb309b5728..e58fdf6856 100644
--- a/ql/src/test/queries/clientpositive/truncate_column_merge.q
+++ b/ql/src/test/queries/clientpositive/truncate_column_merge.q
@@ -2,21 +2,21 @@ set hive.mapred.mode=nonstrict;

-- Tests truncating a column from a table with multiple files, then merging those files

-CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE;
+CREATE TABLE test_tab_n2 (key STRING, value STRING) STORED AS RCFILE;

-INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (5 rows);
+INSERT OVERWRITE TABLE test_tab_n2 SELECT * FROM src tablesample (5 rows);

-INSERT INTO TABLE test_tab SELECT * FROM src tablesample (5 rows);
+INSERT INTO TABLE test_tab_n2 SELECT * FROM src tablesample (5 rows);

-- The value should be 2 indicating the table has 2 files
-SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab;
+SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab_n2;

-TRUNCATE TABLE test_tab COLUMNS (key);
+TRUNCATE TABLE test_tab_n2 COLUMNS (key);

-ALTER TABLE test_tab CONCATENATE;
+ALTER TABLE test_tab_n2 CONCATENATE;

-- The first column (key) should be null for all 10 rows
-SELECT * FROM test_tab ORDER BY value;
+SELECT * FROM test_tab_n2 ORDER BY value;

-- The value should be 1 indicating the table has 1 file
-SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab;
+SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab_n2;

diff --git a/ql/src/test/queries/clientpositive/tunable_ndv.q b/ql/src/test/queries/clientpositive/tunable_ndv.q
index fed51f64bb..e5e0d57f89 100644
--- a/ql/src/test/queries/clientpositive/tunable_ndv.q
+++ b/ql/src/test/queries/clientpositive/tunable_ndv.q
@@ -4,61 +4,61 @@ set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.metastore.aggregate.stats.cache.enabled=false;

-create table if not exists ext_loc (
+create table if not exists ext_loc_n2 (
state string,
locid int,
zip int,
year string
) row format delimited fields terminated by '|' stored as textfile;

-LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc;
+LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc_n2;

-create table if not exists loc_orc_1d (
+create table if not exists loc_orc_1d_n2 (
state string,
locid int,
zip int
) partitioned by(year string) stored as orc;

-insert overwrite table loc_orc_1d partition(year) select * from ext_loc;
+insert overwrite table loc_orc_1d_n2 partition(year) select * from ext_loc_n2;

-analyze table loc_orc_1d compute statistics for columns state,locid;
+analyze table loc_orc_1d_n2 compute statistics for columns state,locid;

-describe formatted loc_orc_1d partition(year=2000) locid;
-describe formatted loc_orc_1d partition(year=2001) locid;
+describe formatted loc_orc_1d_n2 partition(year=2000) locid;
+describe formatted loc_orc_1d_n2 partition(year=2001) locid;

-describe formatted loc_orc_1d locid;
+describe formatted loc_orc_1d_n2 locid;

set hive.metastore.stats.ndv.tuner=1.0;
-describe formatted loc_orc_1d locid;
+describe formatted loc_orc_1d_n2 locid;

set hive.metastore.stats.ndv.tuner=0.5;
-describe formatted loc_orc_1d locid;
+describe formatted loc_orc_1d_n2 locid;

-create table if not exists loc_orc_2d (
+create table if not exists loc_orc_2d_n2 (
state string,
locid int
) partitioned by(zip int, year string) stored as orc;

-insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc;
+insert overwrite table loc_orc_2d_n2 partition(zip, year) select * from ext_loc_n2;

-analyze table loc_orc_2d partition(zip=94086, year='2000') compute statistics for columns state,locid;
+analyze table loc_orc_2d_n2 partition(zip=94086, year='2000') compute statistics for columns state,locid;

-analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid;
+analyze table loc_orc_2d_n2 partition(zip=94087, year='2000') compute statistics for columns state,locid;

-analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid;
+analyze table loc_orc_2d_n2 partition(zip=94086, year='2001') compute statistics for columns state,locid;

-analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid;
+analyze table loc_orc_2d_n2 partition(zip=94087, year='2001') compute statistics for columns state,locid;

set hive.metastore.stats.ndv.tuner=0.0;
-describe formatted loc_orc_2d locid;
+describe formatted loc_orc_2d_n2 locid;

set hive.metastore.stats.ndv.tuner=1.0;
-describe formatted loc_orc_2d locid;
+describe formatted loc_orc_2d_n2 locid;

set hive.metastore.stats.ndv.tuner=0.5;
-describe formatted loc_orc_2d locid;
+describe formatted loc_orc_2d_n2 locid;

diff --git a/ql/src/test/queries/clientpositive/type_change_test_int.q b/ql/src/test/queries/clientpositive/type_change_test_int.q
index 0e982420a6..112a674d51 100644
--- a/ql/src/test/queries/clientpositive/type_change_test_int.q
+++ b/ql/src/test/queries/clientpositive/type_change_test_int.q
@@ -1,41 +1,41 @@
-- Create a base table to be used for loading data: Begin
-drop table if exists testAltCol;
-create table testAltCol
+drop table if exists testAltCol_n1;
+create table testAltCol_n1
(cId TINYINT,
cBigInt BIGINT,
cInt INT,
cSmallInt SMALLINT,
cTinyint TINYINT);

-insert into testAltCol values
+insert into testAltCol_n1 values
(1, 1234567890123456789, 1234567890, 12345, 123);

-insert into testAltCol values
+insert into testAltCol_n1 values
(2, 1, 2, 3, 4);

-insert into testAltCol values
+insert into testAltCol_n1 values
(3, 1234567890123456789, 1234567890, 12345, 123);

-insert into testAltCol values
+insert into testAltCol_n1 values
(4, -1234567890123456789, -1234567890, -12345, -123);

-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol_n1 order by cId;
-- Create a base table to be used for loading data: End

-- Enable change of column type
@@ -45,752 +45,752 @@ SET hive.metastore.disallow.incompatible.col.type.changes=false;
SET hive.vectorized.execution.enabled=false;

-- Text type: Begin
-drop table if exists testAltColT;
+drop table if exists testAltColT_n1;

-create table testAltColT stored as textfile as select * from testAltCol;
+create table testAltColT_n1 stored as textfile as select * from testAltCol_n1;

-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to bigint
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to int
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to smallint
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to tinyint
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to float
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to double
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- all values fit and should return all values
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int doesn't fit and should return null where it didn't fit
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int and int doesn't fit and should return null where it didn't fit
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int, int and small int doesn't fit and should return null where it didn't fit
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- only single digit fits and should return null where it didn't fit
-alter table testAltColT replace columns
+alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId;

-drop table if exists testAltColT;
+drop table if exists testAltColT_n1;
-- Text type: End

-- Sequence File type: Begin
-drop table if exists testAltColSF;
+drop table if exists testAltColSF_n1;

-create table testAltColSF stored as sequencefile as select * from testAltCol;
+create table testAltColSF_n1 stored as sequencefile as select * from testAltCol_n1;

-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to bigint
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to int
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to smallint
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to tinyint
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to float
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to double
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- all values fit and should return all values
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int doesn't fit and should return null where it didn't fit
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int and int doesn't fit and should return null where it didn't fit
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int, int and small int doesn't fit and should return null where it didn't fit
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- only single digit fits and should return null where it didn't fit
-alter table testAltColSF replace columns
+alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId;

-drop table if exists testAltColSF;
+drop table if exists testAltColSF_n1;
-- Sequence File type: End

-- RCFile type: Begin
-drop table if exists testAltColRCF;
+drop table if exists testAltColRCF_n1;

-create table testAltColRCF stored as rcfile as select * from testAltCol;
+create table testAltColRCF_n1 stored as rcfile as select * from testAltCol_n1;

-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to bigint
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to int
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to smallint
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to tinyint
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to float
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to double
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- all values fit and should return all values
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int doesn't fit and should return null where it didn't fit
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int and int doesn't fit and should return null where it didn't fit
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int, int and small int doesn't fit and should return null where it didn't fit
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- only single digit fits and should return null where it didn't fit
-alter table testAltColRCF replace columns
+alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId;

-drop table if exists testAltColRCF;
+drop table if exists testAltColRCF_n1;
-- RCFile type: End

-- ORC type: Begin
-drop table if exists testAltColORC;
+drop table if exists testAltColORC_n1;

-create table testAltColORC stored as orc as select * from testAltCol;
+create table testAltColORC_n1 stored as orc as select * from testAltCol_n1;

-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to bigint
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to int
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to smallint
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to tinyint
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to float
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to double
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- all values fit and should return all values
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int doesn't fit and should return null where it didn't fit
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int and int doesn't fit and should return null where it didn't fit
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int, int and small int doesn't fit and should return null where it didn't fit
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- only single digit fits and should return null where it didn't fit
-alter table testAltColORC replace columns
+alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId;
-drop table if exists testAltColORC;
+drop table if exists testAltColORC_n1;
-- ORC type: End

-- Parquet type with Dictionary encoding enabled: Begin
-drop table if exists testAltColPDE;
+drop table if exists testAltColPDE_n0;

-create table testAltColPDE stored as parquet as select * from testAltCol;
+create table testAltColPDE_n0 stored as parquet as select * from testAltCol_n1;

-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to bigint
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to int
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to smallint
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to tinyint
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to float
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to double
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- all values fit and should return all values
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int doesn't fit and should return null where it didn't fit
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int and int doesn't fit and should return null where it didn't fit
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int, int and small int doesn't fit and should return null where it didn't fit
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- only single digit fits and should return null where it didn't fit
-alter table testAltColPDE replace columns
+alter table testAltColPDE_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId;

-drop table if exists testAltColPDE;
+drop table if exists testAltColPDE_n0;
-- Parquet type with Dictionary encoding enabled: End

-- Parquet type with Dictionary encoding disabled: Begin
-drop table if exists testAltColPDD;
+drop table if exists testAltColPDD_n0;

-create table testAltColPDD stored as parquet tblproperties ("parquet.enable.dictionary"="false") as
-select * from testAltCol;
+create table testAltColPDD_n0 stored as parquet tblproperties ("parquet.enable.dictionary"="false") as
+select * from testAltCol_n1;

-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to bigint
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to int
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to smallint
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to tinyint
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to float
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to double
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE);
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- all values fit and should return all values
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int doesn't fit and should return null where it didn't fit
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int and int doesn't fit and should return null where it didn't fit
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- some of big int, int and small int doesn't fit and should return null where it didn't fit
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-- bigint, int, smallint, and tinyint: type changed to decimal
-- only single digit fits and should return null where it didn't fit
-alter table testAltColPDD replace columns
+alter table testAltColPDD_n0 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2));
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId;

-drop table if exists testAltColPDD;
+drop table if exists testAltColPDD_n0;
-- Parquet type with Dictionary encoding enabled: End

diff --git a/ql/src/test/queries/clientpositive/type_widening.q b/ql/src/test/queries/clientpositive/type_widening.q
index dba7be54b7..0e4c80ffa8 100644
--- a/ql/src/test/queries/clientpositive/type_widening.q
+++ b/ql/src/test/queries/clientpositive/type_widening.q
@@ -7,9 +7,9 @@ SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1;
EXPLAIN SELECT * FROM (SELECT 0 AS numcol FROM src UNION ALL SELECT 9223372036854775807 AS numcol FROM src) a ORDER BY numcol;
SELECT * FROM (SELECT 0 AS numcol FROM src UNION ALL SELECT 9223372036854775807 AS numcol FROM src) a ORDER BY numcol;

-create table t1(a tinyint, b smallint);
-explain select * from t1 where a > 2;
-explain select * from t1 where b < 2;
-explain select * from t1 where a < 200;
-explain select * from t1 where b > 40000;
-drop table t1;
+create table t1_n114(a tinyint, b smallint);
+explain select * from t1_n114 where a > 2;
+explain select * from t1_n114 where b < 2;
+explain select * from t1_n114 where a < 200;
+explain select * from t1_n114 where b > 40000;
+drop table t1_n114;

diff --git a/ql/src/test/queries/clientpositive/typechangetest.q b/ql/src/test/queries/clientpositive/typechangetest.q
index 314fc8be97..1897d060e9 100644
--- a/ql/src/test/queries/clientpositive/typechangetest.q
+++ b/ql/src/test/queries/clientpositive/typechangetest.q
@@ -1,7 +1,7 @@
-- Create a base table to be used for loading data: Begin
-drop table if exists testAltCol;
-create table testAltCol
+drop table if exists testAltCol_n0;
+create table testAltCol_n0
(cId TINYINT,
cTimeStamp TIMESTAMP,
cDecimal DECIMAL(38,18),
@@ -13,7 +13,7 @@ create table testAltCol
cTinyint TINYINT,
cBoolean BOOLEAN);

-insert into testAltCol values
+insert into testAltCol_n0 values
(1,
'2017-11-07 09:02:49.999999999',
12345678901234567890.123456789012345678,
@@ -25,7 +25,7 @@ insert into testAltCol values
-insert into testAltCol values
+insert into testAltCol_n0 values
(2,
'1400-01-01 01:01:01.000000001',
1.1,
@@ -37,7 +37,7 @@ insert into testAltCol values
-insert into testAltCol values
+insert into testAltCol_n0 values
(3,
'1400-01-01 01:01:01.000000001',
10.1,
@@ -49,7 +49,7 @@ insert into testAltCol values
-insert into testAltCol values
+insert into testAltCol_n0 values
(4,
'1400-01-01 01:01:01.000000001',
-10.1,
@@ -61,10 +61,10 @@ insert into testAltCol values
-123,
FALSE);

-select cId, cTimeStamp from testAltCol order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltCol order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol order by cId;
-select cId, cBoolean from testAltCol order by cId;
+select cId, cTimeStamp from testAltCol_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltCol_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol_n0 order by cId;
+select cId, cBoolean from testAltCol_n0 order by cId;
-- Create a base table to be used for loading data: Begin

-- Enable change of column type
@@ -73,16 +73,16 @@ SET hive.metastore.disallow.incompatible.col.type.changes=false;

-- Text type: Begin
-- timestamp, decimal, double, float, bigint, int, smallint, tinyint and boolean: after type
float, bigint, int, smallint, tinyint and boolean: after type -- changed to string, varchar and char return correct data. -drop table if exists testAltColT; +drop table if exists testAltColT_n0; -create table testAltColT stored as textfile as select * from testAltCol; +create table testAltColT_n0 stored as textfile as select * from testAltCol_n0; -select cId, cTimeStamp from testAltColT order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColT order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId; -select cId, cBoolean from testAltColT order by cId; +select cId, cTimeStamp from testAltColT_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId; +select cId, cBoolean from testAltColT_n0 order by cId; -alter table testAltColT replace columns +alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -94,12 +94,12 @@ alter table testAltColT replace columns cTinyint STRING, cBoolean STRING); -select cId, cTimeStamp from testAltColT order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColT order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId; -select cId, cBoolean from testAltColT order by cId; +select cId, cTimeStamp from testAltColT_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId; +select cId, cBoolean from testAltColT_n0 order by cId; -alter table testAltColT replace columns +alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -111,12 +111,12 @@ alter table testAltColT replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)); -select cId, cTimeStamp from testAltColT order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColT order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId; -select cId, cBoolean from testAltColT order by cId; +select cId, cTimeStamp from testAltColT_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId; +select cId, cBoolean from testAltColT_n0 order by cId; -alter table testAltColT replace columns +alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -128,12 +128,12 @@ alter table testAltColT replace columns cTinyint CHAR(100), cBoolean CHAR(100)); -select cId, cTimeStamp from testAltColT order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColT order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId; -select cId, cBoolean from testAltColT order by cId; +select cId, cTimeStamp from testAltColT_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId; +select cId, cBoolean from testAltColT_n0 order by cId; -alter table testAltColT replace columns +alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -145,12 +145,12 @@ alter table testAltColT replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)); -select cId, cTimeStamp from testAltColT order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColT order by 
cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId; -select cId, cBoolean from testAltColT order by cId; +select cId, cTimeStamp from testAltColT_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId; +select cId, cBoolean from testAltColT_n0 order by cId; -alter table testAltColT replace columns +alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -162,26 +162,26 @@ alter table testAltColT replace columns cTinyint CHAR(4), cBoolean CHAR(4)); -select cId, cTimeStamp from testAltColT order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColT order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId; -select cId, cBoolean from testAltColT order by cId; -drop table if exists testAltColT; +select cId, cTimeStamp from testAltColT_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId; +select cId, cBoolean from testAltColT_n0 order by cId; +drop table if exists testAltColT_n0; -- Text type: End -- Sequence File type: Begin -- timestamp, decimal, double, float, bigint, int, smallint, tinyint and boolean: after type -- changed to string, varchar and char return correct data. -drop table if exists testAltColSF; +drop table if exists testAltColSF_n0; -create table testAltColSF stored as sequencefile as select * from testAltCol; +create table testAltColSF_n0 stored as sequencefile as select * from testAltCol_n0; -select cId, cTimeStamp from testAltColSF order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId; -select cId, cBoolean from testAltColSF order by cId; +select cId, cTimeStamp from testAltColSF_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId; +select cId, cBoolean from testAltColSF_n0 order by cId; -alter table testAltColSF replace columns +alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -193,12 +193,12 @@ alter table testAltColSF replace columns cTinyint STRING, cBoolean STRING); -select cId, cTimeStamp from testAltColSF order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId; -select cId, cBoolean from testAltColSF order by cId; +select cId, cTimeStamp from testAltColSF_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId; +select cId, cBoolean from testAltColSF_n0 order by cId; -alter table testAltColSF replace columns +alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -210,12 +210,12 @@ alter table testAltColSF replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)); -select cId, cTimeStamp from testAltColSF order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId; -select cId, cBoolean from testAltColSF order by cId; +select cId, cTimeStamp from testAltColSF_n0 order 
by cId; +select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId; +select cId, cBoolean from testAltColSF_n0 order by cId; -alter table testAltColSF replace columns +alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -227,12 +227,12 @@ alter table testAltColSF replace columns cTinyint CHAR(100), cBoolean CHAR(100)); -select cId, cTimeStamp from testAltColSF order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId; -select cId, cBoolean from testAltColSF order by cId; +select cId, cTimeStamp from testAltColSF_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId; +select cId, cBoolean from testAltColSF_n0 order by cId; -alter table testAltColSF replace columns +alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -244,12 +244,12 @@ alter table testAltColSF replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)); -select cId, cTimeStamp from testAltColSF order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId; -select cId, cBoolean from testAltColSF order by cId; +select cId, cTimeStamp from testAltColSF_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId; +select cId, cBoolean from testAltColSF_n0 order by cId; -alter table testAltColSF replace columns +alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -261,27 +261,27 @@ alter table testAltColSF replace columns cTinyint CHAR(4), cBoolean CHAR(4)); -select cId, cTimeStamp from testAltColSF order by cId; -select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId; -select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId; -select cId, cBoolean from testAltColSF order by cId; +select cId, cTimeStamp from testAltColSF_n0 order by cId; +select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId; +select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId; +select cId, cBoolean from testAltColSF_n0 order by cId; -drop table if exists testAltColSF; +drop table if exists testAltColSF_n0; -- Sequence File type: End -- ORC type: Begin -- timestamp, decimal, double, float, bigint, int, smallint, tinyint and boolean: after type -- changed to string, varchar and char return correct data. 
-drop table if exists testAltColORC;
+drop table if exists testAltColORC_n0;

-create table testAltColORC stored as orc as select * from testAltCol;
+create table testAltColORC_n0 stored as orc as select * from testAltCol_n0;

-select cId, cTimeStamp from testAltColORC order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
-select cId, cBoolean from testAltColORC order by cId;
+select cId, cTimeStamp from testAltColORC_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId;
+select cId, cBoolean from testAltColORC_n0 order by cId;

-alter table testAltColORC replace columns
+alter table testAltColORC_n0 replace columns
 (cId TINYINT,
 cTimeStamp STRING,
 cDecimal STRING,
@@ -293,12 +293,12 @@ alter table testAltColORC replace columns
 cTinyint STRING,
 cBoolean STRING);

-select cId, cTimeStamp from testAltColORC order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
-select cId, cBoolean from testAltColORC order by cId;
+select cId, cTimeStamp from testAltColORC_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId;
+select cId, cBoolean from testAltColORC_n0 order by cId;

-alter table testAltColORC replace columns
+alter table testAltColORC_n0 replace columns
 (cId TINYINT,
 cTimeStamp VARCHAR(100),
 cDecimal VARCHAR(100),
@@ -310,12 +310,12 @@ alter table testAltColORC replace columns
 cTinyint VARCHAR(100),
 cBoolean VARCHAR(100));

-select cId, cTimeStamp from testAltColORC order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
-select cId, cBoolean from testAltColORC order by cId;
+select cId, cTimeStamp from testAltColORC_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId;
+select cId, cBoolean from testAltColORC_n0 order by cId;

-alter table testAltColORC replace columns
+alter table testAltColORC_n0 replace columns
 (cId TINYINT,
 cTimeStamp CHAR(100),
 cDecimal CHAR(100),
@@ -327,12 +327,12 @@ alter table testAltColORC replace columns
 cTinyint CHAR(100),
 cBoolean CHAR(100));

-select cId, cTimeStamp from testAltColORC order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
-select cId, cBoolean from testAltColORC order by cId;
+select cId, cTimeStamp from testAltColORC_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId;
+select cId, cBoolean from testAltColORC_n0 order by cId;

-alter table testAltColORC replace columns
+alter table testAltColORC_n0 replace columns
 (cId TINYINT,
 cTimeStamp VARCHAR(4),
 cDecimal VARCHAR(4),
@@ -344,12 +344,12 @@ alter table testAltColORC replace columns
 cTinyint VARCHAR(4),
 cBoolean VARCHAR(4));

-select cId, cTimeStamp from testAltColORC order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
-select cId, cBoolean from testAltColORC order by cId;
+select cId, cTimeStamp from testAltColORC_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId;
+select cId, cBoolean from testAltColORC_n0 order by cId;

-alter table testAltColORC replace columns
+alter table testAltColORC_n0 replace columns
 (cId TINYINT,
 cTimeStamp CHAR(4),
 cDecimal CHAR(4),
@@ -361,27 +361,27 @@ alter table testAltColORC replace columns
 cTinyint CHAR(4),
 cBoolean CHAR(4));

-select cId, cTimeStamp from testAltColORC order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId;
-select cId, cBoolean from testAltColORC order by cId;
+select cId, cTimeStamp from testAltColORC_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId;
+select cId, cBoolean from testAltColORC_n0 order by cId;

-drop table if exists testAltColORC;
+drop table if exists testAltColORC_n0;

 -- ORC type: End

 -- RCFile type: Begin
 -- timestamp, decimal, double, float, bigint, int, smallint, tinyint and boolean: after type
 -- changed to string, varchar and char return correct data.
-drop table if exists testAltColRCF;
+drop table if exists testAltColRCF_n0;

-create table testAltColRCF stored as rcfile as select * from testAltCol;
+create table testAltColRCF_n0 stored as rcfile as select * from testAltCol_n0;

-select cId, cTimeStamp from testAltColRCF order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
-select cId, cBoolean from testAltColRCF order by cId;
+select cId, cTimeStamp from testAltColRCF_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId;
+select cId, cBoolean from testAltColRCF_n0 order by cId;

-alter table testAltColRCF replace columns
+alter table testAltColRCF_n0 replace columns
 (cId TINYINT,
 cTimeStamp STRING,
 cDecimal STRING,
@@ -393,12 +393,12 @@ alter table testAltColRCF replace columns
 cTinyint STRING,
 cBoolean STRING);

-select cId, cTimeStamp from testAltColRCF order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
-select cId, cBoolean from testAltColRCF order by cId;
+select cId, cTimeStamp from testAltColRCF_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId;
+select cId, cBoolean from testAltColRCF_n0 order by cId;

-alter table testAltColRCF replace columns
+alter table testAltColRCF_n0 replace columns
 (cId TINYINT,
 cTimeStamp VARCHAR(100),
 cDecimal VARCHAR(100),
@@ -410,12 +410,12 @@ alter table testAltColRCF replace columns
 cTinyint VARCHAR(100),
 cBoolean VARCHAR(100));

-select cId, cTimeStamp from testAltColRCF order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
-select cId, cBoolean from testAltColRCF order by cId;
+select cId, cTimeStamp from testAltColRCF_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId;
+select cId, cBoolean from testAltColRCF_n0 order by cId;

-alter table testAltColRCF replace columns
+alter table testAltColRCF_n0 replace columns
 (cId TINYINT,
 cTimeStamp CHAR(100),
 cDecimal CHAR(100),
@@ -427,12 +427,12 @@ alter table testAltColRCF replace columns
 cTinyint CHAR(100),
 cBoolean CHAR(100));

-select cId, cTimeStamp from testAltColRCF order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
-select cId, cBoolean from testAltColRCF order by cId;
+select cId, cTimeStamp from testAltColRCF_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId;
+select cId, cBoolean from testAltColRCF_n0 order by cId;

-alter table testAltColRCF replace columns
+alter table testAltColRCF_n0 replace columns
 (cId TINYINT,
 cTimeStamp VARCHAR(4),
 cDecimal VARCHAR(4),
@@ -444,12 +444,12 @@ alter table testAltColRCF replace columns
 cTinyint VARCHAR(4),
 cBoolean VARCHAR(4));

-select cId, cTimeStamp from testAltColRCF order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
-select cId, cBoolean from testAltColRCF order by cId;
+select cId, cTimeStamp from testAltColRCF_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId;
+select cId, cBoolean from testAltColRCF_n0 order by cId;

-alter table testAltColRCF replace columns
+alter table testAltColRCF_n0 replace columns
 (cId TINYINT,
 cTimeStamp CHAR(4),
 cDecimal CHAR(4),
@@ -461,17 +461,17 @@ alter table testAltColRCF replace columns
 cTinyint CHAR(4),
 cBoolean CHAR(4));

-select cId, cTimeStamp from testAltColRCF order by cId;
-select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId;
-select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId;
-select cId, cBoolean from testAltColRCF order by cId;
+select cId, cTimeStamp from testAltColRCF_n0 order by cId;
+select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId;
+select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId;
+select cId, cBoolean from testAltColRCF_n0 order by cId;

-drop table if exists testAltColRCF;
+drop table if exists testAltColRCF_n0;
 -- RCFile type: End

 -- Parquet type: Begin
 drop table if exists testAltColP;
-create table testAltColP stored as parquet as select * from testAltCol;
+create table testAltColP stored as parquet as select * from testAltCol_n0;

 select cId, cTimeStamp from testAltColP order by cId;
 select cId, cDecimal, cDouble, cFloat from testAltColP order by cId;
@@ -566,4 +566,4 @@ select cId, cBoolean from testAltColP order by cId;
 drop table if exists testAltColP;

 -- Parquet type: End
-drop table if exists testAltCol;
\ No newline at end of file
+drop table if exists testAltCol_n0;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/uber_reduce.q b/ql/src/test/queries/clientpositive/uber_reduce.q
index 34d5a129a5..c6bbf60c81 100644
--- a/ql/src/test/queries/clientpositive/uber_reduce.q
+++ b/ql/src/test/queries/clientpositive/uber_reduce.q
@@ -5,7 +5,7 @@ SET mapred.reduce.tasks=1;
 -- Uberized mode is a YARN option, ignore this test for non-YARN Hadoop versions
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)

-CREATE TABLE T1(key STRING, val STRING);
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+CREATE TABLE T1_n136(key STRING, val STRING);
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n136;

-SELECT count(*) FROM T1;
+SELECT count(*) FROM T1_n136;
diff --git a/ql/src/test/queries/clientpositive/udaf_binarysetfunctions.q b/ql/src/test/queries/clientpositive/udaf_binarysetfunctions.q
index 254ac15b28..0d3045329a 100644
--- a/ql/src/test/queries/clientpositive/udaf_binarysetfunctions.q
+++ b/ql/src/test/queries/clientpositive/udaf_binarysetfunctions.q
@@ -1,42 +1,42 @@
-drop table t;
-create table t (id int,px int,y decimal,x decimal);
+drop table t_n21;
+create table t_n21 (id int,px int,y decimal,x decimal);

-insert into t values (101,1,1,1);
-insert into t values (201,2,1,1);
-insert into t values (301,3,1,1);
-insert into t values (401,4,1,11);
-insert into t values (501,5,1,null);
-insert into t values (601,6,null,1);
-insert into t values (701,6,null,null);
-insert into t values (102,1,2,2);
-insert into t values (202,2,1,2);
-insert into t values (302,3,2,1);
-insert into t values (402,4,2,12);
-insert into t values (502,5,2,null);
-insert into t values (602,6,null,2);
-insert into t values (702,6,null,null);
-insert into t values (103,1,3,3);
-insert into t values (203,2,1,3);
-insert into t values (303,3,3,1);
-insert into t values (403,4,3,13);
-insert into t values (503,5,3,null);
-insert into t values (603,6,null,3);
-insert into t values (703,6,null,null);
-insert into t values (104,1,4,4);
-insert into t values (204,2,1,4);
-insert into t values (304,3,4,1);
-insert into t values (404,4,4,14);
-insert into t values (504,5,4,null);
-insert into t values (604,6,null,4);
-insert into t values (704,6,null,null);
-insert into t values (800,7,1,1);
+insert into t_n21 values (101,1,1,1);
+insert into t_n21 values (201,2,1,1);
+insert into t_n21 values (301,3,1,1);
+insert into t_n21 values (401,4,1,11);
+insert into t_n21 values (501,5,1,null);
+insert into t_n21 values (601,6,null,1);
+insert into t_n21 values (701,6,null,null);
+insert into t_n21 values (102,1,2,2);
+insert into t_n21 values (202,2,1,2);
+insert into t_n21 values (302,3,2,1);
+insert into t_n21 values (402,4,2,12);
+insert into t_n21 values (502,5,2,null);
+insert into t_n21 values (602,6,null,2);
+insert into t_n21 values (702,6,null,null);
+insert into t_n21 values (103,1,3,3);
+insert into t_n21 values (203,2,1,3);
+insert into t_n21 values (303,3,3,1);
+insert into t_n21 values (403,4,3,13);
+insert into t_n21 values (503,5,3,null);
+insert into t_n21 values (603,6,null,3);
+insert into t_n21 values (703,6,null,null);
+insert into t_n21 values (104,1,4,4);
+insert into t_n21 values (204,2,1,4);
+insert into t_n21 values (304,3,4,1);
+insert into t_n21 values (404,4,4,14);
+insert into t_n21 values (504,5,4,null);
+insert into t_n21 values (604,6,null,4);
+insert into t_n21 values (704,6,null,null);
+insert into t_n21 values (800,7,1,1);

 explain select px,var_pop(x),var_pop(y),corr(y,x),covar_samp(y,x),covar_pop(y,x),regr_count(y,x),regr_slope(y,x),
 regr_intercept(y,x), regr_r2(y,x), regr_sxx(y,x), regr_syy(y,x), regr_sxy(y,x), regr_avgx(y,x), regr_avgy(y,x), regr_count(y,x)
- from t group by px order by px;
+ from t_n21 group by px order by px;

-select px,
+select px,
 round( var_pop(x),5),
 round( var_pop(y),5),
 round( corr(y,x),5),
@@ -52,7 +52,7 @@ select px,
 round( regr_avgx(y,x),5),
 round( regr_avgy(y,x),5),
 round( regr_count(y,x),5)
- from t group by px order by px;
+ from t_n21 group by px order by px;

-select id,regr_count(y,x) over (partition by px) from t order by id;
+select id,regr_count(y,x) over (partition by px) from t_n21 order by id;
diff --git a/ql/src/test/queries/clientpositive/udaf_binarysetfunctions_no_cbo.q b/ql/src/test/queries/clientpositive/udaf_binarysetfunctions_no_cbo.q
index ae4733f705..db9292af27 100644
--- a/ql/src/test/queries/clientpositive/udaf_binarysetfunctions_no_cbo.q
+++ b/ql/src/test/queries/clientpositive/udaf_binarysetfunctions_no_cbo.q
@@ -1,42 +1,42 @@
 set hive.cbo.enable=false;
-drop table t;
-create table t (id int,px int,y decimal,x decimal);
+drop table t_n6;
+create table t_n6 (id int,px int,y decimal,x decimal);

-insert into t values (101,1,1,1);
-insert into t values (201,2,1,1);
-insert into t values (301,3,1,1);
-insert into t values (401,4,1,11);
-insert into t values (501,5,1,null);
-insert into t values (601,6,null,1);
-insert into t values (701,6,null,null);
-insert into t values (102,1,2,2);
-insert into t values (202,2,1,2);
-insert into t values (302,3,2,1);
-insert into t values (402,4,2,12);
-insert into t values (502,5,2,null);
-insert into t values (602,6,null,2);
-insert into t values (702,6,null,null);
-insert into t values (103,1,3,3);
-insert into t values (203,2,1,3);
-insert into t values (303,3,3,1);
-insert into t values (403,4,3,13);
-insert into t values (503,5,3,null);
-insert into t values (603,6,null,3);
-insert into t values (703,6,null,null);
-insert into t values (104,1,4,4);
-insert into t values (204,2,1,4);
-insert into t values (304,3,4,1);
-insert into t values (404,4,4,14);
-insert into t values (504,5,4,null);
-insert into t values (604,6,null,4);
-insert into t values (704,6,null,null);
-insert into t values (800,7,1,1);
+insert into t_n6 values (101,1,1,1);
+insert into t_n6 values (201,2,1,1);
+insert into t_n6 values (301,3,1,1);
+insert into t_n6 values (401,4,1,11);
+insert into t_n6 values (501,5,1,null);
+insert into t_n6 values (601,6,null,1);
+insert into t_n6 values (701,6,null,null);
+insert into t_n6 values (102,1,2,2);
+insert into t_n6 values (202,2,1,2);
+insert into t_n6 values (302,3,2,1);
+insert into t_n6 values (402,4,2,12);
+insert into t_n6 values (502,5,2,null);
+insert into t_n6 values (602,6,null,2);
+insert into t_n6 values (702,6,null,null);
+insert into t_n6 values (103,1,3,3);
+insert into t_n6 values (203,2,1,3);
+insert into t_n6 values (303,3,3,1);
+insert into t_n6 values (403,4,3,13);
+insert into t_n6 values (503,5,3,null);
+insert into t_n6 values (603,6,null,3);
+insert into t_n6 values (703,6,null,null);
+insert into t_n6 values (104,1,4,4);
+insert into t_n6 values (204,2,1,4);
+insert into t_n6 values (304,3,4,1);
+insert into t_n6 values (404,4,4,14);
+insert into t_n6 values (504,5,4,null);
+insert into t_n6 values (604,6,null,4);
+insert into t_n6 values (704,6,null,null);
+insert into t_n6 values (800,7,1,1);

 explain select px,var_pop(x),var_pop(y),corr(y,x),covar_samp(y,x),covar_pop(y,x),regr_count(y,x),regr_slope(y,x),
 regr_intercept(y,x), regr_r2(y,x), regr_sxx(y,x), regr_syy(y,x), regr_sxy(y,x), regr_avgx(y,x), regr_avgy(y,x), regr_count(y,x)
- from t group by px order by px;
+ from t_n6 group by px order by px;

 select px,
 round( var_pop(x),5),
@@ -54,7 +54,7 @@ select px,
 round( regr_avgx(y,x),5),
 round( regr_avgy(y,x),5),
 round( regr_count(y,x),5)
- from t group by px order by px;
+ from t_n6 group by px order by px;

-select id,regr_count(y,x) over (partition by px) from t order by id;
+select id,regr_count(y,x) over (partition by px) from t_n6 order by id;
diff --git a/ql/src/test/queries/clientpositive/udaf_context_ngrams.q b/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
index f065385688..45105bbeb2 100644
--- a/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
+++ b/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
@@ -1,12 +1,12 @@
-CREATE TABLE kafka (contents STRING);
-LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka;
+CREATE TABLE kafka_n0 (contents STRING);
+LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka_n0;

 set mapred.reduce.tasks=1;
 set hive.exec.reducers.max=1;

-SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 1000).estfrequency FROM kafka;
-SELECT context_ngrams(sentences(lower(contents)), array("he",null), 100, 1000) FROM kafka;
-SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka;
-SELECT context_ngrams(sentences(lower(contents)), array("what","i",null), 100, 1000) FROM kafka;
-SELECT context_ngrams(sentences(lower(contents)), array(null,null), 100, 1000).estfrequency FROM kafka;
+SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 1000).estfrequency FROM kafka_n0;
+SELECT context_ngrams(sentences(lower(contents)), array("he",null), 100, 1000) FROM kafka_n0;
+SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka_n0;
+SELECT context_ngrams(sentences(lower(contents)), array("what","i",null), 100, 1000) FROM kafka_n0;
+SELECT context_ngrams(sentences(lower(contents)), array(null,null), 100, 1000).estfrequency FROM kafka_n0;

-DROP TABLE kafka;
+DROP TABLE kafka_n0;
diff --git a/ql/src/test/queries/clientpositive/udaf_corr.q b/ql/src/test/queries/clientpositive/udaf_corr.q
index 5e9840ec0f..3578ac22be 100644
--- a/ql/src/test/queries/clientpositive/udaf_corr.q
+++ b/ql/src/test/queries/clientpositive/udaf_corr.q
@@ -1,17 +1,17 @@
 set hive.mapred.mode=nonstrict;
-DROP TABLE covar_tab;
-CREATE TABLE covar_tab (a INT, b INT, c INT)
+DROP TABLE covar_tab_n0;
+CREATE TABLE covar_tab_n0 (a INT, b INT, c INT)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
 LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE
-INTO TABLE covar_tab;
+INTO TABLE covar_tab_n0;

 DESCRIBE FUNCTION corr;
 DESCRIBE FUNCTION EXTENDED corr;

-SELECT corr(b, c) FROM covar_tab WHERE a < 1;
-SELECT corr(b, c) FROM covar_tab WHERE a < 3;
-SELECT corr(b, c) FROM covar_tab WHERE a = 3;
-SELECT a, corr(b, c) FROM covar_tab GROUP BY a ORDER BY a;
-SELECT corr(b, c) FROM covar_tab;
+SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 1;
+SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 3;
+SELECT corr(b, c) FROM covar_tab_n0 WHERE a = 3;
+SELECT a, corr(b, c) FROM covar_tab_n0 GROUP BY a ORDER BY a;
+SELECT corr(b, c) FROM covar_tab_n0;

-DROP TABLE covar_tab;
+DROP TABLE covar_tab_n0;
diff --git a/ql/src/test/queries/clientpositive/udaf_covar_samp.q b/ql/src/test/queries/clientpositive/udaf_covar_samp.q
index 9caf421acc..19933e9964 100644
--- a/ql/src/test/queries/clientpositive/udaf_covar_samp.q
+++ b/ql/src/test/queries/clientpositive/udaf_covar_samp.q
@@ -1,17 +1,17 @@
 set hive.mapred.mode=nonstrict;
-DROP TABLE covar_tab;
-CREATE TABLE covar_tab (a INT, b INT, c INT)
+DROP TABLE covar_tab_n1;
+CREATE TABLE covar_tab_n1 (a INT, b INT, c INT)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
 LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE
-INTO TABLE covar_tab;
+INTO TABLE covar_tab_n1;

 DESCRIBE FUNCTION covar_samp;
 DESCRIBE FUNCTION EXTENDED covar_samp;

-SELECT covar_samp(b, c) FROM covar_tab WHERE a < 1;
-SELECT covar_samp(b, c) FROM covar_tab WHERE a < 3;
-SELECT covar_samp(b, c) FROM covar_tab WHERE a = 3;
-SELECT a, covar_samp(b, c) FROM covar_tab GROUP BY a ORDER BY a;
-SELECT ROUND(covar_samp(b, c), 5) FROM covar_tab;
+SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a < 1;
+SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a < 3;
+SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a = 3;
+SELECT a, covar_samp(b, c) FROM covar_tab_n1 GROUP BY a ORDER BY a;
+SELECT ROUND(covar_samp(b, c), 5) FROM covar_tab_n1;

-DROP TABLE covar_tab;
+DROP TABLE covar_tab_n1;
diff --git a/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q b/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
index ea4dd5c39f..8fca1c92f5 100644
--- a/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
+++ b/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
@@ -8,13 +8,13 @@ load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO
 load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket;
 load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket;

-create table t1 (result double);
-create table t2 (result double);
-create table t3 (result double);
-create table t4 (result double);
+create table t1_n10 (result double);
+create table t2_n6 (result double);
+create table t3_n3 (result double);
+create table t4_n0 (result double);
 create table t5 (result double);
 create table t6 (result double);
-create table t7 (result array<double>);
+create table t7_n0 (result array<double>);
 create table t8 (result array<double>);
 create table t9 (result array<double>);
 create table t10 (result array<double>);
@@ -25,15 +25,15 @@ set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.map.aggr=false;
 -- disable map-side aggregation
 FROM bucket
-insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5)
-insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100)
-insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
+insert overwrite table t1_n10 SELECT percentile_approx(cast(key AS double), 0.5)
+insert overwrite table t2_n6 SELECT percentile_approx(cast(key AS double), 0.5, 100)
+insert overwrite table t3_n3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)

-insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5)
+insert overwrite table t4_n0 SELECT percentile_approx(cast(key AS int), 0.5)
 insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
 insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000)

-insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
+insert overwrite table t7_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
 insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
 insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
@@ -41,13 +41,13 @@ insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05
 insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
 insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);

-select * from t1;
-select * from t2;
-select * from t3;
-select * from t4;
+select * from t1_n10;
+select * from t2_n6;
+select * from t3_n3;
+select * from t4_n0;
 select * from t5;
 select * from t6;
-select * from t7;
+select * from t7_n0;
 select * from t8;
 select * from t9;
 select * from t10;
@@ -57,15 +57,15 @@ select * from t12;
 set hive.map.aggr=true;
 -- enable map-side aggregation
 FROM bucket
-insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5)
-insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100)
-insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
+insert overwrite table t1_n10 SELECT percentile_approx(cast(key AS double), 0.5)
+insert overwrite table t2_n6 SELECT percentile_approx(cast(key AS double), 0.5, 100)
+insert overwrite table t3_n3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)

-insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5)
+insert overwrite table t4_n0 SELECT percentile_approx(cast(key AS int), 0.5)
 insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
 insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000)

-insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
+insert overwrite table t7_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
 insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
 insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
@@ -73,13 +73,13 @@ insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05
 insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
 insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);

-select * from t1;
-select * from t2;
-select * from t3;
-select * from t4;
+select * from t1_n10;
+select * from t2_n6;
+select * from t3_n3;
+select * from t4_n0;
 select * from t5;
 select * from t6;
-select * from t7;
+select * from t7_n0;
 select * from t8;
 select * from t9;
 select * from t10;
diff --git a/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q b/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
index 70974bae0f..db1fc88604 100644
--- a/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
+++ b/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
@@ -4,99 +4,99 @@ set hive.mapred.mode=nonstrict;
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- 0.23 changed input order of data in reducer task, which affects result of percentile_approx

-CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket;
-
-create table t1 (result double);
-create table t2 (result double);
-create table t3 (result double);
-create table t4 (result double);
-create table t5 (result double);
-create table t6 (result double);
-create table t7 (result array<double>);
-create table t8 (result array<double>);
-create table t9 (result array<double>);
-create table t10 (result array<double>);
-create table t11 (result array<double>);
-create table t12 (result array<double>);
+CREATE TABLE bucket_n0 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_n0;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_n0;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_n0;
+load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_n0;
+
+create table t1_n58 (result double);
+create table t2_n36 (result double);
+create table t3_n13 (result double);
+create table t4_n6 (result double);
+create table t5_n2 (result double);
+create table t6_n1 (result double);
+create table t7_n3 (result array<double>);
+create table t8_n1 (result array<double>);
+create table t9_n0 (result array<double>);
+create table t10_n0 (result array<double>);
+create table t11_n0 (result array<double>);
+create table t12_n0 (result array<double>);

 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.map.aggr=false;
 -- disable map-side aggregation
-FROM bucket
-insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5)
-insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100)
-insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
-
-insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5)
-insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
-insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
-
-insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
-insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
-
-insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
-insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);
-
-select * from t1;
-select * from t2;
-select * from t3;
-select * from t4;
-select * from t5;
-select * from t6;
-select * from t7;
-select * from t8;
-select * from t9;
-select * from t10;
-select * from t11;
-select * from t12;
+FROM bucket_n0
+insert overwrite table t1_n58 SELECT percentile_approx(cast(key AS double), 0.5)
+insert overwrite table t2_n36 SELECT percentile_approx(cast(key AS double), 0.5, 100)
+insert overwrite table t3_n13 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
+
+insert overwrite table t4_n6 SELECT percentile_approx(cast(key AS int), 0.5)
+insert overwrite table t5_n2 SELECT percentile_approx(cast(key AS int), 0.5, 100)
+insert overwrite table t6_n1 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
+
+insert overwrite table t7_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
+insert overwrite table t8_n1 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
+insert overwrite table t9_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
+ +insert overwrite table t10_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000); + +select * from t1_n58; +select * from t2_n36; +select * from t3_n13; +select * from t4_n6; +select * from t5_n2; +select * from t6_n1; +select * from t7_n3; +select * from t8_n1; +select * from t9_n0; +select * from t10_n0; +select * from t11_n0; +select * from t12_n0; set hive.map.aggr=true; -- enable map-side aggregation -FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) - -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) - -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) - -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000); - -select * from t1; -select * from t2; -select * from t3; -select * from t4; -select * from t5; -select * from t6; -select * from t7; -select * from t8; -select * from t9; -select * from t10; -select * from t11; -select * from t12; +FROM bucket_n0 +insert overwrite table t1_n58 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n36 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n13 SELECT percentile_approx(cast(key AS double), 0.5, 1000) + +insert overwrite table t4_n6 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n2 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n1 SELECT percentile_approx(cast(key AS int), 0.5, 1000) + +insert overwrite table t7_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n1 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) + +insert overwrite table t10_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000); + +select * from t1_n58; +select * from t2_n36; +select * from t3_n13; +select * from t4_n6; +select * from t5_n2; +select * from t6_n1; +select * from t7_n3; +select * from t8_n1; +select * from t9_n0; +select * from t10_n0; +select * from t11_n0; +select * from t12_n0; -- NaN explain -select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket; -select 
percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket; +select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket_n0; +select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket_n0; -- with CBO explain -select percentile_approx(key, 0.5) from bucket; -select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket; +select percentile_approx(key, 0.5) from bucket_n0; +select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket_n0; -- test where number of elements is zero -select percentile_approx(key, array(0.50, 0.70, 0.90, 0.95, 0.99)) from bucket where key > 10000; +select percentile_approx(key, array(0.50, 0.70, 0.90, 0.95, 0.99)) from bucket_n0 where key > 10000; diff --git a/ql/src/test/queries/clientpositive/udf1.q b/ql/src/test/queries/clientpositive/udf1.q index b054eb3147..dedba45a6c 100644 --- a/ql/src/test/queries/clientpositive/udf1.q +++ b/ql/src/test/queries/clientpositive/udf1.q @@ -1,13 +1,13 @@ --! qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, +CREATE TABLE dest1_n1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING, c14 STRING, c15 STRING, c16 STRING, c17 STRING, c18 STRING, c19 STRING, c20 STRING) STORED AS TEXTFILE; EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', +FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a', '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*', REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'), @@ -15,7 +15,7 @@ FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab REGEXP_REPLACE('abc', '', 'A'), 'abc' RLIKE '' WHERE src.key = 86; -FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', +FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a', '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*', REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'), @@ -23,4 +23,4 @@ FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab REGEXP_REPLACE('abc', '', 'A'), 'abc' RLIKE '' WHERE src.key = 86; -SELECT dest1.* FROM dest1; +SELECT dest1_n1.* FROM dest1_n1; diff --git a/ql/src/test/queries/clientpositive/udf2.q b/ql/src/test/queries/clientpositive/udf2.q index aef771745d..cde7829681 100644 --- a/ql/src/test/queries/clientpositive/udf2.q +++ b/ql/src/test/queries/clientpositive/udf2.q @@ -1,9 +1,9 @@ --! 
qt:dataset:src -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n55(c1 STRING) STORED AS TEXTFILE; -FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86; +FROM src INSERT OVERWRITE TABLE dest1_n55 SELECT ' abc ' WHERE src.key = 86; EXPLAIN -SELECT '|', trim(dest1.c1), '|', rtrim(dest1.c1), '|', ltrim(dest1.c1), '|' FROM dest1; +SELECT '|', trim(dest1_n55.c1), '|', rtrim(dest1_n55.c1), '|', ltrim(dest1_n55.c1), '|' FROM dest1_n55; -SELECT '|', trim(dest1.c1), '|', rtrim(dest1.c1), '|', ltrim(dest1.c1), '|' FROM dest1; +SELECT '|', trim(dest1_n55.c1), '|', rtrim(dest1_n55.c1), '|', ltrim(dest1_n55.c1), '|' FROM dest1_n55; diff --git a/ql/src/test/queries/clientpositive/udf3.q b/ql/src/test/queries/clientpositive/udf3.q index 730aca199b..2149f036f9 100644 --- a/ql/src/test/queries/clientpositive/udf3.q +++ b/ql/src/test/queries/clientpositive/udf3.q @@ -1,12 +1,12 @@ --! qt:dataset:src set hive.mapred.mode=nonstrict; -CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n104(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING) STORED AS TEXTFILE; EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), +FROM src INSERT OVERWRITE TABLE dest1_n104 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), min(CAST('' AS INT)), max(CAST('' AS INT)); -FROM src INSERT OVERWRITE TABLE dest1 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), +FROM src INSERT OVERWRITE TABLE dest1_n104 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), min(CAST('' AS INT)), max(CAST('' AS INT)); -SELECT dest1.* FROM dest1; +SELECT dest1_n104.* FROM dest1_n104; diff --git a/ql/src/test/queries/clientpositive/udf4.q b/ql/src/test/queries/clientpositive/udf4.q index 164f858cdc..94b3045ee6 100644 --- a/ql/src/test/queries/clientpositive/udf4.q +++ b/ql/src/test/queries/clientpositive/udf4.q @@ -1,7 +1,7 @@ --! qt:dataset:src -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n149(c1 STRING) STORED AS TEXTFILE; -FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86; +FROM src INSERT OVERWRITE TABLE dest1_n149 SELECT ' abc ' WHERE src.key = 86; EXPLAIN SELECT round(1.0), round(1.5), round(-1.5), floor(1.0), floor(1.5), floor(-1.5), sqrt(1.0), sqrt(-1.0), sqrt(0.0), ceil(1.0), ceil(1.5), ceil(-1.5), ceiling(1.0), rand(3), +3, -3, 1++2, 1+-2, @@ -26,7 +26,7 @@ CAST(1 AS SMALLINT) ^ CAST(3 AS SMALLINT), 1 ^ 3, CAST(1 AS BIGINT) ^ CAST(3 AS BIGINT) -FROM dest1; +FROM dest1_n149; SELECT round(1.0), round(1.5), round(-1.5), floor(1.0), floor(1.5), floor(-1.5), sqrt(1.0), sqrt(-1.0), sqrt(0.0), ceil(1.0), ceil(1.5), ceil(-1.5), ceiling(1.0), rand(3), +3, -3, 1++2, 1+-2, ~1, @@ -49,4 +49,4 @@ CAST(1 AS SMALLINT) ^ CAST(3 AS SMALLINT), 1 ^ 3, CAST(1 AS BIGINT) ^ CAST(3 AS BIGINT) -FROM dest1; +FROM dest1_n149; diff --git a/ql/src/test/queries/clientpositive/udf5.q b/ql/src/test/queries/clientpositive/udf5.q index 1a46ca7062..a967d1f174 100644 --- a/ql/src/test/queries/clientpositive/udf5.q +++ b/ql/src/test/queries/clientpositive/udf5.q @@ -1,14 +1,14 @@ --! 
qt:dataset:src -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n14(c1 STRING) STORED AS TEXTFILE; -FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86; +FROM src INSERT OVERWRITE TABLE dest1_n14 SELECT ' abc ' WHERE src.key = 86; EXPLAIN -SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1; +SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1_n14; -SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1; +SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1_n14; EXPLAIN -SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1; +SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1_n14; -SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1; +SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1_n14; diff --git a/ql/src/test/queries/clientpositive/udf6.q b/ql/src/test/queries/clientpositive/udf6.q index 9f7afec1db..065a7b2087 100644 --- a/ql/src/test/queries/clientpositive/udf6.q +++ b/ql/src/test/queries/clientpositive/udf6.q @@ -1,12 +1,12 @@ --! 
qt:dataset:src -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n60(c1 STRING) STORED AS TEXTFILE; -FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86; +FROM src INSERT OVERWRITE TABLE dest1_n60 SELECT ' abc ' WHERE src.key = 86; EXPLAIN -SELECT IF(TRUE, 1, 2) FROM dest1; +SELECT IF(TRUE, 1, 2) FROM dest1_n60; -SELECT IF(TRUE, 1, 2) FROM dest1; +SELECT IF(TRUE, 1, 2) FROM dest1_n60; EXPLAIN SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE, "a", "b"), @@ -14,11 +14,11 @@ SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE, "a", "b"), IF(FALSE, CAST(127 AS TINYINT), CAST(126 AS TINYINT)), IF(FALSE, CAST(127 AS SMALLINT), CAST(128 AS SMALLINT)), CAST(128 AS INT), CAST(1.0 AS DOUBLE), - CAST('128' AS STRING) FROM dest1; + CAST('128' AS STRING) FROM dest1_n60; SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE, "a", "b"), IF(TRUE, 0.1, 0.2), IF(FALSE, CAST(1 AS BIGINT), CAST(2 AS BIGINT)), IF(FALSE, CAST(127 AS TINYINT), CAST(126 AS TINYINT)), IF(FALSE, CAST(127 AS SMALLINT), CAST(128 AS SMALLINT)), CAST(128 AS INT), CAST(1.0 AS DOUBLE), - CAST('128' AS STRING) FROM dest1; + CAST('128' AS STRING) FROM dest1_n60; diff --git a/ql/src/test/queries/clientpositive/udf7.q b/ql/src/test/queries/clientpositive/udf7.q index c9724e581d..2ec122c7b7 100644 --- a/ql/src/test/queries/clientpositive/udf7.q +++ b/ql/src/test/queries/clientpositive/udf7.q @@ -1,7 +1,7 @@ --! qt:dataset:src -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE; +CREATE TABLE dest1_n111(c1 STRING) STORED AS TEXTFILE; -FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86; +FROM src INSERT OVERWRITE TABLE dest1_n111 SELECT ' abc ' WHERE src.key = 86; EXPLAIN SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), @@ -11,7 +11,7 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), - POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1; + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n111; SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1), @@ -20,4 +20,4 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)), - POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1; + POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n111; diff --git a/ql/src/test/queries/clientpositive/udf8.q b/ql/src/test/queries/clientpositive/udf8.q index dad92db955..958b303192 100644 --- a/ql/src/test/queries/clientpositive/udf8.q +++ b/ql/src/test/queries/clientpositive/udf8.q @@ -1,11 +1,11 @@ --! 
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n54(c1 STRING) STORED AS TEXTFILE;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT '' WHERE src.key = 86;
+FROM src INSERT OVERWRITE TABLE dest1_n54 SELECT '' WHERE src.key = 86;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT '1' WHERE src.key = 86;
+FROM src INSERT OVERWRITE TABLE dest1_n54 SELECT '1' WHERE src.key = 86;
EXPLAIN
-SELECT avg(c1), sum(c1), count(c1) FROM dest1;
+SELECT avg(c1), sum(c1), count(c1) FROM dest1_n54;
-SELECT avg(c1), sum(c1), count(c1) FROM dest1;
+SELECT avg(c1), sum(c1), count(c1) FROM dest1_n54;
diff --git a/ql/src/test/queries/clientpositive/udf_10_trims.q b/ql/src/test/queries/clientpositive/udf_10_trims.q
index bae9c06e5e..6fe6a1f425 100644
--- a/ql/src/test/queries/clientpositive/udf_10_trims.q
+++ b/ql/src/test/queries/clientpositive/udf_10_trims.q
@@ -1,14 +1,14 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n5(c1 STRING) STORED AS TEXTFILE;
EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n5
SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( ' abc '))))))))))
FROM src
WHERE src.key = 86;
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n5
SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( ' abc '))))))))))
FROM src
WHERE src.key = 86;
diff --git a/ql/src/test/queries/clientpositive/udf_character_length.q b/ql/src/test/queries/clientpositive/udf_character_length.q
index fe0c1ba9f2..99ff77bb50 100644
--- a/ql/src/test/queries/clientpositive/udf_character_length.q
+++ b/ql/src/test/queries/clientpositive/udf_character_length.q
@@ -8,23 +8,23 @@ DESCRIBE FUNCTION EXTENDED character_length;
DESCRIBE FUNCTION char_length;
DESCRIBE FUNCTION EXTENDED char_length;
-CREATE TABLE dest1(len INT);
-EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value);
-FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value);
+CREATE TABLE dest1_n97(len INT);
+EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n97 SELECT character_length(src1.value);
+FROM src1 INSERT OVERWRITE TABLE dest1_n97 SELECT character_length(src1.value);
-- SORT_BEFORE_DIFF
-SELECT dest1.* FROM dest1;
-DROP TABLE dest1;
+SELECT dest1_n97.* FROM dest1_n97;
+DROP TABLE dest1_n97;
-- Test with non-ascii characters.
-CREATE TABLE dest1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1;
-INSERT INTO dest1 VALUES(NULL);
+CREATE TABLE dest1_n97(name STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n97;
+INSERT INTO dest1_n97 VALUES(NULL);
-EXPLAIN SELECT character_length(dest1.name) FROM dest1;
+EXPLAIN SELECT character_length(dest1_n97.name) FROM dest1_n97;
-- SORT_BEFORE_DIFF
-SELECT character_length(dest1.name) FROM dest1;
+SELECT character_length(dest1_n97.name) FROM dest1_n97;
-EXPLAIN SELECT char_length(dest1.name) FROM dest1;
+EXPLAIN SELECT char_length(dest1_n97.name) FROM dest1_n97;
-- SORT_BEFORE_DIFF
-SELECT char_length(dest1.name) FROM dest1;
-DROP TABLE dest1;
+SELECT char_length(dest1_n97.name) FROM dest1_n97;
+DROP TABLE dest1_n97;
diff --git a/ql/src/test/queries/clientpositive/udf_concat_insert1.q b/ql/src/test/queries/clientpositive/udf_concat_insert1.q
index dd639cb9ac..f735da6a66 100644
--- a/ql/src/test/queries/clientpositive/udf_concat_insert1.q
+++ b/ql/src/test/queries/clientpositive/udf_concat_insert1.q
@@ -1,8 +1,8 @@
--! qt:dataset:src
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n139(key INT, value STRING) STORED AS TEXTFILE;
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', concat(src.key) WHERE src.key < 100 group by src.key;
+INSERT OVERWRITE TABLE dest1_n139 SELECT '1234', concat(src.key) WHERE src.key < 100 group by src.key;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n139.* FROM dest1_n139;
diff --git a/ql/src/test/queries/clientpositive/udf_concat_insert2.q b/ql/src/test/queries/clientpositive/udf_concat_insert2.q
index c85bd44c82..98e3b7b319 100644
--- a/ql/src/test/queries/clientpositive/udf_concat_insert2.q
+++ b/ql/src/test/queries/clientpositive/udf_concat_insert2.q
@@ -1,9 +1,9 @@
--! qt:dataset:src
-CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n108(key STRING, value STRING) STORED AS TEXTFILE;
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT concat('1234', 'abc', 'extra argument'), src.value WHERE src.key < 100;
+INSERT OVERWRITE TABLE dest1_n108 SELECT concat('1234', 'abc', 'extra argument'), src.value WHERE src.key < 100;
-SELECT dest1.* FROM dest1;
+SELECT dest1_n108.* FROM dest1_n108;
diff --git a/ql/src/test/queries/clientpositive/udf_concat_ws.q b/ql/src/test/queries/clientpositive/udf_concat_ws.q
index 136ad58443..98af447361 100644
--- a/ql/src/test/queries/clientpositive/udf_concat_ws.q
+++ b/ql/src/test/queries/clientpositive/udf_concat_ws.q
@@ -4,20 +4,20 @@ set hive.fetch.task.conversion=more;
DESCRIBE FUNCTION concat_ws;
DESCRIBE FUNCTION EXTENDED concat_ws;
-CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING);
+CREATE TABLE dest1_n8(c1 STRING, c2 STRING, c3 STRING);
-FROM src INSERT OVERWRITE TABLE dest1 SELECT 'abc', 'xyz', '8675309' WHERE src.key = 86;
+FROM src INSERT OVERWRITE TABLE dest1_n8 SELECT 'abc', 'xyz', '8675309' WHERE src.key = 86;
EXPLAIN
-SELECT concat_ws(dest1.c1, dest1.c2, dest1.c3),
- concat_ws(',', dest1.c1, dest1.c2, dest1.c3),
- concat_ws(NULL, dest1.c1, dest1.c2, dest1.c3),
- concat_ws('**', dest1.c1, NULL, dest1.c3) FROM dest1;
+SELECT concat_ws(dest1_n8.c1, dest1_n8.c2, dest1_n8.c3),
+ concat_ws(',', dest1_n8.c1, dest1_n8.c2, dest1_n8.c3),
+ concat_ws(NULL, dest1_n8.c1, dest1_n8.c2, dest1_n8.c3),
+ concat_ws('**', dest1_n8.c1, NULL, dest1_n8.c3) FROM dest1_n8;
-SELECT concat_ws(dest1.c1, dest1.c2, dest1.c3),
- concat_ws(',', dest1.c1, dest1.c2, dest1.c3),
- concat_ws(NULL, dest1.c1, dest1.c2, dest1.c3),
- concat_ws('**', dest1.c1, NULL, dest1.c3) FROM dest1;
+SELECT concat_ws(dest1_n8.c1, dest1_n8.c2, dest1_n8.c3),
+ concat_ws(',', dest1_n8.c1, dest1_n8.c2, dest1_n8.c3),
+ concat_ws(NULL, dest1_n8.c1, dest1_n8.c2, dest1_n8.c3),
+ concat_ws('**', dest1_n8.c1, NULL, dest1_n8.c3) FROM dest1_n8;
-- evaluates function for array of strings
EXPLAIN
@@ -27,7 +27,7 @@ SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
concat_ws('-', 'www', array('face', 'book', 'com'), '1234'),
concat_ws('_', array('www', 'face'), array('book', 'com', '1234')),
concat_ws('**', 'www', array('face'), array('book', 'com', '1234')),
concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')),
- concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows);
+ concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n8 tablesample (1 rows);
SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
concat_ws('-', 'www', array('face', 'book', 'com'), '1234'),
@@ -35,7 +35,7 @@ SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
concat_ws('_', array('www', 'face'), array('book', 'com', '1234')),
concat_ws('**', 'www', array('face'), array('book', 'com', '1234')),
concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')),
- concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows);
+ concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n8 tablesample (1 rows);
SELECT concat_ws(NULL, array('www', 'face', 'book', 'com'), '1234'),
concat_ws(NULL, 'www', array('face', 'book', 'com'), '1234'),
@@ -43,4 +43,4 @@ SELECT concat_ws(NULL, array('www', 'face', 'book', 'com'), '1234'),
concat_ws(NULL, array('www', 'face'), array('book', 'com', '1234')),
concat_ws(NULL, 'www', array('face'), array('book', 'com', '1234')),
concat_ws(NULL, array('www'), 'face', array('book', 'com', '1234')),
- concat_ws(NULL, array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows);
+ concat_ws(NULL, array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n8 tablesample (1 rows);
diff --git a/ql/src/test/queries/clientpositive/udf_field.q b/ql/src/test/queries/clientpositive/udf_field.q
index 82b335e1b8..daf8ea9cc7 100644
--- a/ql/src/test/queries/clientpositive/udf_field.q
+++ b/ql/src/test/queries/clientpositive/udf_field.q
@@ -27,8 +27,8 @@ SELECT
FROM src tablesample (1 rows);
-CREATE TABLE test_table(col1 STRING, col2 STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table;
+CREATE TABLE test_table_n10(col1 STRING, col2 STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table_n10;
select col1,col2,
field("66",col1),
@@ -41,11 +41,11 @@ select col1,col2,
field(col2, "66", "88"),
field(col1, col2, col1),
field(col1, col2, "66")
-from test_table where col1="86" or col1="66";
+from test_table_n10 where col1="86" or col1="66";
-CREATE TABLE test_table1(col1 int, col2 string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table1;
+CREATE TABLE test_table1_n13(col1 int, col2 string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table1_n13;
select col1,col2,
field(66,col1),
@@ -55,4 +55,4 @@ select col1,col2,
field(86,col1,n,col2),
field(NULL,col1,n,col2),
field(col1, col2)
-from (select col1, col2, NULL as n from test_table1 where col1=86 or col1=66) t;
+from (select col1, col2, NULL as n from test_table1_n13 where col1=86 or col1=66) t;
diff --git a/ql/src/test/queries/clientpositive/udf_get_json_object.q b/ql/src/test/queries/clientpositive/udf_get_json_object.q
index 407ce50975..e5762c2e55 100644
--- a/ql/src/test/queries/clientpositive/udf_get_json_object.q
+++ b/ql/src/test/queries/clientpositive/udf_get_json_object.q
@@ -5,9 +5,9 @@ set hive.fetch.task.conversion=more;
DESCRIBE FUNCTION get_json_object;
DESCRIBE FUNCTION EXTENDED get_json_object;
-CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n66(c1 STRING) STORED AS TEXTFILE;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86;
+FROM src INSERT OVERWRITE TABLE dest1_n66 SELECT ' abc ' WHERE src.key = 86;
set hive.fetch.task.conversion=more;
@@ -37,13 +37,13 @@ SELECT get_json_object(src_json.json, '$.fb:testid') FROM src_json;
-- Verify that get_json_object can handle new lines in JSON values
-CREATE TABLE dest2(c1 STRING) STORED AS RCFILE;
+CREATE TABLE dest2_n14(c1 STRING) STORED AS RCFILE;
-INSERT OVERWRITE TABLE dest2 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows);
+INSERT OVERWRITE TABLE dest2_n14 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows);
-SELECT * FROM dest2;
+SELECT * FROM dest2_n14;
-SELECT get_json_object(c1, '$.a') FROM dest2;
+SELECT get_json_object(c1, '$.a') FROM dest2_n14;
--root is array
SELECT
diff --git a/ql/src/test/queries/clientpositive/udf_isops.q b/ql/src/test/queries/clientpositive/udf_isops.q
index 414209e840..0b6b535699 100644
--- a/ql/src/test/queries/clientpositive/udf_isops.q
+++ b/ql/src/test/queries/clientpositive/udf_isops.q
@@ -1,34 +1,34 @@
-drop table if exists t;
-create table t (a int,v int, b boolean);
-insert into t values (1,null, true);
-insert into t values (2,1, false);
-insert into t values (3,2, null);
+drop table if exists t_n29;
+create table t_n29 (a int,v int, b boolean);
+insert into t_n29 values (1,null, true);
+insert into t_n29 values (2,1, false);
+insert into t_n29 values (3,2, null);
-select assert_true(sum(a*a) = 1) from t
+select assert_true(sum(a*a) = 1) from t_n29
where v is null;
-select assert_true(sum(a*a) = 2*2+3*3) from t
+select assert_true(sum(a*a) = 2*2+3*3) from t_n29
where v is not null;
-select assert_true(sum(a*a) = 1) from t
+select assert_true(sum(a*a) = 1) from t_n29
where b is true;
-select assert_true(sum(a*a) = 2*2 + 3*3) from t
+select assert_true(sum(a*a) = 2*2 + 3*3) from t_n29
where b is not true;
-select assert_true(sum(a*a) = 4) from t
+select assert_true(sum(a*a) = 4) from t_n29
where b is false;
-select assert_true(sum(a*a) = 1*1 + 3*3) from t
+select assert_true(sum(a*a) = 1*1 + 3*3) from t_n29
where b is not false;
-select assert_true(sum(a*a) = 2*2) from t
+select assert_true(sum(a*a) = 2*2) from t_n29
where (v>0 and v<2) is true;
-select assert_true(sum(a*a) = 2*2) from t
+select assert_true(sum(a*a) = 2*2) from t_n29
where (v<2) is true;
select NULL is true, NULL is not true, NULL is false, NULL is not false
-from t;
+from t_n29;
diff --git a/ql/src/test/queries/clientpositive/udf_length.q b/ql/src/test/queries/clientpositive/udf_length.q
index 4d9dc020e8..6f0f57068b 100644
--- a/ql/src/test/queries/clientpositive/udf_length.q
+++ b/ql/src/test/queries/clientpositive/udf_length.q
@@ -4,14 +4,14 @@ set hive.fetch.task.conversion=more;
DESCRIBE FUNCTION length;
DESCRIBE FUNCTION EXTENDED length;
-CREATE TABLE dest1(len INT);
-EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT length(src1.value);
-FROM src1 INSERT OVERWRITE TABLE dest1 SELECT length(src1.value);
-SELECT dest1.* FROM dest1;
-DROP TABLE dest1;
+CREATE TABLE dest1_n134(len INT);
+EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n134 SELECT length(src1.value);
+FROM src1 INSERT OVERWRITE TABLE dest1_n134 SELECT length(src1.value);
+SELECT dest1_n134.* FROM dest1_n134;
+DROP TABLE dest1_n134;
-- Test with non-ascii characters.
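
An illustrative aside, not part of the patch (the literal below is hypothetical, but the behavior is standard Hive UDF semantics): length() and character_length() count characters, while octet_length() counts UTF-8 bytes, which is exactly the distinction the kv4.txt-based tests in these hunks exercise.

SELECT length('héllo'), character_length('héllo'), octet_length('héllo');
-- 5, 5, 6: 'é' is one character but two UTF-8 bytes
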
-CREATE TABLE dest1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1;
-EXPLAIN SELECT length(dest1.name) FROM dest1;
-SELECT length(dest1.name) FROM dest1;
+CREATE TABLE dest1_n134(name STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n134;
+EXPLAIN SELECT length(dest1_n134.name) FROM dest1_n134;
+SELECT length(dest1_n134.name) FROM dest1_n134;
diff --git a/ql/src/test/queries/clientpositive/udf_octet_length.q b/ql/src/test/queries/clientpositive/udf_octet_length.q
index 62173b65c1..ba7d861f86 100644
--- a/ql/src/test/queries/clientpositive/udf_octet_length.q
+++ b/ql/src/test/queries/clientpositive/udf_octet_length.q
@@ -5,18 +5,18 @@ set hive.fetch.task.conversion=more;
DESCRIBE FUNCTION octet_length;
DESCRIBE FUNCTION EXTENDED octet_length;
-CREATE TABLE dest1(len INT);
-EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value);
-FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value);
+CREATE TABLE dest1_n165(len INT);
+EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n165 SELECT octet_length(src1.value);
+FROM src1 INSERT OVERWRITE TABLE dest1_n165 SELECT octet_length(src1.value);
-- SORT_BEFORE_DIFF
-SELECT dest1.* FROM dest1;
-DROP TABLE dest1;
+SELECT dest1_n165.* FROM dest1_n165;
+DROP TABLE dest1_n165;
-- Test with non-ascii characters.
-CREATE TABLE dest1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1;
-INSERT INTO dest1 VALUES(NULL);
-EXPLAIN SELECT octet_length(dest1.name) FROM dest1;
+CREATE TABLE dest1_n165(name STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n165;
+INSERT INTO dest1_n165 VALUES(NULL);
+EXPLAIN SELECT octet_length(dest1_n165.name) FROM dest1_n165;
-- SORT_BEFORE_DIFF
-SELECT octet_length(dest1.name) FROM dest1;
-DROP TABLE dest1;
+SELECT octet_length(dest1_n165.name) FROM dest1_n165;
+DROP TABLE dest1_n165;
diff --git a/ql/src/test/queries/clientpositive/udf_printf.q b/ql/src/test/queries/clientpositive/udf_printf.q
index eeaa3c6dcd..6cb34834fe 100644
--- a/ql/src/test/queries/clientpositive/udf_printf.q
+++ b/ql/src/test/queries/clientpositive/udf_printf.q
@@ -31,8 +31,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '9' STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE binay_udf;
-create table dest1 (key binary, value int);
-insert overwrite table dest1 select transform(*) using 'cat' as key binary, value int from binay_udf;
-select value, printf("format key: %s", key) from dest1;
-drop table dest1;
+create table dest1_n110 (key binary, value int);
+insert overwrite table dest1_n110 select transform(*) using 'cat' as key binary, value int from binay_udf;
+select value, printf("format key: %s", key) from dest1_n110;
+drop table dest1_n110;
drop table binary_udf;
diff --git a/ql/src/test/queries/clientpositive/udf_reverse.q b/ql/src/test/queries/clientpositive/udf_reverse.q
index 43ee1ed080..f95e94ee35 100644
--- a/ql/src/test/queries/clientpositive/udf_reverse.q
+++ b/ql/src/test/queries/clientpositive/udf_reverse.q
@@ -2,15 +2,15 @@
DESCRIBE FUNCTION reverse;
DESCRIBE FUNCTION EXTENDED reverse;
-CREATE TABLE dest1(len STRING);
-EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT reverse(src1.value);
-FROM src1 INSERT OVERWRITE TABLE dest1 SELECT reverse(src1.value);
-SELECT dest1.* FROM dest1;
-DROP TABLE dest1;
+CREATE TABLE dest1_n44(len STRING);
+EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n44 SELECT reverse(src1.value);
+FROM src1 INSERT OVERWRITE TABLE dest1_n44 SELECT reverse(src1.value);
+SELECT dest1_n44.* FROM dest1_n44;
+DROP TABLE dest1_n44;
-- Test with non-ascii characters
-- kv4.txt contains the text 0xE982B5E993AE, which should be reversed to
-- 0xE993AEE982B5
-CREATE TABLE dest1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1;
-SELECT count(1) FROM dest1 WHERE reverse(dest1.name) = _UTF-8 0xE993AEE982B5;
+CREATE TABLE dest1_n44(name STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n44;
+SELECT count(1) FROM dest1_n44 WHERE reverse(dest1_n44.name) = _UTF-8 0xE993AEE982B5;
diff --git a/ql/src/test/queries/clientpositive/udf_round_2.q b/ql/src/test/queries/clientpositive/udf_round_2.q
index 10f4561e03..a94b3d2cf7 100644
--- a/ql/src/test/queries/clientpositive/udf_round_2.q
+++ b/ql/src/test/queries/clientpositive/udf_round_2.q
@@ -3,15 +3,15 @@ set hive.fetch.task.conversion=more;
set hive.stats.column.autogather=false;
-- test for NaN (not-a-number)
-create table tstTbl1(n double);
+create table tstTbl1_n0(n double);
-insert overwrite table tstTbl1
+insert overwrite table tstTbl1_n0
select 'NaN' from src tablesample (1 rows);
-select * from tstTbl1;
+select * from tstTbl1_n0;
-select round(n, 1) from tstTbl1;
-select round(n) from tstTbl1;
+select round(n, 1) from tstTbl1_n0;
+select round(n) from tstTbl1_n0;
-- test for Infinity
select round(1/0), round(1/0, 2), round(1.0/0.0), round(1.0/0.0, 2) from src tablesample (1 rows);
diff --git a/ql/src/test/queries/clientpositive/udf_sort_array.q b/ql/src/test/queries/clientpositive/udf_sort_array.q
index 7568fe3308..3512b7152f 100644
--- a/ql/src/test/queries/clientpositive/udf_sort_array.q
+++ b/ql/src/test/queries/clientpositive/udf_sort_array.q
@@ -31,7 +31,7 @@ SELECT sort_array(array(map("b", 2, "a", 9, "c", 7), map("c", 3, "b", 5, "a", 1)
-- Test it against data in a table.
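
An illustrative aside, not part of the patch (literals hypothetical): sort_array() orders elements ascending in their natural order, which is the behavior the table-backed queries below verify for every primitive element type.

SELECT sort_array(array(3, 1, 2)), sort_array(array('b', 'c', 'a'));
-- [1,2,3] and ["a","b","c"]
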
-CREATE TABLE dest1 (
+CREATE TABLE dest1_n130 (
tinyints ARRAY<TINYINT>,
smallints ARRAY<SMALLINT>,
ints ARRAY<INT>,
@@ -43,9 +43,9 @@ CREATE TABLE dest1_n130 (
timestamps ARRAY<TIMESTAMP>
) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/primitive_type_arrays.txt' OVERWRITE INTO TABLE dest1;
+LOAD DATA LOCAL INPATH '../../data/files/primitive_type_arrays.txt' OVERWRITE INTO TABLE dest1_n130;
SELECT sort_array(tinyints), sort_array(smallints), sort_array(ints),
sort_array(bigints), sort_array(booleans), sort_array(floats),
sort_array(doubles), sort_array(strings), sort_array(timestamps)
- FROM dest1;
+ FROM dest1_n130;
diff --git a/ql/src/test/queries/clientpositive/udf_stddev_samp.q b/ql/src/test/queries/clientpositive/udf_stddev_samp.q
index 76e479d75a..2ed50a17a2 100644
--- a/ql/src/test/queries/clientpositive/udf_stddev_samp.q
+++ b/ql/src/test/queries/clientpositive/udf_stddev_samp.q
@@ -4,10 +4,10 @@ DESCRIBE FUNCTION stddev_samp;
DESCRIBE FUNCTION EXTENDED stddev_samp;
-drop table if exists t;
-create table t (a int);
-insert into t values (1),(-1),(0);
+drop table if exists t_n23;
+create table t_n23 (a int);
+insert into t_n23 values (1),(-1),(0);
-select stddev_samp(a) from t;
-select stddev_samp(a) from t where a=0;
-select round(stddev_samp(a),5) from t where a>=0;
+select stddev_samp(a) from t_n23;
+select stddev_samp(a) from t_n23 where a=0;
+select round(stddev_samp(a),5) from t_n23 where a>=0;
diff --git a/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q b/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q
index 78ecd5684d..d95dbcc2de 100644
--- a/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q
+++ b/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q
@@ -4,28 +4,28 @@ set hive.fetch.task.conversion=more;
DESCRIBE FUNCTION to_unix_timestamp;
DESCRIBE FUNCTION EXTENDED to_unix_timestamp;
-create table oneline(key int, value string);
-load data local inpath '../../data/files/things.txt' into table oneline;
+create table oneline_n0(key int, value string);
+load data local inpath '../../data/files/things.txt' into table oneline_n0;
SELECT
'2009-03-20 11:30:01',
to_unix_timestamp('2009-03-20 11:30:01')
-FROM oneline;
+FROM oneline_n0;
SELECT
'2009-03-20',
to_unix_timestamp('2009-03-20', 'yyyy-MM-dd')
-FROM oneline;
+FROM oneline_n0;
SELECT
'2009 Mar 20 11:30:01 am',
to_unix_timestamp('2009 Mar 20 11:30:01 am', 'yyyy MMM dd h:mm:ss a')
-FROM oneline;
+FROM oneline_n0;
SELECT
'random_string',
to_unix_timestamp('random_string')
-FROM oneline;
+FROM oneline_n0;
-- PPD
explain select * from (select * from src) a where unix_timestamp(a.key) > 10;
diff --git a/ql/src/test/queries/clientpositive/udf_unix_timestamp.q b/ql/src/test/queries/clientpositive/udf_unix_timestamp.q
index a0bc08b9c4..a6c9941d14 100644
--- a/ql/src/test/queries/clientpositive/udf_unix_timestamp.q
+++ b/ql/src/test/queries/clientpositive/udf_unix_timestamp.q
@@ -22,11 +22,11 @@ SELECT
unix_timestamp('2009 Mar 20 11:30:01 am', 'yyyy MMM dd h:mm:ss a')
FROM oneline;
-create table foo as SELECT
+create table foo_n3 as SELECT
'deprecated' as a,
unix_timestamp() as b
FROM oneline;
-drop table foo;
+drop table foo_n3;
SELECT
'random_string',
diff --git a/ql/src/test/queries/clientpositive/udf_var_samp.q b/ql/src/test/queries/clientpositive/udf_var_samp.q
index 710cbf07ef..e220f4be94 100644
--- a/ql/src/test/queries/clientpositive/udf_var_samp.q
+++ b/ql/src/test/queries/clientpositive/udf_var_samp.q
@@ -3,11 +3,11 @@ DESCRIBE FUNCTION EXTENDED var_samp;
DESCRIBE FUNCTION var_samp;
DESCRIBE FUNCTION EXTENDED var_samp;
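
A worked check of the hunk below (editorial, from the sample-variance formula sum((x - mean)^2) / (n - 1)): for the inserted rows (1), (-1), (0) the mean is 0 and the squared deviations sum to 2, so var_samp(a) = 2 / 2 = 1.0; with the filter a >= 0 only {1, 0} remain, giving mean 0.5 and var_samp(a) = 0.5 / 1 = 0.5. The stddev_samp values in the udf_stddev_samp.q hunk above are the square roots, 1.0 and sqrt(0.5) ≈ 0.70711, which is what the round(..., 5) in both tests pins down.
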
-drop table if exists t;
-create table t (a int);
-insert into t values (1),(-1),(0);
+drop table if exists t_n27;
+create table t_n27 (a int);
+insert into t_n27 values (1),(-1),(0);
-select var_samp(a) from t;
-select var_samp(a) from t where a=0;
-select round(var_samp(a),5) from t where a>=0;
+select var_samp(a) from t_n27;
+select var_samp(a) from t_n27 where a=0;
+select round(var_samp(a),5) from t_n27 where a>=0;
diff --git a/ql/src/test/queries/clientpositive/udf_width_bucket.q b/ql/src/test/queries/clientpositive/udf_width_bucket.q
index dfdbee1ca6..9fce6fc60c 100644
--- a/ql/src/test/queries/clientpositive/udf_width_bucket.q
+++ b/ql/src/test/queries/clientpositive/udf_width_bucket.q
@@ -90,7 +90,7 @@ width_bucket(0.25, 0, cdouble, 10)
from alldecimaltypes;
-- Test with all numeric types
-create table alltypes(
+create table alltypes_n3(
ctinyint TINYINT,
csmallint SMALLINT,
cint INT,
@@ -98,7 +98,7 @@ create table alltypes(
cfloat FLOAT,
cdouble DOUBLE);
-insert into table alltypes values
+insert into table alltypes_n3 values
(0, 0, 0, 0, 0.0, 0.0),
(1, 1, 1, 1, 1.0, 1.0),
(25, 25, 25, 25, 25.0, 25.0),
@@ -114,11 +114,11 @@ width_bucket(cint, 0, 100, 10),
width_bucket(cbigint, 0, 100, 10),
width_bucket(cfloat, 0, 100, 10),
width_bucket(cdouble, 0, 100, 10)
-from alltypes;
+from alltypes_n3;
-truncate table alltypes;
+truncate table alltypes_n3;
-insert into table alltypes values (5, 5, 5, 10, 4.5, 7.25);
+insert into table alltypes_n3 values (5, 5, 5, 10, 4.5, 7.25);
-- Test different numeric types in a single query
select
@@ -126,7 +126,7 @@ width_bucket(cdouble, ctinyint, cbigint, 10),
width_bucket(cdouble, csmallint, cbigint, 10),
width_bucket(cdouble, cint, cbigint, 10),
width_bucket(cdouble, cfloat, cbigint, 10)
-from alltypes;
+from alltypes_n3;
-- Test all tinyints
create table alltinyints (
@@ -194,7 +194,7 @@ insert into table testgroupingsets values (1, 1), (2, 2);
select c1, c2, width_bucket(5, c1, 10, case when grouping(c2) = 0 then 10 else 5 end) from testgroupingsets group by cube(c1, c2);
drop table alldecimaltype;
-drop table alltypes;
+drop table alltypes_n3;
drop table alltinyints;
drop table allsmallints;
drop table allints;
diff --git a/ql/src/test/queries/clientpositive/udtf_json_tuple.q b/ql/src/test/queries/clientpositive/udtf_json_tuple.q
index f3b55490fe..e724a54589 100644
--- a/ql/src/test/queries/clientpositive/udtf_json_tuple.q
+++ b/ql/src/test/queries/clientpositive/udtf_json_tuple.q
@@ -40,10 +40,10 @@ select f2, count(*) from json_t a lateral view json_tuple(a.jstring, 'f1', 'f2',
-- Verify that json_tuple can handle new lines in JSON values
-CREATE TABLE dest1(c1 STRING) STORED AS RCFILE;
+CREATE TABLE dest1_n65(c1 STRING) STORED AS RCFILE;
-INSERT OVERWRITE TABLE dest1 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows);
+INSERT OVERWRITE TABLE dest1_n65 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows);
-SELECT * FROM dest1;
+SELECT * FROM dest1_n65;
-SELECT json FROM dest1 a LATERAL VIEW json_tuple(c1, 'a') b AS json;
\ No newline at end of file
+SELECT json FROM dest1_n65 a LATERAL VIEW json_tuple(c1, 'a') b AS json;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/udtf_replicate_rows.q b/ql/src/test/queries/clientpositive/udtf_replicate_rows.q
index a074a78985..be07359ac4 100644
--- a/ql/src/test/queries/clientpositive/udtf_replicate_rows.q
+++ b/ql/src/test/queries/clientpositive/udtf_replicate_rows.q
@@ -4,20 +4,20 @@ set hive.cbo.enable=false;
DESCRIBE FUNCTION replicate_rows;
DESCRIBE FUNCTION EXTENDED replicate_rows;
-create table t (x bigint, y string, z int);
+create table t_n13 (x bigint, y string, z int);
-insert into table t values (3,'2',0),(2,'3',1),(0,'2',2),(-1,'k',3);
+insert into table t_n13 values (3,'2',0),(2,'3',1),(0,'2',2),(-1,'k',3);
-SELECT replicate_rows(x,y) FROM t;
+SELECT replicate_rows(x,y) FROM t_n13;
-SELECT replicate_rows(x,y,y) FROM t;
+SELECT replicate_rows(x,y,y) FROM t_n13;
-SELECT replicate_rows(x,y,y,y,z) FROM t;
+SELECT replicate_rows(x,y,y,y,z) FROM t_n13;
-select y,x from (SELECT replicate_rows(x,y) as (x,y) FROM t)subq;
+select y,x from (SELECT replicate_rows(x,y) as (x,y) FROM t_n13)subq;
-select z,y,x from(SELECT replicate_rows(x,y,y) as (z,y,x) FROM t)subq;
+select z,y,x from(SELECT replicate_rows(x,y,y) as (z,y,x) FROM t_n13)subq;
-SELECT replicate_rows(x,concat(y,'...'),y) FROM t;
+SELECT replicate_rows(x,concat(y,'...'),y) FROM t_n13;
diff --git a/ql/src/test/queries/clientpositive/union10.q b/ql/src/test/queries/clientpositive/union10.q
index e4f1ac75ba..417c351a70 100644
--- a/ql/src/test/queries/clientpositive/union10.q
+++ b/ql/src/test/queries/clientpositive/union10.q
@@ -6,10 +6,10 @@ set hive.map.aggr = true;
-- union case: all subqueries are map-reduce jobs, 3 way union, same input for all sub-queries, followed by filesink
-create table tmptable(key string, value int);
+create table tmptable_n3(key string, value int);
explain
-insert overwrite table tmptable
+insert overwrite table tmptable_n3
select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
UNION ALL
select 'tst2' as key, count(1) as value from src s2
@@ -17,7 +17,7 @@ insert overwrite table tmptable
select 'tst3' as key, count(1) as value from src s3) unionsrc;
-insert overwrite table tmptable
+insert overwrite table tmptable_n3
select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
UNION ALL
select 'tst2' as key, count(1) as value from src s2
@@ -25,5 +25,5 @@ insert overwrite table tmptable
select 'tst3' as key, count(1) as value from src s3) unionsrc;
-select * from tmptable x sort by x.key;
+select * from tmptable_n3 x sort by x.key;
diff --git a/ql/src/test/queries/clientpositive/union12.q b/ql/src/test/queries/clientpositive/union12.q
index dc00ed392e..92e7af240c 100644
--- a/ql/src/test/queries/clientpositive/union12.q
+++ b/ql/src/test/queries/clientpositive/union12.q
@@ -8,10 +8,10 @@ set hive.map.aggr = true;
-- union case: all subqueries are map-reduce jobs, 3 way union, different inputs for all sub-queries, followed by filesink
-create table tmptable(key string, value int);
+create table tmptable_n10(key string, value int);
explain
-insert overwrite table tmptable
+insert overwrite table tmptable_n10
select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
UNION ALL
select 'tst2' as key, count(1) as value from src1 s2
@@ -19,11 +19,11 @@ insert overwrite table tmptable
select 'tst3' as key, count(1) as value from srcbucket s3) unionsrc;
-insert overwrite table tmptable
+insert overwrite table tmptable_n10
select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
UNION ALL
select 'tst2' as key, count(1) as value from src1 s2
UNION ALL
select 'tst3' as key, count(1) as value from srcbucket s3) unionsrc;
-select * from tmptable x sort by x.key;
+select * from tmptable_n10 x sort by x.key;
diff --git a/ql/src/test/queries/clientpositive/union17.q b/ql/src/test/queries/clientpositive/union17.q
index 4c3a6c94a2..d6120f9a3c 100644
--- a/ql/src/test/queries/clientpositive/union17.q
+++ b/ql/src/test/queries/clientpositive/union17.q
@@ -1,6 +1,6 @@
--! qt:dataset:src
-CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n78(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n17(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
-- SORT_QUERY_RESULTS
-- union case:map-reduce sub-queries followed by multi-table insert
@@ -9,14 +9,14 @@ explain
FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION ALL
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value;
+INSERT OVERWRITE TABLE DEST1_n78 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n17 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value;
FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION ALL
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value;
+INSERT OVERWRITE TABLE DEST1_n78 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n17 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value;
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n78.* FROM DEST1_n78;
+SELECT DEST2_n17.* FROM DEST2_n17;
diff --git a/ql/src/test/queries/clientpositive/union18.q b/ql/src/test/queries/clientpositive/union18.q
index 1640b8af84..bc83c10604 100644
--- a/ql/src/test/queries/clientpositive/union18.q
+++ b/ql/src/test/queries/clientpositive/union18.q
@@ -1,6 +1,6 @@
--! qt:dataset:src
-CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n128(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n33(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
-- SORT_QUERY_RESULTS
@@ -10,14 +10,14 @@ explain
FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION ALL
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, unionsrc.value
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value;
+INSERT OVERWRITE TABLE DEST1_n128 SELECT unionsrc.key, unionsrc.value
+INSERT OVERWRITE TABLE DEST2_n33 SELECT unionsrc.key, unionsrc.value, unionsrc.value;
FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION ALL
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, unionsrc.value
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value;
+INSERT OVERWRITE TABLE DEST1_n128 SELECT unionsrc.key, unionsrc.value
+INSERT OVERWRITE TABLE DEST2_n33 SELECT unionsrc.key, unionsrc.value, unionsrc.value;
-SELECT DEST1.* FROM DEST1 SORT BY DEST1.key, DEST1.value;
-SELECT DEST2.* FROM DEST2 SORT BY DEST2.key, DEST2.val1, DEST2.val2;
+SELECT DEST1_n128.* FROM DEST1_n128 SORT BY DEST1_n128.key, DEST1_n128.value;
+SELECT DEST2_n33.* FROM DEST2_n33 SORT BY DEST2_n33.key, DEST2_n33.val1, DEST2_n33.val2;
diff --git a/ql/src/test/queries/clientpositive/union19.q b/ql/src/test/queries/clientpositive/union19.q
index e3f5888a54..857a704455 100644
--- a/ql/src/test/queries/clientpositive/union19.q
+++ b/ql/src/test/queries/clientpositive/union19.q
@@ -1,8 +1,8 @@
--! qt:dataset:src
-- SORT_QUERY_RESULTS
-CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n86(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n21(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
-- union case:map-reduce sub-queries followed by multi-table insert
@@ -10,17 +10,17 @@ explain
FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION ALL
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value;
+INSERT OVERWRITE TABLE DEST1_n86 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n21 SELECT unionsrc.key, unionsrc.value, unionsrc.value;
FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
UNION ALL
select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value;
+INSERT OVERWRITE TABLE DEST1_n86 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key
+INSERT OVERWRITE TABLE DEST2_n21 SELECT unionsrc.key, unionsrc.value, unionsrc.value;
-SELECT DEST1.* FROM DEST1 SORT BY DEST1.key, DEST1.value;
-SELECT DEST2.* FROM DEST2 SORT BY DEST2.key, DEST2.val1, DEST2.val2;
+SELECT DEST1_n86.* FROM DEST1_n86 SORT BY DEST1_n86.key, DEST1_n86.value;
+SELECT DEST2_n21.* FROM DEST2_n21 SORT BY DEST2_n21.key, DEST2_n21.val1, DEST2_n21.val2;
diff --git a/ql/src/test/queries/clientpositive/union24.q b/ql/src/test/queries/clientpositive/union24.q
index abe825c843..2eafee8a6c 100644
--- a/ql/src/test/queries/clientpositive/union24.q
+++ b/ql/src/test/queries/clientpositive/union24.q
@@ -2,10 +2,10 @@ set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
-create table src2 as select key, count(1) as count from src group by key;
-create table src3 as select * from src2;
-create table src4 as select * from src2;
-create table src5 as select * from src2;
+create table src2_n6 as select key, count(1) as count from src group by key;
+create table src3_n2 as select * from src2_n6;
+create table src4_n0 as select * from src2_n6;
+create table src5_n3 as select * from src2_n6;
set hive.merge.mapfiles=false;
@@ -14,61 +14,61 @@ set hive.merge.mapredfiles=false;
explain extended
select s.key, s.count from (
-  select key, count from src2 where key < 10
+  select key, count from src2_n6 where key < 10
  union all
-  select key, count from src3 where key < 10
+  select key, count from src3_n2 where key < 10
  union all
-  select key, count from src4 where key < 10
+  select key, count from src4_n0 where key < 10
  union all
-  select key, count(1) as count from src5 where key < 10 group by key
+  select key, count(1) as count from src5_n3 where key < 10 group by key
)s
;
select s.key, s.count from (
-  select key, count from src2 where key < 10
+  select key, count from src2_n6 where key < 10
  union all
-  select key, count from src3 where key < 10
+  select key, count from src3_n2 where key < 10
  union all
-  select key, count from src4 where key < 10
+  select key, count from src4_n0 where key < 10
  union all
-  select key, count(1) as count from src5 where key < 10 group by key
+  select key, count(1) as count from src5_n3 where key < 10 group by key
)s
;
explain extended
select s.key, s.count from (
-  select key, count from src2 where key < 10
+  select key, count from src2_n6 where key < 10
  union all
-  select key, count from src3 where key < 10
+  select key, count from src3_n2 where key < 10
  union all
-  select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10
+  select a.key as key, b.count as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10
)s
;
select s.key, s.count from (
-  select key, count from src2 where key < 10
+  select key, count from src2_n6 where key < 10
  union all
-  select key, count from src3 where key < 10
+  select key, count from src3_n2 where key < 10
  union all
-  select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10
+  select a.key as key, b.count as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10
)s
;
explain extended
select s.key, s.count from (
-  select key, count from src2 where key < 10
+  select key, count from src2_n6 where key < 10
  union all
-  select key, count from src3 where key < 10
+  select key, count from src3_n2 where key < 10
  union all
-  select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key
+  select a.key as key, count(1) as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10 group by a.key
)s
;
select s.key, s.count from (
-  select key, count from src2 where key < 10
+  select key, count from src2_n6 where key < 10
  union all
-  select key, count from src3 where key < 10
+  select key, count from src3_n2 where key < 10
  union all
-  select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key
+  select a.key as key, count(1) as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10 group by a.key
)s
;
diff --git a/ql/src/test/queries/clientpositive/union27.q b/ql/src/test/queries/clientpositive/union27.q
index b0445c442d..0a8d479c20 100644
--- a/ql/src/test/queries/clientpositive/union27.q
+++ b/ql/src/test/queries/clientpositive/union27.q
@@ -1,8 +1,8 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
-create table jackson_sev_same as select * from src;
-create table dim_pho as select * from src;
-create table jackson_sev_add as select * from src;
-explain select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97;
-select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97;
+create table jackson_sev_same_n0 as select * from src;
+create table dim_pho_n0 as select * from src;
+create table jackson_sev_add_n0 as select * from src;
+explain select b.* from jackson_sev_same_n0 a join (select * from dim_pho_n0 union all select * from jackson_sev_add_n0)b on a.key=b.key and b.key=97;
+select b.* from jackson_sev_same_n0 a join (select * from dim_pho_n0 union all select * from jackson_sev_add_n0)b on a.key=b.key and b.key=97;
diff --git a/ql/src/test/queries/clientpositive/union29.q b/ql/src/test/queries/clientpositive/union29.q
index 1cc29b97d5..5141a2398e 100644
--- a/ql/src/test/queries/clientpositive/union29.q
+++ b/ql/src/test/queries/clientpositive/union29.q
@@ -1,8 +1,8 @@
--! qt:dataset:src
-create table union_subq_union(key int, value string);
+create table union_subq_union_n1(key int, value string);
explain
-insert overwrite table union_subq_union
+insert overwrite table union_subq_union_n1
select * from (
select key, value from src
union all
@@ -15,7 +15,7 @@ select * from (
) a
;
-insert overwrite table union_subq_union
+insert overwrite table union_subq_union_n1
select * from (
select key, value from src
union all
@@ -28,4 +28,4 @@ select * from (
) a
;
-select * from union_subq_union order by key, value limit 20;
+select * from union_subq_union_n1 order by key, value limit 20;
diff --git a/ql/src/test/queries/clientpositive/union3.q b/ql/src/test/queries/clientpositive/union3.q
index b4e0702f0a..0d8bd8661c 100644
--- a/ql/src/test/queries/clientpositive/union3.q
+++ b/ql/src/test/queries/clientpositive/union3.q
@@ -21,9 +21,9 @@ FROM (
-CREATE TABLE union_out (id int);
+CREATE TABLE union_out_n0 (id int);
-insert overwrite table union_out
+insert overwrite table union_out_n0
SELECT *
FROM (
SELECT 1 AS id
@@ -40,4 +40,4 @@ FROM (
CLUSTER BY id
) a;
-select * from union_out;
+select * from union_out_n0;
diff --git a/ql/src/test/queries/clientpositive/union30.q b/ql/src/test/queries/clientpositive/union30.q
index 925d847e53..a87406fd31 100644
--- a/ql/src/test/queries/clientpositive/union30.q
+++ b/ql/src/test/queries/clientpositive/union30.q
@@ -1,9 +1,9 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-create table union_subq_union(key int, value string);
+create table union_subq_union_n2(key int, value string);
explain
-insert overwrite table union_subq_union
+insert overwrite table union_subq_union_n2
select * from (
select * from (
@@ -23,7 +23,7 @@ select key, value from src
) aa
;
-insert overwrite table union_subq_union
+insert overwrite table union_subq_union_n2
select * from (
select * from (
@@ -43,4 +43,4 @@ select key, value from src
) aa
;
-select * from union_subq_union order by key, value limit 20;
+select * from union_subq_union_n2 order by key, value limit 20;
diff --git a/ql/src/test/queries/clientpositive/union31.q b/ql/src/test/queries/clientpositive/union31.q
index 9523cf3ec4..c462633b4d 100644
--- a/ql/src/test/queries/clientpositive/union31.q
+++ b/ql/src/test/queries/clientpositive/union31.q
@@ -1,100 +1,100 @@
--! qt:dataset:src
-- SORT_QUERY_RESULTS
-drop table t1;
-drop table t2;
+drop table t1_n28;
+drop table t2_n19;
-create table t1 as select * from src where key < 10;
-create table t2 as select * from src where key < 10;
+create table t1_n28 as select * from src where key < 10;
+create table t2_n19 as select * from src where key < 10;
-create table t3(key string, cnt int);
-create table t4(value string, cnt int);
+create table t3_n6(key string, cnt int);
+create table t4_n2(value string, cnt int);
explain
from
-(select * from t1
+(select * from t1_n28
 union all
- select * from t2
+ select * from t2_n19
) x
-insert overwrite table t3
+insert overwrite table t3_n6
select key, count(1) group by key
-insert overwrite table t4
+insert overwrite table t4_n2
select value, count(1) group by value;
from
-(select * from t1
+(select * from t1_n28
 union all
- select * from t2
+ select * from t2_n19
) x
-insert overwrite table t3
+insert overwrite table t3_n6
select key, count(1) group by key
-insert overwrite table t4
+insert overwrite table t4_n2
select value, count(1) group by value;
-select * from t3;
-select * from t4;
+select * from t3_n6;
+select * from t4_n2;
-create table t5(c1 string, cnt int);
-create table t6(c1 string, cnt int);
+create table t5_n0(c1 string, cnt int);
+create table t6_n0(c1 string, cnt int);
explain
from
(
-  select key as c1, count(1) as cnt from t1 group by key
+  select key as c1, count(1) as cnt from t1_n28 group by key
 union all
-  select key as c1, count(1) as cnt from t2 group by key
+  select key as c1, count(1) as cnt from t2_n19 group by key
) x
-insert overwrite table t5
+insert overwrite table t5_n0
select c1, sum(cnt) group by c1
-insert overwrite table t6
+insert overwrite table t6_n0
select c1, sum(cnt) group by c1;
from
(
-  select key as c1, count(1) as cnt from t1 group by key
+  select key as c1, count(1) as cnt from t1_n28 group by key
 union all
-  select key as c1, count(1) as cnt from t2 group by key
+  select key as c1, count(1) as cnt from t2_n19 group by key
) x
-insert overwrite table t5
+insert overwrite table t5_n0
select c1, sum(cnt) group by c1
-insert overwrite table t6
+insert overwrite table t6_n0
select c1, sum(cnt) group by c1;
-select * from t5;
-select * from t6;
+select * from t5_n0;
+select * from t6_n0;
-drop table t1;
-drop table t2;
+drop table t1_n28;
+drop table t2_n19;
-create table t1 as select * from src where key < 10;
-create table t2 as select key, count(1) as cnt from src where key < 10 group by key;
+create table t1_n28 as select * from src where key < 10;
+create table t2_n19 as select key, count(1) as cnt from src where key < 10 group by key;
-create table t7(c1 string, cnt int);
-create table t8(c1 string, cnt int);
+create table t7_n1(c1 string, cnt int);
+create table t8_n0(c1 string, cnt int);
explain
from
(
-  select key as c1, count(1) as cnt from t1 group by key
+  select key as c1, count(1) as cnt from t1_n28 group by key
 union all
-  select key as c1, cnt from t2
+  select key as c1, cnt from t2_n19
) x
-insert overwrite table t7
+insert overwrite table t7_n1
select c1, count(1) group by c1
-insert overwrite table t8
+insert overwrite table t8_n0
select c1, count(1) group by c1;
from
(
-  select key as c1, count(1) as cnt from t1 group by key
+  select key as c1, count(1) as cnt from t1_n28 group by key
 union all
-  select key as c1, cnt from t2
+  select key as c1, cnt from t2_n19
) x
-insert overwrite table t7
+insert overwrite table t7_n1
select c1, count(1) group by c1
-insert overwrite table t8
+insert overwrite table t8_n0
select c1, count(1) group by c1;
-select * from t7;
-select * from t8;
+select * from t7_n1;
+select * from t8_n0;
diff --git a/ql/src/test/queries/clientpositive/union32.q b/ql/src/test/queries/clientpositive/union32.q
index 958529c623..05576d007e 100644
--- a/ql/src/test/queries/clientpositive/union32.q
+++ b/ql/src/test/queries/clientpositive/union32.q
@@ -5,74 +5,74 @@ set hive.mapred.mode=nonstrict;
-- This tests various union queries which have columns on one side of the query
-- being of double type and those on the other side another
-CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10;
-CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10;
+CREATE TABLE t1_n70 AS SELECT * FROM src WHERE key < 10;
+CREATE TABLE t2_n43 AS SELECT * FROM src WHERE key < 10;
-- Test simple union with double
EXPLAIN
SELECT * FROM
-(SELECT CAST(key AS DOUBLE) AS key FROM t1
+(SELECT CAST(key AS DOUBLE) AS key FROM t1_n70
UNION ALL
-SELECT CAST(key AS BIGINT) AS key FROM t2) a;
+SELECT CAST(key AS BIGINT) AS key FROM t2_n43) a;
SELECT * FROM
-(SELECT CAST(key AS DOUBLE) AS key FROM t1
+(SELECT CAST(key AS DOUBLE) AS key FROM t1_n70
UNION ALL
-SELECT CAST(key AS BIGINT) AS key FROM t2) a
+SELECT CAST(key AS BIGINT) AS key FROM t2_n43) a
;
-- Test union with join on the left
EXPLAIN
SELECT * FROM
-(SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key
+(SELECT CAST(a.key AS BIGINT) AS key FROM t1_n70 a JOIN t2_n43 b ON a.key = b.key
UNION ALL
-SELECT CAST(key AS DOUBLE) AS key FROM t2) a
+SELECT CAST(key AS DOUBLE) AS key FROM t2_n43) a
;
SELECT * FROM
-(SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key
+(SELECT CAST(a.key AS BIGINT) AS key FROM t1_n70 a JOIN t2_n43 b ON a.key = b.key
UNION ALL
-SELECT CAST(key AS DOUBLE) AS key FROM t2) a
+SELECT CAST(key AS DOUBLE) AS key FROM t2_n43) a
;
-- Test union with join on the right
EXPLAIN
SELECT * FROM
-(SELECT CAST(key AS DOUBLE) AS key FROM t2
+(SELECT CAST(key AS DOUBLE) AS key FROM t2_n43
UNION ALL
-SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a
+SELECT CAST(a.key AS BIGINT) AS key FROM t1_n70 a JOIN t2_n43 b ON a.key = b.key) a
;
SELECT * FROM
-(SELECT CAST(key AS DOUBLE) AS key FROM t2
+(SELECT CAST(key AS DOUBLE) AS key FROM t2_n43
UNION ALL
-SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a
+SELECT CAST(a.key AS BIGINT) AS key FROM t1_n70 a JOIN t2_n43 b ON a.key = b.key) a
;
-- Test union with join on the left selecting multiple columns
EXPLAIN
SELECT * FROM
-(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key
+(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1_n70 a JOIN t2_n43 b ON a.key = b.key
UNION ALL
-SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a
+SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n43) a
;
SELECT * FROM
-(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key
+(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1_n70 a JOIN t2_n43 b ON a.key = b.key
UNION ALL
-SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a
+SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n43) a
;
-- Test union with join on the right selecting multiple columns
EXPLAIN
SELECT * FROM
-(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2
+(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n43
UNION ALL
-SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a
+SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1_n70 a JOIN t2_n43 b ON a.key = b.key) a
;
SELECT * FROM
-(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2
+(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n43
UNION ALL
-SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a
+SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1_n70 a JOIN t2_n43 b ON a.key = b.key) a
;
diff --git a/ql/src/test/queries/clientpositive/union33.q b/ql/src/test/queries/clientpositive/union33.q
index ad17e000d3..f68037e961 100644
--- a/ql/src/test/queries/clientpositive/union33.q
+++ b/ql/src/test/queries/clientpositive/union33.q
@@ -5,9 +5,9 @@ set hive.groupby.skewindata=true;
-- This tests that a union all with a map only subquery on one side and a
-- subquery involving two map reduce jobs on the other runs correctly.
-CREATE TABLE test_src (key STRING, value STRING);
+CREATE TABLE test_src_n1 (key STRING, value STRING);
-EXPLAIN INSERT OVERWRITE TABLE test_src
+EXPLAIN INSERT OVERWRITE TABLE test_src_n1
SELECT key, value FROM (
SELECT key, value FROM src
WHERE key = 0
@@ -16,7 +16,7 @@ UNION ALL
GROUP BY key
)a;
-INSERT OVERWRITE TABLE test_src
+INSERT OVERWRITE TABLE test_src_n1
SELECT key, value FROM (
SELECT key, value FROM src
WHERE key = 0
@@ -25,9 +25,9 @@ UNION ALL
GROUP BY key
)a;
-SELECT COUNT(*) FROM test_src;
+SELECT COUNT(*) FROM test_src_n1;
-EXPLAIN INSERT OVERWRITE TABLE test_src
+EXPLAIN INSERT OVERWRITE TABLE test_src_n1
SELECT key, value FROM (
SELECT key, cast(COUNT(*) as string) AS value FROM src
GROUP BY key
@@ -36,7 +36,7 @@ UNION ALL
WHERE key = 0
)a;
-INSERT OVERWRITE TABLE test_src
+INSERT OVERWRITE TABLE test_src_n1
SELECT key, value FROM (
SELECT key, cast(COUNT(*) as string) AS value FROM src
GROUP BY key
@@ -45,5 +45,5 @@ UNION ALL
WHERE key = 0
)a;
-SELECT COUNT(*) FROM test_src;
+SELECT COUNT(*) FROM test_src_n1;
diff --git a/ql/src/test/queries/clientpositive/union34.q b/ql/src/test/queries/clientpositive/union34.q
index 492c941afa..7d1300bd61 100644
--- a/ql/src/test/queries/clientpositive/union34.q
+++ b/ql/src/test/queries/clientpositive/union34.q
@@ -1,31 +1,31 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
-create table src10_1 (key string, value string);
-create table src10_2 (key string, value string);
-create table src10_3 (key string, value string);
-create table src10_4 (key string, value string);
+create table src10_1_n0 (key string, value string);
+create table src10_2_n0 (key string, value string);
+create table src10_3_n0 (key string, value string);
+create table src10_4_n0 (key string, value string);
from (select * from src tablesample (10 rows)) a
-insert overwrite table src10_1 select *
-insert overwrite table src10_2 select *
-insert overwrite table src10_3 select *
-insert overwrite table src10_4 select *;
+insert overwrite table src10_1_n0 select *
+insert overwrite table src10_2_n0 select *
+insert overwrite table src10_3_n0 select *
+insert overwrite table src10_4_n0 select *;
set hive.auto.convert.join=true;
-- When we convert the Join of sub1 and sub0 into a MapJoin,
-- we can use a single MR job to evaluate this entire query.
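
An illustrative aside, not part of the patch (table names hypothetical): the comment above hinges on hive.auto.convert.join, which union34.q flips between its two EXPLAIN blocks. With the flag on, the join of sub1 and sub0 compiles to a map-only MapJoin so the whole query fits in a single MR job; with it off, the same query falls back to a shuffle (common) join and needs an extra job.

set hive.auto.convert.join=true;   -- broadcast the small side, join in the mappers
explain select a.key from small_t a join big_t b on a.key = b.key;
set hive.auto.convert.join=false;  -- same query now compiles to a reduce-side join
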
explain
SELECT * FROM (
-  SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key)
+  SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key)
  UNION ALL
-  SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0
+  SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0
) alias1;
SELECT * FROM (
-  SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key)
+  SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key)
  UNION ALL
-  SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0
+  SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0
) alias1;
set hive.auto.convert.join=false;
@@ -35,13 +35,13 @@ set hive.auto.convert.join=false;
-- is for the UNION ALL and ORDER BY.
explain
SELECT * FROM (
-  SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key)
+  SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key)
  UNION ALL
-  SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0
+  SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0
) alias1;
SELECT * FROM (
-  SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key)
+  SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key)
  UNION ALL
-  SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0
+  SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0
) alias1;
diff --git a/ql/src/test/queries/clientpositive/union4.q b/ql/src/test/queries/clientpositive/union4.q
index 5b92d9f729..66b8646854 100644
--- a/ql/src/test/queries/clientpositive/union4.q
+++ b/ql/src/test/queries/clientpositive/union4.q
@@ -8,19 +8,19 @@ set hive.map.aggr = true;
-- union case: both subqueries are map-reduce jobs on same input, followed by filesink
-create table tmptable(key string, value int);
+create table tmptable_n12(key string, value int);
explain
-insert overwrite table tmptable
+insert overwrite table tmptable_n12
select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
UNION ALL
select 'tst2' as key, count(1) as value from src s2) unionsrc;
-insert overwrite table tmptable
+insert overwrite table tmptable_n12
select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
UNION ALL
select 'tst2' as key, count(1) as value from src s2) unionsrc;
-select * from tmptable x sort by x.key;
+select * from tmptable_n12 x sort by x.key;
diff --git a/ql/src/test/queries/clientpositive/union6.q b/ql/src/test/queries/clientpositive/union6.q
index daaf21fb32..36b5ba9b49 100644
--- a/ql/src/test/queries/clientpositive/union6.q
+++ b/ql/src/test/queries/clientpositive/union6.q
@@ -8,19 +8,19 @@ set hive.map.aggr = true;
-- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink
different inputs for sub-queries, followed by filesink -create table tmptable(key string, value string); +create table tmptable_n5(key string, value string); explain -insert overwrite table tmptable +insert overwrite table tmptable_n5 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc; -insert overwrite table tmptable +insert overwrite table tmptable_n5 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc; -select * from tmptable x sort by x.key, x.value; +select * from tmptable_n5 x sort by x.key, x.value; diff --git a/ql/src/test/queries/clientpositive/unionDistinct_1.q b/ql/src/test/queries/clientpositive/unionDistinct_1.q index 16a3eea8cf..1ea9264d38 100644 --- a/ql/src/test/queries/clientpositive/unionDistinct_1.q +++ b/ql/src/test/queries/clientpositive/unionDistinct_1.q @@ -11,26 +11,26 @@ set hive.explain.user=false; -- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by filesink -create table tmptable(key string, value int); +create table tmptable_n9(key string, value int); explain -insert overwrite table tmptable - select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 +insert overwrite table tmptable_n9 + select unionsrc.key, unionsrc.value FROM (select 'tst1_n93' as key, count(1) as value from src s1 UNION DISTINCT - select 'tst2' as key, count(1) as value from src s2 + select 'tst2_n58' as key, count(1) as value from src s2 UNION DISTINCT - select 'tst3' as key, count(1) as value from src s3) unionsrc; + select 'tst3_n22' as key, count(1) as value from src s3) unionsrc; -insert overwrite table tmptable - select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 +insert overwrite table tmptable_n9 + select unionsrc.key, unionsrc.value FROM (select 'tst1_n93' as key, count(1) as value from src s1 UNION DISTINCT - select 'tst2' as key, count(1) as value from src s2 + select 'tst2_n58' as key, count(1) as value from src s2 UNION DISTINCT - select 'tst3' as key, count(1) as value from src s3) unionsrc; + select 'tst3_n22' as key, count(1) as value from src s3) unionsrc; -select * from tmptable x sort by x.key; +select * from tmptable_n9 x sort by x.key; -- union12.q @@ -43,19 +43,19 @@ create table tmptable12(key string, value int); explain insert overwrite table tmptable12 - select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 + select unionsrc.key, unionsrc.value FROM (select 'tst1_n93' as key, count(1) as value from src s1 UNION DISTINCT - select 'tst2' as key, count(1) as value from src1 s2 + select 'tst2_n58' as key, count(1) as value from src1 s2 UNION DISTINCT - select 'tst3' as key, count(1) as value from srcbucket s3) unionsrc; + select 'tst3_n22' as key, count(1) as value from srcbucket s3) unionsrc; insert overwrite table tmptable12 - select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 + select unionsrc.key, unionsrc.value FROM (select 'tst1_n93' as key, count(1) as value from src s1 UNION DISTINCT - select 'tst2' as key, count(1) as value from src1 s2 + select 'tst2_n58' as key, count(1) as value from src1 s2 UNION DISTINCT - select 'tst3' as key, count(1) as value from srcbucket s3) unionsrc; + select 
'tst3_n22' as key, count(1) as value from srcbucket s3) unionsrc; select * from tmptable12 x sort by x.key; -- union13.q @@ -71,27 +71,27 @@ select unionsrc.key, unionsrc.value FROM (select s1.key as key, s1.value as valu select s2.key as key, s2.value as value from src s2) unionsrc; -- union17.q -CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE; -CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE; +CREATE TABLE DEST1_n96(key STRING, value STRING) STORED AS TEXTFILE; +CREATE TABLE DEST2_n26(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE; -- SORT_BEFORE_DIFF -- union case:map-reduce sub-queries followed by multi-table insert explain -FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 +FROM (select 'tst1_n93' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value; +INSERT OVERWRITE TABLE DEST1_n96 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key +INSERT OVERWRITE TABLE DEST2_n26 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value; -FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 +FROM (select 'tst1_n93' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value; +INSERT OVERWRITE TABLE DEST1_n96 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key +INSERT OVERWRITE TABLE DEST2_n26 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value; -SELECT DEST1.* FROM DEST1; -SELECT DEST2.* FROM DEST2; +SELECT DEST1_n96.* FROM DEST1_n96; +SELECT DEST2_n26.* FROM DEST2_n26; -- union18.q CREATE TABLE DEST118(key STRING, value STRING) STORED AS TEXTFILE; @@ -100,13 +100,13 @@ CREATE TABLE DEST218(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE; -- union case:map-reduce sub-queries followed by multi-table insert explain -FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 +FROM (select 'tst1_n93' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST118 SELECT unionsrc.key, unionsrc.value INSERT OVERWRITE TABLE DEST218 SELECT unionsrc.key, unionsrc.value, unionsrc.value; -FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 +FROM (select 'tst1_n93' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST118 SELECT unionsrc.key, unionsrc.value @@ -125,13 +125,13 @@ CREATE TABLE DEST219(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE; -- union case:map-reduce sub-queries followed by multi-table insert explain -FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 +FROM (select 'tst1_n93' 
as key, cast(count(1) as string) as value from src s1 UNION DISTINCT select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST119 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key INSERT OVERWRITE TABLE DEST219 SELECT unionsrc.key, unionsrc.value, unionsrc.value; -FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 +FROM (select 'tst1_n93' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST119 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key @@ -145,13 +145,13 @@ SELECT DEST219.* FROM DEST219 SORT BY DEST219.key, DEST219.val1, DEST219.val2; -- SORT_QUERY_RESULTS -create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string); -create table dst_union22_delta(k0 string, k1 string, k2 string, k3 string, k4 string, k5 string) partitioned by (ds string); +create table dst_union22_n0(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string); +create table dst_union22_delta_n0(k0 string, k1 string, k2 string, k3 string, k4 string, k5 string) partitioned by (ds string); -insert overwrite table dst_union22 partition (ds='1') +insert overwrite table dst_union22_n0 partition (ds='1') select key, value, key , value from src; -insert overwrite table dst_union22_delta partition (ds='1') +insert overwrite table dst_union22_delta_n0 partition (ds='1') select key, key, value, key, value, value from src; set hive.merge.mapfiles=false; @@ -163,31 +163,31 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- Since the inputs are small, it should be automatically converted to mapjoin explain extended -insert overwrite table dst_union22 partition (ds='2') +insert overwrite table dst_union22_n0 partition (ds='2') select * from ( -select k1 as k1, k2 as k2, k3 as k3, k4 as k4 from dst_union22_delta where ds = '1' and k0 <= 50 +select k1 as k1, k2 as k2, k3 as k3, k4 as k4 from dst_union22_delta_n0 where ds = '1' and k0 <= 50 UNION DISTINCT select a.k1 as k1, a.k2 as k2, b.k3 as k3, b.k4 as k4 -from dst_union22 a left outer join (select * from dst_union22_delta where ds = '1' and k0 > 50) b on +from dst_union22_n0 a left outer join (select * from dst_union22_delta_n0 where ds = '1' and k0 > 50) b on a.k1 = b.k1 and a.ds='1' where a.k1 > 20 ) subq; -insert overwrite table dst_union22 partition (ds='2') +insert overwrite table dst_union22_n0 partition (ds='2') select * from ( -select k1 as k1, k2 as k2, k3 as k3, k4 as k4 from dst_union22_delta where ds = '1' and k0 <= 50 +select k1 as k1, k2 as k2, k3 as k3, k4 as k4 from dst_union22_delta_n0 where ds = '1' and k0 <= 50 UNION DISTINCT select a.k1 as k1, a.k2 as k2, b.k3 as k3, b.k4 as k4 -from dst_union22 a left outer join (select * from dst_union22_delta where ds = '1' and k0 > 50) b on +from dst_union22_n0 a left outer join (select * from dst_union22_delta_n0 where ds = '1' and k0 > 50) b on a.k1 = b.k1 and a.ds='1' where a.k1 > 20 ) subq; -select * from dst_union22 where ds = '2'; +select * from dst_union22_n0 where ds = '2'; -- union23.q explain @@ -209,10 +209,10 @@ from ( -- SORT_QUERY_RESULTS -create table src2 as select key, count(1) as count from src group by key; -create table src3 as select * from src2; -create table src4 as select * from src2; -create table src5 as select * from src2; +create table src2_n2 as select key, count(1) as count from src group by key; +create table src3 as select * from src2_n2; 
+create table src4 as select * from src2_n2; +create table src5_n1 as select * from src2_n2; set hive.merge.mapfiles=false; @@ -221,73 +221,73 @@ set hive.merge.mapredfiles=false; explain extended select s.key, s.count from ( - select key, count from src2 where key < 10 + select key, count from src2_n2 where key < 10 UNION DISTINCT select key, count from src3 where key < 10 UNION DISTINCT select key, count from src4 where key < 10 UNION DISTINCT - select key, count(1) as count from src5 where key < 10 group by key + select key, count(1) as count from src5_n1 where key < 10 group by key )s ; select s.key, s.count from ( - select key, count from src2 where key < 10 + select key, count from src2_n2 where key < 10 UNION DISTINCT select key, count from src3 where key < 10 UNION DISTINCT select key, count from src4 where key < 10 UNION DISTINCT - select key, count(1) as count from src5 where key < 10 group by key + select key, count(1) as count from src5_n1 where key < 10 group by key )s ; explain extended select s.key, s.count from ( - select key, count from src2 where key < 10 + select key, count from src2_n2 where key < 10 UNION DISTINCT select key, count from src3 where key < 10 UNION DISTINCT - select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10 + select a.key as key, b.count as count from src4 a join src5_n1 b on a.key=b.key where a.key < 10 )s ; select s.key, s.count from ( - select key, count from src2 where key < 10 + select key, count from src2_n2 where key < 10 UNION DISTINCT select key, count from src3 where key < 10 UNION DISTINCT - select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10 + select a.key as key, b.count as count from src4 a join src5_n1 b on a.key=b.key where a.key < 10 )s ; explain extended select s.key, s.count from ( - select key, count from src2 where key < 10 + select key, count from src2_n2 where key < 10 UNION DISTINCT select key, count from src3 where key < 10 UNION DISTINCT - select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key + select a.key as key, count(1) as count from src4 a join src5_n1 b on a.key=b.key where a.key < 10 group by a.key )s ; select s.key, s.count from ( - select key, count from src2 where key < 10 + select key, count from src2_n2 where key < 10 UNION DISTINCT select key, count from src3 where key < 10 UNION DISTINCT - select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key + select a.key as key, count(1) as count from src4 a join src5_n1 b on a.key=b.key where a.key < 10 group by a.key )s ; -- union25.q -create table tmp_srcpart like srcpart; +create table tmp_srcpart_n0 like srcpart; -insert overwrite table tmp_srcpart partition (ds='2008-04-08', hr='11') +insert overwrite table tmp_srcpart_n0 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' and hr='11'; explain -create table tmp_unionall as +create table tmp_unionall_n0 as SELECT count(1) as counts, key, value FROM ( @@ -297,9 +297,9 @@ FROM SELECT key, key as value FROM ( SELECT distinct key FROM ( - SELECT key, value FROM tmp_srcpart a WHERE a.ds='2008-04-08' and a.hr='11' + SELECT key, value FROM tmp_srcpart_n0 a WHERE a.ds='2008-04-08' and a.hr='11' UNION DISTINCT - SELECT key, value FROM tmp_srcpart b WHERE b.ds='2008-04-08' and b.hr='11' + SELECT key, value FROM tmp_srcpart_n0 b WHERE b.ds='2008-04-08' and b.hr='11' )t ) master_table ) a GROUP BY key, 
value @@ -411,10 +411,10 @@ explain select b.* from jackson_sev_same a join (select * from dim_pho UNION DIS select b.* from jackson_sev_same a join (select * from dim_pho UNION DISTINCT select * from jackson_sev_add)b on a.key=b.key and b.key=97; -- union28.q -create table union_subq_union(key int, value string); +create table union_subq_union_n0(key int, value string); explain -insert overwrite table union_subq_union +insert overwrite table union_subq_union_n0 select * from ( select key, value from src UNION DISTINCT @@ -427,7 +427,7 @@ select * from ( ) a ; -insert overwrite table union_subq_union +insert overwrite table union_subq_union_n0 select * from ( select key, value from src UNION DISTINCT @@ -440,7 +440,7 @@ select * from ( ) a ; -select * from union_subq_union order by key, value limit 20; +select * from union_subq_union_n0 order by key, value limit 20; -- union29.q create table union_subq_union29(key int, value string); @@ -566,100 +566,100 @@ select * from union_subq_union30 order by key, value limit 20; -- SORT_QUERY_RESULTS -drop table t1; -drop table t2; +drop table t1_n93; +drop table t2_n58; -create table t1 as select * from src where key < 10; -create table t2 as select * from src where key < 10; +create table t1_n93 as select * from src where key < 10; +create table t2_n58 as select * from src where key < 10; -create table t3(key string, cnt int); -create table t4(value string, cnt int); +create table t3_n22(key string, cnt int); +create table t4_n11(value string, cnt int); explain from -(select * from t1 +(select * from t1_n93 UNION DISTINCT - select * from t2 + select * from t2_n58 ) x -insert overwrite table t3 +insert overwrite table t3_n22 select key, count(1) group by key -insert overwrite table t4 +insert overwrite table t4_n11 select value, count(1) group by value; from -(select * from t1 +(select * from t1_n93 UNION DISTINCT - select * from t2 + select * from t2_n58 ) x -insert overwrite table t3 +insert overwrite table t3_n22 select key, count(1) group by key -insert overwrite table t4 +insert overwrite table t4_n11 select value, count(1) group by value; -select * from t3; -select * from t4; +select * from t3_n22; +select * from t4_n11; -create table t5(c1 string, cnt int); -create table t6(c1 string, cnt int); +create table t5_n4(c1 string, cnt int); +create table t6_n3(c1 string, cnt int); explain from ( - select key as c1, count(1) as cnt from t1 group by key + select key as c1, count(1) as cnt from t1_n93 group by key UNION DISTINCT - select key as c1, count(1) as cnt from t2 group by key + select key as c1, count(1) as cnt from t2_n58 group by key ) x -insert overwrite table t5 +insert overwrite table t5_n4 select c1, sum(cnt) group by c1 -insert overwrite table t6 +insert overwrite table t6_n3 select c1, sum(cnt) group by c1; from ( - select key as c1, count(1) as cnt from t1 group by key + select key as c1, count(1) as cnt from t1_n93 group by key UNION DISTINCT - select key as c1, count(1) as cnt from t2 group by key + select key as c1, count(1) as cnt from t2_n58 group by key ) x -insert overwrite table t5 +insert overwrite table t5_n4 select c1, sum(cnt) group by c1 -insert overwrite table t6 +insert overwrite table t6_n3 select c1, sum(cnt) group by c1; -select * from t5; -select * from t6; +select * from t5_n4; +select * from t6_n3; -create table t9 as select key, count(1) as cnt from src where key < 10 group by key; +create table t9_n1 as select key, count(1) as cnt from src where key < 10 group by key; -create table t7(c1 string, cnt int); 
-create table t8(c1 string, cnt int); +create table t7_n4(c1 string, cnt int); +create table t8_n2(c1 string, cnt int); explain from ( - select key as c1, count(1) as cnt from t1 group by key + select key as c1, count(1) as cnt from t1_n93 group by key UNION DISTINCT - select key as c1, cnt from t9 + select key as c1, cnt from t9_n1 ) x -insert overwrite table t7 +insert overwrite table t7_n4 select c1, count(1) group by c1 -insert overwrite table t8 +insert overwrite table t8_n2 select c1, count(1) group by c1; from ( - select key as c1, count(1) as cnt from t1 group by key + select key as c1, count(1) as cnt from t1_n93 group by key UNION DISTINCT - select key as c1, cnt from t9 + select key as c1, cnt from t9_n1 ) x -insert overwrite table t7 +insert overwrite table t7_n4 select c1, count(1) group by c1 -insert overwrite table t8 +insert overwrite table t8_n2 select c1, count(1) group by c1; -select * from t7; -select * from t8; +select * from t7_n4; +select * from t8_n2; -- union32.q -- SORT_QUERY_RESULTS @@ -670,70 +670,70 @@ select * from t8; -- Test simple union with double EXPLAIN SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t1 +(SELECT CAST(key AS DOUBLE) AS key FROM t1_n93 UNION DISTINCT -SELECT CAST(key AS BIGINT) AS key FROM t2) a; +SELECT CAST(key AS BIGINT) AS key FROM t2_n58) a; SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t1 +(SELECT CAST(key AS DOUBLE) AS key FROM t1_n93 UNION DISTINCT -SELECT CAST(key AS BIGINT) AS key FROM t2) a +SELECT CAST(key AS BIGINT) AS key FROM t2_n58) a ; -- Test union with join on the left EXPLAIN SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key FROM t1_n93 a JOIN t2_n58 b ON a.key = b.key UNION DISTINCT -SELECT CAST(key AS DOUBLE) AS key FROM t2) a +SELECT CAST(key AS DOUBLE) AS key FROM t2_n58) a ; SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key FROM t1_n93 a JOIN t2_n58 b ON a.key = b.key UNION DISTINCT -SELECT CAST(key AS DOUBLE) AS key FROM t2) a +SELECT CAST(key AS DOUBLE) AS key FROM t2_n58) a ; -- Test union with join on the right EXPLAIN SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t2 +(SELECT CAST(key AS DOUBLE) AS key FROM t2_n58 UNION DISTINCT -SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key FROM t1_n93 a JOIN t2_n58 b ON a.key = b.key) a ; SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t2 +(SELECT CAST(key AS DOUBLE) AS key FROM t2_n58 UNION DISTINCT -SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key FROM t1_n93 a JOIN t2_n58 b ON a.key = b.key) a ; -- Test union with join on the left selecting multiple columns EXPLAIN SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS STRING) AS value FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS STRING) AS value FROM t1_n93 a JOIN t2_n58 b ON a.key = b.key UNION DISTINCT -SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a +SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n58) a ; SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1_n93 a JOIN t2_n58 b ON a.key = b.key UNION DISTINCT -SELECT CAST(key AS DOUBLE) AS key, CAST(key 
AS STRING) AS value FROM t2) a +SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n58) a ; -- Test union with join on the right selecting multiple columns EXPLAIN SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 +(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n58 UNION DISTINCT -SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1_n93 a JOIN t2_n58 b ON a.key = b.key) a ; SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 +(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n58 UNION DISTINCT -SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1_n93 a JOIN t2_n58 b ON a.key = b.key) a ; -- union33.q diff --git a/ql/src/test/queries/clientpositive/unionDistinct_2.q b/ql/src/test/queries/clientpositive/unionDistinct_2.q index 374aaed270..f229d89a5b 100644 --- a/ql/src/test/queries/clientpositive/unionDistinct_2.q +++ b/ql/src/test/queries/clientpositive/unionDistinct_2.q @@ -53,9 +53,9 @@ union all select key as key, value from u3 ) tab; -drop view if exists v; +drop view if exists v_n12; -create view v as select distinct * from +create view v_n12 as select distinct * from ( select distinct * from u1 union @@ -64,35 +64,35 @@ union all select key as key, value from u3 ) tab; -describe extended v; +describe extended v_n12; -select * from v; +select * from v_n12; -drop view if exists v; +drop view if exists v_n12; -create view v as select tab.* from +create view v_n12 as select tab.* from ( select distinct * from u1 union select distinct * from u2 ) tab; -describe extended v; +describe extended v_n12; -select * from v; +select * from v_n12; -drop view if exists v; +drop view if exists v_n12; -create view v as select * from +create view v_n12 as select * from ( select distinct u1.* from u1 union all select distinct * from u2 ) tab; -describe extended v; +describe extended v_n12; -select * from v; +select * from v_n12; select distinct * from ( diff --git a/ql/src/test/queries/clientpositive/unionDistinct_3.q b/ql/src/test/queries/clientpositive/unionDistinct_3.q index fad4f4fb69..2582828677 100644 --- a/ql/src/test/queries/clientpositive/unionDistinct_3.q +++ b/ql/src/test/queries/clientpositive/unionDistinct_3.q @@ -21,24 +21,24 @@ select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION -- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink -drop table if exists tmptable; +drop table if exists tmptable_n0; -create table tmptable(key string, value string); +create table tmptable_n0(key string, value string); explain -insert overwrite table tmptable +insert overwrite table tmptable_n0 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT select s2.key as key, s2.value as value from src1 s2) unionsrc; -insert overwrite table tmptable +insert overwrite table tmptable_n0 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION DISTINCT select s2.key as key, s2.value as value from src1 s2) unionsrc; -select * from tmptable x sort by x.key, x.value; +select * from tmptable_n0 x sort 
by x.key, x.value; -drop table if exists tmptable; +drop table if exists tmptable_n0; -- union8.q diff --git a/ql/src/test/queries/clientpositive/union_fast_stats.q b/ql/src/test/queries/clientpositive/union_fast_stats.q index d69bef3ac0..221fbc1382 100644 --- a/ql/src/test/queries/clientpositive/union_fast_stats.q +++ b/ql/src/test/queries/clientpositive/union_fast_stats.q @@ -4,66 +4,66 @@ set hive.merge.mapfiles=false; set hive.merge.mapredfiles=false; set hive.merge.tezfiles=false; -drop table small_alltypesorc1a; -drop table small_alltypesorc2a; -drop table small_alltypesorc3a; -drop table small_alltypesorc4a; -drop table small_alltypesorc_a; - -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; - -create table small_alltypesorc_a stored as orc as select * from -(select * from (select * from small_alltypesorc1a) sq1 +drop table small_alltypesorc1a_n2; +drop table small_alltypesorc2a_n2; +drop table small_alltypesorc3a_n2; +drop table small_alltypesorc4a_n2; +drop table small_alltypesorc_a_n2; + +create table small_alltypesorc1a_n2 as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc2a_n2 as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc3a_n2 as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc4a_n2 as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; + +create table small_alltypesorc_a_n2 stored as orc as select * from +(select * from (select * from small_alltypesorc1a_n2) sq1 union all - select * from (select * from small_alltypesorc2a) sq2 + select * from (select * from small_alltypesorc2a_n2) sq2 union all - select * from (select * from small_alltypesorc3a) sq3 + select * from (select * from small_alltypesorc3a_n2) sq3 union all - select * from (select * from small_alltypesorc4a) sq4) q; + select * from (select * from small_alltypesorc4a_n2) sq4) q; -desc formatted small_alltypesorc_a; +desc formatted small_alltypesorc_a_n2; -ANALYZE TABLE 
small_alltypesorc_a COMPUTE STATISTICS; +ANALYZE TABLE small_alltypesorc_a_n2 COMPUTE STATISTICS; -desc formatted small_alltypesorc_a; +desc formatted small_alltypesorc_a_n2; -insert into table small_alltypesorc_a select * from small_alltypesorc1a; +insert into table small_alltypesorc_a_n2 select * from small_alltypesorc1a_n2; -desc formatted small_alltypesorc_a; +desc formatted small_alltypesorc_a_n2; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; set hive.merge.tezfiles=true; -drop table small_alltypesorc1a; -drop table small_alltypesorc2a; -drop table small_alltypesorc3a; -drop table small_alltypesorc4a; -drop table small_alltypesorc_a; +drop table small_alltypesorc1a_n2; +drop table small_alltypesorc2a_n2; +drop table small_alltypesorc3a_n2; +drop table small_alltypesorc4a_n2; +drop table small_alltypesorc_a_n2; -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc1a_n2 as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc2a_n2 as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc3a_n2 as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc4a_n2 as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc_a stored as orc as select * from -(select * from (select * from small_alltypesorc1a) sq1 +create table small_alltypesorc_a_n2 stored as orc as select * from +(select * from (select * from small_alltypesorc1a_n2) sq1 union all - select * from (select * from small_alltypesorc2a) sq2 + select * from (select * from small_alltypesorc2a_n2) sq2 union all - select * from (select * from small_alltypesorc3a) sq3 + select * from (select * from small_alltypesorc3a_n2) sq3 union all - select * from (select * from small_alltypesorc4a) sq4) q; + select * from (select * from small_alltypesorc4a_n2) sq4) q; -desc formatted small_alltypesorc_a; +desc formatted small_alltypesorc_a_n2; -ANALYZE TABLE 
small_alltypesorc_a COMPUTE STATISTICS; +ANALYZE TABLE small_alltypesorc_a_n2 COMPUTE STATISTICS; -desc formatted small_alltypesorc_a; +desc formatted small_alltypesorc_a_n2; -insert into table small_alltypesorc_a select * from small_alltypesorc1a; +insert into table small_alltypesorc_a_n2 select * from small_alltypesorc1a_n2; -desc formatted small_alltypesorc_a; +desc formatted small_alltypesorc_a_n2; diff --git a/ql/src/test/queries/clientpositive/union_paren.q b/ql/src/test/queries/clientpositive/union_paren.q index f655a87d21..247cf234b2 100644 --- a/ql/src/test/queries/clientpositive/union_paren.q +++ b/ql/src/test/queries/clientpositive/union_paren.q @@ -3,21 +3,21 @@ set hive.mapred.mode=nonstrict; explain select * from src union all select * from src; -create table t1(c int); +create table t1_n3(c int); -insert into t1 values (1),(1),(2); +insert into t1_n3 values (1),(1),(2); -create table t2(c int); +create table t2_n2(c int); -insert into t2 values (2),(1),(2); +insert into t2_n2 values (2),(1),(2); -create table t3(c int); +create table t3_n1(c int); -insert into t3 values (2),(3),(2); +insert into t3_n1 values (2),(3),(2); -(select * from t1) union all select * from t2 union select * from t3 order by c; +(select * from t1_n3) union all select * from t2_n2 union select * from t3_n1 order by c; -(select * from t1) union all (select * from t2 union select * from t3) order by c; +(select * from t1_n3) union all (select * from t2_n2 union select * from t3_n1) order by c; (select * from src order by key limit 1); diff --git a/ql/src/test/queries/clientpositive/union_pos_alias.q b/ql/src/test/queries/clientpositive/union_pos_alias.q index e924d5d4e4..fd5c19f060 100644 --- a/ql/src/test/queries/clientpositive/union_pos_alias.q +++ b/ql/src/test/queries/clientpositive/union_pos_alias.q @@ -31,19 +31,19 @@ order by 2, 1 desc; drop table src_10; -drop view v; -create view v as select key as k from src intersect all select key as k1 from src; -desc formatted v; +drop view v_n8; +create view v_n8 as select key as k from src intersect all select key as k1 from src; +desc formatted v_n8; set hive.mapred.mode=nonstrict; set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -create table masking_test as select cast(key as int) as key, value from src; +create table masking_test_n9 as select cast(key as int) as key, value from src; explain -select * from masking_test union all select * from masking_test ; -select * from masking_test union all select * from masking_test ; +select * from masking_test_n9 union all select * from masking_test_n9 ; +select * from masking_test_n9 union all select * from masking_test_n9 ; explain -select key as k1, value as v1 from masking_test where key > 0 intersect all select key as k2, value as v2 from masking_test where key > 0; -select key as k1, value as v1 from masking_test where key > 0 intersect all select key as k2, value as v2 from masking_test where key > 0; +select key as k1, value as v1 from masking_test_n9 where key > 0 intersect all select key as k2, value as v2 from masking_test_n9 where key > 0; +select key as k1, value as v1 from masking_test_n9 where key > 0 intersect all select key as k2, value as v2 from masking_test_n9 where key > 0; diff --git a/ql/src/test/queries/clientpositive/union_remove_10.q b/ql/src/test/queries/clientpositive/union_remove_10.q index 7892e155c3..71a08926dd 100644 --- a/ql/src/test/queries/clientpositive/union_remove_10.q +++ 
b/ql/src/test/queries/clientpositive/union_remove_10.q @@ -20,40 +20,40 @@ set mapred.input.dir.recursive=true; -- on -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n9, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as rcfile; +create table inputTbl1_n7(key string, val string) stored as textfile; +create table outputTbl1_n9(key string, `values` bigint) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n7; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n9 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n7 union all select * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n7 group by key UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n7 ) a )b; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n9 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n7 union all select * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n7 group by key UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n7 ) a )b; -desc formatted outputTbl1; +desc formatted outputTbl1_n9; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n9; diff --git a/ql/src/test/queries/clientpositive/union_remove_11.q b/ql/src/test/queries/clientpositive/union_remove_11.q index fd416486da..bfd734dd2a 100644 --- a/ql/src/test/queries/clientpositive/union_remove_11.q +++ b/ql/src/test/queries/clientpositive/union_remove_11.q @@ -20,40 +20,40 @@ set mapred.input.dir.recursive=true; -- on -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n21, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as rcfile; +create table inputTbl1_n14(key string, val string) stored as textfile; +create table outputTbl1_n21(key string, `values` bigint) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n14; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n21 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n14 union all select * FROM ( - SELECT key, 2 `values` from inputTbl1 + SELECT key, 2 `values` from inputTbl1_n14 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n14 ) a )b; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n21 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n14 union all select * FROM ( - SELECT 
key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n14 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n14 ) a )b; -desc formatted outputTbl1; +desc formatted outputTbl1_n21; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n21; diff --git a/ql/src/test/queries/clientpositive/union_remove_12.q b/ql/src/test/queries/clientpositive/union_remove_12.q index 6bfb991de2..cdddc7173e 100644 --- a/ql/src/test/queries/clientpositive/union_remove_12.q +++ b/ql/src/test/queries/clientpositive/union_remove_12.q @@ -16,38 +16,38 @@ set mapred.input.dir.recursive=true; -- The union optimization is applied, and the union is removed. -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n29, it might be easier -- to run the test only on hadoop 23 -- The final file format is different from the input and intermediate file format. -- It does not matter, whether the output is merged or not. In this case, merging is turned -- on -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as rcfile; +create table inputTbl1_n21(key string, val string) stored as textfile; +create table outputTbl1_n29(key string, `values` bigint) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n21; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n29 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n21 union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n21 a join inputTbl1_n21 b on a.key=b.key )c; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n29 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n21 union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n21 a join inputTbl1_n21 b on a.key=b.key )c; -desc formatted outputTbl1; +desc formatted outputTbl1_n29; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n29; diff --git a/ql/src/test/queries/clientpositive/union_remove_13.q b/ql/src/test/queries/clientpositive/union_remove_13.q index 4d59b6bc85..dcf14e0c66 100644 --- a/ql/src/test/queries/clientpositive/union_remove_13.q +++ b/ql/src/test/queries/clientpositive/union_remove_13.q @@ -16,38 +16,38 @@ set mapred.input.dir.recursive=true; -- The union selectstar optimization should be performed, and the union should be removed. -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n3, it might be easier -- to run the test only on hadoop 23 -- The final file format is different from the input and intermediate file format. -- It does not matter, whether the output is merged or not. 
In this case, merging is turned -- on -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as rcfile; +create table inputTbl1_n2(key string, val string) stored as textfile; +create table outputTbl1_n3(key string, `values` bigint) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n2; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n3 SELECT * FROM ( -select key, count(1) as `values` from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1_n2 group by key union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n2 a join inputTbl1_n2 b on a.key=b.key )c; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n3 SELECT * FROM ( -select key, count(1) as `values` from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1_n2 group by key union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n2 a join inputTbl1_n2 b on a.key=b.key )c; -desc formatted outputTbl1; +desc formatted outputTbl1_n3; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n3; diff --git a/ql/src/test/queries/clientpositive/union_remove_14.q b/ql/src/test/queries/clientpositive/union_remove_14.q index 3ffb985ac5..04e2998957 100644 --- a/ql/src/test/queries/clientpositive/union_remove_14.q +++ b/ql/src/test/queries/clientpositive/union_remove_14.q @@ -17,38 +17,38 @@ set mapred.input.dir.recursive=true; -- The union selectstar optimization should be performed, and the union should be removed. -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n16, it might be easier -- to run the test only on hadoop 23 -- The final file format is different from the input and intermediate file format. -- It does not matter, whether the output is merged or not. 
In this case, merging is turned -- on -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as rcfile; +create table inputTbl1_n11(key string, val string) stored as textfile; +create table outputTbl1_n16(key string, `values` bigint) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n11; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n16 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n11 union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n11 a join inputTbl1_n11 b on a.key=b.key )c; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n16 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n11 union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n11 a join inputTbl1_n11 b on a.key=b.key )c; -desc formatted outputTbl1; +desc formatted outputTbl1_n16; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n16; diff --git a/ql/src/test/queries/clientpositive/union_remove_15.q b/ql/src/test/queries/clientpositive/union_remove_15.q index 43c78347a6..096d330c1d 100644 --- a/ql/src/test/queries/clientpositive/union_remove_15.q +++ b/ql/src/test/queries/clientpositive/union_remove_15.q @@ -21,35 +21,35 @@ set mapred.input.dir.recursive=true; -- This tests demonstrates that this optimization works in the presence of dynamic partitions. 
-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n25, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile; +create table inputTbl1_n18(key string, val string) stored as textfile; +create table outputTbl1_n25(key string, `values` bigint) partitioned by (ds string) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n18; explain -insert overwrite table outputTbl1 partition (ds) +insert overwrite table outputTbl1_n25 partition (ds) SELECT * FROM ( - SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n18 group by key UNION ALL - SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n18 group by key ) a; -insert overwrite table outputTbl1 partition (ds) +insert overwrite table outputTbl1_n25 partition (ds) SELECT * FROM ( - SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n18 group by key UNION ALL - SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n18 group by key ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n25; -show partitions outputTbl1; +show partitions outputTbl1_n25; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 where ds = '1'; -select * from outputTbl1 where ds = '2'; +select * from outputTbl1_n25 where ds = '1'; +select * from outputTbl1_n25 where ds = '2'; diff --git a/ql/src/test/queries/clientpositive/union_remove_16.q b/ql/src/test/queries/clientpositive/union_remove_16.q index eca8aeb49a..053528ed9b 100644 --- a/ql/src/test/queries/clientpositive/union_remove_16.q +++ b/ql/src/test/queries/clientpositive/union_remove_16.q @@ -21,34 +21,34 @@ set hive.exec.dynamic.partition=true; -- on -- This test demonstrates that this optimization works in the presence of dynamic partitions. 
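-- A minimal sketch of the dynamic-partition insert pattern these tests
-- exercise, assuming hypothetical tables dp_src and dp_dest: the trailing
-- select column binds to the partition column ds at runtime, creating one
-- partition per distinct ds value.
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;
create table dp_src (key string, ds string);
create table dp_dest (key string) partitioned by (ds string);
insert overwrite table dp_dest partition (ds)
select key, ds from dp_src;
show partitions dp_dest;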
-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n32, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile ; +create table inputTbl1_n23(key string, val string) stored as textfile; +create table outputTbl1_n32(key string, `values` bigint) partitioned by (ds string) stored as rcfile ; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n23; explain -insert overwrite table outputTbl1 partition (ds) +insert overwrite table outputTbl1_n32 partition (ds) SELECT * FROM ( - SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n23 group by key UNION ALL - SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n23 group by key ) a; -insert overwrite table outputTbl1 partition (ds) +insert overwrite table outputTbl1_n32 partition (ds) SELECT * FROM ( - SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n23 group by key UNION ALL - SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n23 group by key ) a; -desc formatted outputTbl1; -show partitions outputTbl1; +desc formatted outputTbl1_n32; +show partitions outputTbl1_n32; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 where ds = '1'; -select * from outputTbl1 where ds = '2'; +select * from outputTbl1_n32 where ds = '1'; +select * from outputTbl1_n32 where ds = '2'; diff --git a/ql/src/test/queries/clientpositive/union_remove_17.q b/ql/src/test/queries/clientpositive/union_remove_17.q index 59a3a9c409..eb9a0933ad 100644 --- a/ql/src/test/queries/clientpositive/union_remove_17.q +++ b/ql/src/test/queries/clientpositive/union_remove_17.q @@ -18,34 +18,34 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. 
In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n4, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile; +create table inputTbl1_n3(key string, val string) stored as textfile; +create table outputTbl1_n4(key string, `values` bigint) partitioned by (ds string) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n3; explain -insert overwrite table outputTbl1 partition (ds) +insert overwrite table outputTbl1_n4 partition (ds) SELECT * FROM ( - SELECT key, 1 as `values`, '1' as ds from inputTbl1 + SELECT key, 1 as `values`, '1' as ds from inputTbl1_n3 UNION ALL - SELECT key, 2 as `values`, '2' as ds from inputTbl1 + SELECT key, 2 as `values`, '2' as ds from inputTbl1_n3 ) a; -insert overwrite table outputTbl1 partition (ds) +insert overwrite table outputTbl1_n4 partition (ds) SELECT * FROM ( - SELECT key, 1 as `values`, '1' as ds from inputTbl1 + SELECT key, 1 as `values`, '1' as ds from inputTbl1_n3 UNION ALL - SELECT key, 2 as `values`, '2' as ds from inputTbl1 + SELECT key, 2 as `values`, '2' as ds from inputTbl1_n3 ) a; -desc formatted outputTbl1; -show partitions outputTbl1; +desc formatted outputTbl1_n4; +show partitions outputTbl1_n4; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 where ds = '1'; -select * from outputTbl1 where ds = '2'; +select * from outputTbl1_n4 where ds = '1'; +select * from outputTbl1_n4 where ds = '2'; diff --git a/ql/src/test/queries/clientpositive/union_remove_18.q b/ql/src/test/queries/clientpositive/union_remove_18.q index 98ee7d0150..1c5e921823 100644 --- a/ql/src/test/queries/clientpositive/union_remove_18.q +++ b/ql/src/test/queries/clientpositive/union_remove_18.q @@ -20,36 +20,36 @@ set mapred.input.dir.recursive=true; -- This test demonstrates that the optimization works with dynamic partitions irrespective of the -- file format of the output file -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n30, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, ds string) stored as textfile; -create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile; +create table inputTbl1_n22(key string, ds string) stored as textfile; +create table outputTbl1_n30(key string, `values` bigint) partitioned by (ds string) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n22; explain -insert overwrite table outputTbl1 partition (ds) +insert overwrite table outputTbl1_n30 partition (ds) SELECT * FROM ( - SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds UNION ALL - SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds ) a; -insert 
overwrite table outputTbl1 partition (ds) +insert overwrite table outputTbl1_n30 partition (ds) SELECT * FROM ( - SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds UNION ALL - SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n30; -show partitions outputTbl1; +show partitions outputTbl1_n30; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 where ds = '11'; -select * from outputTbl1 where ds = '18'; -select * from outputTbl1 where ds is not null; +select * from outputTbl1_n30 where ds = '11'; +select * from outputTbl1_n30 where ds = '18'; +select * from outputTbl1_n30 where ds is not null; diff --git a/ql/src/test/queries/clientpositive/union_remove_19.q b/ql/src/test/queries/clientpositive/union_remove_19.q index 9e47254cc8..75285ee5db 100644 --- a/ql/src/test/queries/clientpositive/union_remove_19.q +++ b/ql/src/test/queries/clientpositive/union_remove_19.q @@ -15,80 +15,80 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n1, it might be easier -- to run the test only on hadoop 23 -- SORT_QUERY_RESULTS -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as textfile; +create table inputTbl1_n1(key string, val string) stored as textfile; +create table outputTbl1_n1(key string, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n1; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n1 SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n1 SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n1; -select * from outputTbl1; +select * from outputTbl1_n1; -- filter should be fine explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n1 SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key ) a where a.key = 7; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n1 SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as 
`values` from inputTbl1_n1 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key ) a where a.key = 7; -select * from outputTbl1; +select * from outputTbl1_n1; -- filters and sub-queries should be fine explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n1 select key, `values` from ( SELECT a.key + a.key as key, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key ) a ) b where b.key >= 7; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n1 select key, `values` from ( SELECT a.key + a.key as key, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n1 group by key ) a ) b where b.key >= 7; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n1; diff --git a/ql/src/test/queries/clientpositive/union_remove_2.q b/ql/src/test/queries/clientpositive/union_remove_2.q index 2ebc1ad2f8..c0e395ff63 100644 --- a/ql/src/test/queries/clientpositive/union_remove_2.q +++ b/ql/src/test/queries/clientpositive/union_remove_2.q @@ -16,37 +16,37 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n11, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as textfile; +create table inputTbl1_n8(key string, val string) stored as textfile; +create table outputTbl1_n11(key string, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n8; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n11 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n8 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n8 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n8 ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n11 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n8 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n8 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n8 ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n11; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n11; diff --git 
a/ql/src/test/queries/clientpositive/union_remove_20.q b/ql/src/test/queries/clientpositive/union_remove_20.q index 700ee4daf7..5343f58d4d 100644 --- a/ql/src/test/queries/clientpositive/union_remove_20.q +++ b/ql/src/test/queries/clientpositive/union_remove_20.q @@ -15,33 +15,33 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n27, it might be easier -- to run the test only on hadoop 23. The union is removed, the select (which changes the order of -- columns being selected) is pushed above the union. -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(`values` bigint, key string) stored as textfile; +create table inputTbl1_n19(key string, val string) stored as textfile; +create table outputTbl1_n27(`values` bigint, key string) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n19; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n27 SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n27 SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n27; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n27; diff --git a/ql/src/test/queries/clientpositive/union_remove_21.q b/ql/src/test/queries/clientpositive/union_remove_21.q index 1b1472e586..d51de6426f 100644 --- a/ql/src/test/queries/clientpositive/union_remove_21.q +++ b/ql/src/test/queries/clientpositive/union_remove_21.q @@ -15,33 +15,33 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n17, it might be easier -- to run the test only on hadoop 23. The union is removed, the select (which changes the order of -- columns being selected) is pushed above the union. 
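As a rough sketch of the rewrite these union_remove tests exercise (illustrative only; branch_src and dst are hypothetical names, not tables from this patch), with hive.optimize.union.remove=true a plan such as

    insert overwrite table dst
    SELECT a.`values`, a.key
    FROM (
      SELECT key, count(1) as `values` from branch_src group by key
      UNION ALL
      SELECT key, count(1) as `values` from branch_src group by key
    ) a;

keeps no union operator in the plan: the column-reordering select is evaluated inside each branch, and each branch writes its files into its own sub-directory under dst's location, which is why these tests set mapred.input.dir.recursive=true and read the result back through HiveInputFormat.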
-create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string) stored as textfile; +create table inputTbl1_n12(key string, val string) stored as textfile; +create table outputTbl1_n17(key string) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n12; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n17 SELECT a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n17 SELECT a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n17; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n17; diff --git a/ql/src/test/queries/clientpositive/union_remove_22.q b/ql/src/test/queries/clientpositive/union_remove_22.q index d4d3cbce5f..134e650abb 100644 --- a/ql/src/test/queries/clientpositive/union_remove_22.q +++ b/ql/src/test/queries/clientpositive/union_remove_22.q @@ -15,52 +15,52 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n7, it might be easier -- to run the test only on hadoop 23. The union is removed, the select (which selects columns from -- both the sub-queries of the union) is pushed above the union.
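Schematically, and assuming a hypothetical input table t, union_remove_22 covers a select that repeats a union output column, so after union removal each branch has to produce the duplicated column itself:

    -- before the rewrite: SELECT a.key, a.`values`, a.`values` FROM (b1 UNION ALL b2) a
    -- after the rewrite, conceptually one projection per branch:
    SELECT key, cnt, cnt FROM (SELECT key, count(1) as cnt from t group by key) b1;
    SELECT key, cnt, cnt FROM (SELECT key, count(1) as cnt from t group by key) b2;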
-create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint, values2 bigint) stored as textfile; +create table inputTbl1_n5(key string, val string) stored as textfile; +create table outputTbl1_n7(key string, `values` bigint, values2 bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n5; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n7 SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n7 SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n7; -select * from outputTbl1; +select * from outputTbl1_n7; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n7 SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n7 SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n7; diff --git a/ql/src/test/queries/clientpositive/union_remove_23.q b/ql/src/test/queries/clientpositive/union_remove_23.q index 98ae503669..8ac2093e85 100644 --- a/ql/src/test/queries/clientpositive/union_remove_23.q +++ b/ql/src/test/queries/clientpositive/union_remove_23.q @@ -16,34 +16,34 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. 
In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n34, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as textfile; +create table inputTbl1_n25(key string, val string) stored as textfile; +create table outputTbl1_n34(key string, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n25; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n34 SELECT * FROM ( SELECT key, count(1) as `values` from - (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + (SELECT a.key, b.val from inputTbl1_n25 a join inputTbl1_n25 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n25 group by key ) subq2; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n34 SELECT * FROM ( SELECT key, count(1) as `values` from - (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + (SELECT a.key, b.val from inputTbl1_n25 a join inputTbl1_n25 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n25 group by key ) subq2; -desc formatted outputTbl1; +desc formatted outputTbl1_n34; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n34; diff --git a/ql/src/test/queries/clientpositive/union_remove_24.q b/ql/src/test/queries/clientpositive/union_remove_24.q index 7ed80d1b24..ea3c12bb9c 100644 --- a/ql/src/test/queries/clientpositive/union_remove_24.q +++ b/ql/src/test/queries/clientpositive/union_remove_24.q @@ -14,32 +14,32 @@ set mapred.input.dir.recursive=true; -- again to process the union. The union can be removed completely. -- One sub-query has a double and the other sub-query has a bigint. 
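A sketch of the type handling under test, again with a hypothetical input table t: the union's result type for key is the common type of its branches, so the bigint branch is implicitly widened, and after union removal each branch still has to write double values into the output column:

    SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM t group by key  -- double branch
    UNION ALL
    SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM t group by key; -- bigint branch, widened to double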
-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n28, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key double, `values` bigint) stored as textfile; +create table inputTbl1_n20(key string, val string) stored as textfile; +create table outputTbl1_n28(key double, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n20; EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n28 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1_n20 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1_n20 group by key ) a; -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n28 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1_n20 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1_n20 group by key ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n28; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n28; diff --git a/ql/src/test/queries/clientpositive/union_remove_25.q b/ql/src/test/queries/clientpositive/union_remove_25.q index 0460eb38e8..e6d1b0d2ff 100644 --- a/ql/src/test/queries/clientpositive/union_remove_25.q +++ b/ql/src/test/queries/clientpositive/union_remove_25.q @@ -17,40 +17,40 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. 
In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n19, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile; -create table outputTbl2(key string, `values` bigint) partitioned by (ds string) stored as textfile; -create table outputTbl3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile; +create table inputTbl1_n13(key string, val string) stored as textfile; +create table outputTbl1_n19(key string, `values` bigint) partitioned by (ds string) stored as textfile; +create table outputTbl2_n6(key string, `values` bigint) partitioned by (ds string) stored as textfile; +create table outputTbl3_n3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n13; explain -insert overwrite table outputTbl1 partition(ds='2004') +insert overwrite table outputTbl1_n19 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key ) a; -insert overwrite table outputTbl1 partition(ds='2004') +insert overwrite table outputTbl1_n19 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key ) a; -desc formatted outputTbl1 partition(ds='2004'); +desc formatted outputTbl1_n19 partition(ds='2004'); set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n19; explain -insert overwrite table outputTbl2 partition(ds) +insert overwrite table outputTbl2_n6 partition(ds) SELECT * FROM ( select * from (SELECT key, value, ds from srcpart where ds='2008-04-08' limit 500)a @@ -58,7 +58,7 @@ FROM ( select * from (SELECT key, value, ds from srcpart where ds='2008-04-08' limit 500)b ) a; -insert overwrite table outputTbl2 partition(ds) +insert overwrite table outputTbl2_n6 partition(ds) SELECT * FROM ( select * from (SELECT key, value, ds from srcpart where ds='2008-04-08' limit 500)a @@ -66,10 +66,10 @@ FROM ( select * from (SELECT key, value, ds from srcpart where ds='2008-04-08' limit 500)b ) a; -show partitions outputTbl2; -desc formatted outputTbl2 partition(ds='2008-04-08'); +show partitions outputTbl2_n6; +desc formatted outputTbl2_n6 partition(ds='2008-04-08'); -explain insert overwrite table outputTbl3 partition(ds, hr) +explain insert overwrite table outputTbl3_n3 partition(ds, hr) SELECT * FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)a @@ -77,7 +77,7 @@ FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)b ) a; -insert overwrite table outputTbl3 partition(ds, hr) +insert overwrite table outputTbl3_n3 partition(ds, 
hr) SELECT * FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)a @@ -85,5 +85,5 @@ FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)b ) a; -show partitions outputTbl3; -desc formatted outputTbl3 partition(ds='2008-04-08', hr='11'); +show partitions outputTbl3_n3; +desc formatted outputTbl3_n3 partition(ds='2008-04-08', hr='11'); diff --git a/ql/src/test/queries/clientpositive/union_remove_26.q b/ql/src/test/queries/clientpositive/union_remove_26.q index 92ccbc3a52..6f8891845b 100644 --- a/ql/src/test/queries/clientpositive/union_remove_26.q +++ b/ql/src/test/queries/clientpositive/union_remove_26.q @@ -11,11 +11,11 @@ load data local inpath '../../data/files/T1.txt' into table inputSrcTbl1; load data local inpath '../../data/files/T2.txt' into table inputSrcTbl2; load data local inpath '../../data/files/T3.txt' into table inputSrcTbl3; -create table inputTbl1(key string, val int) stored as textfile; +create table inputTbl1_n6(key string, val int) stored as textfile; create table inputTbl2(key string, val int) stored as textfile; create table inputTbl3(key string, val int) stored as textfile; -insert into inputTbl1 select * from inputSrcTbl1; +insert into inputTbl1_n6 select * from inputSrcTbl1; insert into inputTbl2 select * from inputSrcTbl2; insert into inputTbl3 select * from inputSrcTbl3; @@ -24,10 +24,10 @@ set hive.optimize.union.remove=true; set mapred.input.dir.recursive=true; --- union remove optimization effects, stats optimization does not though it is on since inputTbl2 column stats is not available -analyze table inputTbl1 compute statistics for columns; +analyze table inputTbl1_n6 compute statistics for columns; analyze table inputTbl3 compute statistics for columns; explain - SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 + SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 UNION ALL @@ -35,7 +35,7 @@ explain select count(*) from ( - SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 + SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 UNION ALL @@ -44,7 +44,7 @@ select count(*) from ( --- union remove optimization and stats optimization are effective after inputTbl2 column stats is calculated analyze table inputTbl2 compute statistics for columns; explain - SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 + SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 UNION ALL @@ -52,7 +52,7 @@ explain select count(*) from ( - SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 + SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 UNION ALL @@ -60,14 +60,14 @@ select count(*) from ( --- union remove optimization effects but stats optimization does not (with group by) though it is on explain - SELECT key, count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 group by key + SELECT key, count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 group by key UNION ALL SELECT key, count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 group by key UNION 
ALL SELECT key, count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl3 group by key; select count(*) from ( - SELECT key, count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 group by key + SELECT key, count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 group by key UNION ALL SELECT key, count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 group by key UNION ALL @@ -79,14 +79,14 @@ set hive.optimize.union.remove=true; set mapred.input.dir.recursive=true; explain - SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 + SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl3; select count(*) from ( - SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 + SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 UNION ALL @@ -97,7 +97,7 @@ set hive.compute.query.using.stats=false; set hive.optimize.union.remove=false; explain - SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 + SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 UNION ALL @@ -105,7 +105,7 @@ explain select count(*) from ( - SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1 + SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl1_n6 UNION ALL SELECT count(1) as rowcnt, min(val) as ms, max(val) as mx from inputTbl2 UNION ALL diff --git a/ql/src/test/queries/clientpositive/union_remove_3.q b/ql/src/test/queries/clientpositive/union_remove_3.q index b0f63fccea..f2c8541608 100644 --- a/ql/src/test/queries/clientpositive/union_remove_3.q +++ b/ql/src/test/queries/clientpositive/union_remove_3.q @@ -16,37 +16,37 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. 
In this case, merging is turned -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n23, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as textfile; +create table inputTbl1_n16(key string, val string) stored as textfile; +create table outputTbl1_n23(key string, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n16; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n23 SELECT * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n16 ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n23 SELECT * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n16 ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n23; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n23; diff --git a/ql/src/test/queries/clientpositive/union_remove_4.q b/ql/src/test/queries/clientpositive/union_remove_4.q index 9ee628248f..0b2b8185b2 100644 --- a/ql/src/test/queries/clientpositive/union_remove_4.q +++ b/ql/src/test/queries/clientpositive/union_remove_4.q @@ -16,32 +16,32 @@ set hive.merge.smallfiles.avgsize=1; -- It does not matter, whether the output is merged or not. 
In this case, merging is turned -- on -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n33, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as textfile; +create table inputTbl1_n24(key string, val string) stored as textfile; +create table outputTbl1_n33(key string, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n24; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n33 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n33 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n33; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n33; diff --git a/ql/src/test/queries/clientpositive/union_remove_5.q b/ql/src/test/queries/clientpositive/union_remove_5.q index 5d3c48224c..7c787141ef 100644 --- a/ql/src/test/queries/clientpositive/union_remove_5.q +++ b/ql/src/test/queries/clientpositive/union_remove_5.q @@ -18,36 +18,36 @@ set mapred.input.dir.recursive=true; -- on -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n6, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as textfile; +create table inputTbl1_n4(key string, val string) stored as textfile; +create table outputTbl1_n6(key string, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n4; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n6 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n4 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n4 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n4 ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n6 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n4 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n4 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from 
inputTbl1_n4 ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n6; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n6; diff --git a/ql/src/test/queries/clientpositive/union_remove_6.q b/ql/src/test/queries/clientpositive/union_remove_6.q index 23eb760cc5..9dd5fb302c 100644 --- a/ql/src/test/queries/clientpositive/union_remove_6.q +++ b/ql/src/test/queries/clientpositive/union_remove_6.q @@ -15,29 +15,29 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. In this case, -- merging is turned off -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as textfile; -create table outputTbl2(key string, `values` bigint) stored as textfile; +create table inputTbl1_n10(key string, val string) stored as textfile; +create table outputTbl1_n14(key string, `values` bigint) stored as textfile; +create table outputTbl2_n4(key string, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n10; explain FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key ) a -insert overwrite table outputTbl1 select * -insert overwrite table outputTbl2 select *; +insert overwrite table outputTbl1_n14 select * +insert overwrite table outputTbl2_n4 select *; FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key ) a -insert overwrite table outputTbl1 select * -insert overwrite table outputTbl2 select *; +insert overwrite table outputTbl1_n14 select * +insert overwrite table outputTbl2_n4 select *; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; -select * from outputTbl2; +select * from outputTbl1_n14; +select * from outputTbl2_n4; diff --git a/ql/src/test/queries/clientpositive/union_remove_6_subq.q b/ql/src/test/queries/clientpositive/union_remove_6_subq.q index 2dd47167bf..3ae5d95728 100644 --- a/ql/src/test/queries/clientpositive/union_remove_6_subq.q +++ b/ql/src/test/queries/clientpositive/union_remove_6_subq.q @@ -16,35 +16,35 @@ set mapred.input.dir.recursive=true; -- It does not matter, whether the output is merged or not. 
In this case, -- merging is turned off -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as textfile; +create table inputTbl1_n0(key string, val string) stored as textfile; +create table outputTbl1_n0(key string, `values` bigint) stored as textfile; create table outputTbl2(key string, `values` bigint) stored as textfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n0; explain FROM ( select * from( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key )subq ) a -insert overwrite table outputTbl1 select * +insert overwrite table outputTbl1_n0 select * insert overwrite table outputTbl2 select *; FROM ( select * from( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key )subq ) a -insert overwrite table outputTbl1 select * +insert overwrite table outputTbl1_n0 select * insert overwrite table outputTbl2 select *; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n0; select * from outputTbl2; -- The following queries guarantee the correctness. diff --git a/ql/src/test/queries/clientpositive/union_remove_7.q b/ql/src/test/queries/clientpositive/union_remove_7.q index 5e0d14bac8..43a5fe1850 100644 --- a/ql/src/test/queries/clientpositive/union_remove_7.q +++ b/ql/src/test/queries/clientpositive/union_remove_7.q @@ -17,32 +17,32 @@ set mapred.input.dir.recursive=true; -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n24, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as rcfile; +create table inputTbl1_n17(key string, val string) stored as textfile; +create table outputTbl1_n24(key string, `values` bigint) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n17; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n24 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n24 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n24; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from 
outputTbl1; +select * from outputTbl1_n24; diff --git a/ql/src/test/queries/clientpositive/union_remove_8.q b/ql/src/test/queries/clientpositive/union_remove_8.q index 1e049badc1..05a5671f76 100644 --- a/ql/src/test/queries/clientpositive/union_remove_8.q +++ b/ql/src/test/queries/clientpositive/union_remove_8.q @@ -18,36 +18,36 @@ set mapred.input.dir.recursive=true; -- off -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n12, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as rcfile; +create table inputTbl1_n9(key string, val string) stored as textfile; +create table outputTbl1_n12(key string, `values` bigint) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n9; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n12 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n9 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n9 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n9 ) a; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n12 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n9 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n9 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n9 ) a; -desc formatted outputTbl1; +desc formatted outputTbl1_n12; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n12; diff --git a/ql/src/test/queries/clientpositive/union_remove_9.q b/ql/src/test/queries/clientpositive/union_remove_9.q index b819560891..475e52c49f 100644 --- a/ql/src/test/queries/clientpositive/union_remove_9.q +++ b/ql/src/test/queries/clientpositive/union_remove_9.q @@ -18,40 +18,40 @@ set mapred.input.dir.recursive=true; -- on -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- Since this test creates sub-directories for the output table outputTbl1_n22, it might be easier -- to run the test only on hadoop 23 -create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, `values` bigint) stored as rcfile; +create table inputTbl1_n15(key string, val string) stored as textfile; +create table outputTbl1_n22(key string, `values` bigint) stored as rcfile; -load data local inpath '../../data/files/T1.txt' into table inputTbl1; +load data local inpath '../../data/files/T1.txt' into table inputTbl1_n15; explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n22 SELECT * FROM ( -select key, count(1) as `values` from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1_n15 group by key union all select * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n15 UNION ALL - SELECT key, 2 as `values` from 
inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n15 ) a )b; -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n22 SELECT * FROM ( -select key, count(1) as `values` from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1_n15 group by key union all select * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n15 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n15 ) a )b; -desc formatted outputTbl1; +desc formatted outputTbl1_n22; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1; +select * from outputTbl1_n22; diff --git a/ql/src/test/queries/clientpositive/union_stats.q b/ql/src/test/queries/clientpositive/union_stats.q index 0e91c23fea..80856edcee 100644 --- a/ql/src/test/queries/clientpositive/union_stats.q +++ b/ql/src/test/queries/clientpositive/union_stats.q @@ -1,30 +1,30 @@ --! qt:dataset:src -explain extended create table t as select * from src union all select * from src; +explain extended create table t_n4 as select * from src union all select * from src; -create table t as select * from src union all select * from src; +create table t_n4 as select * from src union all select * from src; -select count(1) from t; +select count(1) from t_n4; -desc formatted t; +desc formatted t_n4; -create table tt as select * from t union all select * from src; +create table tt_n4 as select * from t_n4 union all select * from src; -desc formatted tt; +desc formatted tt_n4; -drop table tt; +drop table tt_n4; -create table tt as select * from src union all select * from t; +create table tt_n4 as select * from src union all select * from t_n4; -desc formatted tt; +desc formatted tt_n4; -create table t1 like src; -create table t2 like src; +create table t1_n26 like src; +create table t2_n17 like src; from (select * from src union all select * from src)s -insert overwrite table t1 select * -insert overwrite table t2 select *; +insert overwrite table t1_n26 select * +insert overwrite table t2_n17 select *; -desc formatted t1; -desc formatted t2; +desc formatted t1_n26; +desc formatted t2_n17; -select count(1) from t1; +select count(1) from t1_n26; diff --git a/ql/src/test/queries/clientpositive/union_view.q b/ql/src/test/queries/clientpositive/union_view.q index 11682da541..186cb026cf 100644 --- a/ql/src/test/queries/clientpositive/union_view.q +++ b/ql/src/test/queries/clientpositive/union_view.q @@ -3,11 +3,11 @@ set hive.mapred.mode=nonstrict; set hive.stats.dbclass=fs; set hive.explain.user=false; -CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY (ds string); +CREATE TABLE src_union_1_n0 (key int, value string) PARTITIONED BY (ds string); -CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY (ds string, part_1 string); +CREATE TABLE src_union_2_n0 (key int, value string) PARTITIONED BY (ds string, part_1 string); -CREATE TABLE src_union_3(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string); +CREATE TABLE src_union_3_n0(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string); SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; @@ -19,62 +19,62 @@ SET hive.semantic.analyzer.hook=; SET hive.merge.mapfiles=false; SET hive.merge.mapredfiles=false; -INSERT OVERWRITE TABLE src_union_1 PARTITION (ds='1')
SELECT * FROM src; +INSERT OVERWRITE TABLE src_union_1_n0 PARTITION (ds='1') SELECT * FROM src; -INSERT OVERWRITE TABLE src_union_2 PARTITION (ds='2', part_1='1') SELECT * FROM src; -INSERT OVERWRITE TABLE src_union_2 PARTITION (ds='2', part_1='2') SELECT * FROM src; +INSERT OVERWRITE TABLE src_union_2_n0 PARTITION (ds='2', part_1='1') SELECT * FROM src; +INSERT OVERWRITE TABLE src_union_2_n0 PARTITION (ds='2', part_1='2') SELECT * FROM src; -INSERT OVERWRITE TABLE src_union_3 PARTITION (ds='3', part_1='1', part_2='2:3+4') SELECT * FROM src; -INSERT OVERWRITE TABLE src_union_3 PARTITION (ds='3', part_1='2', part_2='2:3+4') SELECT * FROM src; +INSERT OVERWRITE TABLE src_union_3_n0 PARTITION (ds='3', part_1='1', part_2='2:3+4') SELECT * FROM src; +INSERT OVERWRITE TABLE src_union_3_n0 PARTITION (ds='3', part_1='2', part_2='2:3+4') SELECT * FROM src; -EXPLAIN SELECT key, value, ds FROM src_union_1 WHERE key=86 and ds='1'; -EXPLAIN SELECT key, value, ds FROM src_union_2 WHERE key=86 and ds='2'; -EXPLAIN SELECT key, value, ds FROM src_union_3 WHERE key=86 and ds='3'; +EXPLAIN SELECT key, value, ds FROM src_union_1_n0 WHERE key=86 and ds='1'; +EXPLAIN SELECT key, value, ds FROM src_union_2_n0 WHERE key=86 and ds='2'; +EXPLAIN SELECT key, value, ds FROM src_union_3_n0 WHERE key=86 and ds='3'; -SELECT key, value, ds FROM src_union_1 WHERE key=86 AND ds ='1'; -SELECT key, value, ds FROM src_union_2 WHERE key=86 AND ds ='2'; -SELECT key, value, ds FROM src_union_3 WHERE key=86 AND ds ='3'; +SELECT key, value, ds FROM src_union_1_n0 WHERE key=86 AND ds ='1'; +SELECT key, value, ds FROM src_union_2_n0 WHERE key=86 AND ds ='2'; +SELECT key, value, ds FROM src_union_3_n0 WHERE key=86 AND ds ='3'; -EXPLAIN SELECT count(1) from src_union_1 WHERE ds ='1'; -EXPLAIN SELECT count(1) from src_union_2 WHERE ds ='2'; -EXPLAIN SELECT count(1) from src_union_3 WHERE ds ='3'; +EXPLAIN SELECT count(1) from src_union_1_n0 WHERE ds ='1'; +EXPLAIN SELECT count(1) from src_union_2_n0 WHERE ds ='2'; +EXPLAIN SELECT count(1) from src_union_3_n0 WHERE ds ='3'; -SELECT count(1) from src_union_1 WHERE ds ='1'; -SELECT count(1) from src_union_2 WHERE ds ='2'; -SELECT count(1) from src_union_3 WHERE ds ='3'; +SELECT count(1) from src_union_1_n0 WHERE ds ='1'; +SELECT count(1) from src_union_2_n0 WHERE ds ='2'; +SELECT count(1) from src_union_3_n0 WHERE ds ='3'; -CREATE VIEW src_union_view PARTITIONED ON (ds) as +CREATE VIEW src_union_view_n0 PARTITIONED ON (ds) as SELECT key, value, ds FROM ( -SELECT key, value, ds FROM src_union_1 +SELECT key, value, ds FROM src_union_1_n0 UNION ALL -SELECT key, value, ds FROM src_union_2 +SELECT key, value, ds FROM src_union_2_n0 UNION ALL -SELECT key, value, ds FROM src_union_3 +SELECT key, value, ds FROM src_union_3_n0 ) subq; -EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='1'; -EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='2'; -EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='3'; -EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds IS NOT NULL; +EXPLAIN SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds ='1'; +EXPLAIN SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds ='2'; +EXPLAIN SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds ='3'; +EXPLAIN SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds IS NOT NULL; -SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='1'; -SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds 
='2'; -SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='3'; +SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds ='1'; +SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds ='2'; +SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds ='3'; -- SORT_BEFORE_DIFF -SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds IS NOT NULL; +SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds IS NOT NULL; -EXPLAIN SELECT count(1) from src_union_view WHERE ds ='1'; -EXPLAIN SELECT count(1) from src_union_view WHERE ds ='2'; -EXPLAIN SELECT count(1) from src_union_view WHERE ds ='3'; +EXPLAIN SELECT count(1) from src_union_view_n0 WHERE ds ='1'; +EXPLAIN SELECT count(1) from src_union_view_n0 WHERE ds ='2'; +EXPLAIN SELECT count(1) from src_union_view_n0 WHERE ds ='3'; -SELECT count(1) from src_union_view WHERE ds ='1'; -SELECT count(1) from src_union_view WHERE ds ='2'; -SELECT count(1) from src_union_view WHERE ds ='3'; +SELECT count(1) from src_union_view_n0 WHERE ds ='1'; +SELECT count(1) from src_union_view_n0 WHERE ds ='2'; +SELECT count(1) from src_union_view_n0 WHERE ds ='3'; -INSERT OVERWRITE TABLE src_union_3 PARTITION (ds='4', part_1='1', part_2='2:3+4') SELECT * FROM src; +INSERT OVERWRITE TABLE src_union_3_n0 PARTITION (ds='4', part_1='1', part_2='2:3+4') SELECT * FROM src; -EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='4'; -SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='4'; +EXPLAIN SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds ='4'; +SELECT key, value, ds FROM src_union_view_n0 WHERE key=86 AND ds ='4'; -EXPLAIN SELECT count(1) from src_union_view WHERE ds ='4'; -SELECT count(1) from src_union_view WHERE ds ='4'; +EXPLAIN SELECT count(1) from src_union_view_n0 WHERE ds ='4'; +SELECT count(1) from src_union_view_n0 WHERE ds ='4'; diff --git a/ql/src/test/queries/clientpositive/uniquejoin.q b/ql/src/test/queries/clientpositive/uniquejoin.q index 5b9e94d2b6..e365896f61 100644 --- a/ql/src/test/queries/clientpositive/uniquejoin.q +++ b/ql/src/test/queries/clientpositive/uniquejoin.q @@ -1,27 +1,27 @@ -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE; -CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n1(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T2_n1(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T3_n0(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n1; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n1; +LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n0; -- SORT_QUERY_RESULTS -FROM UNIQUEJOIN PRESERVE T1 a (a.key), PRESERVE T2 b (b.key), PRESERVE T3 c (c.key) +FROM UNIQUEJOIN PRESERVE T1_n1 a (a.key), PRESERVE T2_n1 b (b.key), PRESERVE T3_n0 c (c.key) SELECT a.key, b.key, c.key; -FROM UNIQUEJOIN T1 a (a.key), T2 b (b.key), T3 c (c.key) +FROM UNIQUEJOIN T1_n1 a (a.key), T2_n1 b (b.key), T3_n0 c (c.key) SELECT a.key, b.key, c.key; -FROM UNIQUEJOIN T1 a (a.key), T2 b (b.key-1), T3 c (c.key) +FROM UNIQUEJOIN T1_n1 a (a.key), T2_n1 b (b.key-1), T3_n0 c (c.key) SELECT a.key, b.key, c.key; -FROM UNIQUEJOIN PRESERVE T1 a (a.key, a.val), PRESERVE T2 b (b.key, b.val), 
PRESERVE T3 c (c.key, c.val) +FROM UNIQUEJOIN PRESERVE T1_n1 a (a.key, a.val), PRESERVE T2_n1 b (b.key, b.val), PRESERVE T3_n0 c (c.key, c.val) SELECT a.key, a.val, b.key, b.val, c.key, c.val; -FROM UNIQUEJOIN PRESERVE T1 a (a.key), T2 b (b.key), PRESERVE T3 c (c.key) +FROM UNIQUEJOIN PRESERVE T1_n1 a (a.key), T2_n1 b (b.key), PRESERVE T3_n0 c (c.key) SELECT a.key, b.key, c.key; -FROM UNIQUEJOIN PRESERVE T1 a (a.key), T2 b(b.key) +FROM UNIQUEJOIN PRESERVE T1_n1 a (a.key), T2_n1 b(b.key) SELECT a.key, b.key; diff --git a/ql/src/test/queries/clientpositive/updateAccessTime.q b/ql/src/test/queries/clientpositive/updateAccessTime.q index f2125e5e39..8879d9f6d3 100644 --- a/ql/src/test/queries/clientpositive/updateAccessTime.q +++ b/ql/src/test/queries/clientpositive/updateAccessTime.q @@ -1,36 +1,36 @@ --! qt:dataset:srcpart --! qt:dataset:src set hive.mapred.mode=nonstrict; -drop table tstsrc; +drop table tstsrc_n0; set hive.exec.pre.hooks = org.apache.hadoop.hive.ql.hooks.PreExecutePrinter,org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyTables,org.apache.hadoop.hive.ql.hooks.UpdateInputAccessTimeHook$PreExec; -create table tstsrc as select * from src; -desc extended tstsrc; -select count(1) from tstsrc; -desc extended tstsrc; -drop table tstsrc; +create table tstsrc_n0 as select * from src; +desc extended tstsrc_n0; +select count(1) from tstsrc_n0; +desc extended tstsrc_n0; +drop table tstsrc_n0; -drop table tstsrcpart; -create table tstsrcpart like srcpart; +drop table tstsrcpart_n1; +create table tstsrcpart_n1 like srcpart; set hive.exec.dynamic.partition.mode=nonstrict; set hive.exec.dynamic.partition=true; -insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr from srcpart; +insert overwrite table tstsrcpart_n1 partition (ds, hr) select key, value, ds, hr from srcpart; -desc extended tstsrcpart; -desc extended tstsrcpart partition (ds='2008-04-08', hr='11'); -desc extended tstsrcpart partition (ds='2008-04-08', hr='12'); +desc extended tstsrcpart_n1; +desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='11'); +desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='12'); -select count(1) from tstsrcpart where ds = '2008-04-08' and hr = '11'; +select count(1) from tstsrcpart_n1 where ds = '2008-04-08' and hr = '11'; -desc extended tstsrcpart; -desc extended tstsrcpart partition (ds='2008-04-08', hr='11'); -desc extended tstsrcpart partition (ds='2008-04-08', hr='12'); +desc extended tstsrcpart_n1; +desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='11'); +desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='12'); -drop table tstsrcpart; +drop table tstsrcpart_n1; set hive.exec.pre.hooks = org.apache.hadoop.hive.ql.hooks.PreExecutePrinter; diff --git a/ql/src/test/queries/clientpositive/updateBasicStats.q b/ql/src/test/queries/clientpositive/updateBasicStats.q index 3db535e1f7..24d567e628 100644 --- a/ql/src/test/queries/clientpositive/updateBasicStats.q +++ b/ql/src/test/queries/clientpositive/updateBasicStats.q @@ -1,55 +1,55 @@ --! 
qt:dataset:src set hive.mapred.mode=nonstrict; -create table s as select * from src limit 10; +create table s_n5 as select * from src limit 10; -explain select * from s; +explain select * from s_n5; -alter table s update statistics set('numRows'='12'); +alter table s_n5 update statistics set('numRows'='12'); -explain select * from s; +explain select * from s_n5; -analyze table s compute statistics; +analyze table s_n5 compute statistics; -explain select * from s; +explain select * from s_n5; -alter table s update statistics set('numRows'='1212', 'rawDataSize'='500500'); +alter table s_n5 update statistics set('numRows'='1212', 'rawDataSize'='500500'); -explain select * from s; +explain select * from s_n5; -CREATE TABLE calendarp (`year` int) partitioned by (p int); +CREATE TABLE calendarp_n0 (`year` int) partitioned by (p int); -insert into table calendarp partition (p=1) values (2010), (2011), (2012); +insert into table calendarp_n0 partition (p=1) values (2010), (2011), (2012); -explain select * from calendarp where p=1; +explain select * from calendarp_n0 where p=1; -alter table calendarp partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000'); +alter table calendarp_n0 partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000'); -explain select * from calendarp where p=1; +explain select * from calendarp_n0 where p=1; -create table src_stat_part_two(key string, value string) partitioned by (px int, py string); +create table src_stat_part_two_n0(key string, value string) partitioned by (px int, py string); -insert overwrite table src_stat_part_two partition (px=1, py='a') +insert overwrite table src_stat_part_two_n0 partition (px=1, py='a') select * from src limit 1; -insert overwrite table src_stat_part_two partition (px=1, py='b') +insert overwrite table src_stat_part_two_n0 partition (px=1, py='b') select * from src limit 10; -insert overwrite table src_stat_part_two partition (px=2, py='b') +insert overwrite table src_stat_part_two_n0 partition (px=2, py='b') select * from src limit 100; -explain select * from src_stat_part_two where px=1 and py='a'; +explain select * from src_stat_part_two_n0 where px=1 and py='a'; -explain select * from src_stat_part_two where px=1; +explain select * from src_stat_part_two_n0 where px=1; -alter table src_stat_part_two partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000'); +alter table src_stat_part_two_n0 partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000'); -explain select * from src_stat_part_two where px=1 and py='a'; +explain select * from src_stat_part_two_n0 where px=1 and py='a'; -explain select * from src_stat_part_two where px=1; +explain select * from src_stat_part_two_n0 where px=1; -alter table src_stat_part_two partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000'); +alter table src_stat_part_two_n0 partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000'); -explain select * from src_stat_part_two where px=1 and py='a'; +explain select * from src_stat_part_two_n0 where px=1 and py='a'; -explain select * from src_stat_part_two where px=1; +explain select * from src_stat_part_two_n0 where px=1; diff --git a/ql/src/test/queries/clientpositive/update_access_time_non_current_db.q b/ql/src/test/queries/clientpositive/update_access_time_non_current_db.q index 2fdfd7c806..c0377808a9 100644 --- 
a/ql/src/test/queries/clientpositive/update_access_time_non_current_db.q +++ b/ql/src/test/queries/clientpositive/update_access_time_non_current_db.q @@ -1,9 +1,9 @@ create database temp1; use temp1; -create table test1(id int); +create table test1_n3(id int); create database temp2; use temp2; -create table test2(id int); +create table test2_n1(id int); set hive.exec.pre.hooks=org.apache.hadoop.hive.ql.hooks.UpdateInputAccessTimeHook$PreExec; use temp1; -desc temp2.test2; +desc temp2.test2_n1; diff --git a/ql/src/test/queries/clientpositive/varchar_join1.q b/ql/src/test/queries/clientpositive/varchar_join1.q index d710339add..c504c26064 100644 --- a/ql/src/test/queries/clientpositive/varchar_join1.q +++ b/ql/src/test/queries/clientpositive/varchar_join1.q @@ -1,36 +1,36 @@ set hive.mapred.mode=nonstrict; -drop table varchar_join1_vc1; -drop table varchar_join1_vc2; -drop table varchar_join1_str; +drop table varchar_join1_vc1_n0; +drop table varchar_join1_vc2_n0; +drop table varchar_join1_str_n0; -create table varchar_join1_vc1 ( +create table varchar_join1_vc1_n0 ( c1 int, c2 varchar(10) ); -create table varchar_join1_vc2 ( +create table varchar_join1_vc2_n0 ( c1 int, c2 varchar(20) ); -create table varchar_join1_str ( +create table varchar_join1_str_n0 ( c1 int, c2 string ); -load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc1; -load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc2; -load data local inpath '../../data/files/vc1.txt' into table varchar_join1_str; +load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc1_n0; +load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc2_n0; +load data local inpath '../../data/files/vc1.txt' into table varchar_join1_str_n0; -- Join varchar with same length varchar -select * from varchar_join1_vc1 a join varchar_join1_vc1 b on (a.c2 = b.c2) order by a.c1; +select * from varchar_join1_vc1_n0 a join varchar_join1_vc1_n0 b on (a.c2 = b.c2) order by a.c1; -- Join varchar with different length varchar -select * from varchar_join1_vc1 a join varchar_join1_vc2 b on (a.c2 = b.c2) order by a.c1; +select * from varchar_join1_vc1_n0 a join varchar_join1_vc2_n0 b on (a.c2 = b.c2) order by a.c1; -- Join varchar with string -select * from varchar_join1_vc1 a join varchar_join1_str b on (a.c2 = b.c2) order by a.c1; +select * from varchar_join1_vc1_n0 a join varchar_join1_str_n0 b on (a.c2 = b.c2) order by a.c1; -drop table varchar_join1_vc1; -drop table varchar_join1_vc2; -drop table varchar_join1_str; +drop table varchar_join1_vc1_n0; +drop table varchar_join1_vc2_n0; +drop table varchar_join1_str_n0; diff --git a/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q b/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q index f3cb52f1eb..b011664d12 100644 --- a/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q +++ b/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q @@ -5,32 +5,32 @@ SET hive.auto.convert.join=true; -- SORT_QUERY_RESULTS -drop table varchar_udf_1; +drop table varchar_udf_1_n0; -create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC; -insert overwrite table varchar_udf_1 +create table varchar_udf_1_n0 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC; +insert overwrite table varchar_udf_1_n0 select key, value, key, value from src where key = '238' limit 1; -- Add a single NULL row that will come from ORC as isRepeated. 
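A note on the "isRepeated" comment above, as background for this and the later files that repeat it: when a column arrives from ORC with one identical value for the whole row batch (a one-row, all-NULL insert being the cheapest way to force this), the vectorized reader sets isRepeating on the column vector and fills only entry 0, so every expression under test must handle the repeated-NULL shape. A minimal sketch of the device, using a hypothetical scratch table repeat_probe that is not part of this patch:

create table repeat_probe (c1 string) stored as orc;
-- one row, all NULL: the reader returns c1 as an isRepeating vector with noNulls=false
insert into repeat_probe values (NULL);
-- any vectorized expression over c1 now has to take the repeated-NULL path
select c1 regexp 'val' from repeat_probe;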
-insert into varchar_udf_1 values (NULL, NULL, NULL, NULL); +insert into varchar_udf_1_n0 values (NULL, NULL, NULL, NULL); -DROP TABLE IF EXISTS DECIMAL_UDF_txt; -DROP TABLE IF EXISTS DECIMAL_UDF; +DROP TABLE IF EXISTS DECIMAL_UDF_txt_n0; +DROP TABLE IF EXISTS DECIMAL_UDF_n1; -CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int) +CREATE TABLE DECIMAL_UDF_txt_n0 (key decimal(20,10), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt; +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt_n0; -CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int) +CREATE TABLE DECIMAL_UDF_n1 (key decimal(20,10), value int) STORED AS ORC; -INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt; +INSERT OVERWRITE TABLE DECIMAL_UDF_n1 SELECT * FROM DECIMAL_UDF_txt_n0; -- Add a single NULL row that will come from ORC as isRepeated. -insert into DECIMAL_UDF values (NULL, NULL); +insert into DECIMAL_UDF_n1 values (NULL, NULL); drop table if exists count_case_groupby; @@ -47,39 +47,39 @@ select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val') -from varchar_udf_1; +from varchar_udf_1_n0; select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val') -from varchar_udf_1; +from varchar_udf_1_n0; explain vectorization expression select regexp_extract(c2, 'val_([0-9]+)', 1), regexp_extract(c4, 'val_([0-9]+)', 1), regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1) -from varchar_udf_1; +from varchar_udf_1_n0; select regexp_extract(c2, 'val_([0-9]+)', 1), regexp_extract(c4, 'val_([0-9]+)', 1), regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1) -from varchar_udf_1; +from varchar_udf_1_n0; explain vectorization expression select regexp_replace(c2, 'val', 'replaced'), regexp_replace(c4, 'val', 'replaced'), regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced') -from varchar_udf_1; +from varchar_udf_1_n0; select regexp_replace(c2, 'val', 'replaced'), regexp_replace(c4, 'val', 'replaced'), regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced') -from varchar_udf_1; +from varchar_udf_1_n0; set hive.vectorized.adaptor.usage.mode=chosen; @@ -89,78 +89,78 @@ select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val') -from varchar_udf_1; +from varchar_udf_1_n0; select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val') -from varchar_udf_1; +from varchar_udf_1_n0; explain vectorization expression select regexp_extract(c2, 'val_([0-9]+)', 1), regexp_extract(c4, 'val_([0-9]+)', 1), regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1) -from varchar_udf_1; +from varchar_udf_1_n0; select regexp_extract(c2, 'val_([0-9]+)', 1), regexp_extract(c4, 'val_([0-9]+)', 1), regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1) -from varchar_udf_1; +from varchar_udf_1_n0; explain vectorization expression select regexp_replace(c2, 'val', 'replaced'), regexp_replace(c4, 'val', 'replaced'), regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced') -from varchar_udf_1; +from varchar_udf_1_n0; select regexp_replace(c2, 'val', 'replaced'), regexp_replace(c4, 'val', 'replaced'), regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced') -from varchar_udf_1; +from varchar_udf_1_n0; set hive.vectorized.adaptor.usage.mode=none; -EXPLAIN VECTORIZATION 
EXPRESSION SELECT POWER(key, 2) FROM DECIMAL_UDF; +EXPLAIN VECTORIZATION EXPRESSION SELECT POWER(key, 2) FROM DECIMAL_UDF_n1; -SELECT POWER(key, 2) FROM DECIMAL_UDF; +SELECT POWER(key, 2) FROM DECIMAL_UDF_n1; EXPLAIN VECTORIZATION EXPRESSION SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF WHERE key = 10; +FROM DECIMAL_UDF_n1 WHERE key = 10; SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF WHERE key = 10; +FROM DECIMAL_UDF_n1 WHERE key = 10; set hive.vectorized.adaptor.usage.mode=chosen; -EXPLAIN VECTORIZATION EXPRESSION SELECT POWER(key, 2) FROM DECIMAL_UDF; +EXPLAIN VECTORIZATION EXPRESSION SELECT POWER(key, 2) FROM DECIMAL_UDF_n1; -SELECT POWER(key, 2) FROM DECIMAL_UDF; +SELECT POWER(key, 2) FROM DECIMAL_UDF_n1; EXPLAIN VECTORIZATION EXPRESSION SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF WHERE key = 10; +FROM DECIMAL_UDF_n1 WHERE key = 10; SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF WHERE key = 10; +FROM DECIMAL_UDF_n1 WHERE key = 10; set hive.vectorized.adaptor.usage.mode=none; @@ -178,10 +178,10 @@ SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS c SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key; -drop table varchar_udf_1; +drop table varchar_udf_1_n0; -DROP TABLE DECIMAL_UDF_txt; -DROP TABLE DECIMAL_UDF; +DROP TABLE DECIMAL_UDF_txt_n0; +DROP TABLE DECIMAL_UDF_n1; drop table count_case_groupby; diff --git a/ql/src/test/queries/clientpositive/vector_aggregate_9.q b/ql/src/test/queries/clientpositive/vector_aggregate_9.q index d7322ec60d..5eeeecf3db 100644 --- a/ql/src/test/queries/clientpositive/vector_aggregate_9.q +++ b/ql/src/test/queries/clientpositive/vector_aggregate_9.q @@ -2,7 +2,7 @@ set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -create table vectortab2k( +create table vectortab2k_n4( t tinyint, si smallint, i int, @@ -19,9 +19,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n4; -create table vectortab2korc( +create table vectortab2korc_n4( t tinyint, si smallint, i int, @@ -37,21 +37,21 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n4 SELECT * FROM vectortab2k_n4; -- SORT_QUERY_RESULTS explain vectorization detail -select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc; +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc_n4; -select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc; +select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc_n4; explain vectorization detail -select min(d), max(d), sum(d), avg(d) from vectortab2korc; +select min(d), max(d), sum(d), avg(d) from vectortab2korc_n4; -select min(d), max(d), sum(d), avg(d) from vectortab2korc; +select min(d), max(d), sum(d), avg(d) from vectortab2korc_n4; explain vectorization detail -select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc; +select min(ts), max(ts), sum(ts), 
avg(ts) from vectortab2korc_n4; -select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc; \ No newline at end of file +select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc_n4; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/vector_annotate_stats_select.q b/ql/src/test/queries/clientpositive/vector_annotate_stats_select.q index e45dff64fb..050646bb0c 100644 --- a/ql/src/test/queries/clientpositive/vector_annotate_stats_select.q +++ b/ql/src/test/queries/clientpositive/vector_annotate_stats_select.q @@ -3,7 +3,7 @@ set hive.fetch.task.conversion=none; set hive.mapred.mode=nonstrict; set hive.stats.fetch.column.stats=true; -create table if not exists alltypes ( +create table if not exists alltypes_n4 ( bo1 boolean, ti1 tinyint, si1 smallint, @@ -23,124 +23,124 @@ create table if not exists alltypes ( collection items terminated by ',' map keys terminated by ':' stored as textfile; -create table alltypes_orc like alltypes; -alter table alltypes_orc set fileformat orc; +create table alltypes_orc_n4 like alltypes_n4; +alter table alltypes_orc_n4 set fileformat orc; -load data local inpath '../../data/files/alltypes.txt' overwrite into table alltypes; +load data local inpath '../../data/files/alltypes.txt' overwrite into table alltypes_n4; -insert overwrite table alltypes_orc select * from alltypes; +insert overwrite table alltypes_orc_n4 select * from alltypes_n4; -- basicStatState: COMPLETE colStatState: NONE numRows: 2 rawDataSize: 1514 -explain select * from alltypes_orc; +explain select * from alltypes_orc_n4; -- statistics for complex types are not supported yet -analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1; +analyze table alltypes_orc_n4 compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1; -- numRows: 2 rawDataSize: 1514 -explain select * from alltypes_orc; +explain select * from alltypes_orc_n4; -- numRows: 2 rawDataSize: 8 -explain select bo1 from alltypes_orc; +explain select bo1 from alltypes_orc_n4; -- col alias renaming -- numRows: 2 rawDataSize: 8 -explain select i1 as int1 from alltypes_orc; +explain select i1 as int1 from alltypes_orc_n4; -- numRows: 2 rawDataSize: 174 -explain select s1 from alltypes_orc; +explain select s1 from alltypes_orc_n4; -- column statistics for complex types unsupported and so statistics will not be updated -- numRows: 2 rawDataSize: 1514 -explain select m1 from alltypes_orc; +explain select m1 from alltypes_orc_n4; -- numRows: 2 rawDataSize: 246 -explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc; +explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc_n4; -- numRows: 2 rawDataSize: 0 -explain vectorization expression select null from alltypes_orc; +explain vectorization expression select null from alltypes_orc_n4; -- numRows: 2 rawDataSize: 8 -explain vectorization expression select 11 from alltypes_orc; +explain vectorization expression select 11 from alltypes_orc_n4; -- numRows: 2 rawDataSize: 16 -explain vectorization expression select 11L from alltypes_orc; +explain vectorization expression select 11L from alltypes_orc_n4; -- numRows: 2 rawDataSize: 16 -explain vectorization expression select 11.0 from alltypes_orc; +explain vectorization expression select 11.0 from alltypes_orc_n4; -- numRows: 2 rawDataSize: 178 -explain vectorization expression select "hello" from alltypes_orc; -explain vectorization expression select cast("hello" as char(5)) from alltypes_orc; -explain vectorization expression select 
cast("hello" as varchar(5)) from alltypes_orc; +explain vectorization expression select "hello" from alltypes_orc_n4; +explain vectorization expression select cast("hello" as char(5)) from alltypes_orc_n4; +explain vectorization expression select cast("hello" as varchar(5)) from alltypes_orc_n4; -- numRows: 2 rawDataSize: 96 -explain vectorization expression select unbase64("0xe23") from alltypes_orc; +explain vectorization expression select unbase64("0xe23") from alltypes_orc_n4; -- numRows: 2 rawDataSize: 16 -explain vectorization expression select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc; +explain vectorization expression select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc_n4; -- numRows: 2 rawDataSize: 80 -explain vectorization expression select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc; +explain vectorization expression select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc_n4; -- numRows: 2 rawDataSize: 112 -explain vectorization expression select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc; +explain vectorization expression select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc_n4; -- numRows: 2 rawDataSize: 224 -explain vectorization expression select cast("58.174" as DECIMAL) from alltypes_orc; +explain vectorization expression select cast("58.174" as DECIMAL) from alltypes_orc_n4; -- numRows: 2 rawDataSize: 112 -explain vectorization expression select array(1,2,3) from alltypes_orc; +explain vectorization expression select array(1,2,3) from alltypes_orc_n4; -- numRows: 2 rawDataSize: 1508 -explain vectorization expression select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc; +explain vectorization expression select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc_n4; -- numRows: 2 rawDataSize: 112 -explain vectorization expression select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc; +explain vectorization expression select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc_n4; -- numRows: 2 rawDataSize: 250 -explain vectorization expression select CREATE_UNION(0, "hello") from alltypes_orc; +explain vectorization expression select CREATE_UNION(0, "hello") from alltypes_orc_n4; -- COUNT(*) is projected as new column. It is not projected as GenericUDF and so datasize estimate will be based on number of rows -- numRows: 1 rawDataSize: 8 -explain vectorization expression select count(*) from alltypes_orc; +explain vectorization expression select count(*) from alltypes_orc_n4; -- COUNT(1) is projected as new column. It is not projected as GenericUDF and so datasize estimate will be based on number of rows -- numRows: 1 rawDataSize: 8 -explain vectorization expression select count(1) from alltypes_orc; +explain vectorization expression select count(1) from alltypes_orc_n4; -- column statistics for complex column types will be missing. 
data size will be calculated from available column statistics -- numRows: 2 rawDataSize: 254 -explain vectorization expression select *,11 from alltypes_orc; +explain vectorization expression select *,11 from alltypes_orc_n4; -- subquery selects -- inner select - numRows: 2 rawDataSize: 8 -- outer select - numRows: 2 rawDataSize: 8 -explain vectorization expression select i1 from (select i1 from alltypes_orc limit 10) temp; +explain vectorization expression select i1 from (select i1 from alltypes_orc_n4 limit 10) temp; -- inner select - numRows: 2 rawDataSize: 16 -- outer select - numRows: 2 rawDataSize: 8 -explain vectorization expression select i1 from (select i1,11 from alltypes_orc limit 10) temp; +explain vectorization expression select i1 from (select i1,11 from alltypes_orc_n4 limit 10) temp; -- inner select - numRows: 2 rawDataSize: 16 -- outer select - numRows: 2 rawDataSize: 186 -explain vectorization expression select i1,"hello" from (select i1,11 from alltypes_orc limit 10) temp; +explain vectorization expression select i1,"hello" from (select i1,11 from alltypes_orc_n4 limit 10) temp; -- inner select - numRows: 2 rawDataSize: 24 -- outer select - numRows: 2 rawDataSize: 16 -explain vectorization expression select x from (select i1,11.0 as x from alltypes_orc limit 10) temp; +explain vectorization expression select x from (select i1,11.0 as x from alltypes_orc_n4 limit 10) temp; -- inner select - numRows: 2 rawDataSize: 104 -- outer select - numRows: 2 rawDataSize: 186 -explain vectorization expression select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc limit 10) temp; +explain vectorization expression select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc_n4 limit 10) temp; -- inner select - numRows: 2 rawDataSize: 186 -- middle select - numRows: 2 rawDataSize: 178 -- outer select - numRows: 2 rawDataSize: 194 -explain vectorization expression select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc limit 10) in1 limit 10) in2; +explain vectorization expression select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc_n4 limit 10) in1 limit 10) in2; -- This test is for FILTER operator where filter expression is a boolean column -- numRows: 2 rawDataSize: 8 -explain vectorization expression select bo1 from alltypes_orc where bo1; +explain vectorization expression select bo1 from alltypes_orc_n4 where bo1; -- numRows: 0 rawDataSize: 0 -explain vectorization expression select bo1 from alltypes_orc where !bo1; +explain vectorization expression select bo1 from alltypes_orc_n4 where !bo1; diff --git a/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q index ed25756e06..d559571235 100644 --- a/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q +++ b/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q @@ -11,13 +11,13 @@ set hive.exec.reducers.max = 1; -- SORT_QUERY_RESULTS -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORC; -CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORC; +CREATE TABLE tbl1_n12(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORC; +CREATE TABLE tbl2_n11(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORC; -insert overwrite table tbl1 +insert overwrite table tbl1_n12 
select * from src where key < 10; -insert overwrite table tbl2 +insert overwrite table tbl2_n11 select * from src where key < 10; set hive.optimize.bucketmapjoin = true; @@ -29,11 +29,11 @@ set hive.auto.convert.sortmerge.join=true; -- The join is being performed as part of sub-query. It should be converted to a sort-merge join explain vectorization expression select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1; select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1; -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join @@ -42,7 +42,7 @@ select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1 group by key ) subq2; @@ -51,7 +51,7 @@ select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1 group by key ) subq2; @@ -63,14 +63,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key; @@ -79,14 +79,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key; @@ -95,15 +95,15 @@ on src1.key = src2.key; -- be converted to a sort-merge join. 
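As a reminder of what "converted to a sort-merge join" presupposes throughout this file (a sketch under stated assumptions, not part of the patch): both join sides bucketed and sorted on the join key into the same number of buckets, with the conversions enabled, which is exactly how tbl1_n12 and tbl2_n11 are declared at the top. Using hypothetical tables smb_a and smb_b:

set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
set hive.auto.convert.sortmerge.join = true;
-- both sides: bucketed and sorted by the join key, same bucket count
create table smb_a (key int, value string) clustered by (key) sorted by (key) into 2 buckets stored as orc;
create table smb_b (key int, value string) clustered by (key) sorted by (key) into 2 buckets stored as orc;
-- plain filters and projections on top leave the sort order intact,
-- so the plan should still show a sort-merge (SMB) join:
explain select count(*) from (select key, value from smb_a where key < 6) sq1 join smb_b b on sq1.key = b.key;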
explain vectorization expression select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n12 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n11 a where key < 6) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n12 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n11 a where key < 6) subq2 on subq1.key = subq2.key; -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should @@ -113,22 +113,22 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n12 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n11 b on subq2.key = b.key; select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n12 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n11 b on subq2.key = b.key; -- Both the tables are nested sub-queries i.e. more than 1 level of sub-query. @@ -138,7 +138,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n12 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -146,7 +146,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n12 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -156,7 +156,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n12 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -164,7 +164,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n12 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -175,62 +175,62 @@ select count(*) from -- item, but that is not part of the join key.
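Spelling out the contrast the next two query pairs draw: concat(a.value, a.value) rewrites only a projected column, so the bucketing and sort order of key survive and the sort-merge conversion still applies, whereas a.key + 1 rewrites the join key itself and rules out both sort-merge and bucket map joins. Against the hypothetical smb_a and smb_b tables sketched earlier:

-- still SMB-eligible: the join key passes through untouched
explain select count(*) from (select key, concat(value, value) as value from smb_a where key < 8) sq1 join smb_b b on sq1.key = b.key;
-- no longer SMB-eligible: the join key is a derived expression
explain select count(*) from (select key + 1 as key from smb_a) sq2 join smb_b b on sq2.key = b.key;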
explain vectorization expression select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n12 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n11 a where key < 8) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n12 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n11 a where key < 8) subq2 on subq1.key = subq2.key; -- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized map-side -- join should be performed explain vectorization expression select count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n12 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n11 a) subq2 on subq1.key = subq2.key; select count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n12 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n11 a) subq2 on subq1.key = subq2.key; -- One of the tables is a sub-query and the other is not. -- It should be converted to a sort-merge join. explain vectorization expression select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n12 a where key < 6) subq1 + join tbl2_n11 a on subq1.key = a.key; select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key; + (select a.key as key, a.value as value from tbl1_n12 a where key < 6) subq1 + join tbl2_n11 a on subq1.key = a.key; -- There are more than 2 inputs to the join, all of them being sub-queries.
-- It should be converted to a sort-merge join explain vectorization expression select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n12 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n11 a where key < 6) subq2 on (subq1.key = subq2.key) join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n11 a where key < 6) subq3 on (subq1.key = subq3.key); select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n12 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n11 a where key < 6) subq2 on subq1.key = subq2.key join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n11 a where key < 6) subq3 on (subq1.key = subq3.key); -- The join is being performed on a nested sub-query, and an aggregation is performed after that. @@ -241,11 +241,11 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n12 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n11 b on subq2.key = b.key) a; select count(*) from ( @@ -253,51 +253,51 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n12 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n11 b on subq2.key = b.key) a; -CREATE TABLE dest1(key int, value string); -CREATE TABLE dest2(key int, val1 string, val2 string); +CREATE TABLE dest1_n157(key int, value string); +CREATE TABLE dest2_n40(key int, val1 string, val2 string); -- The join is followed by a multi-table insert. It should be converted to -- a sort-merge join explain vectorization expression from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1 -insert overwrite table dest1 select key, val1 -insert overwrite table dest2 select key, val1, val2; +insert overwrite table dest1_n157 select key, val1 +insert overwrite table dest2_n40 select key, val1, val2; from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1 -insert overwrite table dest1 select key, val1 -insert overwrite table dest2 select key, val1, val2; +insert overwrite table dest1_n157 select key, val1 +insert overwrite table dest2_n40 select key, val1, val2; -select * from dest1; -select * from dest2; +select * from dest1_n157; +select * from dest2_n40; -DROP TABLE dest2; -CREATE TABLE dest2(key int, cnt int); +DROP TABLE dest2_n40; +CREATE TABLE dest2_n40(key int, cnt int); -- The join is followed by a multi-table insert, and one of the inserts involves a reducer.
-- It should be converted to a sort-merge join explain vectorization expression from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1 -insert overwrite table dest1 select key, val1 -insert overwrite table dest2 select key, count(*) group by key; +insert overwrite table dest1_n157 select key, val1 +insert overwrite table dest2_n40 select key, count(*) group by key; from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n12 a join tbl2_n11 b on a.key = b.key ) subq1 -insert overwrite table dest1 select key, val1 -insert overwrite table dest2 select key, count(*) group by key; +insert overwrite table dest1_n157 select key, val1 +insert overwrite table dest2_n40 select key, count(*) group by key; -select * from dest1; -select * from dest2; +select * from dest1_n157; +select * from dest2_n40; diff --git a/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q b/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q index 826918f6b6..f7a14e7abb 100644 --- a/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q +++ b/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q @@ -6,11 +6,11 @@ SET hive.auto.convert.join.noconditionaltask.size=1000000000; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -DROP TABLE over1k; +DROP TABLE over1k_n7; DROP TABLE hundredorc; -- data setup -CREATE TABLE over1k(t tinyint, +CREATE TABLE over1k_n7(t tinyint, si smallint, i int, b bigint, @@ -24,7 +24,7 @@ CREATE TABLE over1k(t tinyint, ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k; +LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n7; CREATE TABLE hundredorc(t tinyint, si smallint, @@ -39,7 +39,7 @@ CREATE TABLE hundredorc(t tinyint, bin binary) STORED AS ORC; -INSERT INTO TABLE hundredorc SELECT * FROM over1k LIMIT 100; +INSERT INTO TABLE hundredorc SELECT * FROM over1k_n7 LIMIT 100; EXPLAIN VECTORIZATION EXPRESSION SELECT sum(hash(*)) k diff --git a/ql/src/test/queries/clientpositive/vector_cast_constant.q b/ql/src/test/queries/clientpositive/vector_cast_constant.q index 1f38e762e7..1f0f09a8d3 100644 --- a/ql/src/test/queries/clientpositive/vector_cast_constant.q +++ b/ql/src/test/queries/clientpositive/vector_cast_constant.q @@ -3,11 +3,11 @@ set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -DROP TABLE over1k; -DROP TABLE over1korc; +DROP TABLE over1k_n0; +DROP TABLE over1korc_n0; -- data setup -CREATE TABLE over1k(t tinyint, +CREATE TABLE over1k_n0(t tinyint, si smallint, i int, b bigint, @@ -21,9 +21,9 @@ CREATE TABLE over1k(t tinyint, ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k; +LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n0; -CREATE TABLE over1korc(t tinyint, +CREATE TABLE over1korc_n0(t tinyint, si smallint, i int, b bigint, @@ -36,18 +36,18 @@ CREATE TABLE over1korc(t tinyint, bin binary) STORED AS ORC; -INSERT INTO TABLE over1korc SELECT * FROM over1k; +INSERT INTO TABLE over1korc_n0 SELECT * FROM over1k_n0; EXPLAIN VECTORIZATION EXPRESSION SELECT 
i, AVG(CAST(50 AS INT)) AS `avg_int_ok`, AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`, AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok` - FROM over1korc GROUP BY i ORDER BY i LIMIT 10; + FROM over1korc_n0 GROUP BY i ORDER BY i LIMIT 10; SELECT i, AVG(CAST(50 AS INT)) AS `avg_int_ok`, AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`, AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok` - FROM over1korc GROUP BY i ORDER BY i LIMIT 10; + FROM over1korc_n0 GROUP BY i ORDER BY i LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/vector_char_2.q b/ql/src/test/queries/clientpositive/vector_char_2.q index 02062d5b20..1110ef6f9b 100644 --- a/ql/src/test/queries/clientpositive/vector_char_2.q +++ b/ql/src/test/queries/clientpositive/vector_char_2.q @@ -4,17 +4,17 @@ set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -drop table char_2; +drop table char_2_n0; -create table char_2 ( +create table char_2_n0 ( key char(10), value char(20) ) stored as orc; -insert overwrite table char_2 select * from src; +insert overwrite table char_2_n0 select * from src; -- Add a single NULL row that will come from ORC as isRepeated. -insert into char_2 values (NULL, NULL); +insert into char_2_n0 values (NULL, NULL); select value, sum(cast(key as int)), count(*) numrows from src @@ -23,14 +23,14 @@ order by value asc limit 5; explain vectorization expression select value, sum(cast(key as int)), count(*) numrows -from char_2 +from char_2_n0 group by value order by value asc limit 5; -- should match the query from src select value, sum(cast(key as int)), count(*) numrows -from char_2 +from char_2_n0 group by value order by value asc limit 5; @@ -42,16 +42,16 @@ order by value desc limit 5; explain vectorization expression select value, sum(cast(key as int)), count(*) numrows -from char_2 +from char_2_n0 group by value order by value desc limit 5; -- should match the query from src select value, sum(cast(key as int)), count(*) numrows -from char_2 +from char_2_n0 group by value order by value desc limit 5; -drop table char_2; +drop table char_2_n0; diff --git a/ql/src/test/queries/clientpositive/vector_char_cast.q b/ql/src/test/queries/clientpositive/vector_char_cast.q index c7d3c3c838..827d8d1289 100644 --- a/ql/src/test/queries/clientpositive/vector_char_cast.q +++ b/ql/src/test/queries/clientpositive/vector_char_cast.q @@ -1,11 +1,11 @@ set hive.fetch.task.conversion=none; -create table s1(id smallint) stored as orc; +create table s1_n2(id smallint) stored as orc; -insert into table s1 values (1000),(1001),(1002),(1003),(1000); +insert into table s1_n2 values (1000),(1001),(1002),(1003),(1000); set hive.vectorized.execution.enabled=true; -select count(1) from s1 where cast(id as char(4))='1000'; +select count(1) from s1_n2 where cast(id as char(4))='1000'; set hive.vectorized.execution.enabled=false; -select count(1) from s1 where cast(id as char(4))='1000'; \ No newline at end of file +select count(1) from s1_n2 where cast(id as char(4))='1000'; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/vector_coalesce_3.q b/ql/src/test/queries/clientpositive/vector_coalesce_3.q index 7d5e82c6fe..9e00bdce41 100644 --- a/ql/src/test/queries/clientpositive/vector_coalesce_3.q +++ b/ql/src/test/queries/clientpositive/vector_coalesce_3.q @@ -4,21 +4,21 @@ SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; SET hive.auto.convert.join=true; -CREATE TABLE test_1 (member BIGINT, attr BIGINT) STORED AS ORC; +CREATE TABLE test_1_n0 
(member BIGINT, attr BIGINT) STORED AS ORC; -CREATE TABLE test_2 (member BIGINT) STORED AS ORC; +CREATE TABLE test_2_n0 (member BIGINT) STORED AS ORC; -INSERT INTO test_1 VALUES (3,1),(2,2); -INSERT INTO test_2 VALUES (1),(2),(3),(4); +INSERT INTO test_1_n0 VALUES (3,1),(2,2); +INSERT INTO test_2_n0 VALUES (1),(2),(3),(4); -- Add a single NULL row that will come from ORC as isRepeated. -insert into test_1 values (NULL, NULL); -insert into test_2 values (NULL); +insert into test_1_n0 values (NULL, NULL); +insert into test_2_n0 values (NULL); EXPLAIN VECTORIZATION DETAIL SELECT m.member, (CASE WHEN COALESCE(n.attr, 5)>1 THEN n.attr END) AS attr -FROM test_2 m LEFT JOIN test_1 n ON m.member = n.member; +FROM test_2_n0 m LEFT JOIN test_1_n0 n ON m.member = n.member; SELECT m.member, (CASE WHEN COALESCE(n.attr, 5)>1 THEN n.attr END) AS attr -FROM test_2 m LEFT JOIN test_1 n ON m.member = n.member; +FROM test_2_n0 m LEFT JOIN test_1_n0 n ON m.member = n.member; diff --git a/ql/src/test/queries/clientpositive/vector_complex_all.q b/ql/src/test/queries/clientpositive/vector_complex_all.q index e223c8efdc..894aade9d2 100644 --- a/ql/src/test/queries/clientpositive/vector_complex_all.q +++ b/ql/src/test/queries/clientpositive/vector_complex_all.q @@ -10,7 +10,7 @@ set hive.llap.io.enabled=false; set hive.mapred.mode=nonstrict; set hive.auto.convert.join=true; -CREATE TABLE orc_create_staging ( +CREATE TABLE orc_create_staging_n0 ( str STRING, mp MAP<STRING,STRING>, lst ARRAY<STRING>, @@ -20,9 +20,9 @@ CREATE TABLE orc_create_staging ( COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'; -LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging; +LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n0; -CREATE TABLE orc_create_complex ( +CREATE TABLE orc_create_complex_n0 ( str STRING, mp MAP<STRING,STRING>, lst ARRAY<STRING>, @@ -30,81 +30,81 @@ CREATE TABLE orc_create_complex ( val string ) STORED AS ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="1000", "orc.compress.size"="10000"); -INSERT OVERWRITE TABLE orc_create_complex -SELECT orc_create_staging.*, '0' FROM orc_create_staging; +INSERT OVERWRITE TABLE orc_create_complex_n0 +SELECT orc_create_staging_n0.*, '0' FROM orc_create_staging_n0; set hive.llap.io.enabled=true; EXPLAIN VECTORIZATION DETAIL -SELECT * FROM orc_create_complex; +SELECT * FROM orc_create_complex_n0; -SELECT * FROM orc_create_complex; +SELECT * FROM orc_create_complex_n0; EXPLAIN VECTORIZATION DETAIL -SELECT str FROM orc_create_complex; +SELECT str FROM orc_create_complex_n0; -SELECT str FROM orc_create_complex; +SELECT str FROM orc_create_complex_n0; EXPLAIN VECTORIZATION DETAIL -SELECT strct, mp, lst FROM orc_create_complex; +SELECT strct, mp, lst FROM orc_create_complex_n0; -SELECT strct, mp, lst FROM orc_create_complex; +SELECT strct, mp, lst FROM orc_create_complex_n0; EXPLAIN VECTORIZATION DETAIL -SELECT lst, str FROM orc_create_complex; +SELECT lst, str FROM orc_create_complex_n0; -SELECT lst, str FROM orc_create_complex; +SELECT lst, str FROM orc_create_complex_n0; EXPLAIN VECTORIZATION DETAIL -SELECT mp, str FROM orc_create_complex; +SELECT mp, str FROM orc_create_complex_n0; -SELECT mp, str FROM orc_create_complex; +SELECT mp, str FROM orc_create_complex_n0; EXPLAIN VECTORIZATION DETAIL -SELECT strct, str FROM orc_create_complex; +SELECT strct, str FROM orc_create_complex_n0; -SELECT strct, str FROM orc_create_complex; +SELECT strct, str FROM orc_create_complex_n0; EXPLAIN VECTORIZATION DETAIL
-SELECT strct.B, str FROM orc_create_complex; +SELECT strct.B, str FROM orc_create_complex_n0; -SELECT strct.B, str FROM orc_create_complex; +SELECT strct.B, str FROM orc_create_complex_n0; set hive.llap.io.enabled=false; EXPLAIN VECTORIZATION DETAIL -INSERT INTO TABLE orc_create_complex -SELECT orc_create_staging.*, src1.key FROM orc_create_staging cross join src src1 cross join orc_create_staging spam1 cross join orc_create_staging spam2; +INSERT INTO TABLE orc_create_complex_n0 +SELECT orc_create_staging_n0.*, src1.key FROM orc_create_staging_n0 cross join src src1 cross join orc_create_staging_n0 spam1 cross join orc_create_staging_n0 spam2; -INSERT INTO TABLE orc_create_complex -SELECT orc_create_staging.*, src1.key FROM orc_create_staging cross join src src1 cross join orc_create_staging spam1 cross join orc_create_staging spam2; +INSERT INTO TABLE orc_create_complex_n0 +SELECT orc_create_staging_n0.*, src1.key FROM orc_create_staging_n0 cross join src src1 cross join orc_create_staging_n0 spam1 cross join orc_create_staging_n0 spam2; EXPLAIN VECTORIZATION DETAIL -select count(*) from orc_create_complex; +select count(*) from orc_create_complex_n0; -select count(*) from orc_create_complex; +select count(*) from orc_create_complex_n0; set hive.llap.io.enabled=true; EXPLAIN VECTORIZATION DETAIL -SELECT distinct lst, strct FROM orc_create_complex; +SELECT distinct lst, strct FROM orc_create_complex_n0; -SELECT distinct lst, strct FROM orc_create_complex; +SELECT distinct lst, strct FROM orc_create_complex_n0; EXPLAIN VECTORIZATION DETAIL -SELECT str, count(val) FROM orc_create_complex GROUP BY str; +SELECT str, count(val) FROM orc_create_complex_n0 GROUP BY str; -SELECT str, count(val) FROM orc_create_complex GROUP BY str; +SELECT str, count(val) FROM orc_create_complex_n0 GROUP BY str; EXPLAIN VECTORIZATION DETAIL -SELECT strct.B, count(val) FROM orc_create_complex GROUP BY strct.B; +SELECT strct.B, count(val) FROM orc_create_complex_n0 GROUP BY strct.B; -SELECT strct.B, count(val) FROM orc_create_complex GROUP BY strct.B; +SELECT strct.B, count(val) FROM orc_create_complex_n0 GROUP BY strct.B; EXPLAIN VECTORIZATION DETAIL -SELECT strct, mp, lst, str, count(val) FROM orc_create_complex GROUP BY strct, mp, lst, str; +SELECT strct, mp, lst, str, count(val) FROM orc_create_complex_n0 GROUP BY strct, mp, lst, str; -SELECT strct, mp, lst, str, count(val) FROM orc_create_complex GROUP BY strct, mp, lst, str; +SELECT strct, mp, lst, str, count(val) FROM orc_create_complex_n0 GROUP BY strct, mp, lst, str; diff --git a/ql/src/test/queries/clientpositive/vector_complex_join.q b/ql/src/test/queries/clientpositive/vector_complex_join.q index 9316378d91..13954c395e 100644 --- a/ql/src/test/queries/clientpositive/vector_complex_join.q +++ b/ql/src/test/queries/clientpositive/vector_complex_join.q @@ -7,30 +7,30 @@ SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; set hive.fetch.task.conversion=none; -- The test case is updated for HIVE-18043.
-- -CREATE TABLE test (a INT, b MAP<INT, STRING>) STORED AS ORC; -INSERT OVERWRITE TABLE test SELECT 199408978, MAP(1, "val_1", 2, "val_2") FROM src LIMIT 1; +CREATE TABLE test_n8 (a INT, b MAP<INT, STRING>) STORED AS ORC; +INSERT OVERWRITE TABLE test_n8 SELECT 199408978, MAP(1, "val_1", 2, "val_2") FROM src LIMIT 1; explain vectorization expression -select * from alltypesorc join test where alltypesorc.cint=test.a; +select * from alltypesorc join test_n8 where alltypesorc.cint=test_n8.a; -select * from alltypesorc join test where alltypesorc.cint=test.a; +select * from alltypesorc join test_n8 where alltypesorc.cint=test_n8.a; -CREATE TABLE test2a (a ARRAY<INT>, index INT) STORED AS ORC; -INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2), 1 FROM src LIMIT 1; +CREATE TABLE test2a_n0 (a ARRAY<INT>, index INT) STORED AS ORC; +INSERT OVERWRITE TABLE test2a_n0 SELECT ARRAY(1, 2), 1 FROM src LIMIT 1; -CREATE TABLE test2b (a INT) STORED AS ORC; -INSERT OVERWRITE TABLE test2b VALUES (2), (3), (4); +CREATE TABLE test2b_n0 (a INT) STORED AS ORC; +INSERT OVERWRITE TABLE test2b_n0 VALUES (2), (3), (4); explain vectorization expression -select * from test2b join test2a on test2b.a = test2a.a[1]; +select * from test2b_n0 join test2a_n0 on test2b_n0.a = test2a_n0.a[1]; -select * from test2b join test2a on test2b.a = test2a.a[1]; +select * from test2b_n0 join test2a_n0 on test2b_n0.a = test2a_n0.a[1]; explain vectorization expression -select * from test2b join test2a on test2b.a = test2a.a[test2a.index]; +select * from test2b_n0 join test2a_n0 on test2b_n0.a = test2a_n0.a[test2a_n0.index]; -select * from test2b join test2a on test2b.a = test2a.a[test2a.index]; \ No newline at end of file +select * from test2b_n0 join test2a_n0 on test2b_n0.a = test2a_n0.a[test2a_n0.index]; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/vector_create_struct_table.q b/ql/src/test/queries/clientpositive/vector_create_struct_table.q index db26cb23a2..0c6cac7e55 100644 --- a/ql/src/test/queries/clientpositive/vector_create_struct_table.q +++ b/ql/src/test/queries/clientpositive/vector_create_struct_table.q @@ -3,43 +3,43 @@ set hive.fetch.task.conversion=none; -- The kv1 input file has 2 data fields, so when the 3 field struct is deserialized, -- the premature end will put a NULL in field #3.
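What the two comment lines above amount to (an illustrative aside, not in the patch): each kv1.txt record carries two delimited values, the struct declared on the table expects three members, and LazySimpleSerDe (the SerDe behind ROW FORMAT DELIMITED) fills struct members until the record runs out and returns NULL for the rest. So after the load below:

-- expected on every row: strct.a and strct.b populated, strct.c NULL
select strct.a, strct.b, strct.c is null from string_fields_n0 limit 1;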
-create table string_fields(strct struct<a:string, b:string, c:string>) +create table string_fields_n0(strct struct<a:string, b:string, c:string>) row format delimited fields terminated by '\t' collection items terminated by '\001'; load data local inpath '../../data/files/kv1.txt' -overwrite into table string_fields; +overwrite into table string_fields_n0; EXPLAIN VECTORIZATION EXPRESSION -SELECT strct, strct.a, strct.b, strct.c FROM string_fields LIMIT 10; +SELECT strct, strct.a, strct.b, strct.c FROM string_fields_n0 LIMIT 10; -SELECT strct, strct.a, strct.b, strct.c FROM string_fields LIMIT 10; +SELECT strct, strct.a, strct.b, strct.c FROM string_fields_n0 LIMIT 10; -create table char_fields(strct struct) +create table char_fields_n0(strct struct) row format delimited fields terminated by '\t' collection items terminated by '\001'; load data local inpath '../../data/files/kv1.txt' -overwrite into table char_fields; +overwrite into table char_fields_n0; EXPLAIN VECTORIZATION EXPRESSION -SELECT strct, strct.a, strct.b, strct.c FROM char_fields LIMIT 10; +SELECT strct, strct.a, strct.b, strct.c FROM char_fields_n0 LIMIT 10; -SELECT strct, strct.a, strct.b, strct.c FROM char_fields LIMIT 10; +SELECT strct, strct.a, strct.b, strct.c FROM char_fields_n0 LIMIT 10; -create table varchar_fields(strct struct) +create table varchar_fields_n0(strct struct) row format delimited fields terminated by '\t' collection items terminated by '\001'; load data local inpath '../../data/files/kv1.txt' -overwrite into table varchar_fields; +overwrite into table varchar_fields_n0; EXPLAIN VECTORIZATION EXPRESSION -SELECT strct, strct.a, strct.b, strct.c FROM varchar_fields LIMIT 10; +SELECT strct, strct.a, strct.b, strct.c FROM varchar_fields_n0 LIMIT 10; -SELECT strct, strct.a, strct.b, strct.c FROM varchar_fields LIMIT 10; +SELECT strct, strct.a, strct.b, strct.c FROM varchar_fields_n0 LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/vector_data_types.q b/ql/src/test/queries/clientpositive/vector_data_types.q index 41146d76b2..332da13a48 100644 --- a/ql/src/test/queries/clientpositive/vector_data_types.q +++ b/ql/src/test/queries/clientpositive/vector_data_types.q @@ -2,11 +2,11 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; set hive.fetch.task.conversion=none; -DROP TABLE over1k; -DROP TABLE over1korc; +DROP TABLE over1k_n8; +DROP TABLE over1korc_n1; -- data setup -CREATE TABLE over1k(t tinyint, +CREATE TABLE over1k_n8(t tinyint, si smallint, i int, b bigint, @@ -20,9 +20,9 @@ CREATE TABLE over1k(t tinyint, ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k; +LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n8; -CREATE TABLE over1korc(t tinyint, +CREATE TABLE over1korc_n1(t tinyint, si smallint, i int, b bigint, @@ -35,29 +35,29 @@ CREATE TABLE over1korc(t tinyint, bin binary) STORED AS ORC; -INSERT INTO TABLE over1korc SELECT * FROM over1k; +INSERT INTO TABLE over1korc_n1 SELECT * FROM over1k_n8; -- Add a single NULL row that will come from ORC as isRepeated.
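(The single-NULL insert just below is the same isRepeated device used in the earlier files.) Separately, a word on the SUM(HASH(*)) queries this file runs after toggling hive.vectorized.execution.enabled: summing HASH(*) over the full select gives an order-insensitive checksum, so the row-mode and vectorized runs can be compared for identical contents without depending on row order. The idiom, against a table t whose name is hypothetical here:

set hive.vectorized.execution.enabled=false;
select sum(hash(*)) from (select * from t) q;  -- row-mode checksum
set hive.vectorized.execution.enabled=true;
select sum(hash(*)) from (select * from t) q;  -- should produce the same value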
-insert into over1korc values (NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL); +insert into over1korc_n1 values (NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL); SET hive.vectorized.execution.enabled=false; -EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20; +EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20; -SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20; +SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20; SELECT SUM(HASH(*)) -FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q; +FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q; SET hive.vectorized.execution.enabled=true; -EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20; +EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20; -SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20; +SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20; EXPLAIN VECTORIZATION EXPRESSION SELECT SUM(HASH(*)) -FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q; +FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q; SELECT SUM(HASH(*)) -FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q; +FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_3.q b/ql/src/test/queries/clientpositive/vector_decimal_3.q index c23a65295a..551e8c931a 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_3.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_3.q @@ -2,35 +2,35 @@ set hive.mapred.mode=nonstrict; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -DROP TABLE IF EXISTS DECIMAL_3_txt; -DROP TABLE IF EXISTS DECIMAL_3; +DROP TABLE IF EXISTS DECIMAL_3_txt_n0; +DROP TABLE IF EXISTS DECIMAL_3_n1; -CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +CREATE TABLE DECIMAL_3_txt_n0(key decimal(38,18), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt; +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt_n0; -CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt; +CREATE TABLE DECIMAL_3_n1 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt_n0; -SELECT * FROM DECIMAL_3 ORDER BY key, value; +SELECT * FROM DECIMAL_3_n1 ORDER BY key, value; -SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC; +SELECT * FROM DECIMAL_3_n1 ORDER BY key DESC, value DESC; -SELECT * FROM DECIMAL_3 ORDER BY key, value; +SELECT * FROM DECIMAL_3_n1 ORDER BY key, value; -SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key; +SELECT DISTINCT key FROM DECIMAL_3_n1 ORDER BY key; -SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key; +SELECT key, sum(value) FROM DECIMAL_3_n1 GROUP BY key ORDER BY key; -SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value; +SELECT value, sum(key) FROM DECIMAL_3_n1 GROUP BY value ORDER BY value; -SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 
b ON (a.key = b.key) ORDER BY a.key, a.value, b.value; +SELECT * FROM DECIMAL_3_n1 a JOIN DECIMAL_3_n1 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value; -SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value; +SELECT * FROM DECIMAL_3_n1 WHERE key=3.14 ORDER BY key, value; -SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value; +SELECT * FROM DECIMAL_3_n1 WHERE key=3.140 ORDER BY key, value; -DROP TABLE DECIMAL_3_txt; -DROP TABLE DECIMAL_3; +DROP TABLE DECIMAL_3_txt_n0; +DROP TABLE DECIMAL_3_n1; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_4.q b/ql/src/test/queries/clientpositive/vector_decimal_4.q index 0c3407418a..1beb17d3d6 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_4.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_4.q @@ -2,28 +2,28 @@ set hive.mapred.mode=nonstrict; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -DROP TABLE IF EXISTS DECIMAL_4_1; -DROP TABLE IF EXISTS DECIMAL_4_2; +DROP TABLE IF EXISTS DECIMAL_4_1_n0; +DROP TABLE IF EXISTS DECIMAL_4_2_n0; -CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int) +CREATE TABLE DECIMAL_4_1_n0(key decimal(35,25), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25)) +CREATE TABLE DECIMAL_4_2_n0(key decimal(35,25), value decimal(35,25)) STORED AS ORC; -LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1; +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1_n0; -INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1; +INSERT OVERWRITE TABLE DECIMAL_4_2_n0 SELECT key, key * 3 FROM DECIMAL_4_1_n0; -SELECT * FROM DECIMAL_4_1 ORDER BY key, value; +SELECT * FROM DECIMAL_4_1_n0 ORDER BY key, value; -SELECT * FROM DECIMAL_4_2 ORDER BY key, value; +SELECT * FROM DECIMAL_4_2_n0 ORDER BY key, value; -SELECT * FROM DECIMAL_4_2 ORDER BY key; +SELECT * FROM DECIMAL_4_2_n0 ORDER BY key; -SELECT * FROM DECIMAL_4_2 ORDER BY key, value; +SELECT * FROM DECIMAL_4_2_n0 ORDER BY key, value; -DROP TABLE DECIMAL_4_1; -DROP TABLE DECIMAL_4_2; +DROP TABLE DECIMAL_4_1_n0; +DROP TABLE DECIMAL_4_2_n0; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_expressions.q b/ql/src/test/queries/clientpositive/vector_decimal_expressions.q index 3e590cb9f1..8a3875254a 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_expressions.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_expressions.q @@ -6,41 +6,41 @@ set hive.stats.column.autogather=false; -- SORT_QUERY_RESULTS -CREATE TABLE decimal_test (cdouble double,cdecimal1 DECIMAL(20,10), cdecimal2 DECIMAL(23,14)) STORED AS ORC; +CREATE TABLE decimal_test_n1 (cdouble double,cdecimal1 DECIMAL(20,10), cdecimal2 DECIMAL(23,14)) STORED AS ORC; -- Add a single NULL row that will come from ORC as isRepeated. 
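(Again the one-row NULL isRepeated probe below.) One aside ahead of the "-- DECIMAL_64" marker later in this hunk, hedged since it goes beyond what the patch itself states: the columns above, DECIMAL(20,10) and DECIMAL(23,14), exceed 18 digits of precision and therefore vectorize through the general HiveDecimal column vectors, while decimal_test_small_n0 below is built with DECIMAL(10,3) and DECIMAL(7,2), which fit the faster DECIMAL_64 representation backed by a scaled 64-bit long. A minimal probe, with hypothetical table names:

create table dec64_probe (d decimal(10,3)) stored as orc;   -- precision <= 18: DECIMAL_64 eligible
create table dec128_probe (d decimal(20,10)) stored as orc; -- precision > 18: HiveDecimal path
explain vectorization detail select d + 1 from dec64_probe;  -- expect decimal64-flavored expressions in the plan
explain vectorization detail select d + 1 from dec128_probe;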
-insert into decimal_test values (NULL, NULL, NULL); +insert into decimal_test_n1 values (NULL, NULL, NULL); -INSERT INTO TABLE decimal_test SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc; +INSERT INTO TABLE decimal_test_n1 SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc; SET hive.vectorized.execution.enabled=true; EXPLAIN VECTORIZATION DETAIL -SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL +SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14 LIMIT 10; -SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL +SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14 LIMIT 10; SELECT SUM(HASH(*)) -FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS 
STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL +FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q; -- DECIMAL_64 -CREATE TABLE decimal_test_small STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(10,3)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(7,2)) AS cdecimal2 FROM alltypesorc; +CREATE TABLE decimal_test_small_n0 STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(10,3)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(7,2)) AS cdecimal2 FROM alltypesorc; EXPLAIN VECTORIZATION DETAIL -SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL +SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14 LIMIT 10; -SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL +SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS 
DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14 LIMIT 10; SELECT SUM(HASH(*)) -FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL +FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q b/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q index ed27ce9e8b..6e5b5b6540 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q @@ -8,7 +8,7 @@ SET hive.auto.convert.join.noconditionaltask.size=1000000000; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -CREATE TABLE over1k(t tinyint, +CREATE TABLE over1k_n2(t tinyint, si smallint, i int, b bigint, @@ -22,24 +22,24 @@ CREATE TABLE over1k(t tinyint, ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k; +LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n2; -CREATE TABLE t1(`dec` decimal(22,2), value_dec decimal(22,2)) STORED AS ORC; -INSERT INTO TABLE t1 select `dec`, cast(d as decimal(22,2)) from over1k; -CREATE TABLE t2(`dec` decimal(24,0), value_dec decimal(24,0)) STORED AS ORC; -INSERT INTO TABLE t2 select `dec`, cast(d as decimal(24,0)) from over1k; +CREATE TABLE t1_n48(`dec` decimal(22,2), value_dec decimal(22,2)) STORED AS ORC; +INSERT INTO TABLE t1_n48 select `dec`, cast(d as decimal(22,2)) from over1k_n2; +CREATE TABLE t2_n29(`dec` decimal(24,0), value_dec decimal(24,0)) STORED AS ORC; +INSERT INTO TABLE t2_n29 select `dec`, cast(d as decimal(24,0)) from over1k_n2; explain vectorization detail -select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`); +select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`); -- SORT_QUERY_RESULTS -select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`); +select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`); explain vectorization detail -select t1.`dec`, t1.value_dec, 
t2.`dec`, t2.value_dec from t1 join t2 on (t1.`dec`=t2.`dec`); +select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`); -select t1.`dec`, t1.value_dec, t2.`dec`, t2.value_dec from t1 join t2 on (t1.`dec`=t2.`dec`); +select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`); diff --git a/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q b/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q index 16ffdc2c27..7ee7891eeb 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q @@ -3,10 +3,10 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; set hive.fetch.task.conversion=none; -CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc; +CREATE TABLE decimal_test_n0 STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc; -- Add a single NULL row that will come from ORC as isRepeated. -insert into decimal_test values (NULL, NULL, NULL, NULL); +insert into decimal_test_n0 values (NULL, NULL, NULL, NULL); SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; @@ -44,7 +44,7 @@ select ,Sign(cdecimal1) -- Test nesting ,cos(-sin(log(cdecimal1)) + 3.14159) -from decimal_test +from decimal_test_n0 -- limit output to a reasonably small number of rows where cbigint % 500 = 0 -- test use of a math function in the WHERE clause @@ -80,7 +80,7 @@ select ,Sign(cdecimal1) -- Test nesting ,cos(-sin(log(cdecimal1)) + 3.14159) -from decimal_test +from decimal_test_n0 -- limit output to a reasonably small number of rows where cbigint % 500 = 0 -- test use of a math function in the WHERE clause diff --git a/ql/src/test/queries/clientpositive/vector_decimal_udf.q b/ql/src/test/queries/clientpositive/vector_decimal_udf.q index 091f502661..b3288ee042 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_udf.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_udf.q @@ -6,7 +6,7 @@ set hive.fetch.task.conversion=minimal; -- SORT_QUERY_RESULTS DROP TABLE IF EXISTS DECIMAL_UDF_txt; -DROP TABLE IF EXISTS DECIMAL_UDF; +DROP TABLE IF EXISTS DECIMAL_UDF_n0; CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int) ROW FORMAT DELIMITED @@ -15,169 +15,169 @@ STORED AS TEXTFILE; LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt; -CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int) +CREATE TABLE DECIMAL_UDF_n0 (key decimal(20,10), value int) STORED AS ORC; -INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt; +INSERT OVERWRITE TABLE DECIMAL_UDF_n0 SELECT * FROM DECIMAL_UDF_txt; -- Add a single NULL row that will come from ORC as isRepeated. 
-insert into DECIMAL_UDF values (NULL, NULL);
+insert into DECIMAL_UDF_n0 values (NULL, NULL);
-- addition
EXPLAIN VECTORIZATION DETAIL
-SELECT key + key FROM DECIMAL_UDF;
-SELECT key + key FROM DECIMAL_UDF;
+SELECT key + key FROM DECIMAL_UDF_n0;
+SELECT key + key FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key + value FROM DECIMAL_UDF;
-SELECT key + value FROM DECIMAL_UDF;
+SELECT key + value FROM DECIMAL_UDF_n0;
+SELECT key + value FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key + (value/2) FROM DECIMAL_UDF;
-SELECT key + (value/2) FROM DECIMAL_UDF;
+SELECT key + (value/2) FROM DECIMAL_UDF_n0;
+SELECT key + (value/2) FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key + '1.0' FROM DECIMAL_UDF;
-SELECT key + '1.0' FROM DECIMAL_UDF;
+SELECT key + '1.0' FROM DECIMAL_UDF_n0;
+SELECT key + '1.0' FROM DECIMAL_UDF_n0;
-- subtraction
EXPLAIN VECTORIZATION DETAIL
-SELECT key - key FROM DECIMAL_UDF;
-SELECT key - key FROM DECIMAL_UDF;
+SELECT key - key FROM DECIMAL_UDF_n0;
+SELECT key - key FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key - value FROM DECIMAL_UDF;
-SELECT key - value FROM DECIMAL_UDF;
+SELECT key - value FROM DECIMAL_UDF_n0;
+SELECT key - value FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key - (value/2) FROM DECIMAL_UDF;
-SELECT key - (value/2) FROM DECIMAL_UDF;
+SELECT key - (value/2) FROM DECIMAL_UDF_n0;
+SELECT key - (value/2) FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key - '1.0' FROM DECIMAL_UDF;
-SELECT key - '1.0' FROM DECIMAL_UDF;
+SELECT key - '1.0' FROM DECIMAL_UDF_n0;
+SELECT key - '1.0' FROM DECIMAL_UDF_n0;
-- multiplication
EXPLAIN VECTORIZATION DETAIL
-SELECT key * key FROM DECIMAL_UDF;
-SELECT key * key FROM DECIMAL_UDF;
+SELECT key * key FROM DECIMAL_UDF_n0;
+SELECT key * key FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key, value FROM DECIMAL_UDF where key * value > 0;
-SELECT key, value FROM DECIMAL_UDF where key * value > 0;
+SELECT key, value FROM DECIMAL_UDF_n0 where key * value > 0;
+SELECT key, value FROM DECIMAL_UDF_n0 where key * value > 0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key * value FROM DECIMAL_UDF;
-SELECT key * value FROM DECIMAL_UDF;
+SELECT key * value FROM DECIMAL_UDF_n0;
+SELECT key * value FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key * (value/2) FROM DECIMAL_UDF;
-SELECT key * (value/2) FROM DECIMAL_UDF;
+SELECT key * (value/2) FROM DECIMAL_UDF_n0;
+SELECT key * (value/2) FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key * '2.0' FROM DECIMAL_UDF;
-SELECT key * '2.0' FROM DECIMAL_UDF;
+SELECT key * '2.0' FROM DECIMAL_UDF_n0;
+SELECT key * '2.0' FROM DECIMAL_UDF_n0;
-- division
EXPLAIN VECTORIZATION DETAIL
-SELECT key / 0 FROM DECIMAL_UDF;
-SELECT key / 0 FROM DECIMAL_UDF;
+SELECT key / 0 FROM DECIMAL_UDF_n0;
+SELECT key / 0 FROM DECIMAL_UDF_n0;
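-- [Editor's note, not part of the patch] Hive decimal division by zero yields NULL
-- rather than raising an error, so the key / 0 queries above return one NULL per
-- input row. A minimal standalone check (illustrative):
--   select cast(1 as decimal(20,10)) / 0;   -- NULL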
-- Output not stable.
-- EXPLAIN VECTORIZATION DETAIL
--- SELECT key / NULL FROM DECIMAL_UDF;
--- SELECT key / NULL FROM DECIMAL_UDF;
+-- SELECT key / NULL FROM DECIMAL_UDF_n0;
+-- SELECT key / NULL FROM DECIMAL_UDF_n0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0;
-SELECT key / key FROM DECIMAL_UDF WHERE key is not null and key <> 0;
+SELECT key / key FROM DECIMAL_UDF_n0 WHERE key is not null and key <> 0;
+SELECT key / key FROM DECIMAL_UDF_n0 WHERE key is not null and key <> 0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0;
-SELECT key / value FROM DECIMAL_UDF WHERE value is not null and value <> 0;
+SELECT key / value FROM DECIMAL_UDF_n0 WHERE value is not null and value <> 0;
+SELECT key / value FROM DECIMAL_UDF_n0 WHERE value is not null and value <> 0;
EXPLAIN VECTORIZATION DETAIL
-SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0;
-SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0;
+SELECT key / (value/2) FROM DECIMAL_UDF_n0 WHERE value is not null and value <> 0;
+SELECT key / (value/2) FROM DECIMAL_UDF_n0 WHERE value is not null and value <> 0;
EXPLAIN VECTORIZATION DETAIL
-SELECT 1 + (key / '2.0') FROM DECIMAL_UDF;
-SELECT 1 + (key / '2.0') FROM DECIMAL_UDF;
+SELECT 1 + (key / '2.0') FROM DECIMAL_UDF_n0;
+SELECT 1 + (key / '2.0') FROM DECIMAL_UDF_n0;
-- abs
EXPLAIN VECTORIZATION DETAIL
-SELECT abs(key) FROM DECIMAL_UDF;
-SELECT abs(key) FROM DECIMAL_UDF;
+SELECT abs(key) FROM DECIMAL_UDF_n0;
+SELECT abs(key) FROM DECIMAL_UDF_n0;
-- avg
EXPLAIN VECTORIZATION DETAIL
-SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value;
-SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value;
+SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF_n0 GROUP BY value ORDER BY value;
+SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF_n0 GROUP BY value ORDER BY value;
-- negative
EXPLAIN VECTORIZATION DETAIL
-SELECT -key FROM DECIMAL_UDF;
-SELECT -key FROM DECIMAL_UDF;
+SELECT -key FROM DECIMAL_UDF_n0;
+SELECT -key FROM DECIMAL_UDF_n0;
-- positive
EXPLAIN VECTORIZATION DETAIL
-SELECT +key FROM DECIMAL_UDF;
-SELECT +key FROM DECIMAL_UDF;
+SELECT +key FROM DECIMAL_UDF_n0;
+SELECT +key FROM DECIMAL_UDF_n0;
-- ceiling
-EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF;
-SELECT CEIL(key) FROM DECIMAL_UDF;
+EXPLAIN SELECT CEIL(key) FROM DECIMAL_UDF_n0;
+SELECT CEIL(key) FROM DECIMAL_UDF_n0;
-- floor
EXPLAIN VECTORIZATION DETAIL
-SELECT FLOOR(key) FROM DECIMAL_UDF;
-SELECT FLOOR(key) FROM DECIMAL_UDF;
+SELECT FLOOR(key) FROM DECIMAL_UDF_n0;
+SELECT FLOOR(key) FROM DECIMAL_UDF_n0;
-- round
EXPLAIN VECTORIZATION DETAIL
-SELECT ROUND(key, 2) FROM DECIMAL_UDF;
-SELECT ROUND(key, 2) FROM DECIMAL_UDF;
+SELECT ROUND(key, 2) FROM DECIMAL_UDF_n0;
+SELECT ROUND(key, 2) FROM DECIMAL_UDF_n0;
-- power
EXPLAIN VECTORIZATION DETAIL
-SELECT POWER(key, 2) FROM DECIMAL_UDF;
-SELECT POWER(key, 2) FROM DECIMAL_UDF;
+SELECT POWER(key, 2) FROM DECIMAL_UDF_n0;
+SELECT POWER(key, 2) FROM DECIMAL_UDF_n0;
-- modulo
EXPLAIN VECTORIZATION DETAIL
-SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF;
-SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF;
+SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF_n0;
+SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF_n0;
-- stddev, var
EXPLAIN VECTORIZATION DETAIL
-SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value;
-SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value; +SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF_n0 GROUP BY value; +SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF_n0 GROUP BY value; -- stddev_samp, var_samp EXPLAIN VECTORIZATION DETAIL -SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value; -SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value; +SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF_n0 GROUP BY value; +SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF_n0 GROUP BY value; -- histogram EXPLAIN VECTORIZATION DETAIL -SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF; -SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF; +SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF_n0; +SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF_n0; -- min EXPLAIN VECTORIZATION DETAIL -SELECT MIN(key) FROM DECIMAL_UDF; -SELECT MIN(key) FROM DECIMAL_UDF; +SELECT MIN(key) FROM DECIMAL_UDF_n0; +SELECT MIN(key) FROM DECIMAL_UDF_n0; -- max EXPLAIN VECTORIZATION DETAIL -SELECT MAX(key) FROM DECIMAL_UDF; -SELECT MAX(key) FROM DECIMAL_UDF; +SELECT MAX(key) FROM DECIMAL_UDF_n0; +SELECT MAX(key) FROM DECIMAL_UDF_n0; -- count EXPLAIN VECTORIZATION DETAIL -SELECT COUNT(key) FROM DECIMAL_UDF; -SELECT COUNT(key) FROM DECIMAL_UDF; +SELECT COUNT(key) FROM DECIMAL_UDF_n0; +SELECT COUNT(key) FROM DECIMAL_UDF_n0; -- DECIMAL_64 @@ -347,5 +347,5 @@ SELECT COUNT(key) FROM DECIMAL_UDF_txt_small; SELECT COUNT(key) FROM DECIMAL_UDF_txt_small; DROP TABLE IF EXISTS DECIMAL_UDF_txt; -DROP TABLE IF EXISTS DECIMAL_UDF; +DROP TABLE IF EXISTS DECIMAL_UDF_n0; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_udf2.q b/ql/src/test/queries/clientpositive/vector_decimal_udf2.q index e10b7d171e..d88217b51f 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_udf2.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_udf2.q @@ -4,7 +4,7 @@ SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; DROP TABLE IF EXISTS DECIMAL_UDF2_txt; -DROP TABLE IF EXISTS DECIMAL_UDF2; +DROP TABLE IF EXISTS DECIMAL_UDF2_n0; CREATE TABLE DECIMAL_UDF2_txt (key decimal(14,5), value int) ROW FORMAT DELIMITED @@ -13,44 +13,44 @@ STORED AS TEXTFILE; LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2_txt; -CREATE TABLE DECIMAL_UDF2 (key decimal(14,5), value int) +CREATE TABLE DECIMAL_UDF2_n0 (key decimal(14,5), value int) STORED AS ORC; -INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt; +INSERT OVERWRITE TABLE DECIMAL_UDF2_n0 SELECT * FROM DECIMAL_UDF2_txt; -- Add a single NULL row that will come from ORC as isRepeated. 
-insert into DECIMAL_UDF2 values (NULL, NULL); +insert into DECIMAL_UDF2_n0 values (NULL, NULL); EXPLAIN VECTORIZATION DETAIL SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2 WHERE key = 10; +FROM DECIMAL_UDF2_n0 WHERE key = 10; SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2 WHERE key = 10; +FROM DECIMAL_UDF2_n0 WHERE key = 10; SELECT SUM(HASH(*)) FROM (SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2) q; +FROM DECIMAL_UDF2_n0) q; EXPLAIN VECTORIZATION DETAIL SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2 WHERE key = 10; +FROM DECIMAL_UDF2_n0 WHERE key = 10; SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2 WHERE key = 10; +FROM DECIMAL_UDF2_n0 WHERE key = 10; SELECT SUM(HASH(*)) FROM (SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2) q; +FROM DECIMAL_UDF2_n0) q; -- DECIMAL_64 @@ -86,4 +86,4 @@ FROM (SELECT FROM DECIMAL_UDF2_txt) q; DROP TABLE IF EXISTS DECIMAL_UDF2_txt; -DROP TABLE IF EXISTS DECIMAL_UDF2; +DROP TABLE IF EXISTS DECIMAL_UDF2_n0; diff --git a/ql/src/test/queries/clientpositive/vector_distinct_2.q b/ql/src/test/queries/clientpositive/vector_distinct_2.q index 4be23c1132..5b8fcedf79 100644 --- a/ql/src/test/queries/clientpositive/vector_distinct_2.q +++ b/ql/src/test/queries/clientpositive/vector_distinct_2.q @@ -5,7 +5,7 @@ set hive.fetch.task.conversion=none; -- SORT_QUERY_RESULTS -create table vectortab2k( +create table vectortab2k_n3( t tinyint, si smallint, i int, @@ -22,9 +22,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n3; -create table vectortab2korc( +create table vectortab2korc_n3( t tinyint, si smallint, i int, @@ -40,9 +40,9 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n3 SELECT * FROM vectortab2k_n3; explain vectorization expression -select distinct s, t from vectortab2korc; +select distinct s, t from vectortab2korc_n3; -select distinct s, t from vectortab2korc; +select distinct s, t from vectortab2korc_n3; diff --git a/ql/src/test/queries/clientpositive/vector_groupby4.q b/ql/src/test/queries/clientpositive/vector_groupby4.q index 373509cf2b..66cefb8ed6 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby4.q +++ b/ql/src/test/queries/clientpositive/vector_groupby4.q @@ -7,18 +7,18 @@ set hive.groupby.skewindata=true; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -CREATE TABLE srcorc STORED AS ORC AS SELECT * FROM src; +CREATE TABLE srcorc_n1 STORED AS ORC AS SELECT * FROM src; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(c1 STRING) STORED AS ORC; +CREATE TABLE dest1_n154(c1 STRING) STORED AS ORC; EXPLAIN VECTORIZATION EXPRESSION -FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1); +FROM srcorc_n1 +INSERT OVERWRITE TABLE dest1_n154 SELECT substr(srcorc_n1.key,1,1) GROUP BY substr(srcorc_n1.key,1,1); -FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT 
substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1); +FROM srcorc_n1 +INSERT OVERWRITE TABLE dest1_n154 SELECT substr(srcorc_n1.key,1,1) GROUP BY substr(srcorc_n1.key,1,1); -SELECT dest1.* FROM dest1; +SELECT dest1_n154.* FROM dest1_n154; diff --git a/ql/src/test/queries/clientpositive/vector_groupby6.q b/ql/src/test/queries/clientpositive/vector_groupby6.q index 2085c19639..93704ba11a 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby6.q +++ b/ql/src/test/queries/clientpositive/vector_groupby6.q @@ -7,19 +7,19 @@ set hive.groupby.skewindata=true; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -CREATE TABLE srcorc STORED AS ORC AS SELECT * FROM src; +CREATE TABLE srcorc_n0 STORED AS ORC AS SELECT * FROM src; -- SORT_QUERY_RESULTS -CREATE TABLE dest1(c1 STRING) STORED AS ORC; +CREATE TABLE dest1_n73(c1 STRING) STORED AS ORC; EXPLAIN VECTORIZATION EXPRESSION -FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1); +FROM srcorc_n0 +INSERT OVERWRITE TABLE dest1_n73 SELECT DISTINCT substr(srcorc_n0.value,5,1); -FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1); +FROM srcorc_n0 +INSERT OVERWRITE TABLE dest1_n73 SELECT DISTINCT substr(srcorc_n0.value,5,1); -SELECT dest1.* FROM dest1; +SELECT dest1_n73.* FROM dest1_n73; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_3.q b/ql/src/test/queries/clientpositive/vector_groupby_3.q index 299ee92706..ef00147839 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_3.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_3.q @@ -5,7 +5,7 @@ set hive.fetch.task.conversion=none; -- SORT_QUERY_RESULTS -create table vectortab2k( +create table vectortab2k_n9( t tinyint, si smallint, i int, @@ -22,9 +22,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n9; -create table vectortab2korc( +create table vectortab2korc_n8( t tinyint, si smallint, i int, @@ -40,9 +40,9 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n8 SELECT * FROM vectortab2k_n9; explain vectorization expression -select s, t, max(b) from vectortab2korc group by s, t; +select s, t, max(b) from vectortab2korc_n8 group by s, t; -select s, t, max(b) from vectortab2korc group by s, t; +select s, t, max(b) from vectortab2korc_n8 group by s, t; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_cube1.q b/ql/src/test/queries/clientpositive/vector_groupby_cube1.q index d6bab2cc43..2e9996e480 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_cube1.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_cube1.q @@ -7,52 +7,52 @@ set hive.fetch.task.conversion=none; -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T1_n90(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n90; EXPLAIN VECTORIZATION DETAIL -SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube; +SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube; EXPLAIN VECTORIZATION DETAIL -SELECT key, val, count(1) FROM T1 GROUP BY CUBE(key, val); +SELECT key, val, count(1) FROM T1_n90 
GROUP BY CUBE(key, val); -SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube; +SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube; EXPLAIN VECTORIZATION DETAIL -SELECT key, val, GROUPING__ID, count(1) FROM T1 GROUP BY key, val with cube; +SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube; -SELECT key, val, GROUPING__ID, count(1) FROM T1 GROUP BY key, val with cube; +SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube; EXPLAIN VECTORIZATION DETAIL -SELECT key, count(distinct val) FROM T1 GROUP BY key with cube; +SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube; -SELECT key, count(distinct val) FROM T1 GROUP BY key with cube; +SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube; set hive.groupby.skewindata=true; EXPLAIN VECTORIZATION DETAIL -SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube; +SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube; -SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube; +SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube; EXPLAIN VECTORIZATION DETAIL -SELECT key, count(distinct val) FROM T1 GROUP BY key with cube; +SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube; -SELECT key, count(distinct val) FROM T1 GROUP BY key with cube; +SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube; set hive.multigroupby.singlereducer=true; -CREATE TABLE T2(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE; -CREATE TABLE T3(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE; +CREATE TABLE T2_n55(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE; +CREATE TABLE T3_n19(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE; EXPLAIN VECTORIZATION DETAIL -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with cube; +FROM T1_n90 +INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube +INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube; -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with cube; +FROM T1_n90 +INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube +INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_id1.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_id1.q index 4b34891382..f9b19eacfa 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_id1.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_id1.q @@ -8,37 +8,37 @@ CREATE TABLE T1_text(key STRING, val STRING) STORED AS TEXTFILE; LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_text; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n17 STORED AS ORC AS SELECT * FROM T1_text; -- SORT_QUERY_RESULTS EXPLAIN VECTORIZATION DETAIL -SELECT key, val, GROUPING__ID from T1 group by key, val with cube; +SELECT key, val, GROUPING__ID from T1_n17 group by key, val with cube; -SELECT key, val, GROUPING__ID from T1 group by key, val with cube; +SELECT key, val, GROUPING__ID from T1_n17 group by key, val with cube; EXPLAIN VECTORIZATION DETAIL -SELECT key, val, GROUPING__ID from T1 group by cube(key, val); +SELECT key, val, GROUPING__ID from 
T1_n17 group by cube(key, val); -SELECT key, val, GROUPING__ID from T1 group by cube(key, val); +SELECT key, val, GROUPING__ID from T1_n17 group by cube(key, val); EXPLAIN VECTORIZATION DETAIL -SELECT GROUPING__ID, key, val from T1 group by key, val with rollup; +SELECT GROUPING__ID, key, val from T1_n17 group by key, val with rollup; -SELECT GROUPING__ID, key, val from T1 group by key, val with rollup; +SELECT GROUPING__ID, key, val from T1_n17 group by key, val with rollup; EXPLAIN VECTORIZATION DETAIL -SELECT GROUPING__ID, key, val from T1 group by rollup (key, val); +SELECT GROUPING__ID, key, val from T1_n17 group by rollup (key, val); -SELECT GROUPING__ID, key, val from T1 group by rollup (key, val); +SELECT GROUPING__ID, key, val from T1_n17 group by rollup (key, val); EXPLAIN VECTORIZATION DETAIL -SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by key, val with cube; +SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n17 group by key, val with cube; -SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by key, val with cube; +SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n17 group by key, val with cube; EXPLAIN VECTORIZATION DETAIL -SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by cube(key, val); +SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n17 group by cube(key, val); -SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by cube(key, val); +SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n17 group by cube(key, val); diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_id2.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_id2.q index fa14b4d31c..5938ed2f29 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_id2.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_id2.q @@ -4,38 +4,38 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.fetch.task.conversion=none; set hive.cli.print.header=true; -CREATE TABLE T1_text(key INT, value INT) STORED AS TEXTFILE; +CREATE TABLE T1_text_n4(key INT, value INT) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_text_n4; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n73 STORED AS ORC AS SELECT * FROM T1_text_n4; set hive.groupby.skewindata = true; -- SORT_QUERY_RESULTS 
EXPLAIN VECTORIZATION DETAIL -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP; +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY key, value WITH ROLLUP; -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP; +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY key, value WITH ROLLUP; EXPLAIN VECTORIZATION DETAIL -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY ROLLUP (key, value); +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY ROLLUP (key, value); -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY ROLLUP (key, value); +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY ROLLUP (key, value); EXPLAIN VECTORIZATION DETAIL SELECT GROUPING__ID, count(*) FROM ( -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY key, value WITH ROLLUP ) t GROUP BY GROUPING__ID; SELECT GROUPING__ID, count(*) FROM ( -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY key, value WITH ROLLUP ) t GROUP BY GROUPING__ID; @@ -43,38 +43,38 @@ EXPLAIN VECTORIZATION DETAIL SELECT GROUPING__ID, count(*) FROM ( -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY ROLLUP(key, value) +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY ROLLUP(key, value) ) t GROUP BY GROUPING__ID; SELECT GROUPING__ID, count(*) FROM ( -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY ROLLUP(key, value) +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY ROLLUP(key, value) ) t GROUP BY GROUPING__ID; EXPLAIN VECTORIZATION DETAIL -SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY key,value WITH ROLLUP) t1 +SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n73 GROUP BY key,value WITH ROLLUP) t1 JOIN -(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2 +(SELECT GROUPING__ID FROM T1_n73 GROUP BY key, value WITH ROLLUP) t2 ON t1.GROUPING__ID = t2.GROUPING__ID; -SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY key,value WITH ROLLUP) t1 +SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n73 GROUP BY key,value WITH ROLLUP) t1 JOIN -(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2 +(SELECT GROUPING__ID FROM T1_n73 GROUP BY key, value WITH ROLLUP) t2 ON t1.GROUPING__ID = t2.GROUPING__ID; EXPLAIN VECTORIZATION DETAIL -SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY ROLLUP(key,value)) t1 +SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n73 GROUP BY ROLLUP(key,value)) t1 JOIN -(SELECT GROUPING__ID FROM T1 GROUP BY ROLLUP(key, value)) t2 +(SELECT GROUPING__ID FROM T1_n73 GROUP BY ROLLUP(key, value)) t2 ON t1.GROUPING__ID = t2.GROUPING__ID; -SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY ROLLUP(key,value)) t1 +SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n73 GROUP BY ROLLUP(key,value)) t1 JOIN -(SELECT GROUPING__ID FROM T1 GROUP BY ROLLUP(key, value)) t2 +(SELECT GROUPING__ID FROM T1_n73 GROUP BY ROLLUP(key, value)) t2 ON t1.GROUPING__ID = t2.GROUPING__ID; @@ -84,34 +84,34 @@ ON t1.GROUPING__ID = t2.GROUPING__ID; set hive.groupby.skewindata = false; EXPLAIN VECTORIZATION DETAIL -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY 
key, value WITH ROLLUP; +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY key, value WITH ROLLUP; -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP; +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY key, value WITH ROLLUP; EXPLAIN VECTORIZATION DETAIL SELECT GROUPING__ID, count(*) FROM ( -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY key, value WITH ROLLUP ) t GROUP BY GROUPING__ID; SELECT GROUPING__ID, count(*) FROM ( -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP +SELECT key, value, GROUPING__ID, count(*) from T1_n73 GROUP BY key, value WITH ROLLUP ) t GROUP BY GROUPING__ID; EXPLAIN VECTORIZATION DETAIL -SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY key,value WITH ROLLUP) t1 +SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n73 GROUP BY key,value WITH ROLLUP) t1 JOIN -(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2 +(SELECT GROUPING__ID FROM T1_n73 GROUP BY key, value WITH ROLLUP) t2 ON t1.GROUPING__ID = t2.GROUPING__ID; -SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1 GROUP BY key,value WITH ROLLUP) t1 +SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1_n73 GROUP BY key,value WITH ROLLUP) t1 JOIN -(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2 +(SELECT GROUPING__ID FROM T1_n73 GROUP BY key, value WITH ROLLUP) t2 ON t1.GROUPING__ID = t2.GROUPING__ID; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_id3.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_id3.q index 732b2d87e7..4fffba50c8 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_id3.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_id3.q @@ -4,11 +4,11 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.fetch.task.conversion=none; set hive.cli.print.header=true; -CREATE TABLE T1_text(key INT, value INT) STORED AS TEXTFILE; +CREATE TABLE T1_text_n9(key INT, value INT) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_text_n9; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n112 STORED AS ORC AS SELECT * FROM T1_text_n9; set hive.cbo.enable = false; @@ -16,12 +16,12 @@ set hive.cbo.enable = false; EXPLAIN VECTORIZATION DETAIL SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n112 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1; SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n112 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1; @@ -30,12 +30,12 @@ set hive.cbo.enable = true; EXPLAIN VECTORIZATION DETAIL SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n112 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1; SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n112 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets1.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets1.q index 67779b9b8e..6168270997 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets1.q +++ 
b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets1.q @@ -6,38 +6,38 @@ set hive.cli.print.header=true; -- SORT_QUERY_RESULTS -CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_text_n0(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text_n0; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n30 STORED AS ORC AS SELECT * FROM T1_text_n0; -SELECT * FROM T1; +SELECT * FROM T1_n30; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) from T1 group by a, b with cube; -SELECT a, b, count(*) from T1 group by a, b with cube; +SELECT a, b, count(*) from T1_n30 group by a, b with cube; +SELECT a, b, count(*) from T1_n30 group by a, b with cube; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) from T1 group by cube(a, b); -SELECT a, b, count(*) from T1 group by cube(a, b); +SELECT a, b, count(*) from T1_n30 group by cube(a, b); +SELECT a, b, count(*) from T1_n30 group by cube(a, b); EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()); -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()); +SELECT a, b, count(*) FROM T1_n30 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()); +SELECT a, b, count(*) FROM T1_n30 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()); EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)); -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)); +SELECT a, b, count(*) FROM T1_n30 GROUP BY a, b GROUPING SETS (a, (a, b)); +SELECT a, b, count(*) FROM T1_n30 GROUP BY a, b GROUPING SETS (a, (a, b)); EXPLAIN VECTORIZATION DETAIL -SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c); -SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c); +SELECT a FROM T1_n30 GROUP BY a, b, c GROUPING SETS (a, b, c); +SELECT a FROM T1_n30 GROUP BY a, b, c GROUPING SETS (a, b, c); EXPLAIN VECTORIZATION DETAIL -SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)); -SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)); +SELECT a FROM T1_n30 GROUP BY a GROUPING SETS ((a), (a)); +SELECT a FROM T1_n30 GROUP BY a GROUPING SETS ((a), (a)); EXPLAIN VECTORIZATION DETAIL -SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b); -SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b); +SELECT a + b, count(*) FROM T1_n30 GROUP BY a + b GROUPING SETS (a+b); +SELECT a + b, count(*) FROM T1_n30 GROUP BY a + b GROUPING SETS (a+b); diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets2.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets2.q index 97b5989fa8..fcbd345adb 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets2.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets2.q @@ -8,29 +8,29 @@ set hive.new.job.grouping.set.cardinality=2; -- SORT_QUERY_RESULTS -CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_text_n3(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text_n3; 
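-- [Editor's note, not part of the patch] A GROUPING SETS clause is shorthand for a
-- UNION ALL of ordinary GROUP BYs, with NULL standing in for the grouped-out keys.
-- Sketch for the T1_n30 queries above (same rows, ignoring GROUPING__ID):
--   SELECT a, b, count(*) FROM T1_n30 GROUP BY a, b
--   UNION ALL
--   SELECT a, NULL, count(*) FROM T1_n30 GROUP BY a;
-- is equivalent to GROUP BY a, b GROUPING SETS ((a, b), a).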
-CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n69 STORED AS ORC AS SELECT * FROM T1_text_n3; -- Since 4 grouping sets would be generated for the query below, an additional MR job should be created EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) from T1 group by a, b with cube; +SELECT a, b, count(*) from T1_n69 group by a, b with cube; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) from T1 group by cube(a, b); -SELECT a, b, count(*) from T1 group by a, b with cube; +SELECT a, b, count(*) from T1_n69 group by cube(a, b); +SELECT a, b, count(*) from T1_n69 group by a, b with cube; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, sum(c) from T1 group by a, b with cube; -SELECT a, b, sum(c) from T1 group by a, b with cube; +SELECT a, b, sum(c) from T1_n69 group by a, b with cube; +SELECT a, b, sum(c) from T1_n69 group by a, b with cube; -CREATE TABLE T2(a STRING, b STRING, c int, d int) STORED AS ORC; +CREATE TABLE T2_n42(a STRING, b STRING, c int, d int) STORED AS ORC; -INSERT OVERWRITE TABLE T2 -SELECT a, b, c, c from T1; +INSERT OVERWRITE TABLE T2_n42 +SELECT a, b, c, c from T1_n69; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, sum(c+d) from T2 group by a, b with cube; -SELECT a, b, sum(c+d) from T2 group by a, b with cube; +SELECT a, b, sum(c+d) from T2_n42 group by a, b with cube; +SELECT a, b, sum(c+d) from T2_n42 group by a, b with cube; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets3.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets3.q index a5e020f1d3..99838a5ab8 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets3.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets3.q @@ -6,16 +6,16 @@ set hive.cli.print.header=true; -- SORT_QUERY_RESULTS --- In this test, 2 files are loaded into table T1. The data contains rows with the same value of a and b, +-- In this test, 2 files are loaded into table T1_n106. The data contains rows with the same value of a and b, -- with different number of rows for a and b in each file. Since bucketizedHiveInputFormat is used, -- this tests that the aggregate function stores the partial aggregate state correctly even if an -- additional MR job is created for processing the grouping sets. -CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_text_n7(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_text; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_text_n7; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_text_n7; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n106 STORED AS ORC AS SELECT * FROM T1_text_n7; set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; set hive.new.job.grouping.set.cardinality = 30; @@ -24,17 +24,17 @@ set hive.new.job.grouping.set.cardinality = 30; -- (cube of a,b will lead to (a,b), (a, null), (null, b) and (null, null) and -- hive.new.job.grouping.set.cardinality is more than 4. 
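-- [Editor's note, not part of the patch] hive.new.job.grouping.set.cardinality bounds how
-- many grouping sets a single job may expand; when the count (4 for a two-key cube) exceeds
-- it, Hive plans the additional MR job the surrounding comments describe. The two settings
-- used in these tests exercise both sides of the threshold:
--   set hive.new.job.grouping.set.cardinality = 30;  -- 4 <= 30: single job
--   set hive.new.job.grouping.set.cardinality = 2;   -- 4 > 2: additional job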
EXPLAIN VECTORIZATION DETAIL -SELECT a, b, avg(c), count(*) from T1 group by a, b with cube; +SELECT a, b, avg(c), count(*) from T1_n106 group by a, b with cube; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, avg(c), count(*) from T1 group by cube(a, b); -SELECT a, b, avg(c), count(*) from T1 group by a, b with cube; +SELECT a, b, avg(c), count(*) from T1_n106 group by cube(a, b); +SELECT a, b, avg(c), count(*) from T1_n106 group by a, b with cube; set hive.new.job.grouping.set.cardinality=2; -- The query below will execute in 2 MR jobs, since hive.new.job.grouping.set.cardinality is set to 2. -- The partial aggregation state should be maintained correctly across MR jobs. EXPLAIN VECTORIZATION DETAIL -SELECT a, b, avg(c), count(*) from T1 group by a, b with cube; -SELECT a, b, avg(c), count(*) from T1 group by a, b with cube; +SELECT a, b, avg(c), count(*) from T1_n106 group by a, b with cube; +SELECT a, b, avg(c), count(*) from T1_n106 group by a, b with cube; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets3_dec.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets3_dec.q index 1dff14c62d..a500b2543f 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets3_dec.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets3_dec.q @@ -6,16 +6,16 @@ set hive.cli.print.header=true; -- SORT_QUERY_RESULTS --- In this test, 2 files are loaded into table T1. The data contains rows with the same value of a and b, +-- In this test, 2 files are loaded into table T1_n115. The data contains rows with the same value of a and b, -- with different number of rows for a and b in each file. Since bucketizedHiveInputFormat is used, -- this tests that the aggregate function stores the partial aggregate state correctly even if an -- additional MR job is created for processing the grouping sets. -CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_text_n10(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_text; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_text_n10; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_text_n10; -CREATE TABLE T1 STORED AS ORC AS SELECT a, b, cast(c as decimal(10,2)) as c_dec FROM T1_text; +CREATE TABLE T1_n115 STORED AS ORC AS SELECT a, b, cast(c as decimal(10,2)) as c_dec FROM T1_text_n10; set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; set hive.new.job.grouping.set.cardinality = 30; @@ -24,17 +24,17 @@ set hive.new.job.grouping.set.cardinality = 30; -- (cube of a,b will lead to (a,b), (a, null), (null, b) and (null, null) and -- hive.new.job.grouping.set.cardinality is more than 4. 
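-- [Editor's note, not part of the patch] avg() carries a (sum, count) pair as its partial
-- aggregate, so when the grouping-set expansion is split across two MR jobs the pairs are
-- merged rather than the finished averages: avg = sum(partial sums) / sum(partial counts).
-- That is the state the comments above say must survive the extra job, here over decimal c_dec.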
EXPLAIN VECTORIZATION DETAIL -SELECT a, b, avg(c_dec), count(*) from T1 group by a, b with cube; +SELECT a, b, avg(c_dec), count(*) from T1_n115 group by a, b with cube; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, avg(c_dec), count(*) from T1 group by cube(a, b); -SELECT a, b, avg(c_dec), count(*) from T1 group by a, b with cube; +SELECT a, b, avg(c_dec), count(*) from T1_n115 group by cube(a, b); +SELECT a, b, avg(c_dec), count(*) from T1_n115 group by a, b with cube; set hive.new.job.grouping.set.cardinality=2; -- The query below will execute in 2 MR jobs, since hive.new.job.grouping.set.cardinality is set to 2. -- The partial aggregation state should be maintained correctly across MR jobs. EXPLAIN VECTORIZATION DETAIL -SELECT a, b, avg(c_dec), count(*) from T1 group by a, b with cube; -SELECT a, b, avg(c_dec), count(*) from T1 group by a, b with cube; +SELECT a, b, avg(c_dec), count(*) from T1_n115 group by a, b with cube; +SELECT a, b, avg(c_dec), count(*) from T1_n115 group by a, b with cube; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets4.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets4.q index 8fbc956450..39da3a8a1f 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets4.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets4.q @@ -11,31 +11,31 @@ set hive.merge.mapredfiles = false; -- Set merging to false above to make the explain more readable -CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_text_n11(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text_n11; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n161 STORED AS ORC AS SELECT * FROM T1_text_n11; -- This tests that cubes and rollups work fine inside sub-queries. 
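-- [Editor's note, not part of the patch] For two keys the expansions differ only in the
-- lone-(b) set:
--   ROLLUP(a, b) -> grouping sets ((a, b), (a), ())
--   CUBE(a, b)   -> grouping sets ((a, b), (a), (b), ())
-- GROUPING__ID is a bitmask recording which keys are aggregated away; each sub-query below
-- therefore emits all four cube sets before the join on subq1.a = subq2.a.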
EXPLAIN VECTORIZATION DETAIL SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a; EXPLAIN VECTORIZATION DETAIL SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by cube(a, b) ) subq1 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by cube(a, b) ) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by cube(a, b) ) subq2 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by cube(a, b) ) subq2 on subq1.a = subq2.a; SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a; set hive.new.job.grouping.set.cardinality=2; @@ -44,14 +44,14 @@ set hive.new.job.grouping.set.cardinality=2; -- for each of them EXPLAIN VECTORIZATION DETAIL SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a; SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n161 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets5.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets5.q index 8f94c17ff6..46b23099f8 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets5.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets5.q @@ -8,32 +8,32 @@ set hive.merge.mapfiles = false; set hive.merge.mapredfiles = false; -- Set merging to false above to make the explain more readable -CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_text_n1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text_n1; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n33 STORED AS ORC AS SELECT * FROM T1_text_n1; -- SORT_QUERY_RESULTS -- This tests that cubes and rollups work fine where the source is a sub-query EXPLAIN VECTORIZATION DETAIL SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube; +(SELECT a, b, count(1) from T1_n33 group by a, b) subq1 group by a, b with cube; EXPLAIN VECTORIZATION DETAIL SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by cube(a, b); +(SELECT a, b, count(1) from T1_n33 group by a, b) subq1 group by cube(a, b); SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 
group by a, b) subq1 group by a, b with cube; +(SELECT a, b, count(1) from T1_n33 group by a, b) subq1 group by a, b with cube; set hive.new.job.grouping.set.cardinality=2; -- Since 4 grouping sets would be generated for the cube, an additional MR job should be created EXPLAIN VECTORIZATION DETAIL SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube; +(SELECT a, b, count(1) from T1_n33 group by a, b) subq1 group by a, b with cube; SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube; +(SELECT a, b, count(1) from T1_n33 group by a, b) subq1 group by a, b with cube; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets6.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets6.q index 2997cde2d3..1cffe9cd66 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets6.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets6.q @@ -5,11 +5,11 @@ set hive.fetch.task.conversion=none; set hive.cli.print.header=true; set hive.mapred.mode=nonstrict; -CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_text_n6(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text_n6; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n84 STORED AS ORC AS SELECT * FROM T1_text_n6; -- SORT_QUERY_RESULTS @@ -18,11 +18,11 @@ set hive.optimize.ppd = false; -- This filter is not pushed down EXPLAIN VECTORIZATION DETAIL SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n84 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5; SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n84 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5; set hive.cbo.enable = true; @@ -30,9 +30,9 @@ set hive.cbo.enable = true; -- This filter is pushed down through aggregate with grouping sets by Calcite EXPLAIN VECTORIZATION DETAIL SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n84 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5; SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n84 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets_grouping.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets_grouping.q index 5a5757dafd..04e68e4cd9 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets_grouping.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets_grouping.q @@ -4,52 +4,52 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.fetch.task.conversion=none; set hive.cli.print.header=true; -CREATE TABLE T1_text(key INT, value INT) STORED AS TEXTFILE; +CREATE TABLE T1_text_n2(key INT, value INT) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_text_n2; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n47 STORED AS ORC AS SELECT * 
FROM T1_text_n2; -- SORT_QUERY_RESULTS explain vectorization detail select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n47 group by rollup(key, value); select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n47 group by rollup(key, value); explain vectorization detail select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n47 group by cube(key, value); select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n47 group by cube(key, value); explain vectorization detail select key, value -from T1 +from T1_n47 group by cube(key, value) having grouping(key) = 1; select key, value -from T1 +from T1_n47 group by cube(key, value) having grouping(key) = 1; explain vectorization detail select key, value, grouping(key)+grouping(value) as x -from T1 +from T1_n47 group by cube(key, value) having grouping(key) = 1 OR grouping(value) = 1 order by x desc, case when x = 1 then key end; select key, value, grouping(key)+grouping(value) as x -from T1 +from T1_n47 group by cube(key, value) having grouping(key) = 1 OR grouping(value) = 1 order by x desc, case when x = 1 then key end; @@ -58,107 +58,107 @@ set hive.cbo.enable=false; explain vectorization detail select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n47 group by rollup(key, value); select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n47 group by rollup(key, value); explain vectorization detail select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n47 group by cube(key, value); select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n47 group by cube(key, value); explain vectorization detail select key, value -from T1 +from T1_n47 group by cube(key, value) having grouping(key) = 1; select key, value -from T1 +from T1_n47 group by cube(key, value) having grouping(key) = 1; explain vectorization detail select key, value, grouping(key)+grouping(value) as x -from T1 +from T1_n47 group by cube(key, value) having grouping(key) = 1 OR grouping(value) = 1 order by x desc, case when x = 1 then key end; select key, value, grouping(key)+grouping(value) as x -from T1 +from T1_n47 group by cube(key, value) having grouping(key) = 1 OR grouping(value) = 1 order by x desc, case when x = 1 then key end; explain vectorization detail select key, value, grouping(key), grouping(value) -from T1 +from T1_n47 group by key, value; select key, value, grouping(key), grouping(value) -from T1 +from T1_n47 group by key, value; explain vectorization detail select key, value, grouping(value) -from T1 +from T1_n47 group by key, value; select key, value, grouping(value) -from T1 +from T1_n47 group by key, value; explain vectorization detail select key, value -from T1 +from T1_n47 group by key, value having grouping(key) = 0; select key, value -from T1 +from T1_n47 group by key, value having grouping(key) = 0; explain vectorization detail select key, value, `grouping__id`, grouping(key, value) -from T1 +from T1_n47 group by cube(key, value); select key, value, `grouping__id`, grouping(key, value) -from T1 +from T1_n47 group by cube(key, value); explain vectorization detail select key, value, `grouping__id`, grouping(value, key) -from T1 +from T1_n47 group by cube(key, value); select key, value, `grouping__id`, grouping(value, key) -from T1 +from T1_n47 group by cube(key, value); explain vectorization detail select key, value, `grouping__id`, 
grouping(key, value) -from T1 +from T1_n47 group by rollup(key, value); select key, value, `grouping__id`, grouping(key, value) -from T1 +from T1_n47 group by rollup(key, value); explain vectorization detail select key, value, `grouping__id`, grouping(value, key) -from T1 +from T1_n47 group by rollup(key, value); select key, value, `grouping__id`, grouping(value, key) -from T1 +from T1_n47 group by rollup(key, value); diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets_limit.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets_limit.q index b45d9806e2..d6ce57c817 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets_limit.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_sets_limit.q @@ -5,40 +5,40 @@ set hive.fetch.task.conversion=none; set hive.cli.print.header=true; -- SORT_QUERY_RESULTS -CREATE TABLE T1_text(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; +CREATE TABLE T1_text_n8(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_text_n8; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n110 STORED AS ORC AS SELECT * FROM T1_text_n8; -- SORT_QUERY_RESULTS EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) from T1 group by a, b with cube order by a, b LIMIT 10; +SELECT a, b, count(*) from T1_n110 group by a, b with cube order by a, b LIMIT 10; -SELECT a, b, count(*) from T1 group by a, b with cube order by a, b LIMIT 10; +SELECT a, b, count(*) from T1_n110 group by a, b with cube order by a, b LIMIT 10; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) order by a, b LIMIT 10; +SELECT a, b, count(*) FROM T1_n110 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) order by a, b LIMIT 10; -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) order by a, b LIMIT 10; +SELECT a, b, count(*) FROM T1_n110 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) order by a, b LIMIT 10; EXPLAIN VECTORIZATION DETAIL -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) order by a, b LIMIT 10; +SELECT a, b, count(*) FROM T1_n110 GROUP BY a, b GROUPING SETS (a, (a, b)) order by a, b LIMIT 10; -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) order by a, b LIMIT 10; +SELECT a, b, count(*) FROM T1_n110 GROUP BY a, b GROUPING SETS (a, (a, b)) order by a, b LIMIT 10; EXPLAIN VECTORIZATION DETAIL -SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) order by a LIMIT 10; +SELECT a FROM T1_n110 GROUP BY a, b, c GROUPING SETS (a, b, c) order by a LIMIT 10; -SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) order by a LIMIT 10; +SELECT a FROM T1_n110 GROUP BY a, b, c GROUPING SETS (a, b, c) order by a LIMIT 10; EXPLAIN VECTORIZATION DETAIL -SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) order by a LIMIT 10; +SELECT a FROM T1_n110 GROUP BY a GROUPING SETS ((a), (a)) order by a LIMIT 10; -SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) order by a LIMIT 10; +SELECT a FROM T1_n110 GROUP BY a GROUPING SETS ((a), (a)) order by a LIMIT 10; EXPLAIN VECTORIZATION DETAIL -SELECT a + b ab, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) order by ab LIMIT 10; +SELECT a + b ab, count(*) FROM T1_n110 GROUP BY a + b GROUPING SETS (a+b) order by 
ab LIMIT 10; -SELECT a + b ab, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) order by ab LIMIT 10; +SELECT a + b ab, count(*) FROM T1_n110 GROUP BY a + b GROUPING SETS (a+b) order by ab LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_grouping_window.q b/ql/src/test/queries/clientpositive/vector_groupby_grouping_window.q index 95644c4621..4f9ade85b1 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_grouping_window.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_grouping_window.q @@ -1,23 +1,23 @@ --! qt:dataset:src set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; set hive.cli.print.header=true; -create table t(category int, live int, comments int) stored as orc; -insert into table t select key, 0, 2 from src tablesample(3 rows); +create table t_n15(category int, live int, comments int) stored as orc; +insert into table t_n15 select key, 0, 2 from src tablesample(3 rows); explain vectorization detail select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1 -FROM t +FROM t_n15 GROUP BY category GROUPING SETS ((), (category)) HAVING max(comments) > 0; select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1 -FROM t +FROM t_n15 GROUP BY category GROUPING SETS ((), (category)) HAVING max(comments) > 0; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_reduce.q b/ql/src/test/queries/clientpositive/vector_groupby_reduce.q index a030c62751..6af93ec4b8 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_reduce.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_reduce.q @@ -3,7 +3,7 @@ set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -create table store_sales_txt +create table store_sales_txt_n0 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -32,9 +32,9 @@ create table store_sales_txt row format delimited fields terminated by '|' stored as textfile; -LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt; +LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt_n0; -create table store_sales +create table store_sales_n3 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -66,7 +66,7 @@ tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384"); set hive.exec.dynamic.partition.mode=nonstrict; -insert overwrite table store_sales +insert overwrite table store_sales_n3 select ss_sold_date_sk , ss_sold_time_sk , @@ -92,13 +92,13 @@ ss_sold_date_sk , ss_net_paid , ss_net_paid_inc_tax , ss_net_profit - from store_sales_txt; + from store_sales_txt_n0; explain vectorization expression select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number order by ss_ticket_number limit 20; @@ -106,7 +106,7 @@ limit 20; select
ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number order by ss_ticket_number limit 20; @@ -120,7 +120,7 @@ from (select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number) a group by ss_ticket_number order by m; @@ -131,7 +131,7 @@ from (select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number) a group by ss_ticket_number order by m; @@ -145,7 +145,7 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 where ss_ticket_number = 1 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number @@ -157,7 +157,7 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 where ss_ticket_number = 1 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number @@ -171,7 +171,7 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number, ss_item_sk order by ss_ticket_number, ss_item_sk; @@ -182,7 +182,7 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_wholesale_cost) wc, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number, ss_item_sk order by ss_ticket_number, ss_item_sk; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/vector_groupby_rollup1.q b/ql/src/test/queries/clientpositive/vector_groupby_rollup1.q index 39bc2c1283..fe1ca42d2a 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_rollup1.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_rollup1.q @@ -9,47 +9,47 @@ set hive.groupby.skewindata=false; -- SORT_QUERY_RESULTS -CREATE TABLE T1_text(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE T1_text_n5(key STRING, val STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_text; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_text_n5; -CREATE TABLE T1 STORED AS ORC AS SELECT * FROM T1_text; +CREATE TABLE T1_n83 STORED AS ORC AS SELECT * FROM T1_text_n5; EXPLAIN VECTORIZATION DETAIL -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup; +SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup; -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup; +SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup; EXPLAIN VECTORIZATION DETAIL -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup; +SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup; -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup; +SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup; set hive.groupby.skewindata=true; EXPLAIN VECTORIZATION DETAIL -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup; +SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup; -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup; +SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup; EXPLAIN VECTORIZATION DETAIL -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup; +SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup; -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup; +SELECT 
key, count(distinct val) FROM T1_n83 GROUP BY key with rollup; set hive.multigroupby.singlereducer=true; -CREATE TABLE T2(key1 STRING, key2 STRING, val INT) STORED AS ORC; -CREATE TABLE T3(key1 STRING, key2 STRING, val INT) STORED AS ORC; +CREATE TABLE T2_n52(key1 STRING, key2 STRING, val INT) STORED AS ORC; +CREATE TABLE T3_n17(key1 STRING, key2 STRING, val INT) STORED AS ORC; EXPLAIN VECTORIZATION DETAIL -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by rollup(key, val); +FROM T1_n83 +INSERT OVERWRITE TABLE T2_n52 SELECT key, val, count(1) group by key, val with rollup +INSERT OVERWRITE TABLE T3_n17 SELECT key, val, sum(1) group by rollup(key, val); -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with rollup; +FROM T1_n83 +INSERT OVERWRITE TABLE T2_n52 SELECT key, val, count(1) group by key, val with rollup +INSERT OVERWRITE TABLE T3_n17 SELECT key, val, sum(1) group by key, val with rollup; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_sort_11.q b/ql/src/test/queries/clientpositive/vector_groupby_sort_11.q index 012fe826fc..61745d3c9f 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_sort_11.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_sort_11.q @@ -6,45 +6,45 @@ set hive.map.groupby.sorted=true; -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +CREATE TABLE T1_n137(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS; -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +INSERT OVERWRITE TABLE T1_n137 PARTITION (ds='1') SELECT * from src where key < 10; -- The plan is optimized to perform partial aggregation on the mapper EXPLAIN VECTORIZATION DETAIL -select count(distinct key) from T1; -select count(distinct key) from T1; +select count(distinct key) from T1_n137; +select count(distinct key) from T1_n137; -- The plan is optimized to perform partial aggregation on the mapper EXPLAIN VECTORIZATION DETAIL -select count(distinct key), count(1), count(key), sum(distinct key) from T1; -select count(distinct key), count(1), count(key), sum(distinct key) from T1; +select count(distinct key), count(1), count(key), sum(distinct key) from T1_n137; +select count(distinct key), count(1), count(key), sum(distinct key) from T1_n137; -- The plan is not changed in the presence of a grouping key EXPLAIN VECTORIZATION DETAIL -select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key; -select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key; +select count(distinct key), count(1), count(key), sum(distinct key) from T1_n137 group by key; +select count(distinct key), count(1), count(key), sum(distinct key) from T1_n137 group by key; -- The plan is not changed in the presence of a grouping key EXPLAIN VECTORIZATION DETAIL -select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key; -select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key; +select key, count(distinct key), count(1), count(key), sum(distinct key) from T1_n137 group by key; +select key, count(distinct key), count(1), count(key), sum(distinct key) from T1_n137 group by key; -- The plan is not changed in the presence of a 
grouping key expression EXPLAIN VECTORIZATION DETAIL -select count(distinct key+key) from T1; -select count(distinct key+key) from T1; +select count(distinct key+key) from T1_n137; +select count(distinct key+key) from T1_n137; EXPLAIN VECTORIZATION DETAIL -select count(distinct 1) from T1; -select count(distinct 1) from T1; +select count(distinct 1) from T1_n137; +select count(distinct 1) from T1_n137; set hive.map.aggr=false; -- no plan change if map aggr is turned off EXPLAIN VECTORIZATION DETAIL -select count(distinct key) from T1; -select count(distinct key) from T1; +select count(distinct key) from T1_n137; +select count(distinct key) from T1_n137; diff --git a/ql/src/test/queries/clientpositive/vector_groupby_sort_8.q b/ql/src/test/queries/clientpositive/vector_groupby_sort_8.q index b0c569976e..06138ceaf6 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_sort_8.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_sort_8.q @@ -6,19 +6,19 @@ set hive.map.groupby.sorted=true; -- SORT_QUERY_RESULTS -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +CREATE TABLE T1_n2(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1'); +LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n2 PARTITION (ds='1'); -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1'; +INSERT OVERWRITE TABLE T1_n2 PARTITION (ds='1') select key, val from T1_n2 where ds = '1'; -- The plan is not converted to a map-side group by, since although the sorting columns and grouping -- columns match, the user is issuing a distinct.
-- However, after HIVE-4310, partial aggregation is performed on the mapper EXPLAIN VECTORIZATION DETAIL -select count(distinct key) from T1; -select count(distinct key) from T1; +select count(distinct key) from T1_n2; +select count(distinct key) from T1_n2; -DROP TABLE T1; +DROP TABLE T1_n2; diff --git a/ql/src/test/queries/clientpositive/vector_grouping_sets.q b/ql/src/test/queries/clientpositive/vector_grouping_sets.q index 3ea273061e..09fed7c86d 100644 --- a/ql/src/test/queries/clientpositive/vector_grouping_sets.q +++ b/ql/src/test/queries/clientpositive/vector_grouping_sets.q @@ -41,29 +41,29 @@ stored as textfile; LOAD DATA LOCAL INPATH '../../data/files/store_200' OVERWRITE INTO TABLE store_txt; -create table store +create table store_n1 stored as orc as select * from store_txt; explain vectorization expression select s_store_id - from store + from store_n1 group by s_store_id with rollup; select s_store_id - from store + from store_n1 group by s_store_id with rollup; explain vectorization expression select s_store_id, GROUPING__ID - from store + from store_n1 group by s_store_id with rollup; select s_store_id, GROUPING__ID - from store + from store_n1 group by s_store_id with rollup; explain select s_store_id, GROUPING__ID - from store + from store_n1 group by rollup(s_store_id); diff --git a/ql/src/test/queries/clientpositive/vector_include_no_sel.q b/ql/src/test/queries/clientpositive/vector_include_no_sel.q index a499ae592d..e68db1fcde 100644 --- a/ql/src/test/queries/clientpositive/vector_include_no_sel.q +++ b/ql/src/test/queries/clientpositive/vector_include_no_sel.q @@ -48,7 +48,7 @@ stored as textfile; LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt; -create table store_sales stored as orc as select * from store_sales_txt; +create table store_sales_n1 stored as orc as select * from store_sales_txt; create table customer_demographics_txt @@ -71,10 +71,10 @@ LOAD DATA LOCAL INPATH '../../data/files/customer_demographics.txt' OVERWRITE IN create table customer_demographics stored as orc as select * from customer_demographics_txt; explain vectorization expression -select count(1) from customer_demographics,store_sales -where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or - (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')); +select count(1) from customer_demographics,store_sales_n1 +where ((customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or + (customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')); -select count(1) from customer_demographics,store_sales -where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or - (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')); +select count(1) from customer_demographics,store_sales_n1 +where ((customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or + (customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')); diff --git a/ql/src/test/queries/clientpositive/vector_join30.q b/ql/src/test/queries/clientpositive/vector_join30.q index 82dda493ae..9672a475f7 100644 --- 
a/ql/src/test/queries/clientpositive/vector_join30.q +++ b/ql/src/test/queries/clientpositive/vector_join30.q @@ -9,154 +9,154 @@ SET hive.auto.convert.join.noconditionaltask.size=1000000000; -- SORT_QUERY_RESULTS -CREATE TABLE orcsrc STORED AS ORC AS SELECT * FROM src; +CREATE TABLE orcsrc_n0 STORED AS ORC AS SELECT * FROM src; explain vectorization expression FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) select sum(hash(Y.key,Y.value)); FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) select sum(hash(Y.key,Y.value)); explain vectorization expression FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) select sum(hash(Y.key,Y.value)); FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) select sum(hash(Y.key,Y.value)); explain vectorization expression FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x RIGHT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) select sum(hash(Y.key,Y.value)); FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x RIGHT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) select sum(hash(Y.key,Y.value)); explain vectorization expression FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); explain vectorization expression FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT 
orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); explain vectorization expression FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); explain vectorization expression FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) RIGHT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x LEFT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) RIGHT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); explain vectorization expression FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x RIGHT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) RIGHT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); FROM -(SELECT orcsrc.* FROM orcsrc sort by key) x +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by key) x RIGHT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Y +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Y ON (x.key = Y.key) RIGHT OUTER JOIN -(SELECT orcsrc.* FROM orcsrc sort by value) Z +(SELECT orcsrc_n0.* FROM orcsrc_n0 sort by value) Z ON (x.key = Z.key) select sum(hash(Y.key,Y.value)); diff --git a/ql/src/test/queries/clientpositive/vector_join_filters.q b/ql/src/test/queries/clientpositive/vector_join_filters.q index aac10c11b8..88458f89e8 100644 --- a/ql/src/test/queries/clientpositive/vector_join_filters.q +++ b/ql/src/test/queries/clientpositive/vector_join_filters.q @@ -8,32 +8,32 @@ SET hive.auto.convert.join.noconditionaltask.size=1000000000; -- SORT_QUERY_RESULTS -CREATE TABLE myinput1_txt(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1_txt; -CREATE TABLE myinput1 STORED AS ORC AS SELECT * FROM myinput1_txt; +CREATE TABLE myinput1_txt_n0(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1_txt_n0; +CREATE TABLE myinput1_n1 STORED AS ORC AS SELECT * FROM myinput1_txt_n0; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key 
> 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a JOIN myinput1_n1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a RIGHT OUTER JOIN myinput1_n1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a JOIN myinput1_n1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a JOIN myinput1_n1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a JOIN myinput1_n1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a JOIN myinput1_n1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value 
> 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a RIGHT OUTER JOIN myinput1_n1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a RIGHT OUTER JOIN myinput1_n1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a RIGHT OUTER JOIN myinput1_n1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a RIGHT OUTER JOIN myinput1_n1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN 
myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; \ No newline at end of file +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n1 a RIGHT OUTER JOIN myinput1_n1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b RIGHT OUTER JOIN myinput1_n1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1_n1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n1 a RIGHT OUTER JOIN myinput1_n1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1_n1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n1 a LEFT OUTER JOIN myinput1_n1 b RIGHT OUTER JOIN myinput1_n1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/vector_join_nulls.q 
b/ql/src/test/queries/clientpositive/vector_join_nulls.q index b978b41562..3e8df9a850 100644 --- a/ql/src/test/queries/clientpositive/vector_join_nulls.q +++ b/ql/src/test/queries/clientpositive/vector_join_nulls.q @@ -8,27 +8,27 @@ SET hive.auto.convert.join.noconditionaltask.size=1000000000; -- SORT_QUERY_RESULTS -CREATE TABLE myinput1_txt(key int, value int); -LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE myinput1_txt; -CREATE TABLE myinput1 STORED AS ORC AS SELECT * FROM myinput1_txt; +CREATE TABLE myinput1_txt_n1(key int, value int); +LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE myinput1_txt_n1; +CREATE TABLE myinput1_n4 STORED AS ORC AS SELECT * FROM myinput1_txt_n1; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a JOIN myinput1_n4 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a LEFT OUTER JOIN myinput1_n4 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a RIGHT OUTER JOIN myinput1_n4 b; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a JOIN myinput1_n4 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a JOIN myinput1_n4 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a JOIN myinput1_n4 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a JOIN myinput1_n4 b ON a.value = b.value and a.key=b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a LEFT OUTER JOIN myinput1_n4 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a LEFT OUTER JOIN myinput1_n4 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a LEFT OUTER JOIN myinput1_n4 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a LEFT OUTER JOIN myinput1_n4 b ON a.key = b.key and a.value=b.value; +SELECT 
sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a RIGHT OUTER JOIN myinput1_n4 b ON a.key = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a RIGHT OUTER JOIN myinput1_n4 b ON a.key = b.key; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a RIGHT OUTER JOIN myinput1_n4 b ON a.value = b.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a RIGHT OUTER JOIN myinput1_n4 b ON a.key=b.key and a.value = b.value; -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value); -SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value; +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n4 a LEFT OUTER JOIN myinput1_n4 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1_n4 c ON (b.value=c.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1_n4 a RIGHT OUTER JOIN myinput1_n4 b ON (a.value=b.value) LEFT OUTER JOIN myinput1_n4 c ON (b.value=c.value); +SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n4 a LEFT OUTER JOIN myinput1_n4 b RIGHT OUTER JOIN myinput1_n4 c ON a.value = b.value and b.value = c.value; diff --git a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q index 588eb20511..6ecfa1ad33 100644 --- a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q +++ b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q @@ -7,21 +7,21 @@ set hive.auto.convert.join.noconditionaltask.size=10000; -- SORT_QUERY_RESULTS -create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10; +create table t1_n148 stored as orc as select cast(key as int) key, value from src where key <= 10; -select * from t1 sort by key; +select * from t1_n148 sort by key; -create table t2 stored as orc as select cast(2*key as int) key, value from t1; +create table t2_n87 stored as orc as select cast(2*key as int) key, value from t1_n148; -select * from t2 sort by key; +select * from t2_n87 sort by key; -create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b; -select * from t3 sort by key, value; +create table t3_n35 stored as orc as select * from (select * from t1_n148 union all select * from t2_n87) b; +select * from t3_n35 sort by key, value; -analyze table t3 compute statistics; +analyze table t3_n35 compute statistics; -create table t4 (key int, value string) stored as orc; -select * from t4; +create table t4_n19 (key int, value string) stored as orc; +select * from t4_n19; set hive.vectorized.execution.enabled=false; @@ -29,500 +29,500 @@ set hive.mapjoin.hybridgrace.hashtable=false; explain vectorization only summary -select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value; -select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value; +select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value; +select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value; explain vectorization only summary -select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value; -select * from t2 a left semi join t1 b on b.key=a.key sort 
by a.key, a.value; +select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value; +select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value; explain vectorization only summary -select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value; -select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value; +select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value; +select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value; explain vectorization only summary -select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value; -select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value; +select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value; +select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value; explain vectorization only summary -select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value; -select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value; +select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value; +select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value; explain vectorization only summary -select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value; -select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value; +select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value; +select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value; explain vectorization only summary -select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ; -select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ; +select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ; +select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ; explain vectorization only summary -select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value; -select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value; +select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value; +select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value; explain vectorization only summary -select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key; -select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key; +select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key; +select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on 
a.key = b.key sort by a.key;
explain vectorization only summary
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
explain vectorization only summary
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
explain vectorization only summary
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
explain vectorization only summary
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization only summary
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization only summary
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization only summary
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
explain vectorization only summary
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
set hive.vectorized.execution.enabled=false;
set hive.mapjoin.hybridgrace.hashtable=true;
explain vectorization summary
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
explain vectorization summary
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
explain vectorization summary
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
explain vectorization summary
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
explain vectorization summary
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
explain vectorization summary
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
explain vectorization summary
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
explain vectorization summary
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
explain vectorization summary
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
explain vectorization summary
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
explain vectorization summary
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
explain vectorization summary
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
explain vectorization summary
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization summary
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization summary
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization summary
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization summary
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization summary
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
explain vectorization summary
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
set hive.vectorized.execution.enabled=true;
set hive.mapjoin.hybridgrace.hashtable=false;
SET hive.vectorized.execution.mapjoin.native.enabled=false;
explain vectorization only operator
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
explain vectorization only operator
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
explain vectorization only operator
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
explain vectorization only operator
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
explain vectorization only operator
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
explain vectorization only operator
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
explain vectorization only operator
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
explain vectorization only operator
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
explain vectorization only operator
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
explain vectorization only operator
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
explain vectorization only operator
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
explain vectorization only operator
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
explain vectorization only operator
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization only operator
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization only operator
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization only operator
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization only operator
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization only operator
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization only operator
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
explain vectorization only operator
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
set hive.vectorized.execution.enabled=true;
set hive.mapjoin.hybridgrace.hashtable=true;
SET hive.vectorized.execution.mapjoin.native.enabled=false;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
explain vectorization detail
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
explain vectorization detail
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
explain vectorization detail
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
explain vectorization detail
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
explain vectorization detail
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
explain vectorization detail
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
explain vectorization detail
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
explain vectorization detail
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
explain vectorization detail
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
explain vectorization detail
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
set hive.vectorized.execution.enabled=true;
set hive.mapjoin.hybridgrace.hashtable=false;
SET hive.vectorized.execution.mapjoin.native.enabled=true;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
explain vectorization detail
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
explain vectorization detail
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
explain vectorization detail
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
explain vectorization detail
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
explain vectorization detail
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
explain vectorization detail
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
explain vectorization detail
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
explain vectorization detail
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
explain vectorization detail
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
explain vectorization detail
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
set hive.vectorized.execution.enabled=true;
set hive.mapjoin.hybridgrace.hashtable=true;
SET hive.vectorized.execution.mapjoin.native.enabled=true;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key=b.key sort by a.key, a.value;
explain vectorization detail
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
-select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
+select * from t2_n87 a left semi join t1_n148 b on b.key=a.key sort by a.key, a.value;
explain vectorization detail
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
-select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t4_n19 b on b.key=a.key sort by a.key, a.value;
explain vectorization detail
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
-select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1_n148 a left semi join t3_n35 b on (b.key = a.key and b.key < '15') sort by a.value;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
explain vectorization detail
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
-select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1_n148 a left semi join (select key from t3_n35 where key > 5) b on a.key = b.key sort by a.value;
explain vectorization detail
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
-select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1_n148 a left semi join (select key , value from t2_n87 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
explain vectorization detail
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
-select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2_n87 a left semi join (select key , value from t1_n148 where key > 2) b on a.key = b.key sort by a.key, a.value;
explain vectorization detail
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
-select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key sort by a.key;
explain vectorization detail
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
-select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1_n148 a left semi join t2_n87 b on a.key = 2*b.key sort by a.key, a.value;
explain vectorization detail
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
-select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
+select * from t1_n148 a join t2_n87 b on a.key = b.key left semi join t3_n35 c on b.key = c.key sort by a.key, a.value;
explain vectorization detail
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
-select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3_n35 a left semi join t1_n148 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
explain vectorization detail
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
-select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key left semi join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t3_n35 a left outer join t1_n148 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
-select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
+select a.key from t1_n148 a full outer join t3_n35 b on a.key = b.key left semi join t2_n87 c on b.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key right outer join t1_n148 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
-select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
+select a.key from t3_n35 a left semi join t1_n148 b on a.key = b.key full outer join t2_n87 c on a.key = c.key sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
-select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
+select a.key from t3_n35 a left semi join t2_n87 b on a.key = b.key left outer join t1_n148 c on a.value = c.value sort by a.key;
explain vectorization detail
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
-select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
+select a.key from t3_n35 a left semi join t2_n87 b on a.value = b.value where a.key > 100;
diff --git a/ql/src/test/queries/clientpositive/vector_llap_text_1.q b/ql/src/test/queries/clientpositive/vector_llap_text_1.q
index e038146d44..f5c805eea8 100644
--- a/ql/src/test/queries/clientpositive/vector_llap_text_1.q
+++ b/ql/src/test/queries/clientpositive/vector_llap_text_1.q
@@ -10,34 +10,34 @@ set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=10000;
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n13(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n8 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_n14 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n13 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n13 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n14 partition(ds='2008-04-08');
set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n8 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n14;
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n7(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n7 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n13;
set hive.convert.join.bucket.mapjoin.tez = true;
explain vectorization detail
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key
+from tab_n7 a join tab_part_n8 b on a.key = b.key
order by a.key, a.value, b.value
limit 10;
select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key
+from tab_n7 a join tab_part_n8 b on a.key = b.key
order by a.key, a.value, b.value
limit 10;
diff --git a/ql/src/test/queries/clientpositive/vector_map_order.q b/ql/src/test/queries/clientpositive/vector_map_order.q
index b1f05d5f39..e500664378 100644
--- a/ql/src/test/queries/clientpositive/vector_map_order.q
+++ b/ql/src/test/queries/clientpositive/vector_map_order.q
@@ -1,15 +1,15 @@
SET hive.vectorized.execution.enabled=true;
set hive.fetch.task.conversion=none;
-create table map_table (foo STRING , bar MAP<STRING, STRING>)
+create table map_table_n0 (foo STRING , bar MAP<STRING, STRING>)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':'
STORED AS TEXTFILE;
-load data local inpath "../../data/files/map_table.txt" overwrite into table map_table;
+load data local inpath "../../data/files/map_table.txt" overwrite into table map_table_n0;
explain vectorization detail
-select * from map_table;
-select * from map_table;
\ No newline at end of file
+select * from map_table_n0;
+select * from map_table_n0;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q b/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q
index 22830a5fdb..7c0faa83d2 100644
--- a/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q
+++ b/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q
@@ -3,7 +3,7 @@ set hive.explain.user=false;
SET hive.vectorized.execution.enabled=true;
set hive.fetch.task.conversion=none;
-create table date_dim
+create table date_dim_n0
(
    d_date_sk int,
    d_date_id string,
@@ -36,7 +36,7 @@ create table date_dim
)
stored as orc;
-create table store_sales
+create table store_sales_n2
(
    ss_sold_date_sk int,
    ss_sold_time_sk int,
@@ -68,7 +68,7 @@ partitioned by
stored as orc
tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384");
-create table store
+create table store_n2
(
    s_store_sk int,
    s_store_id string,
@@ -107,12 +107,12 @@ stored as orc;
explain vectorization
select s_state, count(1)
- from store_sales,
- store,
- date_dim
- where store_sales.ss_sold_date_sk = date_dim.d_date_sk and
- store_sales.ss_store_sk = store.s_store_sk and
- store.s_state in ('KS','AL', 'MN', 'AL', 'SC', 'VT')
+ from store_sales_n2,
+ store_n2,
+ date_dim_n0
+ where store_sales_n2.ss_sold_date_sk = date_dim_n0.d_date_sk and
+ store_sales_n2.ss_store_sk = store_n2.s_store_sk and
+ store_n2.s_state in ('KS','AL', 'MN', 'AL', 'SC', 'VT')
group by s_state
order by s_state
limit 100;
diff --git a/ql/src/test/queries/clientpositive/vector_null_projection.q b/ql/src/test/queries/clientpositive/vector_null_projection.q
index 6cea1e6a41..1083d0b577 100644
--- a/ql/src/test/queries/clientpositive/vector_null_projection.q
+++ b/ql/src/test/queries/clientpositive/vector_null_projection.q
@@ -4,18 +4,18 @@ set hive.fetch.task.conversion=none;
-- SORT_QUERY_RESULTS
-create table a(s string) stored as orc;
-create table b(s string) stored as orc;
-insert into table a values('aaa');
-insert into table b values('aaa');
+create table a_n6(s string) stored as orc;
+create table b_n4(s string) stored as orc;
+insert into table a_n6 values('aaa_n6');
+insert into table b_n4 values('aaa_n6');
-- We expect some vectorization due to NULL (void) projection type.
explain vectorization detail
-select NULL from a;
+select NULL from a_n6;
-select NULL from a;
+select NULL from a_n6;
explain vectorization expression
-select NULL as x from a union distinct select NULL as x from b;
+select NULL as x from a_n6 union distinct select NULL as x from b_n4;
-select NULL as x from a union distinct select NULL as x from b;
\ No newline at end of file
+select NULL as x from a_n6 union distinct select NULL as x from b_n4;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vector_number_compare_projection.q b/ql/src/test/queries/clientpositive/vector_number_compare_projection.q
index 3f4f5aa9fd..7abae94449 100644
--- a/ql/src/test/queries/clientpositive/vector_number_compare_projection.q
+++ b/ql/src/test/queries/clientpositive/vector_number_compare_projection.q
@@ -6,7 +6,7 @@ set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
-create table vectortab2k(
+create table vectortab2k_n6(
    t tinyint,
    si smallint,
    i int,
@@ -23,9 +23,9 @@ create table vectortab2k(
ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n6;
-CREATE TABLE scratch AS SELECT t, si, i, b, f, d, dc FROM vectortab2k;
+CREATE TABLE scratch AS SELECT t, si, i, b, f, d, dc FROM vectortab2k_n6;
INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, NULL, NULL, NULL);
CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM scratch;
@@ -88,7 +88,7 @@ SELECT sum(hash(*)) FROM
SET hive.vectorized.execution.enabled=false;
CREATE TABLE scratch_repeat AS SELECT t, si, i, b, bo, 20 as t_repeat,
-    9000 as si_repeat, 9233320 as i_repeat, -823823999339992 as b_repeat, false as bo_repeat_false, true as bo_repeat_true FROM vectortab2k;
+    9000 as si_repeat, 9233320 as i_repeat, -823823999339992 as b_repeat, false as bo_repeat_false, true as bo_repeat_true FROM vectortab2k_n6;
-- The repeated columns ought to create repeated VectorizedRowBatch for those columns.
-- And then when we do a comparison, we should generate a repeated boolean result.
@@ -141,7 +141,7 @@ SELECT sum(hash(*)) FROM
SET hive.vectorized.execution.enabled=false;
CREATE TABLE scratch_null AS SELECT t, si, i, b, bo,
-    cast(null as tinyint) as t_null, cast(null as smallint) as si_null, cast(null as int) as i_null, cast(null as bigint) as b_null, cast(null as boolean) as bo_null FROM vectortab2k;
+    cast(null as tinyint) as t_null, cast(null as smallint) as si_null, cast(null as int) as i_null, cast(null as bigint) as b_null, cast(null as boolean) as bo_null FROM vectortab2k_n6;
-- The nulled columns ought to create repeated null VectorizedRowBatch for those columns.
CREATE TABLE vectortab2k_orc_null STORED AS ORC AS SELECT * FROM scratch_null; diff --git a/ql/src/test/queries/clientpositive/vector_orc_nested_column_pruning.q b/ql/src/test/queries/clientpositive/vector_orc_nested_column_pruning.q index 3121ec8719..e5a2607eb8 100644 --- a/ql/src/test/queries/clientpositive/vector_orc_nested_column_pruning.q +++ b/ql/src/test/queries/clientpositive/vector_orc_nested_column_pruning.q @@ -4,9 +4,9 @@ set hive.strict.checks.cartesian.product=false; SET hive.vectorized.execution.enabled=true; -- First, create source tables -DROP TABLE IF EXISTS dummy; -CREATE TABLE dummy (i int); -INSERT INTO TABLE dummy VALUES (42); +DROP TABLE IF EXISTS dummy_n0; +CREATE TABLE dummy_n0 (i int); +INSERT INTO TABLE dummy_n0 VALUES (42); DROP TABLE IF EXISTS nested_tbl_1; CREATE TABLE nested_tbl_1 ( @@ -27,7 +27,7 @@ INSERT INTO TABLE nested_tbl_1 SELECT named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))), map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))), 'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2))))) -FROM dummy; +FROM dummy_n0; DROP TABLE IF EXISTS nested_tbl_2; CREATE TABLE nested_tbl_2 LIKE nested_tbl_1; @@ -40,7 +40,7 @@ INSERT INTO TABLE nested_tbl_2 SELECT named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))), map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))), 'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4))))) -FROM dummy; +FROM dummy_n0; -- Testing only select statements diff --git a/ql/src/test/queries/clientpositive/vector_orc_null_check.q b/ql/src/test/queries/clientpositive/vector_orc_null_check.q index 8f415c3175..f7d567f53a 100644 --- a/ql/src/test/queries/clientpositive/vector_orc_null_check.q +++ b/ql/src/test/queries/clientpositive/vector_orc_null_check.q @@ -1,13 +1,13 @@ SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -create table listtable(l array<string>); -create table listtable_orc(l array<string>) stored as orc; +create table listtable_n0(l array<string>); +create table listtable_orc_n0(l array<string>) stored as orc; -insert overwrite table listtable select array(null) from src; -insert overwrite table listtable_orc select * from listtable; +insert overwrite table listtable_n0 select array(null) from src; +insert overwrite table listtable_orc_n0 select * from listtable_n0; explain vectorization expression -select size(l) from listtable_orc limit 10; -select size(l) from listtable_orc limit 10; +select size(l) from listtable_orc_n0 limit 10; +select size(l) from listtable_orc_n0 limit 10; diff --git a/ql/src/test/queries/clientpositive/vector_order_null.q b/ql/src/test/queries/clientpositive/vector_order_null.q index 8b0593ff5e..b59c073384 100644 --- a/ql/src/test/queries/clientpositive/vector_order_null.q +++ b/ql/src/test/queries/clientpositive/vector_order_null.q @@ -3,54 +3,54 @@ set hive.cli.print.header=true; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -create table src_null (a int, b string); -insert into src_null values (1, 'A'); -insert into src_null values (null, null); -insert into src_null values (3, null); -insert into src_null values (2, null); -insert into src_null values (2, 'A'); -insert into src_null values (2, 'B'); +create table src_null_n3 (a int, b string); +insert into 
src_null_n3 values (1, 'A'); +insert into src_null_n3 values (null, null); +insert into src_null_n3 values (3, null); +insert into src_null_n3 values (2, null); +insert into src_null_n3 values (2, 'A'); +insert into src_null_n3 values (2, 'B'); EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY a asc, b asc; -SELECT x.* FROM src_null x ORDER BY a asc, b asc; +SELECT x.* FROM src_null_n3 x ORDER BY a asc, b asc; +SELECT x.* FROM src_null_n3 x ORDER BY a asc, b asc; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY a desc, b asc; -SELECT x.* FROM src_null x ORDER BY a desc, b asc; +SELECT x.* FROM src_null_n3 x ORDER BY a desc, b asc; +SELECT x.* FROM src_null_n3 x ORDER BY a desc, b asc; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY b asc, a asc nulls last; -SELECT x.* FROM src_null x ORDER BY b asc, a asc nulls last; +SELECT x.* FROM src_null_n3 x ORDER BY b asc, a asc nulls last; +SELECT x.* FROM src_null_n3 x ORDER BY b asc, a asc nulls last; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY b desc, a asc; -SELECT x.* FROM src_null x ORDER BY b desc, a asc; +SELECT x.* FROM src_null_n3 x ORDER BY b desc, a asc; +SELECT x.* FROM src_null_n3 x ORDER BY b desc, a asc; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY a asc nulls first, b asc; -SELECT x.* FROM src_null x ORDER BY a asc nulls first, b asc; +SELECT x.* FROM src_null_n3 x ORDER BY a asc nulls first, b asc; +SELECT x.* FROM src_null_n3 x ORDER BY a asc nulls first, b asc; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY a desc nulls first, b asc; -SELECT x.* FROM src_null x ORDER BY a desc nulls first, b asc; +SELECT x.* FROM src_null_n3 x ORDER BY a desc nulls first, b asc; +SELECT x.* FROM src_null_n3 x ORDER BY a desc nulls first, b asc; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY b asc nulls last, a; -SELECT x.* FROM src_null x ORDER BY b asc nulls last, a; +SELECT x.* FROM src_null_n3 x ORDER BY b asc nulls last, a; +SELECT x.* FROM src_null_n3 x ORDER BY b asc nulls last, a; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY b desc nulls last, a; -SELECT x.* FROM src_null x ORDER BY b desc nulls last, a; +SELECT x.* FROM src_null_n3 x ORDER BY b desc nulls last, a; +SELECT x.* FROM src_null_n3 x ORDER BY b desc nulls last, a; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY a asc nulls last, b desc; -SELECT x.* FROM src_null x ORDER BY a asc nulls last, b desc; +SELECT x.* FROM src_null_n3 x ORDER BY a asc nulls last, b desc; +SELECT x.* FROM src_null_n3 x ORDER BY a asc nulls last, b desc; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY b desc nulls last, a desc nulls last; -SELECT x.* FROM src_null x ORDER BY b desc nulls last, a desc nulls last; +SELECT x.* FROM src_null_n3 x ORDER BY b desc nulls last, a desc nulls last; +SELECT x.* FROM src_null_n3 x ORDER BY b desc nulls last, a desc nulls last; EXPLAIN VECTORIZATION DETAIL -SELECT x.* FROM src_null x ORDER BY b asc nulls first, a asc nulls last; -SELECT x.* FROM src_null x ORDER BY b asc nulls first, a asc nulls last; +SELECT x.* FROM src_null_n3 x ORDER BY b asc nulls first, a asc nulls last; +SELECT x.* FROM src_null_n3 x ORDER BY b asc nulls first, a asc nulls last; diff --git a/ql/src/test/queries/clientpositive/vector_orderby_5.q b/ql/src/test/queries/clientpositive/vector_orderby_5.q index 17ccf82fc5..db4b8f0a8c 100644 --- a/ql/src/test/queries/clientpositive/vector_orderby_5.q +++ 
b/ql/src/test/queries/clientpositive/vector_orderby_5.q @@ -3,7 +3,7 @@ set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -create table vectortab2k( +create table vectortab2k_n7( t tinyint, si smallint, i int, @@ -20,9 +20,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n7; -create table vectortab2korc( +create table vectortab2korc_n6( t tinyint, si smallint, i int, @@ -38,9 +38,9 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n6 SELECT * FROM vectortab2k_n7; explain vectorization expression -select bo, max(b) from vectortab2korc group by bo order by bo desc; +select bo, max(b) from vectortab2korc_n6 group by bo order by bo desc; -select bo, max(b) from vectortab2korc group by bo order by bo desc; \ No newline at end of file +select bo, max(b) from vectortab2korc_n6 group by bo order by bo desc; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/vector_outer_join2.q b/ql/src/test/queries/clientpositive/vector_outer_join2.q index d79a124634..a7b10810a6 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join2.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join2.q @@ -6,45 +6,45 @@ SET hive.vectorized.execution.mapjoin.native.enabled=true; set hive.fetch.task.conversion=none; -- Using cint and cbigint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc2a as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc4a as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; - -select * from small_alltypesorc1a; -select * from small_alltypesorc2a; -select * from small_alltypesorc3a; -select * from small_alltypesorc4a; - -create table small_alltypesorc_a stored as orc as select * from -(select * from (select * from small_alltypesorc1a) sq1 +create table small_alltypesorc1a_n0 as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc2a_n0 as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc3a_n0 as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, 
cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc4a_n0 as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; + +select * from small_alltypesorc1a_n0; +select * from small_alltypesorc2a_n0; +select * from small_alltypesorc3a_n0; +select * from small_alltypesorc4a_n0; + +create table small_alltypesorc_a_n0 stored as orc as select * from +(select * from (select * from small_alltypesorc1a_n0) sq1 union all - select * from (select * from small_alltypesorc2a) sq2 + select * from (select * from small_alltypesorc2a_n0) sq2 union all - select * from (select * from small_alltypesorc3a) sq3 + select * from (select * from small_alltypesorc3a_n0) sq3 union all - select * from (select * from small_alltypesorc4a) sq4) q; + select * from (select * from small_alltypesorc4a_n0) sq4) q; -ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS; -ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS; +ANALYZE TABLE small_alltypesorc_a_n0 COMPUTE STATISTICS; +ANALYZE TABLE small_alltypesorc_a_n0 COMPUTE STATISTICS FOR COLUMNS; -select * from small_alltypesorc_a; +select * from small_alltypesorc_a_n0; explain vectorization detail select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n0 c +left outer join small_alltypesorc_a_n0 cd on cd.cint = c.cint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n0 hd on hd.cbigint = c.cbigint ) t1; -- SORT_QUERY_RESULTS select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n0 c +left outer join small_alltypesorc_a_n0 cd on cd.cint = c.cint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n0 hd on hd.cbigint = c.cbigint ) t1; diff --git a/ql/src/test/queries/clientpositive/vector_outer_join3.q b/ql/src/test/queries/clientpositive/vector_outer_join3.q index 2ecdd6d1a4..d5a51cb87b 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join3.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join3.q @@ -6,35 +6,35 @@ SET hive.vectorized.execution.mapjoin.native.enabled=true; set hive.fetch.task.conversion=none; -- Using cint and cstring1 in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc2a as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -create table small_alltypesorc4a as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc1a_n1 
as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc2a_n1 as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc3a_n1 as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; +create table small_alltypesorc4a_n1 as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5; -select * from small_alltypesorc1a; -select * from small_alltypesorc2a; -select * from small_alltypesorc3a; -select * from small_alltypesorc4a; +select * from small_alltypesorc1a_n1; +select * from small_alltypesorc2a_n1; +select * from small_alltypesorc3a_n1; +select * from small_alltypesorc4a_n1; -create table small_alltypesorc_a stored as orc as select * from -(select * from (select * from small_alltypesorc1a) sq1 +create table small_alltypesorc_a_n1 stored as orc as select * from +(select * from (select * from small_alltypesorc1a_n1) sq1 union all - select * from (select * from small_alltypesorc2a) sq2 + select * from (select * from small_alltypesorc2a_n1) sq2 union all - select * from (select * from small_alltypesorc3a) sq3 + select * from (select * from small_alltypesorc3a_n1) sq3 union all - select * from (select * from small_alltypesorc4a) sq4) q; + select * from (select * from small_alltypesorc4a_n1) sq4) q; -ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS; -ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS; +ANALYZE TABLE small_alltypesorc_a_n1 COMPUTE STATISTICS; +ANALYZE TABLE small_alltypesorc_a_n1 COMPUTE STATISTICS FOR COLUMNS; -select * from small_alltypesorc_a; +select * from small_alltypesorc_a_n1; explain vectorization detail formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cint = c.cint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 ; @@ -42,19 +42,19 @@ left outer join small_alltypesorc_a hd -- SORT_QUERY_RESULTS select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cint = c.cint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1; explain vectorization detail formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 ; @@ -62,19 +62,19 @@ left outer join small_alltypesorc_a hd -- SORT_QUERY_RESULTS select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 
c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1; explain vectorization detail formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 and hd.cint = c.cint ) t1 ; @@ -82,9 +82,9 @@ left outer join small_alltypesorc_a hd -- SORT_QUERY_RESULTS select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 and hd.cint = c.cint ) t1; diff --git a/ql/src/test/queries/clientpositive/vector_outer_join6.q b/ql/src/test/queries/clientpositive/vector_outer_join6.q index b39e8eddc1..76fd8f4c74 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join6.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join6.q @@ -24,21 +24,21 @@ load data local inpath '../../data/files/TJOIN2' into table TJOIN2_txt; load data local inpath '../../data/files/TJOIN3' into table TJOIN3_txt; load data local inpath '../../data/files/TJOIN4' into table TJOIN4_txt; -create table TJOIN1 stored as orc AS SELECT * FROM TJOIN1_txt; -create table TJOIN2 stored as orc AS SELECT * FROM TJOIN2_txt; +create table TJOIN1_n0 stored as orc AS SELECT * FROM TJOIN1_txt; +create table TJOIN2_n0 stored as orc AS SELECT * FROM TJOIN2_txt; create table TJOIN3 stored as orc AS SELECT * FROM TJOIN3_txt; create table TJOIN4 stored as orc AS SELECT * FROM TJOIN4_txt; explain vectorization detail formatted select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1; + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1; select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1; + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1; explain vectorization detail formatted select tj1rnum, tj2rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1; + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1; select tj1rnum, tj2rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1; + (select 
tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1; diff --git a/ql/src/test/queries/clientpositive/vector_outer_reference_windowed.q b/ql/src/test/queries/clientpositive/vector_outer_reference_windowed.q index f041156993..23dfd9a2e3 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_reference_windowed.q +++ b/ql/src/test/queries/clientpositive/vector_outer_reference_windowed.q @@ -4,20 +4,20 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -DROP TABLE IF EXISTS e011_01; -DROP TABLE IF EXISTS e011_02; -DROP TABLE IF EXISTS e011_03; +DROP TABLE IF EXISTS e011_01_n0; +DROP TABLE IF EXISTS e011_02_n0; +DROP TABLE IF EXISTS e011_03_n0; -CREATE TABLE e011_01 ( +CREATE TABLE e011_01_n0 ( c1 decimal(15,2), c2 decimal(15,2)) STORED AS TEXTFILE; -CREATE TABLE e011_02 ( +CREATE TABLE e011_02_n0 ( c1 decimal(15,2), c2 decimal(15,2)); -CREATE TABLE e011_03 ( +CREATE TABLE e011_03_n0 ( c1 decimal(15,2), c2 decimal(15,2)); @@ -37,15 +37,15 @@ CREATE TABLE e011_03_small ( LOAD DATA LOCAL INPATH '../../data/files/e011_01.txt' OVERWRITE - INTO TABLE e011_01; + INTO TABLE e011_01_n0; -INSERT INTO TABLE e011_02 +INSERT INTO TABLE e011_02_n0 SELECT c1, c2 - FROM e011_01; + FROM e011_01_n0; -INSERT INTO TABLE e011_03 +INSERT INTO TABLE e011_03_n0 SELECT c1, c2 - FROM e011_01; + FROM e011_01_n0; LOAD DATA LOCAL INPATH '../../data/files/e011_01.txt' @@ -60,9 +60,9 @@ INSERT INTO TABLE e011_03_small SELECT c1, c2 FROM e011_01_small; -ANALYZE TABLE e011_01 COMPUTE STATISTICS FOR COLUMNS; -ANALYZE TABLE e011_02 COMPUTE STATISTICS FOR COLUMNS; -ANALYZE TABLE e011_03 COMPUTE STATISTICS FOR COLUMNS; +ANALYZE TABLE e011_01_n0 COMPUTE STATISTICS FOR COLUMNS; +ANALYZE TABLE e011_02_n0 COMPUTE STATISTICS FOR COLUMNS; +ANALYZE TABLE e011_03_n0 COMPUTE STATISTICS FOR COLUMNS; ANALYZE TABLE e011_01_small COMPUTE STATISTICS FOR COLUMNS; ANALYZE TABLE e011_02_small COMPUTE STATISTICS FOR COLUMNS; @@ -71,54 +71,54 @@ ANALYZE TABLE e011_03_small COMPUTE STATISTICS FOR COLUMNS; set hive.explain.user=false; explain vectorization detail -select sum(sum(c1)) over() from e011_01; -select sum(sum(c1)) over() from e011_01; +select sum(sum(c1)) over() from e011_01_n0; +select sum(sum(c1)) over() from e011_01_n0; explain vectorization detail select sum(sum(c1)) over( partition by c2 order by c1) - from e011_01 - group by e011_01.c1, e011_01.c2; + from e011_01_n0 + group by e011_01_n0.c1, e011_01_n0.c2; select sum(sum(c1)) over( partition by c2 order by c1) - from e011_01 - group by e011_01.c1, e011_01.c2; + from e011_01_n0 + group by e011_01_n0.c1, e011_01_n0.c2; explain vectorization detail -select sum(sum(e011_01.c1)) over( - partition by e011_01.c2 order by e011_01.c1) - from e011_01 - join e011_03 on e011_01.c1 = e011_03.c1 - group by e011_01.c1, e011_01.c2; -select sum(sum(e011_01.c1)) over( - partition by e011_01.c2 order by e011_01.c1) - from e011_01 - join e011_03 on e011_01.c1 = e011_03.c1 - group by e011_01.c1, e011_01.c2; +select sum(sum(e011_01_n0.c1)) over( + partition by e011_01_n0.c2 order by e011_01_n0.c1) + from e011_01_n0 + join e011_03_n0 on e011_01_n0.c1 = e011_03_n0.c1 + group by e011_01_n0.c1, e011_01_n0.c2; +select sum(sum(e011_01_n0.c1)) over( + partition by e011_01_n0.c2 order by e011_01_n0.c1) + from e011_01_n0 + join e011_03_n0 on e011_01_n0.c1 = e011_03_n0.c1 + group by 
e011_01_n0.c1, e011_01_n0.c2; explain vectorization detail -select sum(sum(e011_01.c1)) over( - partition by e011_03.c2 order by e011_03.c1) - from e011_01 - join e011_03 on e011_01.c1 = e011_03.c1 - group by e011_03.c1, e011_03.c2; -select sum(sum(e011_01.c1)) over( - partition by e011_03.c2 order by e011_03.c1) - from e011_01 - join e011_03 on e011_01.c1 = e011_03.c1 - group by e011_03.c1, e011_03.c2; +select sum(sum(e011_01_n0.c1)) over( + partition by e011_03_n0.c2 order by e011_03_n0.c1) + from e011_01_n0 + join e011_03_n0 on e011_01_n0.c1 = e011_03_n0.c1 + group by e011_03_n0.c1, e011_03_n0.c2; +select sum(sum(e011_01_n0.c1)) over( + partition by e011_03_n0.c2 order by e011_03_n0.c1) + from e011_01_n0 + join e011_03_n0 on e011_01_n0.c1 = e011_03_n0.c1 + group by e011_03_n0.c1, e011_03_n0.c2; explain vectorization detail -select sum(corr(e011_01.c1, e011_03.c1)) - over(partition by e011_01.c2 order by e011_03.c2) - from e011_01 - join e011_03 on e011_01.c1 = e011_03.c1 - group by e011_03.c2, e011_01.c2; -select sum(corr(e011_01.c1, e011_03.c1)) - over(partition by e011_01.c2 order by e011_03.c2) - from e011_01 - join e011_03 on e011_01.c1 = e011_03.c1 - group by e011_03.c2, e011_01.c2; +select sum(corr(e011_01_n0.c1, e011_03_n0.c1)) + over(partition by e011_01_n0.c2 order by e011_03_n0.c2) + from e011_01_n0 + join e011_03_n0 on e011_01_n0.c1 = e011_03_n0.c1 + group by e011_03_n0.c2, e011_01_n0.c2; +select sum(corr(e011_01_n0.c1, e011_03_n0.c1)) + over(partition by e011_01_n0.c2 order by e011_03_n0.c2) + from e011_01_n0 + join e011_03_n0 on e011_01_n0.c1 = e011_03_n0.c1 + group by e011_03_n0.c2, e011_01_n0.c2; diff --git a/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q b/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q index 107fe7cab0..74ca011401 100644 --- a/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q +++ b/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q @@ -5,7 +5,7 @@ set hive.fetch.task.conversion=none; -- Check if vectorization code is handling partitioning on DATE and the other data types. 
-CREATE TABLE flights_tiny ( +CREATE TABLE flights_tiny_n1 ( origin_city_name STRING, dest_city_name STRING, fl_date DATE, @@ -13,11 +13,11 @@ CREATE TABLE flights_tiny ( fl_num INT ); -LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE flights_tiny; +LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE flights_tiny_n1; CREATE TABLE flights_tiny_orc STORED AS ORC AS SELECT origin_city_name, dest_city_name, fl_date, to_utc_timestamp(fl_date, 'America/Los_Angeles') as fl_time, arr_delay, fl_num -FROM flights_tiny; +FROM flights_tiny_n1; SELECT * FROM flights_tiny_orc; @@ -130,7 +130,7 @@ select fl_time, count(*) from flights_tiny_orc_partitioned_timestamp group by fl -- test for Parquet file format CREATE TABLE flights_tiny_parquet STORED AS PARQUET AS SELECT origin_city_name, dest_city_name, fl_date, to_utc_timestamp(fl_date, 'America/Los_Angeles') as fl_time, arr_delay, fl_num -FROM flights_tiny; +FROM flights_tiny_n1; SELECT * FROM flights_tiny_parquet; diff --git a/ql/src/test/queries/clientpositive/vector_reduce1.q b/ql/src/test/queries/clientpositive/vector_reduce1.q index ce9049156f..e5dd59559c 100644 --- a/ql/src/test/queries/clientpositive/vector_reduce1.q +++ b/ql/src/test/queries/clientpositive/vector_reduce1.q @@ -6,7 +6,7 @@ set hive.fetch.task.conversion=none; -- SORT_QUERY_RESULTS -create table vectortab2k( +create table vectortab2k_n8( t tinyint, si smallint, i int, @@ -23,9 +23,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n8; -create table vectortab2korc( +create table vectortab2korc_n7( t tinyint, si smallint, i int, @@ -41,9 +41,9 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n7 SELECT * FROM vectortab2k_n8; explain vectorization expression -select b from vectortab2korc order by b; +select b from vectortab2korc_n7 order by b; -select b from vectortab2korc order by b; +select b from vectortab2korc_n7 order by b; diff --git a/ql/src/test/queries/clientpositive/vector_reduce2.q b/ql/src/test/queries/clientpositive/vector_reduce2.q index 80ad196f18..7f0cd7bbf6 100644 --- a/ql/src/test/queries/clientpositive/vector_reduce2.q +++ b/ql/src/test/queries/clientpositive/vector_reduce2.q @@ -6,7 +6,7 @@ set hive.fetch.task.conversion=none; -- SORT_QUERY_RESULTS -create table vectortab2k( +create table vectortab2k_n5( t tinyint, si smallint, i int, @@ -23,9 +23,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n5; -create table vectortab2korc( +create table vectortab2korc_n5( t tinyint, si smallint, i int, @@ -41,9 +41,9 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n5 SELECT * FROM vectortab2k_n5; explain vectorization expression -select s, i, s2 from vectortab2korc order by s, i, s2; +select s, i, s2 from vectortab2korc_n5 order by s, i, s2; -select s, i, s2 from vectortab2korc order by s, i, s2; +select s, i, s2 from vectortab2korc_n5 order by s, i, s2; diff --git 
a/ql/src/test/queries/clientpositive/vector_reduce3.q b/ql/src/test/queries/clientpositive/vector_reduce3.q index e01ed26a7b..8d13b8f32d 100644 --- a/ql/src/test/queries/clientpositive/vector_reduce3.q +++ b/ql/src/test/queries/clientpositive/vector_reduce3.q @@ -6,7 +6,7 @@ set hive.fetch.task.conversion=none; -- SORT_QUERY_RESULTS -create table vectortab2k( +create table vectortab2k_n2( t tinyint, si smallint, i int, @@ -23,9 +23,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n2; -create table vectortab2korc( +create table vectortab2korc_n2( t tinyint, si smallint, i int, @@ -41,9 +41,9 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n2 SELECT * FROM vectortab2k_n2; explain vectorization expression -select s from vectortab2korc order by s; +select s from vectortab2korc_n2 order by s; -select s from vectortab2korc order by s; +select s from vectortab2korc_n2 order by s; diff --git a/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q b/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q index 7d52874aa4..15c0e4ef91 100644 --- a/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q +++ b/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q @@ -2,19 +2,19 @@ set hive.explain.user=false; set hive.fetch.task.conversion=none; -CREATE TABLE decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +CREATE TABLE decimal_test_n2 STORED AS ORC AS SELECT cint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc WHERE cint is not null and cdouble is not null; SET hive.vectorized.execution.enabled=true; EXPLAIN VECTORIZATION EXPRESSION -SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test +SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 LIMIT 50; -SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test +SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 @@ -23,7 +23,7 @@ LIMIT 50; SET hive.vectorized.execution.enabled=false; SELECT sum(hash(*)) - FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test + FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 @@ -32,7 +32,7 @@ SELECT sum(hash(*)) SET hive.vectorized.execution.enabled=true; SELECT sum(hash(*)) - FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test + FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as 
min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 diff --git a/ql/src/test/queries/clientpositive/vector_retry_failure.q b/ql/src/test/queries/clientpositive/vector_retry_failure.q index 448af739b4..a5e44e7ff0 100644 --- a/ql/src/test/queries/clientpositive/vector_retry_failure.q +++ b/ql/src/test/queries/clientpositive/vector_retry_failure.q @@ -1,8 +1,8 @@ --! qt:dataset:src SET hive.vectorized.execution.enabled=true; -create table tx(a int,f string); -insert into tx values (1,'non_existent_file'); +create table tx_n0(a int,f string); +insert into tx_n0 values (1,'non_existent_file'); set zzz=1; set reexec.overlay.zzz=2; @@ -11,5 +11,5 @@ set hive.query.reexecution.enabled=true; set hive.query.reexecution.strategies=overlay; explain vectorization expression -select assert_true(${hiveconf:zzz} > a) from tx group by a; -select assert_true(${hiveconf:zzz} > a) from tx group by a; +select assert_true(${hiveconf:zzz} > a) from tx_n0 group by a; +select assert_true(${hiveconf:zzz} > a) from tx_n0 group by a; diff --git a/ql/src/test/queries/clientpositive/vector_string_concat.q b/ql/src/test/queries/clientpositive/vector_string_concat.q index 87b1ec1909..75da8f1e7c 100644 --- a/ql/src/test/queries/clientpositive/vector_string_concat.q +++ b/ql/src/test/queries/clientpositive/vector_string_concat.q @@ -50,7 +50,7 @@ SELECT s AS `string`, ------------------------------------------------------------------------------------------ -create table vectortab2k( +create table vectortab2k_n0( t tinyint, si smallint, i int, @@ -67,9 +67,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n0; -create table vectortab2korc( +create table vectortab2korc_n0( t tinyint, si smallint, i int, @@ -85,17 +85,17 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n0 SELECT * FROM vectortab2k_n0; EXPLAIN VECTORIZATION EXPRESSION SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field` - FROM vectortab2korc + FROM vectortab2korc_n0 GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) ORDER BY `field` LIMIT 50; SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field` - FROM vectortab2korc + FROM vectortab2korc_n0 GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) ORDER BY `field` LIMIT 50; diff --git a/ql/src/test/queries/clientpositive/vector_string_decimal.q b/ql/src/test/queries/clientpositive/vector_string_decimal.q index 186e339b73..933d046c4b 100644 --- a/ql/src/test/queries/clientpositive/vector_string_decimal.q +++ b/ql/src/test/queries/clientpositive/vector_string_decimal.q @@ -2,14 +2,14 @@ set hive.vectorized.execution.enabled=false; set hive.fetch.task.conversion=none; drop table orc_decimal; -drop table staging; +drop table staging_n1; create table orc_decimal (id decimal(18,0)) stored as orc; -create table staging (id decimal(18,0)); +create table staging_n1 (id 
decimal(18,0)); -insert into staging values (34324.0), (100000000.0), (200000000.0), (300000000.0); +insert into staging_n1 values (34324.0), (100000000.0), (200000000.0), (300000000.0); -insert overwrite table orc_decimal select id from staging; +insert overwrite table orc_decimal select id from staging_n1; set hive.vectorized.execution.enabled=true; @@ -18,4 +18,4 @@ select * from orc_decimal where id in ('100000000', '200000000'); select * from orc_decimal where id in ('100000000', '200000000'); drop table orc_decimal; -drop table staging; +drop table staging_n1; diff --git a/ql/src/test/queries/clientpositive/vector_struct_in.q b/ql/src/test/queries/clientpositive/vector_struct_in.q index 207be3733c..49a9374bd1 100644 --- a/ql/src/test/queries/clientpositive/vector_struct_in.q +++ b/ql/src/test/queries/clientpositive/vector_struct_in.q @@ -8,12 +8,12 @@ set hive.fetch.task.conversion=none; -- SORT_QUERY_RESULTS -- 2 Strings -create table test_1 (`id` string, `lineid` string) stored as orc; +create table test_1_n1 (`id` string, `lineid` string) stored as orc; -insert into table test_1 values ('one','1'), ('seven','1'); +insert into table test_1_n1 values ('one','1'), ('seven','1'); explain vectorization expression -select * from test_1 where struct(`id`, `lineid`) +select * from test_1_n1 where struct(`id`, `lineid`) IN ( struct('two','3'), struct('three','1'), @@ -26,7 +26,7 @@ struct('nine','1'), struct('ten','1') ); -select * from test_1 where struct(`id`, `lineid`) +select * from test_1_n1 where struct(`id`, `lineid`) IN ( struct('two','3'), struct('three','1'), @@ -51,7 +51,7 @@ struct('eight','1'), struct('seven','1'), struct('nine','1'), struct('ten','1') -) as b from test_1 ; +) as b from test_1_n1 ; select `id`, `lineid`, struct(`id`, `lineid`) IN ( @@ -64,16 +64,16 @@ struct('eight','1'), struct('seven','1'), struct('nine','1'), struct('ten','1') -) as b from test_1 ; +) as b from test_1_n1 ; -- 2 Integers -create table test_2 (`id` int, `lineid` int) stored as orc; +create table test_2_n1 (`id` int, `lineid` int) stored as orc; -insert into table test_2 values (1,1), (7,1); +insert into table test_2_n1 values (1,1), (7,1); explain vectorization expression -select * from test_2 where struct(`id`, `lineid`) +select * from test_2_n1 where struct(`id`, `lineid`) IN ( struct(2,3), struct(3,1), @@ -86,7 +86,7 @@ struct(9,1), struct(10,1) ); -select * from test_2 where struct(`id`, `lineid`) +select * from test_2_n1 where struct(`id`, `lineid`) IN ( struct(2,3), struct(3,1), @@ -111,7 +111,7 @@ struct(8,1), struct(7,1), struct(9,1), struct(10,1) -) as b from test_2; +) as b from test_2_n1; select `id`, `lineid`, struct(`id`, `lineid`) IN ( @@ -124,7 +124,7 @@ struct(8,1), struct(7,1), struct(9,1), struct(10,1) -) as b from test_2; +) as b from test_2_n1; -- 1 String and 1 Integer create table test_3 (`id` string, `lineid` int) stored as orc; diff --git a/ql/src/test/queries/clientpositive/vector_tablesample_rows.q b/ql/src/test/queries/clientpositive/vector_tablesample_rows.q index adc67b168b..24bd31ed48 100644 --- a/ql/src/test/queries/clientpositive/vector_tablesample_rows.q +++ b/ql/src/test/queries/clientpositive/vector_tablesample_rows.q @@ -12,18 +12,18 @@ select 'key1', 'value1' from alltypesorc tablesample (1 rows); select 'key1', 'value1' from alltypesorc tablesample (1 rows); -create table decimal_2 (t decimal(18,9)) stored as orc; +create table decimal_2_n0 (t decimal(18,9)) stored as orc; explain vectorization detail -insert overwrite table decimal_2 +insert overwrite 
table decimal_2_n0 select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows); -insert overwrite table decimal_2 +insert overwrite table decimal_2_n0 select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows); -select count(*) from decimal_2; +select count(*) from decimal_2_n0; -drop table decimal_2; +drop table decimal_2_n0; -- Dummy tables HIVE-13190 diff --git a/ql/src/test/queries/clientpositive/vector_udf1.q b/ql/src/test/queries/clientpositive/vector_udf1.q index 1474f6ced2..a258025dc1 100644 --- a/ql/src/test/queries/clientpositive/vector_udf1.q +++ b/ql/src/test/queries/clientpositive/vector_udf1.q @@ -2,11 +2,11 @@ SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -drop table varchar_udf_1; +drop table varchar_udf_1_n2; -create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20), +create table varchar_udf_1_n2 (c1 string, c2 string, c3 varchar(10), c4 varchar(20), d1 string, d2 string, d3 varchar(10), d4 varchar(10)) STORED AS ORC; -insert overwrite table varchar_udf_1 +insert overwrite table varchar_udf_1_n2 select key, value, key, value, '2015-01-14', '2015-01-14', '2017-01-11', '2017-01-11' from src where key = '238' limit 1; -- UDFs with varchar support @@ -15,39 +15,39 @@ select concat(c1, c2), concat(c3, c4), concat(c1, c2) = concat(c3, c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select concat(c1, c2), concat(c3, c4), concat(c1, c2) = concat(c3, c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select upper(c2), upper(c4), upper(c2) = upper(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select upper(c2), upper(c4), upper(c2) = upper(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select lower(c2), lower(c4), lower(c2) = lower(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select lower(c2), lower(c4), lower(c2) = lower(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; -- Scalar UDFs explain vectorization detail @@ -55,293 +55,293 @@ select ascii(c2), ascii(c4), ascii(c2) = ascii(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select ascii(c2), ascii(c4), ascii(c2) = ascii(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select concat_ws('|', c1, c2), concat_ws('|', c3, c4), concat_ws('|', c1, c2) = concat_ws('|', c3, c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select concat_ws('|', c1, c2), concat_ws('|', c3, c4), concat_ws('|', c1, c2) = concat_ws('|', c3, c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select decode(encode(c2, 'US-ASCII'), 'US-ASCII'), decode(encode(c4, 'US-ASCII'), 'US-ASCII'), decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select decode(encode(c2, 'US-ASCII'), 'US-ASCII'), decode(encode(c4, 'US-ASCII'), 'US-ASCII'), decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select instr(c2, '_'), instr(c4, '_'), instr(c2, '_') = instr(c4, '_') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select instr(c2, '_'), instr(c4, '_'), instr(c2, '_') = instr(c4, '_') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select 
replace(c1, '_', c2), replace(c3, '_', c4), replace(c1, '_', c2) = replace(c3, '_', c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select replace(c1, '_', c2), replace(c3, '_', c4), replace(c1, '_', c2) = replace(c3, '_', c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select reverse(c2), reverse(c4), reverse(c2) = reverse(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select reverse(c2), reverse(c4), reverse(c2) = reverse(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select next_day(d1, 'TU'), next_day(d4, 'WE'), next_day(d1, 'TU') = next_day(d4, 'WE') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select next_day(d1, 'TU'), next_day(d4, 'WE'), next_day(d1, 'TU') = next_day(d4, 'WE') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select months_between(d1, d3), months_between(d2, d4), months_between(d1, d3) = months_between(d2, d4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select months_between(d1, d3), months_between(d2, d4), months_between(d1, d3) = months_between(d2, d4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select length(c2), length(c4), length(c2) = length(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select length(c2), length(c4), length(c2) = length(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select locate('a', 'abcdabcd', 3), locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3), locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select locate('a', 'abcdabcd', 3), locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3), locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select lpad(c2, 15, ' '), lpad(c4, 15, ' '), lpad(c2, 15, ' ') = lpad(c4, 15, ' ') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select lpad(c2, 15, ' '), lpad(c4, 15, ' '), lpad(c2, 15, ' ') = lpad(c4, 15, ' ') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select ltrim(c2), ltrim(c4), ltrim(c2) = ltrim(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select ltrim(c2), ltrim(c4), ltrim(c2) = ltrim(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select regexp_extract(c2, 'val_([0-9]+)', 1), regexp_extract(c4, 'val_([0-9]+)', 1), regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select regexp_extract(c2, 'val_([0-9]+)', 1), regexp_extract(c4, 'val_([0-9]+)', 1), regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select regexp_replace(c2, 'val', 'replaced'), regexp_replace(c4, 'val', 'replaced'), 
regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select regexp_replace(c2, 'val', 'replaced'), regexp_replace(c4, 'val', 'replaced'), regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select reverse(c2), reverse(c4), reverse(c2) = reverse(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select reverse(c2), reverse(c4), reverse(c2) = reverse(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select rpad(c2, 15, ' '), rpad(c4, 15, ' '), rpad(c2, 15, ' ') = rpad(c4, 15, ' ') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select rpad(c2, 15, ' '), rpad(c4, 15, ' '), rpad(c2, 15, ' ') = rpad(c4, 15, ' ') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select rtrim(c2), rtrim(c4), rtrim(c2) = rtrim(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select rtrim(c2), rtrim(c4), rtrim(c2) = rtrim(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select sentences('See spot run. See jane run.'), sentences(cast('See spot run. See jane run.' as varchar(50))) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select sentences('See spot run. See jane run.'), sentences(cast('See spot run. See jane run.' as varchar(50))) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select split(c2, '_'), split(c4, '_') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select split(c2, '_'), split(c4, '_') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select str_to_map('a:1,b:2,c:3',',',':'), str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select str_to_map('a:1,b:2,c:3',',',':'), str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':') -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select substr(c2, 1, 3), substr(c4, 1, 3), substr(c2, 1, 3) = substr(c4, 1, 3) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select substr(c2, 1, 3), substr(c4, 1, 3), substr(c2, 1, 3) = substr(c4, 1, 3) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; explain vectorization detail select trim(c2), trim(c4), trim(c2) = trim(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; select trim(c2), trim(c4), trim(c2) = trim(c4) -from varchar_udf_1 limit 1; +from varchar_udf_1_n2 limit 1; -- Aggregate Functions @@ -349,33 +349,33 @@ explain vectorization detail select compute_stats(c2, 16), compute_stats(c4, 16) -from varchar_udf_1; +from varchar_udf_1_n2; select compute_stats(c2, 'fm', 16), compute_stats(c4, 'fm', 16) -from varchar_udf_1; +from varchar_udf_1_n2; explain vectorization detail select min(c2), min(c4) -from varchar_udf_1; +from varchar_udf_1_n2; select min(c2), min(c4) -from varchar_udf_1; +from varchar_udf_1_n2; explain vectorization detail select max(c2), max(c4) -from varchar_udf_1; +from varchar_udf_1_n2; select max(c2), max(c4) -from varchar_udf_1; +from varchar_udf_1_n2; -drop table varchar_udf_1; +drop table varchar_udf_1_n2; diff --git a/ql/src/test/queries/clientpositive/vector_udf_character_length.q b/ql/src/test/queries/clientpositive/vector_udf_character_length.q index e93a4d01e2..2d5b7a351a 100644 
--- a/ql/src/test/queries/clientpositive/vector_udf_character_length.q +++ b/ql/src/test/queries/clientpositive/vector_udf_character_length.q @@ -9,25 +9,25 @@ DESCRIBE FUNCTION EXTENDED character_length; DESCRIBE FUNCTION char_length; DESCRIBE FUNCTION EXTENDED char_length; -CREATE TABLE dest1(len INT); -EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value); -FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value); +CREATE TABLE dest1_n59(len INT); +EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n59 SELECT character_length(src1.value); +FROM src1 INSERT OVERWRITE TABLE dest1_n59 SELECT character_length(src1.value); -- SORT_BEFORE_DIFF -SELECT dest1.* FROM dest1; -DROP TABLE dest1; +SELECT dest1_n59.* FROM dest1_n59; +DROP TABLE dest1_n59; -- Test with non-ascii characters. -CREATE TABLE dest1(name STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1; -INSERT INTO dest1 VALUES(NULL); -CREATE TABLE dest2 STORED AS ORC AS SELECT * FROM dest1; +CREATE TABLE dest1_n59(name STRING) STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n59; +INSERT INTO dest1_n59 VALUES(NULL); +CREATE TABLE dest2_n13 STORED AS ORC AS SELECT * FROM dest1_n59; -EXPLAIN SELECT character_length(dest2.name) FROM dest2; +EXPLAIN SELECT character_length(dest2_n13.name) FROM dest2_n13; -- SORT_BEFORE_DIFF -SELECT character_length(dest2.name) FROM dest2; +SELECT character_length(dest2_n13.name) FROM dest2_n13; -EXPLAIN SELECT char_length(dest2.name) FROM dest2; +EXPLAIN SELECT char_length(dest2_n13.name) FROM dest2_n13; -- SORT_BEFORE_DIFF -SELECT char_length(dest2.name) FROM dest2; -DROP TABLE dest1; -DROP TABLE dest2; +SELECT char_length(dest2_n13.name) FROM dest2_n13; +DROP TABLE dest1_n59; +DROP TABLE dest2_n13; diff --git a/ql/src/test/queries/clientpositive/vector_udf_octet_length.q b/ql/src/test/queries/clientpositive/vector_udf_octet_length.q index eea1b3801c..7c67291272 100644 --- a/ql/src/test/queries/clientpositive/vector_udf_octet_length.q +++ b/ql/src/test/queries/clientpositive/vector_udf_octet_length.q @@ -6,20 +6,20 @@ set hive.fetch.task.conversion=none; DESCRIBE FUNCTION octet_length; DESCRIBE FUNCTION EXTENDED octet_length; -CREATE TABLE dest1(len INT); -EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value); -FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value); +CREATE TABLE dest1_n51(len INT); +EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n51 SELECT octet_length(src1.value); +FROM src1 INSERT OVERWRITE TABLE dest1_n51 SELECT octet_length(src1.value); -- SORT_BEFORE_DIFF -SELECT dest1.* FROM dest1; -DROP TABLE dest1; +SELECT dest1_n51.* FROM dest1_n51; +DROP TABLE dest1_n51; -- Test with non-ascii characters. 
-CREATE TABLE dest1(name STRING) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1; -INSERT INTO dest1 VALUES(NULL); -CREATE TABLE dest2 STORED AS ORC AS SELECT * FROM dest1; -EXPLAIN SELECT octet_length(dest2.name) FROM dest2; +CREATE TABLE dest1_n51(name STRING) STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n51; +INSERT INTO dest1_n51 VALUES(NULL); +CREATE TABLE dest2_n10 STORED AS ORC AS SELECT * FROM dest1_n51; +EXPLAIN SELECT octet_length(dest2_n10.name) FROM dest2_n10; -- SORT_BEFORE_DIFF -SELECT octet_length(dest2.name) FROM dest2; -DROP TABLE dest1; -DROP TABLE dest2; +SELECT octet_length(dest2_n10.name) FROM dest2_n10; +DROP TABLE dest1_n51; +DROP TABLE dest2_n10; diff --git a/ql/src/test/queries/clientpositive/vector_udf_string_to_boolean.q b/ql/src/test/queries/clientpositive/vector_udf_string_to_boolean.q index 5c052a1d12..57b25d969b 100644 --- a/ql/src/test/queries/clientpositive/vector_udf_string_to_boolean.q +++ b/ql/src/test/queries/clientpositive/vector_udf_string_to_boolean.q @@ -1,23 +1,23 @@ set hive.mapred.mode=nonstrict; SET hive.vectorized.execution.enabled = false; SET hive.int.timestamp.conversion.in.seconds=false; set hive.fetch.task.conversion=none; -create table t (s string) stored as orc; +create table t_n17 (s string) stored as orc; -insert into t values ('false'); -insert into t values ('FALSE'); -insert into t values ('FaLsE'); -insert into t values ('true'); -insert into t values ('TRUE'); -insert into t values ('TrUe'); -insert into t values (''); -insert into t values ('Other'); -insert into t values ('Off'); -insert into t values ('No'); -insert into t values ('0'); -insert into t values ('1'); +insert into t_n17 values ('false'); +insert into t_n17 values ('FALSE'); +insert into t_n17 values ('FaLsE'); +insert into t_n17 values ('true'); +insert into t_n17 values ('TRUE'); +insert into t_n17 values ('TrUe'); +insert into t_n17 values (''); +insert into t_n17 values ('Other'); +insert into t_n17 values ('Off'); +insert into t_n17 values ('No'); +insert into t_n17 values ('0'); +insert into t_n17 values ('1'); -explain select s,cast(s as boolean) from t order by s; +explain select s,cast(s as boolean) from t_n17 order by s; -select s,cast(s as boolean) from t order by s; +select s,cast(s as boolean) from t_n17 order by s; diff --git a/ql/src/test/queries/clientpositive/vector_varchar_4.q b/ql/src/test/queries/clientpositive/vector_varchar_4.q index b3402d0df2..5ad0ff7559 100644 --- a/ql/src/test/queries/clientpositive/vector_varchar_4.q +++ b/ql/src/test/queries/clientpositive/vector_varchar_4.q @@ -3,10 +3,10 @@ set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -drop table if exists vectortab2k; -drop table if exists vectortab2korc; +drop table if exists vectortab2k_n1; +drop table if exists vectortab2korc_n1; -create table vectortab2k( +create table vectortab2k_n1( t tinyint, si smallint, i int, @@ -23,9 +23,9 @@ create table vectortab2k( ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k; +LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n1; -create table vectortab2korc( 
+create table vectortab2korc_n1( t tinyint, si smallint, i int, @@ -41,14 +41,14 @@ create table vectortab2korc( dt date) STORED AS ORC; -INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k; +INSERT INTO TABLE vectortab2korc_n1 SELECT * FROM vectortab2k_n1; drop table if exists varchar_lazy_binary_columnar; create table varchar_lazy_binary_columnar(vt varchar(10), vsi varchar(10), vi varchar(20), vb varchar(30), vf varchar(20),vd varchar(20),vs varchar(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile; explain vectorization expression -insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc; +insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc_n1; --- insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc; +-- insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc_n1; -- select count(*) as cnt from varchar_lazy_binary_columnar group by vs order by cnt asc; diff --git a/ql/src/test/queries/clientpositive/vector_varchar_simple.q b/ql/src/test/queries/clientpositive/vector_varchar_simple.q index f846490ae5..971e097ed7 100644 --- a/ql/src/test/queries/clientpositive/vector_varchar_simple.q +++ b/ql/src/test/queries/clientpositive/vector_varchar_simple.q @@ -5,14 +5,14 @@ set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -drop table varchar_2; +drop table varchar_2_n0; -create table varchar_2 ( +create table varchar_2_n0 ( key varchar(10), value varchar(20) ) stored as orc; -insert overwrite table varchar_2 select * from src; +insert overwrite table varchar_2_n0 select * from src; select key, value from src @@ -20,13 +20,13 @@ order by key asc limit 5; explain vectorization select key, value -from varchar_2 +from varchar_2_n0 order by key asc limit 5; -- should match the query from src select key, value -from varchar_2 +from varchar_2_n0 order by key asc limit 5; @@ -36,17 +36,17 @@ order by key desc limit 5; explain vectorization select key, value -from varchar_2 +from varchar_2_n0 order by key desc limit 5; -- should match the query from src select key, value -from varchar_2 +from varchar_2_n0 order by key desc limit 5; -drop table varchar_2; +drop table varchar_2_n0; -- Implicit conversion. Occurs in reduce-side under Tez. 
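Per the comment above, the implicit conversion this test targets happens on the reduce side under Tez: a value of another type is written into a varchar column, forcing a cast inside the reducer's operator tree. A hedged sketch of that pattern (vc_demo is an illustrative stand-in; alltypesorc is the standard qtest fixture):

-- illustrative only; the real test continues with varchar_3 below
create table vc_demo (field varchar(25)) stored as orc;
-- cint is an int, so writing it into field requires an implicit int -> varchar cast
insert into table vc_demo select cint from alltypesorc limit 10;
select field from vc_demo order by field;
drop table vc_demo;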
create table varchar_3 ( diff --git a/ql/src/test/queries/clientpositive/vector_when_case_null.q b/ql/src/test/queries/clientpositive/vector_when_case_null.q index 4acd6dc6c6..584170387e 100644 --- a/ql/src/test/queries/clientpositive/vector_when_case_null.q +++ b/ql/src/test/queries/clientpositive/vector_when_case_null.q @@ -5,10 +5,10 @@ set hive.fetch.task.conversion=none; -- SORT_QUERY_RESULTS -create table count_case_groupby (key string, bool boolean) STORED AS orc; -insert into table count_case_groupby values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL); +create table count_case_groupby_n0 (key string, bool boolean) STORED AS orc; +insert into table count_case_groupby_n0 values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL); explain vectorization expression -SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key; +SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby_n0 GROUP BY key; -SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key; \ No newline at end of file +SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby_n0 GROUP BY key; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/vector_windowing.q b/ql/src/test/queries/clientpositive/vector_windowing.q index f288418f83..6f6029c407 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing.q +++ b/ql/src/test/queries/clientpositive/vector_windowing.q @@ -375,12 +375,12 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi -- 22. testViewAsTableInputWithWindowing explain vectorization detail -create view IF NOT EXISTS mfgr_price_view as +create view IF NOT EXISTS mfgr_price_view_n2 as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part group by p_mfgr, p_brand; -create view IF NOT EXISTS mfgr_price_view as +create view IF NOT EXISTS mfgr_price_view_n2 as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part @@ -391,7 +391,7 @@ select * from ( select p_mfgr, p_brand, s, round(sum(s) over w1 , 2) as s1 -from mfgr_price_view +from mfgr_price_view_n2 window w1 as (distribute by p_mfgr sort by p_mfgr ) ) sq order by p_mfgr, p_brand; @@ -399,32 +399,32 @@ select * from ( select p_mfgr, p_brand, s, round(sum(s) over w1 , 2) as s1 -from mfgr_price_view +from mfgr_price_view_n2 window w1 as (distribute by p_mfgr sort by p_mfgr ) ) sq order by p_mfgr, p_brand; select p_mfgr, p_brand, s, round(sum(s) over w1 ,2) as s1 -from mfgr_price_view +from mfgr_price_view_n2 window w1 as (distribute by p_mfgr sort by p_brand rows between 2 preceding and current row); -- 23. 
testCreateViewWithWindowingQuery explain vectorization detail -create view IF NOT EXISTS mfgr_brand_price_view as +create view IF NOT EXISTS mfgr_brand_price_view_n0 as select p_mfgr, p_brand, round(sum(p_retailprice) over w1,2) as s from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and current row); -create view IF NOT EXISTS mfgr_brand_price_view as +create view IF NOT EXISTS mfgr_brand_price_view_n0 as select p_mfgr, p_brand, round(sum(p_retailprice) over w1,2) as s from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and current row); explain vectorization detail -select * from mfgr_brand_price_view; -select * from mfgr_brand_price_view; +select * from mfgr_brand_price_view_n0; +select * from mfgr_brand_price_view_n0; -- 24. testLateralViews explain vectorization detail @@ -440,7 +440,7 @@ lateral view explode(arr) part_lv as lv_col window w1 as (distribute by p_mfgr sort by p_size, lv_col rows between 2 preceding and current row); -- 25. testMultipleInserts3SWQs -CREATE TABLE part_1( +CREATE TABLE part_1_n0( p_mfgr STRING, p_name STRING, p_size INT, @@ -448,7 +448,7 @@ r INT, dr INT, s DOUBLE); -CREATE TABLE part_2( +CREATE TABLE part_2_n0( p_mfgr STRING, p_name STRING, p_size INT, @@ -458,7 +458,7 @@ cud INT, s2 DOUBLE, fv1 INT); -CREATE TABLE part_3( +CREATE TABLE part_3_n0( p_mfgr STRING, p_name STRING, p_size INT, @@ -468,12 +468,12 @@ fv INT); explain vectorization detail from part -INSERT OVERWRITE TABLE part_1 +INSERT OVERWRITE TABLE part_1_n0 select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name ) as r, dense_rank() over(distribute by p_mfgr sort by p_name ) as dr, round(sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row),2) as s -INSERT OVERWRITE TABLE part_2 +INSERT OVERWRITE TABLE part_2_n0 select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -481,19 +481,19 @@ cume_dist() over(distribute by p_mfgr sort by p_name) as cud, round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, first_value(p_size) over w1 as fv1 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following) -INSERT OVERWRITE TABLE part_3 +INSERT OVERWRITE TABLE part_3_n0 select p_mfgr,p_name, p_size, count(*) over(distribute by p_mfgr sort by p_name) as c, count(p_size) over(distribute by p_mfgr sort by p_name) as ca, first_value(p_size) over w1 as fv window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following); from part -INSERT OVERWRITE TABLE part_1 +INSERT OVERWRITE TABLE part_1_n0 select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name ) as r, dense_rank() over(distribute by p_mfgr sort by p_name ) as dr, round(sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row),2) as s -INSERT OVERWRITE TABLE part_2 +INSERT OVERWRITE TABLE part_2_n0 select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -501,18 +501,18 @@ cume_dist() over(distribute by p_mfgr sort by p_name) as cud, round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2, first_value(p_size) over w1 as fv1 window w1 as (distribute by p_mfgr sort by 
p_mfgr, p_name rows between 2 preceding and 2 following) -INSERT OVERWRITE TABLE part_3 +INSERT OVERWRITE TABLE part_3_n0 select p_mfgr,p_name, p_size, count(*) over(distribute by p_mfgr sort by p_name) as c, count(p_size) over(distribute by p_mfgr sort by p_name) as ca, first_value(p_size) over w1 as fv window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following); -select * from part_1; +select * from part_1_n0; -select * from part_2; +select * from part_2_n0; -select * from part_3; +select * from part_3_n0; -- 26. testGroupByHavingWithSWQAndAlias explain vectorization detail diff --git a/ql/src/test/queries/clientpositive/vector_windowing_expressions.q b/ql/src/test/queries/clientpositive/vector_windowing_expressions.q index 3ee878735f..90ef4f409b 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_expressions.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_expressions.q @@ -6,9 +6,9 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table over10k; +drop table over10k_n3; -create table over10k( +create table over10k_n3( t tinyint, si smallint, i int, @@ -23,7 +23,7 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n3; explain vectorization detail select p_mfgr, p_retailprice, p_size, @@ -54,17 +54,17 @@ from part ; explain vectorization detail -select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k limit 100; -select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k limit 100; +select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k_n3 limit 100; +select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k_n3 limit 100; explain vectorization detail -select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k limit 100; -select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k limit 100; +select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k_n3 limit 100; +select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k_n3 limit 100; explain vectorization detail -select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k limit 100; -select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k limit 100; +select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k_n3 limit 100; +select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k_n3 limit 100; explain vectorization detail -select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k limit 100; -select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k limit 100; +select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k_n3 limit 100; +select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k_n3 limit 100; explain vectorization detail select p_mfgr, avg(p_retailprice) over(partition by p_mfgr, p_type order by p_mfgr) from part; @@ -75,13 +75,13 @@ select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfg select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfgr rows between unbounded preceding and 
current row) from part; -- multi table insert test -create table t1 (a1 int, b1 string); -create table t2 (a1 int, b1 string); +create table t1_n23 (a1 int, b1 string); +create table t2_n15 (a1 int, b1 string); explain vectorization detail -from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select * ; -from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select * ; -select * from t1 limit 3; -select * from t2 limit 3; +from (select sum(i) over (partition by ts order by i), s from over10k_n3) tt insert overwrite table t1_n23 select * insert overwrite table t2_n15 select * ; +from (select sum(i) over (partition by ts order by i), s from over10k_n3) tt insert overwrite table t1_n23 select * insert overwrite table t2_n15 select * ; +select * from t1_n23 limit 3; +select * from t2_n15 limit 3; explain vectorization detail select p_mfgr, p_retailprice, p_size, @@ -120,9 +120,9 @@ select p_mfgr, avg(p_retailprice) over(partition by p_mfgr, p_type order by p_mf select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfgr rows between unbounded preceding and current row) from part; -from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select * ; -select * from t1 limit 3; -select * from t2 limit 3; +from (select sum(i) over (partition by ts order by i), s from over10k_n3) tt insert overwrite table t1_n23 select * insert overwrite table t2_n15 select * ; +select * from t1_n23 limit 3; +select * from t2_n15 limit 3; select p_mfgr, p_retailprice, p_size, round(sum(p_retailprice) over w1 , 2) + 50.0 = round(sum(lag(p_retailprice,1,50.0)) over w1 + (last_value(p_retailprice) over w1),2) diff --git a/ql/src/test/queries/clientpositive/vector_windowing_multipartitioning.q b/ql/src/test/queries/clientpositive/vector_windowing_multipartitioning.q index cdd6e03561..acd9d397d3 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_multipartitioning.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_multipartitioning.q @@ -5,9 +5,9 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table over10k; +drop table over10k_n6; -create table over10k( +create table over10k_n6( t tinyint, si smallint, i int, @@ -22,42 +22,42 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n6; explain vectorization detail -select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k; -select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k; +select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k_n6; +select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k_n6; explain vectorization detail select s, rank() over (partition by s order by `dec` desc), sum(b) over (partition by s order by ts desc) -from over10k +from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck'; select s, rank() over (partition by s order by `dec` desc), sum(b) over (partition by s order by ts desc) -from over10k +from over10k_n6 where s 
= 'tom allen' or s = 'bob steinbeck'; explain vectorization detail -select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k where s = 'tom allen' or s = 'bob steinbeck' ; -select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k where s = 'tom allen' or s = 'bob steinbeck' ; +select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck' ; +select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck' ; explain vectorization detail -select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k +select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck'; -select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k +select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck'; explain vectorization detail -select s, sum(f) over (partition by i), row_number() over (order by f) from over10k where s = 'tom allen' or s = 'bob steinbeck'; -select s, sum(f) over (partition by i), row_number() over (order by f) from over10k where s = 'tom allen' or s = 'bob steinbeck'; +select s, sum(f) over (partition by i), row_number() over (order by f) from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck'; +select s, sum(f) over (partition by i), row_number() over (order by f) from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck'; explain vectorization detail select s, rank() over w1, rank() over w2 -from over10k +from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck' window w1 as (partition by s order by `dec`), @@ -65,7 +65,7 @@ w2 as (partition by si order by f) ; select s, rank() over w1, rank() over w2 -from over10k +from over10k_n6 where s = 'tom allen' or s = 'bob steinbeck' window w1 as (partition by s order by `dec`), diff --git a/ql/src/test/queries/clientpositive/vector_windowing_navfn.q b/ql/src/test/queries/clientpositive/vector_windowing_navfn.q index e31eae8c79..4547061d1e 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_navfn.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_navfn.q @@ -6,9 +6,9 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table over10k; +drop table over10k_n7; -create table over10k( +create table over10k_n7( t tinyint, si smallint, i int, @@ -23,7 +23,7 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n7; explain vectorization detail select row_number() over() from src where key = '238'; @@ -31,36 +31,36 @@ select row_number() over() from src where key = '238'; select row_number() over() from src where key = '238'; explain vectorization detail -select s, row_number() over (partition by d order by `dec`) from over10k limit 100; +select s, row_number() over (partition by d order by `dec`) from over10k_n7 limit 100; -select s, row_number() over (partition by d order by `dec`) from over10k limit 100; +select s, row_number() over (partition by d order by `dec`) from over10k_n7 limit 100; 
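The statements that follow exercise the navigation functions row_number(), lead(), lag(), first_value() and last_value(). As a reminder of the semantics being vectorized: lead(expr, n[, default]) reads n rows ahead within the partition and returns NULL, or the default if one is given, once it runs past the partition's last row. A toy sketch (nav_demo and its rows are illustrative, not part of the patch):

-- toy data for the navigation-function semantics described above
create table nav_demo (grp string, x int) stored as orc;
insert into nav_demo values ('a', 1), ('a', 2), ('a', 3), ('b', 10);
-- the last row of each partition has no successor, so the default 0 is returned
select grp, x, lead(x, 1, 0) over (partition by grp order by x) from nav_demo;
drop table nav_demo;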
explain vectorization detail -select i, lead(s) over (partition by bin order by d,i desc) from over10k limit 100; +select i, lead(s) over (partition by bin order by d,i desc) from over10k_n7 limit 100; -select i, lead(s) over (partition by bin order by d,i desc) from over10k limit 100; +select i, lead(s) over (partition by bin order by d,i desc) from over10k_n7 limit 100; explain vectorization detail -select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k limit 100; +select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k_n7 limit 100; -select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k limit 100; +select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k_n7 limit 100; explain vectorization detail -select s, last_value(t) over (partition by d order by f) from over10k limit 100; +select s, last_value(t) over (partition by d order by f) from over10k_n7 limit 100; -select s, last_value(t) over (partition by d order by f) from over10k limit 100; +select s, last_value(t) over (partition by d order by f) from over10k_n7 limit 100; explain vectorization detail -select s, first_value(s) over (partition by bo order by s) from over10k limit 100; +select s, first_value(s) over (partition by bo order by s) from over10k_n7 limit 100; -select s, first_value(s) over (partition by bo order by s) from over10k limit 100; +select s, first_value(s) over (partition by bo order by s) from over10k_n7 limit 100; explain vectorization detail select t, s, i, last_value(i) over (partition by t order by s) -from over10k where (s = 'oscar allen' or s = 'oscar carson') and t = 10; +from over10k_n7 where (s = 'oscar allen' or s = 'oscar carson') and t = 10; -- select t, s, i, last_value(i) over (partition by t order by s) --- from over10k where (s = 'oscar allen' or s = 'oscar carson') and t = 10; +-- from over10k_n7 where (s = 'oscar allen' or s = 'oscar carson') and t = 10; drop table if exists wtest; create table wtest as diff --git a/ql/src/test/queries/clientpositive/vector_windowing_order_null.q b/ql/src/test/queries/clientpositive/vector_windowing_order_null.q index 35d260d8ff..fd4f82b431 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_order_null.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_order_null.q @@ -5,9 +5,9 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table over10k; +drop table over10k_n21; -create table over10k( +create table over10k_n21( t tinyint, si smallint, i int, @@ -22,37 +22,37 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; -load data local inpath '../../data/files/over4_null' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n21; +load data local inpath '../../data/files/over4_null' into table over10k_n21; explain vectorization detail -select i, s, b, sum(b) over (partition by i order by s nulls last,b rows unbounded preceding) from over10k limit 10; -select i, s, b, sum(b) over (partition by i order by s nulls last,b rows unbounded preceding) from over10k limit 10; +select i, s, b, sum(b) over (partition by i order by s nulls last,b rows unbounded preceding) from over10k_n21 limit 10; +select i, s, b, sum(b) over (partition by i order by s nulls last,b rows unbounded preceding) from over10k_n21 limit 10; explain vectorization detail -select d, s, f, 
sum(f) over (partition by d order by s,f desc nulls first rows unbounded preceding) from over10k limit 10; -select d, s, f, sum(f) over (partition by d order by s,f desc nulls first rows unbounded preceding) from over10k limit 10; +select d, s, f, sum(f) over (partition by d order by s,f desc nulls first rows unbounded preceding) from over10k_n21 limit 10; +select d, s, f, sum(f) over (partition by d order by s,f desc nulls first rows unbounded preceding) from over10k_n21 limit 10; explain vectorization detail -select ts, s, f, sum(f) over (partition by ts order by f asc nulls first range between current row and unbounded following) from over10k limit 10; -select ts, s, f, sum(f) over (partition by ts order by f asc nulls first range between current row and unbounded following) from over10k limit 10; +select ts, s, f, sum(f) over (partition by ts order by f asc nulls first range between current row and unbounded following) from over10k_n21 limit 10; +select ts, s, f, sum(f) over (partition by ts order by f asc nulls first range between current row and unbounded following) from over10k_n21 limit 10; explain vectorization detail -select t, s, d, avg(d) over (partition by t order by s,d desc nulls first rows between 5 preceding and 5 following) from over10k limit 10; -select t, s, d, avg(d) over (partition by t order by s,d desc nulls first rows between 5 preceding and 5 following) from over10k limit 10; +select t, s, d, avg(d) over (partition by t order by s,d desc nulls first rows between 5 preceding and 5 following) from over10k_n21 limit 10; +select t, s, d, avg(d) over (partition by t order by s,d desc nulls first rows between 5 preceding and 5 following) from over10k_n21 limit 10; explain vectorization detail -select ts, s, sum(i) over(partition by ts order by s nulls last) from over10k limit 10 offset 3; -select ts, s, sum(i) over(partition by ts order by s nulls last) from over10k limit 10 offset 3; +select ts, s, sum(i) over(partition by ts order by s nulls last) from over10k_n21 limit 10 offset 3; +select ts, s, sum(i) over(partition by ts order by s nulls last) from over10k_n21 limit 10 offset 3; explain vectorization detail -select s, i, round(sum(d) over (partition by s order by i desc nulls last) , 3) from over10k limit 5; -select s, i, round(sum(d) over (partition by s order by i desc nulls last) , 3) from over10k limit 5; +select s, i, round(sum(d) over (partition by s order by i desc nulls last) , 3) from over10k_n21 limit 5; +select s, i, round(sum(d) over (partition by s order by i desc nulls last) , 3) from over10k_n21 limit 5; explain vectorization detail -select s, i, round(avg(d) over (partition by s order by i desc nulls last) / 10.0 , 3) from over10k limit 5; -select s, i, round(avg(d) over (partition by s order by i desc nulls last) / 10.0 , 3) from over10k limit 5; +select s, i, round(avg(d) over (partition by s order by i desc nulls last) / 10.0 , 3) from over10k_n21 limit 5; +select s, i, round(avg(d) over (partition by s order by i desc nulls last) / 10.0 , 3) from over10k_n21 limit 5; explain vectorization detail -select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),3) from over10k window w1 as (partition by s order by i nulls last) limit 5; -select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),3) from over10k window w1 as (partition by s order by i nulls last) limit 5; +select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),3) from over10k_n21 window w1 as (partition by s order by i nulls last) limit 5; +select s, 
i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),3) from over10k_n21 window w1 as (partition by s order by i nulls last) limit 5; diff --git a/ql/src/test/queries/clientpositive/vector_windowing_range_multiorder.q b/ql/src/test/queries/clientpositive/vector_windowing_range_multiorder.q index 694431c176..fe964351e2 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_range_multiorder.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_range_multiorder.q @@ -4,9 +4,9 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table over10k; +drop table over10k_n5; -create table over10k( +create table over10k_n5( t tinyint, si smallint, i int, @@ -21,48 +21,48 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n5; explain vectorization detail -select first_value(t) over ( partition by si order by i, b ) from over10k limit 100; -select first_value(t) over ( partition by si order by i, b ) from over10k limit 100; +select first_value(t) over ( partition by si order by i, b ) from over10k_n5 limit 100; +select first_value(t) over ( partition by si order by i, b ) from over10k_n5 limit 100; explain vectorization detail -select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k limit 100; -select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k limit 100; +select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k_n5 limit 100; +select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k_n5 limit 100; explain vectorization detail -select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k limit 100; -select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k limit 100; +select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k_n5 limit 100; +select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k_n5 limit 100; explain vectorization detail -select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k; -select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k; +select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k_n5; +select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k_n5; explain vectorization detail -select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100; -select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100; +select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n5 limit 100; +select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n5 limit 100; explain 
vectorization detail -select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100; -select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100; +select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n5 limit 100; +select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n5 limit 100; explain vectorization detail -select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k limit 100; -select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k limit 100; +select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k_n5 limit 100; +select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k_n5 limit 100; explain vectorization detail -select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k limit 100; -select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k limit 100; +select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k_n5 limit 100; +select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k_n5 limit 100; explain vectorization detail -select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k limit 100; -select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k limit 100; +select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k_n5 limit 100; +select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k_n5 limit 100; explain vectorization detail -select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100; -select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100; +select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n5 limit 100; +select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n5 limit 100; explain vectorization detail -select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100; -select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last 
range between unbounded preceding and unbounded following) from over10k limit 100; +select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n5 limit 100; +select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n5 limit 100; diff --git a/ql/src/test/queries/clientpositive/vector_windowing_rank.q b/ql/src/test/queries/clientpositive/vector_windowing_rank.q index 9f36330da3..43293897e5 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_rank.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_rank.q @@ -4,9 +4,9 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table over10k; +drop table over10k_n0; -create table over10k( +create table over10k_n0( t tinyint, si smallint, i int, @@ -21,23 +21,23 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n0; explain vectorization detail -select s, rank() over (partition by f order by t) from over10k limit 100; -select s, rank() over (partition by f order by t) from over10k limit 100; +select s, rank() over (partition by f order by t) from over10k_n0 limit 100; +select s, rank() over (partition by f order by t) from over10k_n0 limit 100; explain vectorization detail -select s, dense_rank() over (partition by ts order by i,s desc) from over10k limit 100; -select s, dense_rank() over (partition by ts order by i,s desc) from over10k limit 100; +select s, dense_rank() over (partition by ts order by i,s desc) from over10k_n0 limit 100; +select s, dense_rank() over (partition by ts order by i,s desc) from over10k_n0 limit 100; explain vectorization detail -select s, cume_dist() over (partition by bo order by b,s) from over10k limit 100; -select s, cume_dist() over (partition by bo order by b,s) from over10k limit 100; +select s, cume_dist() over (partition by bo order by b,s) from over10k_n0 limit 100; +select s, cume_dist() over (partition by bo order by b,s) from over10k_n0 limit 100; explain vectorization detail -select s, percent_rank() over (partition by `dec` order by f) from over10k limit 100; -select s, percent_rank() over (partition by `dec` order by f) from over10k limit 100; +select s, percent_rank() over (partition by `dec` order by f) from over10k_n0 limit 100; +select s, percent_rank() over (partition by `dec` order by f) from over10k_n0 limit 100; -- If following tests fail, look for the comments in class PTFPPD::process() @@ -48,8 +48,8 @@ from rank() over (partition by ts order by `dec`) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n0 other + join over10k_n0 on (other.b = over10k_n0.b) ) joined ) ranked where rnk = 1 limit 10; @@ -59,8 +59,8 @@ from rank() over (partition by ts order by `dec`) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n0 other + join over10k_n0 on (other.b = over10k_n0.b) ) joined ) ranked where rnk = 1 limit 10; @@ -72,8 +72,8 @@ from rank() over (partition by ts) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from 
over10k_n0 other + join over10k_n0 on (other.b = over10k_n0.b) ) joined ) ranked where `dec` = 89.5 limit 10; @@ -83,8 +83,8 @@ from rank() over (partition by ts) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n0 other + join over10k_n0 on (other.b = over10k_n0.b) ) joined ) ranked where `dec` = 89.5 limit 10; @@ -96,8 +96,8 @@ from rank() over (partition by ts order by `dec`) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n0 other + join over10k_n0 on (other.b = over10k_n0.b) where other.t < 10 ) joined ) ranked @@ -108,8 +108,8 @@ from rank() over (partition by ts order by `dec`) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n0 other + join over10k_n0 on (other.b = over10k_n0.b) where other.t < 10 ) joined ) ranked diff --git a/ql/src/test/queries/clientpositive/vector_windowing_streaming.q b/ql/src/test/queries/clientpositive/vector_windowing_streaming.q index e59872ac0f..e1011f9949 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_streaming.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_streaming.q @@ -6,9 +6,9 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table over10k; +drop table over10k_n8; -create table over10k( +create table over10k_n8( t tinyint, si smallint, i int, @@ -23,7 +23,7 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n8; set hive.limit.pushdown.memory.usage=.8; @@ -46,18 +46,18 @@ select * from ( select p_mfgr, rank() over(partition by p_mfgr order by p_name) r from part) a where r < 2; --- over10k tests +-- over10k_n8 tests explain vectorization detail select * -from (select t, f, rank() over(partition by t order by f) r from over10k) a +from (select t, f, rank() over(partition by t order by f) r from over10k_n8) a where r < 6 and t < 5; select * -from (select t, f, rank() over(partition by t order by f) r from over10k) a +from (select t, f, rank() over(partition by t order by f) r from over10k_n8) a where r < 6 and t < 5; select * -from (select t, f, row_number() over(partition by t order by f) r from over10k) a +from (select t, f, row_number() over(partition by t order by f) r from over10k_n8) a where r < 8 and t < 0; set hive.vectorized.execution.enabled=false; diff --git a/ql/src/test/queries/clientpositive/vector_windowing_windowspec.q b/ql/src/test/queries/clientpositive/vector_windowing_windowspec.q index f2836c6095..a968e0b6ca 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_windowspec.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_windowspec.q @@ -4,9 +4,9 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table over10k; +drop table over10k_n1; -create table over10k( +create table over10k_n1( t tinyint, si smallint, i int, @@ -21,50 +21,50 @@ create table over10k( row format delimited fields terminated by '|'; -load data local inpath '../../data/files/over10k' into table over10k; +load data local inpath '../../data/files/over10k' into table over10k_n1; explain vectorization detail -select s, sum(b) over (partition by i order by s,b rows 
unbounded preceding) from over10k limit 100; -select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k limit 100; +select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k_n1 limit 100; +select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k_n1 limit 100; explain vectorization detail -select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k limit 100; -select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k limit 100; +select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k_n1 limit 100; +select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k_n1 limit 100; explain vectorization detail -select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k limit 100; -select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k limit 100; +select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k_n1 limit 100; +select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k_n1 limit 100; explain vectorization detail -select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k limit 100; -select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k limit 100; +select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k_n1 limit 100; +select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k_n1 limit 100; explain vectorization detail -select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k limit 100; -select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k limit 100; +select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k_n1 limit 100; +select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k_n1 limit 100; explain vectorization detail -select s, sum(i) over(partition by ts order by s) from over10k limit 100; -select s, sum(i) over(partition by ts order by s) from over10k limit 100; +select s, sum(i) over(partition by ts order by s) from over10k_n1 limit 100; +select s, sum(i) over(partition by ts order by s) from over10k_n1 limit 100; explain vectorization detail -select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k limit 100; -select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k limit 100; +select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k_n1 limit 100; +select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k_n1 limit 100; explain vectorization detail -select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100; -select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100; +select f, sum(f) over (partition by ts 
order by f rows between 2 preceding and 1 preceding) from over10k_n1 limit 100; +select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n1 limit 100; explain vectorization detail -select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k limit 7; -select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k limit 7; +select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k_n1 limit 7; +select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k_n1 limit 7; explain vectorization detail -select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i) limit 7; -select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i) limit 7; +select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n1 window w1 as (partition by s order by i) limit 7; +select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n1 window w1 as (partition by s order by i) limit 7; set hive.cbo.enable=false; -- HIVE-9228 explain vectorization detail -select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i)) X limit 7; -select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i)) X limit 7; +select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n1 window w1 as (partition by s order by i)) X limit 7; +select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n1 window w1 as (partition by s order by i)) X limit 7; diff --git a/ql/src/test/queries/clientpositive/vector_windowing_windowspec4.q b/ql/src/test/queries/clientpositive/vector_windowing_windowspec4.q index a787a43e9d..3eeb8390ee 100644 --- a/ql/src/test/queries/clientpositive/vector_windowing_windowspec4.q +++ b/ql/src/test/queries/clientpositive/vector_windowing_windowspec4.q @@ -6,12 +6,12 @@ SET hive.vectorized.execution.reduce.enabled=true; set hive.vectorized.execution.ptf.enabled=true; set hive.fetch.task.conversion=none; -drop table if exists smalltable_windowing; +drop table if exists smalltable_windowing_n0; -create table smalltable_windowing( +create table smalltable_windowing_n0( i int, type string); -insert into smalltable_windowing values(3, 'a'), (1, 'a'), (2, 'a'); +insert into smalltable_windowing_n0 values(3, 'a'), (1, 'a'), (2, 'a'); explain vectorization detail select type, i, @@ -23,7 +23,7 @@ avg(i) over (partition by type order by i rows between 1 preceding and 7 followi sum(i) over (partition by type order by i rows between 1 preceding and 7 following), collect_set(i) over (partition by type order by i rows between 1 preceding and 7 following), count(i) over (partition by type order by i rows between 1 preceding and 7 following) -from smalltable_windowing; +from smalltable_windowing_n0; select type, i, max(i) over (partition by type order by i rows between 1 preceding and 7 following), @@ -34,4 +34,4 @@ avg(i) over (partition by type order by i rows between 1 preceding and 7 followi sum(i) over (partition by type order by i rows between 1 preceding and 7 following), collect_set(i) over (partition by type order by i rows between 1 preceding and 7 following), 
count(i) over (partition by type order by i rows between 1 preceding and 7 following) -from smalltable_windowing; +from smalltable_windowing_n0; diff --git a/ql/src/test/queries/clientpositive/vectorization_0.q b/ql/src/test/queries/clientpositive/vectorization_0.q index 7a01f9c3cf..543029f17e 100644 --- a/ql/src/test/queries/clientpositive/vectorization_0.q +++ b/ql/src/test/queries/clientpositive/vectorization_0.q @@ -268,12 +268,12 @@ explain extended select count(*),cstring1 from alltypesorc where cstring1='biolo or cstring1='topology' group by cstring1 order by cstring1; -drop table if exists cast_string_to_int_1; -drop table if exists cast_string_to_int_2; +drop table if exists cast_string_to_int_1_n0; +drop table if exists cast_string_to_int_2_n0; -create table cast_string_to_int_1 as select CAST(CAST(key as float) as string),value from srcbucket; -create table cast_string_to_int_2(i int,s string); -insert overwrite table cast_string_to_int_2 select * from cast_string_to_int_1; +create table cast_string_to_int_1_n0 as select CAST(CAST(key as float) as string),value from srcbucket; +create table cast_string_to_int_2_n0(i int,s string); +insert overwrite table cast_string_to_int_2_n0 select * from cast_string_to_int_1_n0; --moving ALL_1 system test here select all key from src; diff --git a/ql/src/test/queries/clientpositive/vectorization_input_format_excludes.q b/ql/src/test/queries/clientpositive/vectorization_input_format_excludes.q index ed0eb56a57..33e0b7a4fa 100644 --- a/ql/src/test/queries/clientpositive/vectorization_input_format_excludes.q +++ b/ql/src/test/queries/clientpositive/vectorization_input_format_excludes.q @@ -7,7 +7,7 @@ set hive.vectorized.execution.reduce.enabled=true; -- SORT_QUERY_RESULTS -create table if not exists alltypes_parquet ( +create table if not exists alltypes_parquet_n0 ( cint int, ctinyint tinyint, csmallint smallint, @@ -15,7 +15,7 @@ create table if not exists alltypes_parquet ( cdouble double, cstring1 string) stored as parquet; -insert overwrite table alltypes_parquet +insert overwrite table alltypes_parquet_n0 select cint, ctinyint, csmallint, @@ -27,12 +27,12 @@ insert overwrite table alltypes_parquet -- test native fileinputformat vectorization explain vectorization select * - from alltypes_parquet + from alltypes_parquet_n0 where cint = 528534767 limit 10; select * - from alltypes_parquet + from alltypes_parquet_n0 where cint = 528534767 limit 10; @@ -42,7 +42,7 @@ explain vectorization select ctinyint, count(cstring1), avg(cfloat), stddev_pop(cdouble) - from alltypes_parquet + from alltypes_parquet_n0 group by ctinyint; select ctinyint, @@ -51,19 +51,19 @@ select ctinyint, count(cstring1), avg(cfloat), stddev_pop(cdouble) - from alltypes_parquet + from alltypes_parquet_n0 group by ctinyint; -- exclude MapredParquetInputFormat from vectorization, this should cause mapwork vectorization to be disabled set hive.vectorized.input.format.excludes=org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat; explain vectorization select * - from alltypes_parquet + from alltypes_parquet_n0 where cint = 528534767 limit 10; select * - from alltypes_parquet + from alltypes_parquet_n0 where cint = 528534767 limit 10; @@ -73,7 +73,7 @@ explain vectorization select ctinyint, count(cstring1), avg(cfloat), stddev_pop(cdouble) - from alltypes_parquet + from alltypes_parquet_n0 group by ctinyint; select ctinyint, @@ -82,7 +82,7 @@ select ctinyint, count(cstring1), avg(cfloat), stddev_pop(cdouble) - from alltypes_parquet + from alltypes_parquet_n0 group by 
ctinyint; @@ -90,12 +90,12 @@ select ctinyint, set hive.vectorized.input.format.excludes=; explain vectorization select * - from alltypes_parquet + from alltypes_parquet_n0 where cint = 528534767 limit 10; select * - from alltypes_parquet + from alltypes_parquet_n0 where cint = 528534767 limit 10; @@ -105,7 +105,7 @@ explain vectorization select ctinyint, count(cstring1), avg(cfloat), stddev_pop(cdouble) - from alltypes_parquet + from alltypes_parquet_n0 group by ctinyint; select ctinyint, @@ -114,7 +114,7 @@ select ctinyint, count(cstring1), avg(cfloat), stddev_pop(cdouble) - from alltypes_parquet + from alltypes_parquet_n0 group by ctinyint; @@ -124,7 +124,7 @@ set hive.vectorized.use.row.serde.deserialize=false; set hive.vectorized.use.vector.serde.deserialize=false; -create table if not exists alltypes_orc ( +create table if not exists alltypes_orc_n2 ( cint int, ctinyint tinyint, csmallint smallint, @@ -132,7 +132,7 @@ create table if not exists alltypes_orc ( cdouble double, cstring1 string) stored as orc; -insert overwrite table alltypes_orc +insert overwrite table alltypes_orc_n2 select cint, ctinyint, csmallint, @@ -142,12 +142,12 @@ insert overwrite table alltypes_orc from alltypesorc; explain vectorization select * - from alltypes_orc + from alltypes_orc_n2 where cint = 528534767 limit 10; select * - from alltypes_orc + from alltypes_orc_n2 where cint = 528534767 limit 10; @@ -157,7 +157,7 @@ explain vectorization select ctinyint, count(cstring1), avg(cfloat), stddev_pop(cdouble) - from alltypes_orc + from alltypes_orc_n2 group by ctinyint; select ctinyint, @@ -166,7 +166,7 @@ select ctinyint, count(cstring1), avg(cfloat), stddev_pop(cdouble) - from alltypes_orc + from alltypes_orc_n2 group by ctinyint; -- test when input format is excluded row serde is used for vectorization diff --git a/ql/src/test/queries/clientpositive/vectorization_parquet_ppd_decimal.q b/ql/src/test/queries/clientpositive/vectorization_parquet_ppd_decimal.q index 4bc8f2f901..0b0811b055 100644 --- a/ql/src/test/queries/clientpositive/vectorization_parquet_ppd_decimal.q +++ b/ql/src/test/queries/clientpositive/vectorization_parquet_ppd_decimal.q @@ -8,162 +8,162 @@ SET mapred.min.split.size=1000; SET mapred.max.split.size=5000; set hive.llap.cache.allow.synthetic.fileid=true; -create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet; +create table newtypestbl_n1(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet; -insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl; +insert overwrite table newtypestbl_n1 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl; -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where d=0.22; +select * from newtypestbl_n1 where d=0.22; set hive.optimize.index.filter=true; -select * from newtypestbl where d=0.22; +select * from newtypestbl_n1 where d=0.22; set hive.optimize.index.filter=false; -select * from newtypestbl where d='0.22'; +select * from newtypestbl_n1 where d='0.22'; set 
hive.optimize.index.filter=true; -select * from newtypestbl where d='0.22'; +select * from newtypestbl_n1 where d='0.22'; set hive.optimize.index.filter=false; -select * from newtypestbl where d=cast('0.22' as float); +select * from newtypestbl_n1 where d=cast('0.22' as float); set hive.optimize.index.filter=true; -select * from newtypestbl where d=cast('0.22' as float); +select * from newtypestbl_n1 where d=cast('0.22' as float); set hive.optimize.index.filter=false; -select * from newtypestbl where d!=0.22; +select * from newtypestbl_n1 where d!=0.22; set hive.optimize.index.filter=true; -select * from newtypestbl where d!=0.22; +select * from newtypestbl_n1 where d!=0.22; set hive.optimize.index.filter=false; -select * from newtypestbl where d!='0.22'; +select * from newtypestbl_n1 where d!='0.22'; set hive.optimize.index.filter=true; -select * from newtypestbl where d!='0.22'; +select * from newtypestbl_n1 where d!='0.22'; set hive.optimize.index.filter=false; -select * from newtypestbl where d!=cast('0.22' as float); +select * from newtypestbl_n1 where d!=cast('0.22' as float); set hive.optimize.index.filter=true; -select * from newtypestbl where d!=cast('0.22' as float); +select * from newtypestbl_n1 where d!=cast('0.22' as float); set hive.optimize.index.filter=false; -select * from newtypestbl where d<11.22; +select * from newtypestbl_n1 where d<11.22; set hive.optimize.index.filter=true; -select * from newtypestbl where d<11.22; +select * from newtypestbl_n1 where d<11.22; set hive.optimize.index.filter=false; -select * from newtypestbl where d<'11.22'; +select * from newtypestbl_n1 where d<'11.22'; set hive.optimize.index.filter=true; -select * from newtypestbl where d<'11.22'; +select * from newtypestbl_n1 where d<'11.22'; set hive.optimize.index.filter=false; -select * from newtypestbl where d ) STORED AS PARQUET; insert into parquet_project_test -select ctinyint, map("color","red") from parquet_types_staging +select ctinyint, map("color","red") from parquet_types_staging_n0 where ctinyint = 1; insert into parquet_project_test -select ctinyint, map("color","green") from parquet_types_staging +select ctinyint, map("color","green") from parquet_types_staging_n0 where ctinyint = 2; insert into parquet_project_test -select ctinyint, map("color","blue") from parquet_types_staging +select ctinyint, map("color","blue") from parquet_types_staging_n0 where ctinyint = 3; -- no columns in the projection @@ -93,4 +93,4 @@ select count(*) from parquet_nullsplit where len = '99'; drop table parquet_nullsplit; drop table parquet_project_test; -drop table parquet_types_staging; +drop table parquet_types_staging_n0; diff --git a/ql/src/test/queries/clientpositive/vectorization_part.q b/ql/src/test/queries/clientpositive/vectorization_part.q index 05068f5898..60f3f84b02 100644 --- a/ql/src/test/queries/clientpositive/vectorization_part.q +++ b/ql/src/test/queries/clientpositive/vectorization_part.q @@ -3,9 +3,9 @@ set hive.mapred.mode=nonstrict; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; -CREATE TABLE alltypesorc_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC; -insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc limit 100; -insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc limit 
100; +CREATE TABLE alltypesorc_part_n0(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC; +insert overwrite table alltypesorc_part_n0 partition (ds='2011') select * from alltypesorc limit 100; +insert overwrite table alltypesorc_part_n0 partition (ds='2012') select * from alltypesorc limit 100; -select count(cdouble), cint from alltypesorc_part where ds='2011' group by cint limit 10; -select count(*) from alltypesorc_part A join alltypesorc_part B on A.ds=B.ds; +select count(cdouble), cint from alltypesorc_part_n0 where ds='2011' group by cint limit 10; +select count(*) from alltypesorc_part_n0 A join alltypesorc_part_n0 B on A.ds=B.ds; diff --git a/ql/src/test/queries/clientpositive/vectorized_case.q b/ql/src/test/queries/clientpositive/vectorized_case.q index 1852226950..d68a8ef9cd 100644 --- a/ql/src/test/queries/clientpositive/vectorized_case.q +++ b/ql/src/test/queries/clientpositive/vectorized_case.q @@ -76,49 +76,49 @@ select from alltypesorc; -- add test for VectorUDFAdaptor call IfExprConditionalFilter -CREATE TABLE test_1 (member DECIMAL , attr DECIMAL) STORED AS ORC; +CREATE TABLE test_1_n3 (member DECIMAL , attr DECIMAL) STORED AS ORC; -INSERT INTO test_1 VALUES (3.0,1.0),(2.0,2.0),(1.0,3.0); +INSERT INTO test_1_n3 VALUES (3.0,1.0),(2.0,2.0),(1.0,3.0); --for length=3 EXPLAIN VECTORIZATION DETAIL -SELECT CASE WHEN member =1.0 THEN attr+1.0 ELSE attr+2.0 END FROM test_1; +SELECT CASE WHEN member =1.0 THEN attr+1.0 ELSE attr+2.0 END FROM test_1_n3; -SELECT CASE WHEN member =1.0 THEN attr+1.0 ELSE attr+2.0 END FROM test_1; +SELECT CASE WHEN member =1.0 THEN attr+1.0 ELSE attr+2.0 END FROM test_1_n3; --for length=2 and the expr2 is null EXPLAIN VECTORIZATION DETAIL -SELECT CASE WHEN member =1.0 THEN 1.0 ELSE attr+2.0 END FROM test_1; +SELECT CASE WHEN member =1.0 THEN 1.0 ELSE attr+2.0 END FROM test_1_n3; -SELECT CASE WHEN member =1.0 THEN 1.0 ELSE attr+2.0 END FROM test_1; +SELECT CASE WHEN member =1.0 THEN 1.0 ELSE attr+2.0 END FROM test_1_n3; --for length=2 and the expr3 is null EXPLAIN VECTORIZATION DETAIL -SELECT CASE WHEN member =1.0 THEN attr+1.0 ELSE 2.0 END FROM test_1; +SELECT CASE WHEN member =1.0 THEN attr+1.0 ELSE 2.0 END FROM test_1_n3; -SELECT CASE WHEN member =1.0 THEN attr+1.0 ELSE 2.0 END FROM test_1; +SELECT CASE WHEN member =1.0 THEN attr+1.0 ELSE 2.0 END FROM test_1_n3; -- add test for IF**.java call IfExprConditionalFilter -CREATE TABLE test_2 (member BIGINT, attr BIGINT) STORED AS ORC; +CREATE TABLE test_2_n3 (member BIGINT, attr BIGINT) STORED AS ORC; -INSERT INTO test_2 VALUES (3,1),(2,2),(1,3); +INSERT INTO test_2_n3 VALUES (3,1),(2,2),(1,3); --for length=3 EXPLAIN VECTORIZATION DETAIL -SELECT CASE WHEN member=1 THEN attr+1 else attr+2 END FROM test_2; +SELECT CASE WHEN member=1 THEN attr+1 else attr+2 END FROM test_2_n3; -SELECT CASE WHEN member=1 THEN attr+1 else attr+2 END FROM test_2; +SELECT CASE WHEN member=1 THEN attr+1 else attr+2 END FROM test_2_n3; --for length=2 and the detail2 is null EXPLAIN VECTORIZATION DETAIL -SELECT CASE WHEN member=1 THEN null else attr+2 END FROM test_2; +SELECT CASE WHEN member=1 THEN null else attr+2 END FROM test_2_n3; -SELECT CASE WHEN member=1 THEN null else attr+2 END FROM test_2; +SELECT CASE WHEN member=1 THEN null else attr+2 END FROM test_2_n3; --for length=2 and the detail3 is null EXPLAIN VECTORIZATION DETAIL 
-SELECT CASE WHEN member=1 THEN attr+1 else null END FROM test_2; +SELECT CASE WHEN member=1 THEN attr+1 else null END FROM test_2_n3; -SELECT CASE WHEN member=1 THEN attr+1 else null END FROM test_2; +SELECT CASE WHEN member=1 THEN attr+1 else null END FROM test_2_n3; select count(*), sum(a.ceven) diff --git a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q index e26a54c6ad..e88f054eb6 100644 --- a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q +++ b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q @@ -9,21 +9,21 @@ set hive.fetch.task.conversion=none; -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. -CREATE TABLE date_udf_flight ( +CREATE TABLE date_udf_flight_n0 ( origin_city_name STRING, dest_city_name STRING, fl_date DATE, arr_delay FLOAT, fl_num INT ); -LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight; +LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight_n0; CREATE TABLE date_udf_flight_orc ( fl_date DATE, fl_time TIMESTAMP ) STORED AS ORC; -INSERT INTO TABLE date_udf_flight_orc SELECT fl_date, to_utc_timestamp(fl_date, 'America/Los_Angeles') FROM date_udf_flight; +INSERT INTO TABLE date_udf_flight_orc SELECT fl_date, to_utc_timestamp(fl_date, 'America/Los_Angeles') FROM date_udf_flight_n0; SELECT * FROM date_udf_flight_orc; diff --git a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q index 75edd41461..03e3d7b3ce 100644 --- a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q +++ b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q @@ -15,66 +15,66 @@ set hive.tez.min.bloom.filter.entries=1; select distinct ds from srcpart; select distinct hr from srcpart; -EXPLAIN VECTORIZATION create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds; -create table srcpart_date stored as orc as select ds as ds, ds as `date` from srcpart group by ds; -create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr; -create table srcpart_date_hour stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr; -create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr; +EXPLAIN VECTORIZATION create table srcpart_date_n8 as select ds as ds, ds as `date` from srcpart group by ds; +create table srcpart_date_n8 stored as orc as select ds as ds, ds as `date` from srcpart group by ds; +create table srcpart_hour_n2 stored as orc as select hr as hr, hr as hour from srcpart group by hr; +create table srcpart_date_hour_n2 stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr; +create table srcpart_double_hour_n2 stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr; -- single column, single key -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; +select count(*) from 
srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; set hive.tez.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where ds = '2008-04-08'; -- multiple sources, single key -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11; set hive.tez.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; -- multiple columns single source -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; -select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour_n2 on (srcpart.ds = srcpart_date_hour_n2.ds and srcpart.hr = srcpart_date_hour_n2.hr) where srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11; +select count(*) from srcpart join 
srcpart_date_hour_n2 on (srcpart.ds = srcpart_date_hour_n2.ds and srcpart.hr = srcpart_date_hour_n2.hr) where srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11; set hive.tez.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; -select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour_n2 on (srcpart.ds = srcpart_date_hour_n2.ds and srcpart.hr = srcpart_date_hour_n2.hr) where srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_date_hour_n2 on (srcpart.ds = srcpart_date_hour_n2.ds and srcpart.hr = srcpart_date_hour_n2.hr) where srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where ds = '2008-04-08' and hr = 11; -- empty set -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = 'I DONT EXIST'; set hive.tez.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = 'I DONT EXIST'; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where ds = 'I DONT EXIST'; -- expressions -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; -select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; -select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr = cast(srcpart_double_hour_n2.hr/2 as int)) where srcpart_double_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr = cast(srcpart_double_hour_n2.hr/2 as int)) where srcpart_double_hour_n2.hour = 11; +EXPLAIN VECTORIZATION select count(*) from 
srcpart join srcpart_double_hour_n2 on (srcpart.hr*2 = srcpart_double_hour_n2.hr) where srcpart_double_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr*2 = srcpart_double_hour_n2.hr) where srcpart_double_hour_n2.hour = 11; set hive.tez.dynamic.partition.pruning=false; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; -select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; -select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr = cast(srcpart_double_hour_n2.hr/2 as int)) where srcpart_double_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr = cast(srcpart_double_hour_n2.hr/2 as int)) where srcpart_double_hour_n2.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr*2 = srcpart_double_hour_n2.hr) where srcpart_double_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr*2 = srcpart_double_hour_n2.hr) where srcpart_double_hour_n2.hour = 11; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where hr = 11; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11; -select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour_n2 on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour_n2.hr as string)) where srcpart_double_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_double_hour_n2 on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour_n2.hr as string)) where srcpart_double_hour_n2.hour = 11; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where cast(hr as string) = 11; @@ -85,29 +85,29 @@ select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart gr select count(*) from srcpart where ds = '2008-04-08'; -- non-equi join -EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr); -select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr); +EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour_n2 where (srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11) and (srcpart.ds = srcpart_date_hour_n2.ds or srcpart.hr = srcpart_date_hour_n2.hr); +select count(*) from srcpart, srcpart_date_hour_n2 where (srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11) and (srcpart.ds = srcpart_date_hour_n2.ds or srcpart.hr = 
srcpart_date_hour_n2.hr); -- old style join syntax -EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; -select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; +EXPLAIN VECTORIZATION select count(*) from srcpart, srcpart_date_hour_n2 where srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11 and srcpart.ds = srcpart_date_hour_n2.ds and srcpart.hr = srcpart_date_hour_n2.hr; +select count(*) from srcpart, srcpart_date_hour_n2 where srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11 and srcpart.ds = srcpart_date_hour_n2.ds and srcpart.hr = srcpart_date_hour_n2.hr; -- left join -EXPLAIN VECTORIZATION select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -EXPLAIN VECTORIZATION select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart left join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart_date_n8 left join srcpart on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; -- full outer -EXPLAIN VECTORIZATION select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart full outer join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; -- with static pruning -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11 and srcpart.hr = 11; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11 and srcpart.hr = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and 
srcpart.hr = 13; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart.hr = 13; -- union + subquery EXPLAIN VECTORIZATION select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); @@ -122,31 +122,31 @@ set hive.auto.convert.join.noconditionaltask = true; set hive.auto.convert.join.noconditionaltask.size = 10000000; -- single column, single key -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; select count(*) from srcpart where ds = '2008-04-08'; -- multiple sources, single key -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11; select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; -- multiple columns single source -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; -select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_hour_n2 on (srcpart.ds = srcpart_date_hour_n2.ds and srcpart.hr = srcpart_date_hour_n2.hr) where srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_date_hour_n2 on (srcpart.ds = srcpart_date_hour_n2.ds and srcpart.hr = srcpart_date_hour_n2.hr) where srcpart_date_hour_n2.`date` = '2008-04-08' and srcpart_date_hour_n2.hour = 11; select count(*) from srcpart where ds = '2008-04-08' and hr = 11; -- empty set -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on 
(srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = 'I DONT EXIST'; -- expressions -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; -select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; -select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr = cast(srcpart_double_hour_n2.hr/2 as int)) where srcpart_double_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr = cast(srcpart_double_hour_n2.hr/2 as int)) where srcpart_double_hour_n2.hour = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr*2 = srcpart_double_hour_n2.hr) where srcpart_double_hour_n2.hour = 11; +select count(*) from srcpart join srcpart_double_hour_n2 on (srcpart.hr*2 = srcpart_double_hour_n2.hr) where srcpart_double_hour_n2.hour = 11; select count(*) from srcpart where hr = 11; set hive.stats.fetch.column.stats=false; @@ -158,21 +158,21 @@ select count(*) from srcpart where ds = '2008-04-08'; set hive.stats.fetch.column.stats=true; -- left join -EXPLAIN VECTORIZATION select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -EXPLAIN VECTORIZATION select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart left join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart_date_n8 left join srcpart on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; -- full outer -EXPLAIN VECTORIZATION select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN VECTORIZATION select count(*) from srcpart full outer join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) where srcpart_date_n8.`date` = '2008-04-08'; -- with static pruning -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; -EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.`date` = '2008-04-08' and 
srcpart.hr = 13; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11 and srcpart.hr = 11; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart_hour_n2.hour = 11 and srcpart.hr = 11; +EXPLAIN VECTORIZATION select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart.hr = 13; +select count(*) from srcpart join srcpart_date_n8 on (srcpart.ds = srcpart_date_n8.ds) join srcpart_hour_n2 on (srcpart.hr = srcpart_hour_n2.hr) +where srcpart_date_n8.`date` = '2008-04-08' and srcpart.hr = 13; -- union + subquery EXPLAIN VECTORIZATION select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); @@ -180,20 +180,20 @@ select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) fro -- different file format -create table srcpart_orc (key int, value string) partitioned by (ds string, hr int) stored as orc; +create table srcpart_orc_n0 (key int, value string) partitioned by (ds string, hr int) stored as orc; set hive.exec.dynamic.partition.mode=nonstrict; set hive.vectorized.execution.enabled=false; set hive.exec.max.dynamic.partitions=1000; -insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart; -EXPLAIN VECTORIZATION select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09'); -select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09'); +insert into table srcpart_orc_n0 partition (ds, hr) select key, value, ds, hr from srcpart; +EXPLAIN VECTORIZATION select count(*) from srcpart_orc_n0 join srcpart_date_hour_n2 on (srcpart_orc_n0.ds = srcpart_date_hour_n2.ds and srcpart_orc_n0.hr = srcpart_date_hour_n2.hr) where srcpart_date_hour_n2.hour = 11 and (srcpart_date_hour_n2.`date` = '2008-04-08' or srcpart_date_hour_n2.`date` = '2008-04-09'); +select count(*) from srcpart_orc_n0 join srcpart_date_hour_n2 on (srcpart_orc_n0.ds = srcpart_date_hour_n2.ds and srcpart_orc_n0.hr = srcpart_date_hour_n2.hr) where srcpart_date_hour_n2.hour = 11 and (srcpart_date_hour_n2.`date` = '2008-04-08' or srcpart_date_hour_n2.`date` = '2008-04-09'); select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11; -drop table srcpart_orc; -drop table srcpart_date; -drop table srcpart_hour; -drop table srcpart_date_hour; -drop table srcpart_double_hour; +drop table srcpart_orc_n0; +drop table srcpart_date_n8; +drop table srcpart_hour_n2; +drop table srcpart_date_hour_n2; +drop table srcpart_double_hour_n2; diff --git a/ql/src/test/queries/clientpositive/vectorized_join46.q b/ql/src/test/queries/clientpositive/vectorized_join46.q index af155ccbbf..145bc02073 100644 --- 
a/ql/src/test/queries/clientpositive/vectorized_join46.q +++ b/ql/src/test/queries/clientpositive/vectorized_join46.q @@ -5,223 +5,223 @@ set hive.join.emit.interval=2; -- SORT_QUERY_RESULTS -CREATE TABLE test1 (key INT, value INT, col_1 STRING); -INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +CREATE TABLE test1_n14 (key INT, value INT, col_1 STRING); +INSERT INTO test1_n14 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car'); -CREATE TABLE test2 (key INT, value INT, col_2 STRING); -INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), +CREATE TABLE test2_n9 (key INT, value INT, col_2 STRING); +INSERT INTO test2_n9 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None'); -- Basic outer join EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value); -- Conjunction with pred on multiple inputs and single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + AND test1_n14.key between 100 and 102 + AND test2_n9.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + AND test1_n14.key between 100 and 102 + AND test2_n9.key between 100 and 102); -- Conjunction with pred on single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.key between 100 and 102 + AND test2_n9.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.key between 100 and 102 + AND test2_n9.key between 100 and 102); -- Conjunction with pred on multiple inputs and none (left outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value AND true); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value AND true); -- Condition on one input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.key between 100 and 102); -- Disjunction with pred on multiple inputs and single inputs (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102); 
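(An aside, not part of the patch: these join46 variants each get an EXPLAIN because predicate placement changes outer-join semantics, which is exactly what the vectorized residual-filter code must preserve. A minimal HiveQL sketch against the test1_n14/test2_n9 tables created above, contrasting the two placements:

-- ON-clause predicate: restricts only which test2_n9 rows can match;
-- every test1_n14 row still appears, NULL-extended when nothing matches.
SELECT t1.key, t2.key
FROM test1_n14 t1 LEFT OUTER JOIN test2_n9 t2
  ON (t1.value = t2.value AND t2.key BETWEEN 100 AND 102);

-- WHERE-clause predicate: evaluated after the join, so NULL-extended
-- rows fail it and the outer join degenerates into an inner join.
SELECT t1.key, t2.key
FROM test1_n14 t1 LEFT OUTER JOIN test2_n9 t2
  ON (t1.value = t2.value)
WHERE t2.key BETWEEN 100 AND 102;
)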
SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test2_n9.key between 100 and 102); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test2_n9.key between 100 and 102); -- Keys plus residual (left outer join) EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + AND (test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102)); SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n14 LEFT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + AND (test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102)); -- Disjunction with pred on multiple inputs and single inputs (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR 
test2_n9.key between 100 and 102); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test2_n9.key between 100 and 102); -- Keys plus residual (right outer join) EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + AND (test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102)); SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n14 RIGHT OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + AND (test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102)); -- Disjunction with pred on multiple inputs and single inputs (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n14 FULL OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102); +FROM test1_n14 FULL OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102); -- Disjunction with pred on multiple inputs and left input (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n14 FULL OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102); +FROM test1_n14 FULL OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test1_n14.key between 100 and 102); -- Disjunction with pred on multiple inputs and right input (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n14 FULL OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test2_n9.key between 100 and 102); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102); +FROM test1_n14 FULL OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + OR test2_n9.key between 100 and 102); -- Keys plus residual (full outer join) EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n14 FULL OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + AND (test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102)); SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)); +FROM test1_n14 FULL OUTER JOIN test2_n9 +ON (test1_n14.value=test2_n9.value + AND (test1_n14.key between 100 and 102 + OR test2_n9.key between 100 and 102)); diff --git a/ql/src/test/queries/clientpositive/vectorized_parquet_types.q b/ql/src/test/queries/clientpositive/vectorized_parquet_types.q index b122103aa9..f5d28c4260 100644 --- 
a/ql/src/test/queries/clientpositive/vectorized_parquet_types.q +++ b/ql/src/test/queries/clientpositive/vectorized_parquet_types.q @@ -2,12 +2,12 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; set hive.llap.cache.allow.synthetic.fileid=true; -DROP TABLE parquet_types_staging; -DROP TABLE parquet_types; +DROP TABLE parquet_types_staging_n3; +DROP TABLE parquet_types_n2; DROP TABLE IF EXISTS parquet_type_nodict; -- init -CREATE TABLE parquet_types_staging ( +CREATE TABLE parquet_types_staging_n3 ( cint int, ctinyint tinyint, csmallint smallint, @@ -28,7 +28,7 @@ FIELDS TERMINATED BY '|' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'; -CREATE TABLE parquet_types ( +CREATE TABLE parquet_types_n2 ( cint int, ctinyint tinyint, csmallint smallint, @@ -42,26 +42,26 @@ CREATE TABLE parquet_types ( cdecimal decimal(4,2) ) STORED AS PARQUET; -LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging; +LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging_n3; -INSERT OVERWRITE TABLE parquet_types +INSERT OVERWRITE TABLE parquet_types_n2 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, -unhex(cbinary), cdecimal FROM parquet_types_staging; +unhex(cbinary), cdecimal FROM parquet_types_staging_n3; SET hive.vectorized.execution.enabled=true; -- select explain vectorization expression SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, -hex(cbinary), cdecimal FROM parquet_types; +hex(cbinary), cdecimal FROM parquet_types_n2; SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, -hex(cbinary), cdecimal FROM parquet_types; +hex(cbinary), cdecimal FROM parquet_types_n2; explain vectorization expression -SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types; +SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types_n2; -SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types; +SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_types_n2; explain vectorization expression SELECT ctinyint, @@ -71,7 +71,7 @@ SELECT ctinyint, AVG(cfloat), STDDEV_POP(cdouble), MAX(cdecimal) -FROM parquet_types +FROM parquet_types_n2 GROUP BY ctinyint ORDER BY ctinyint; @@ -82,16 +82,16 @@ SELECT ctinyint, AVG(cfloat), STDDEV_POP(cdouble), MAX(cdecimal) -FROM parquet_types +FROM parquet_types_n2 GROUP BY ctinyint ORDER BY ctinyint; -- test with dictionary encoding disabled -create table parquet_type_nodict like parquet_types +create table parquet_type_nodict like parquet_types_n2 stored as parquet tblproperties ("parquet.enable.dictionary"="false"); insert into parquet_type_nodict -select * from parquet_types; +select * from parquet_types_n2; explain vectorization expression @@ -106,21 +106,21 @@ SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar), cdecimal, SIGN(cdecimal) FROM parquet_type_nodict; -- test timestamp vectorization explain vectorization select max(t), min(t) from parquet_type_nodict; select max(t), min(t) from parquet_type_nodict; -- test timestamp columnVector isRepeating -create table test (id int, ts timestamp) stored as parquet tblproperties 
("parquet.enable.dictionary"="false"); +-- test_n10 timestamp columnVector isRepeating +create table test_n10 (id int, ts timestamp) stored as parquet tblproperties ("parquet.enable.dictionary"="false"); -insert into test values (1, '2019-01-01 23:12:45.123456'), (2, '2019-01-01 23:12:45.123456'), (3, '2019-01-01 23:12:45.123456'); +insert into test_n10 values (1, '2019-01-01 23:12:45.123456'), (2, '2019-01-01 23:12:45.123456'), (3, '2019-01-01 23:12:45.123456'); set hive.fetch.task.conversion=none; -select ts from test where id > 1; +select ts from test_n10 where id > 1; --- test null values in timestamp -insert into test values (3, NULL); -select ts from test where id > 1; +-- test_n10 null values in timestamp +insert into test_n10 values (3, NULL); +select ts from test_n10 where id > 1; DROP TABLE parquet_type_nodict; -DROP TABLE test; +DROP TABLE test_n10; diff --git a/ql/src/test/queries/clientpositive/vectorized_ptf.q b/ql/src/test/queries/clientpositive/vectorized_ptf.q index 7f5a055de3..c5a7ddf75e 100644 --- a/ql/src/test/queries/clientpositive/vectorized_ptf.q +++ b/ql/src/test/queries/clientpositive/vectorized_ptf.q @@ -337,7 +337,7 @@ order by p_name); -- 16. testViewAsTableInputToPTF -create view IF NOT EXISTS mfgr_price_view as +create view IF NOT EXISTS mfgr_price_view_n0 as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part_orc @@ -346,14 +346,14 @@ group by p_mfgr, p_brand; explain vectorization detail select p_mfgr, p_brand, s, round(sum(s) over w1,2) as s1 -from noop(on mfgr_price_view +from noop(on mfgr_price_view_n0 partition by p_mfgr order by p_mfgr) window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row); select p_mfgr, p_brand, s, round(sum(s) over w1,2) as s1 -from noop(on mfgr_price_view +from noop(on mfgr_price_view_n0 partition by p_mfgr order by p_mfgr) window w1 as ( partition by p_mfgr order by p_brand rows between 2 preceding and current row); diff --git a/ql/src/test/queries/clientpositive/vectorized_rcfile_columnar.q b/ql/src/test/queries/clientpositive/vectorized_rcfile_columnar.q index ef77d9af1d..6453250925 100644 --- a/ql/src/test/queries/clientpositive/vectorized_rcfile_columnar.q +++ b/ql/src/test/queries/clientpositive/vectorized_rcfile_columnar.q @@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict; --This query must pass even when vectorized reader is not available for --RC files. The query must fall back to the non-vector mode and run successfully. 
-CREATE table columnTable (key STRING, value STRING) +CREATE table columnTable_n0 (key STRING, value STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS @@ -11,10 +11,10 @@ STORED AS OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'; FROM src -INSERT OVERWRITE TABLE columnTable SELECT src.key, src.value ORDER BY src.key, src.value LIMIT 10; -describe columnTable; +INSERT OVERWRITE TABLE columnTable_n0 SELECT src.key, src.value ORDER BY src.key, src.value LIMIT 10; +describe columnTable_n0; SET hive.vectorized.execution.enabled=true; -SELECT key, value FROM columnTable ORDER BY key; +SELECT key, value FROM columnTable_n0 ORDER BY key; diff --git a/ql/src/test/queries/clientpositive/vectorized_timestamp.q b/ql/src/test/queries/clientpositive/vectorized_timestamp.q index 8de4e868a2..7600a30724 100644 --- a/ql/src/test/queries/clientpositive/vectorized_timestamp.q +++ b/ql/src/test/queries/clientpositive/vectorized_timestamp.q @@ -2,40 +2,40 @@ set hive.fetch.task.conversion=none; set hive.explain.user=false; set hive.vectorized.execution.reduce.enabled=true; -DROP TABLE IF EXISTS test; -CREATE TABLE test(ts TIMESTAMP) STORED AS ORC; -INSERT INTO TABLE test VALUES ('0001-01-01 00:00:00.000000000'), ('9999-12-31 23:59:59.999999999'); +DROP TABLE IF EXISTS test_n2; +CREATE TABLE test_n2(ts TIMESTAMP) STORED AS ORC; +INSERT INTO TABLE test_n2 VALUES ('0001-01-01 00:00:00.000000000'), ('9999-12-31 23:59:59.999999999'); SET hive.vectorized.execution.enabled = false; EXPLAIN VECTORIZATION DETAIL -SELECT ts FROM test; +SELECT ts FROM test_n2; -SELECT ts FROM test; +SELECT ts FROM test_n2; -SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test; +SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2; -SELECT ts FROM test WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000'); +SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000'); SET hive.vectorized.execution.enabled = true; -SELECT ts FROM test; +SELECT ts FROM test_n2; EXPLAIN VECTORIZATION DETAIL -SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test; +SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2; -SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test; +SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2; EXPLAIN VECTORIZATION DETAIL -SELECT ts FROM test WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000'); +SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000'); -SELECT ts FROM test WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000'); +SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000'); EXPLAIN VECTORIZATION DETAIL -SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test; +SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test_n2; -SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test; +SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test_n2; EXPLAIN VECTORIZATION DETAIL -SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test; +SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test_n2; -SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test; +SELECT variance(ts), var_pop(ts), 
var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test_n2;
diff --git a/ql/src/test/queries/clientpositive/view.q b/ql/src/test/queries/clientpositive/view.q
index bc193554f9..86bf141dfc 100644
--- a/ql/src/test/queries/clientpositive/view.q
+++ b/ql/src/test/queries/clientpositive/view.q
@@ -1,50 +1,50 @@
CREATE DATABASE db1;
USE db1;
-CREATE TABLE table1 (key STRING, value STRING)
+CREATE TABLE table1_n19 (key STRING, value STRING)
STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
-OVERWRITE INTO TABLE table1;
+OVERWRITE INTO TABLE table1_n19;
-CREATE TABLE table2 (key STRING, value STRING)
+CREATE TABLE table2_n13 (key STRING, value STRING)
STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
-OVERWRITE INTO TABLE table2;
+OVERWRITE INTO TABLE table2_n13;
-- relative reference, no alias
-CREATE VIEW v1 AS SELECT * FROM table1;
+CREATE VIEW v1_n17 AS SELECT * FROM table1_n19;
-- relative reference, aliased
-CREATE VIEW v2 AS SELECT t1.* FROM table1 t1;
+CREATE VIEW v2_n10 AS SELECT t1.* FROM table1_n19 t1;
-- relative reference, multiple tables
-CREATE VIEW v3 AS SELECT t1.*, t2.key k FROM table1 t1 JOIN table2 t2 ON t1.key = t2.key;
+CREATE VIEW v3_n3 AS SELECT t1.*, t2.key k FROM table1_n19 t1 JOIN table2_n13 t2 ON t1.key = t2.key;
-- absolute reference, no alias
-CREATE VIEW v4 AS SELECT * FROM db1.table1;
+CREATE VIEW v4_n3 AS SELECT * FROM db1.table1_n19;
-- absolute reference, aliased
-CREATE VIEW v5 AS SELECT t1.* FROM db1.table1 t1;
+CREATE VIEW v5_n1 AS SELECT t1.* FROM db1.table1_n19 t1;
-- absolute reference, multiple tables
-CREATE VIEW v6 AS SELECT t1.*, t2.key k FROM db1.table1 t1 JOIN db1.table2 t2 ON t1.key = t2.key;
+CREATE VIEW v6 AS SELECT t1.*, t2.key k FROM db1.table1_n19 t1 JOIN db1.table2_n13 t2 ON t1.key = t2.key;
-- relative reference, explicit column
-CREATE VIEW v7 AS SELECT key from table1;
+CREATE VIEW v7 AS SELECT key from table1_n19;
-- absolute reference, explicit column
-CREATE VIEW v8 AS SELECT key from db1.table1;
+CREATE VIEW v8 AS SELECT key from db1.table1_n19;
CREATE DATABASE db2;
USE db2;
-SELECT * FROM db1.v1;
-SELECT * FROM db1.v2;
-SELECT * FROM db1.v3;
-SELECT * FROM db1.v4;
-SELECT * FROM db1.v5;
+SELECT * FROM db1.v1_n17;
+SELECT * FROM db1.v2_n10;
+SELECT * FROM db1.v3_n3;
+SELECT * FROM db1.v4_n3;
+SELECT * FROM db1.v5_n1;
SELECT * FROM db1.v6;
SELECT * FROM db1.v7;
SELECT * FROM db1.v8;
diff --git a/ql/src/test/queries/clientpositive/view_alias.q b/ql/src/test/queries/clientpositive/view_alias.q
index bf306b5f62..1f3f3b165b 100644
--- a/ql/src/test/queries/clientpositive/view_alias.q
+++ b/ql/src/test/queries/clientpositive/view_alias.q
@@ -1,35 +1,35 @@
--! qt:dataset:src
-drop view v;
-create view v as select key, '12' from src;
-desc formatted v;
-select * from v order by `_c1` limit 5;
+drop view v_n6;
+create view v_n6 as select key, '12' from src;
+desc formatted v_n6;
+select * from v_n6 order by `_c1` limit 5;
-drop view v;
-create view v as select key as `_c1`, '12' from src;
-desc formatted v;
-select * from v order by `_c1` limit 5;
+drop view v_n6;
+create view v_n6 as select key as `_c1`, '12' from src;
+desc formatted v_n6;
+select * from v_n6 order by `_c1` limit 5;
-drop view v;
-create view v as select *, '12' from src;
-desc formatted v;
-select * from v order by `_c2` limit 5;
+drop view v_n6;
+create view v_n6 as select *, '12' from src;
+desc formatted v_n6;
+select * from v_n6 order by `_c2` limit 5;
-drop view v;
-create view v as select *, '12' as `_c121` from src;
-desc formatted v;
-select * from v order by `_c121` limit 5;
+drop view v_n6;
+create view v_n6 as select *, '12' as `_c121` from src;
+desc formatted v_n6;
+select * from v_n6 order by `_c121` limit 5;
-drop view v;
-create view v as select key, count(*) from src group by key;
-desc formatted v;
-select * from v order by `_c1` limit 5;
+drop view v_n6;
+create view v_n6 as select key, count(*) from src group by key;
+desc formatted v_n6;
+select * from v_n6 order by `_c1` limit 5;
-drop view v;
-create table a (ca string, caa string);
-create table b (cb string, cbb string);
-insert into a select * from src limit 5;
-insert into b select * from src limit 5;
-create view v as select '010', a.*, 121, b.*, 234 from a join b on a.ca = b.cb;
-desc formatted v;
-select * from v order by `_c3` limit 5;
+drop view v_n6;
+create table a_n9 (ca_n9 string, caa_n9 string);
+create table b_n7 (cb_n7 string, cbb_n7 string);
+insert into a_n9 select * from src limit 5;
+insert into b_n7 select * from src limit 5;
+create view v_n6 as select '010', a_n9.*, 121, b_n7.*, 234 from a_n9 join b_n7 on a_n9.ca_n9 = b_n7.cb_n7;
+desc formatted v_n6;
+select * from v_n6 order by `_c3` limit 5;
diff --git a/ql/src/test/queries/clientpositive/view_authorization_sqlstd.q b/ql/src/test/queries/clientpositive/view_authorization_sqlstd.q
index 64c552019e..bc5b9bdbce 100644
--- a/ql/src/test/queries/clientpositive/view_authorization_sqlstd.q
+++ b/ql/src/test/queries/clientpositive/view_authorization_sqlstd.q
@@ -6,42 +6,42 @@ set user.name=user1;
-- Test view authorization , and 'show grant' variants
-create table t1(i int, j int, k int);
-grant select on t1 to user user2 with grant option;
-show grant user user1 on table t1;
+create table t1_n54(i int, j int, k int);
+grant select on t1_n54 to user user2 with grant option;
+show grant user user1 on table t1_n54;
-- protecting certain columns
-create view vt1 as select i,k from t1;
+create view vt1_n54 as select i,k from t1_n54;
-- protecting certain rows
-create view vt2 as select * from t1 where i > 1;
+create view vt2 as select * from t1_n54 where i > 1;
show grant user user1 on all;
--view grant to user
-- try with and without table keyword
-grant select on vt1 to user user2;
-grant insert on table vt1 to user user3;
+grant select on vt1_n54 to user user2;
+grant insert on table vt1_n54 to user user3;
set user.name=user2;
-show grant user user2 on table vt1;
-create view vt3 as select i,k from t1;
+show grant user user2 on table vt1_n54;
+create view vt3 as select i,k from t1_n54;
set user.name=user3;
-show grant user user3 on table vt1;
+show grant user user3 on table vt1_n54;
set user.name=user2;
-explain authorization select * from vt1;
-select * from vt1;
+explain authorization select * from vt1_n54;
+select * from vt1_n54;
-- verify input objects required does not include table
-- even if view is within a sub query
-select * from (select * from vt1) a;
+select * from (select * from vt1_n54) a;
-select * from vt1 union all select * from vt1;
+select * from vt1_n54 union all select * from vt1_n54;
set user.name=user1;
@@ -63,15 +63,15 @@ set role admin;
show grant on table vt2;
set user.name=user1;
-revoke select on table vt1 from user user2;
+revoke select on table vt1_n54 from user user2;
set user.name=user2;
-show grant user user2 on table vt1;
+show grant user user2 on table vt1_n54;
show grant user user2 on all;
set user.name=user3;
-- grant privileges on roles for view, after next statement
-show grant user user3 on table vt1;
+show grant user user3 on table vt1_n54;
set user.name=hive_admin_user;
show current roles;
diff --git a/ql/src/test/queries/clientpositive/view_cbo.q b/ql/src/test/queries/clientpositive/view_cbo.q
index cad12212b9..ad3cd01957 100644
--- a/ql/src/test/queries/clientpositive/view_cbo.q
+++ b/ql/src/test/queries/clientpositive/view_cbo.q
@@ -7,14 +7,14 @@ select key, value, avg(key + 1) from src
group by value, key with rollup
order by key, value limit 20;
-drop view v;
-create view v as
+drop view v_n13;
+create view v_n13 as
with q1 as ( select key from src where key = '5')
select * from q1;
-desc formatted v;
+desc formatted v_n13;
-drop view v;
-create view v as
+drop view v_n13;
+create view v_n13 as
select b.key, count(*) as c
from src b
group by b.key
having exists
@@ -24,10 +24,10 @@ having exists
where a.key = b.key and a.value > 'val_9'
)
;
-desc formatted v;
+desc formatted v_n13;
-drop view v;
-create view v as
+drop view v_n13;
+create view v_n13 as
select *
from src b
where not exists
@@ -36,38 +36,38 @@ where not exists
where b.value = a.value and a.value > 'val_2'
)
;
-desc formatted v;
+desc formatted v_n13;
-drop view v;
-create view v as select a.key from src a join src b on a.key=b.key;
-desc formatted v;
+drop view v_n13;
+create view v_n13 as select a.key from src a join src b on a.key=b.key;
+desc formatted v_n13;
-CREATE VIEW view15 AS
+CREATE VIEW view15_n0 AS
SELECT key,COUNT(value) AS value_count
FROM src
GROUP BY key;
-desc formatted view15;
+desc formatted view15_n0;
-CREATE VIEW view16 AS
+CREATE VIEW view16_n0 AS
SELECT DISTINCT value
FROM src;
-desc formatted view16;
+desc formatted view16_n0;
-drop view v;
-create view v as select key from src;
-desc formatted v;
+drop view v_n13;
+create view v_n13 as select key from src;
+desc formatted v_n13;
-drop view v;
-create view v as select * from src;
-desc formatted v;
+drop view v_n13;
+create view v_n13 as select * from src;
+desc formatted v_n13;
-drop view v;
-create view v as select * from src intersect select * from src;
-desc formatted v;
+drop view v_n13;
+create view v_n13 as select * from src intersect select * from src;
+desc formatted v_n13;
-drop view v;
-create view v as select * from src except select * from src;
-desc formatted v;
+drop view v_n13;
+create view v_n13 as select * from src except select * from src;
+desc formatted v_n13;
-explain select * from v;
+explain select * from v_n13;
diff --git a/ql/src/test/queries/clientpositive/windowing_duplicate.q b/ql/src/test/queries/clientpositive/windowing_duplicate.q
index ebdecd7f84..d9973182cc 100644
--- a/ql/src/test/queries/clientpositive/windowing_duplicate.q
+++ b/ql/src/test/queries/clientpositive/windowing_duplicate.q
@@ -2,7 +2,7 @@ create table mytable1 (
mytime timestamp,
string1 string);
-create table t1 as
+create table t1_n44 as
select sum(bound3) OVER (PARTITION BY string1 ORDER BY mytime) as bound1
from
(
diff --git a/ql/src/test/queries/clientpositive/windowing_expressions.q b/ql/src/test/queries/clientpositive/windowing_expressions.q
index 5615f5966f..09e759b598 100644
--- a/ql/src/test/queries/clientpositive/windowing_expressions.q
+++ b/ql/src/test/queries/clientpositive/windowing_expressions.q
@@ -1,7 +1,7 @@
--! qt:dataset:part
-drop table over10k;
+drop table over10k_n22;
-create table over10k(
+create table over10k_n22(
t tinyint,
si smallint,
i int,
@@ -16,7 +16,7 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n22;
select p_mfgr, p_retailprice, p_size,
round(sum(p_retailprice) over w1 , 2) = round(sum(lag(p_retailprice,1,0.0)) over w1 + last_value(p_retailprice) over w1 , 2),
@@ -31,21 +31,21 @@ sum(p_retailprice) over (distribute by p_mfgr sort by p_retailprice rows between
from part
;
-select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k limit 100;
-select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k limit 100;
-select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k limit 100;
-select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k limit 100;
+select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k_n22 limit 100;
+select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k_n22 limit 100;
+select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k_n22 limit 100;
+select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k_n22 limit 100;
select p_mfgr, avg(p_retailprice) over(partition by p_mfgr, p_type order by p_mfgr) from part;
select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfgr rows between unbounded preceding and current row) from part;
-- multi table insert test
-create table t1 (a1 int, b1 string);
-create table t2 (a1 int, b1 string);
-from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select * ;
-select * from t1 limit 3;
-select * from t2 limit 3;
+create table t1_n142 (a1 int, b1 string);
+create table t2_n83 (a1 int, b1 string);
+from (select sum(i) over (partition by ts order by i), s from over10k_n22) tt insert overwrite table t1_n142 select * insert overwrite table t2_n83 select * ;
+select * from t1_n142 limit 3;
+select * from t2_n83 limit 3;
select p_mfgr, p_retailprice, p_size,
round(sum(p_retailprice) over w1 , 2) + 50.0 = round(sum(lag(p_retailprice,1,50.0)) over w1 + (last_value(p_retailprice) over w1),2)
diff --git a/ql/src/test/queries/clientpositive/windowing_multipartitioning.q b/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
index 92e229e140..622c244365 100644
--- a/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
+++ b/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
@@ -1,6 +1,6 @@
-drop table over10k;
+drop table over10k_n11;
-create table over10k(
+create table over10k_n11(
t tinyint,
si smallint,
i int,
@@ -15,26 +15,26 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n11;
-select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k limit 100;
+select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k_n11 limit 100;
select s,
rank() over (partition by s order by `dec` desc),
sum(b) over (partition by s order by ts desc)
-from over10k
+from over10k_n11
where s = 'tom allen' or s = 'bob steinbeck';
-select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k where s = 'tom allen' or s = 'bob steinbeck' ;
+select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k_n11 where s = 'tom allen' or s = 'bob steinbeck' ;
-select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k
+select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k_n11
where s = 'tom allen' or s = 'bob steinbeck';
-select s, sum(f) over (partition by i), row_number() over (order by f) from over10k where s = 'tom allen' or s = 'bob steinbeck';
+select s, sum(f) over (partition by i), row_number() over (order by f) from over10k_n11 where s = 'tom allen' or s = 'bob steinbeck';
select s, rank() over w1,
rank() over w2
-from over10k
+from over10k_n11
where s = 'tom allen' or s = 'bob steinbeck'
window w1 as (partition by s order by `dec`),
diff --git a/ql/src/test/queries/clientpositive/windowing_navfn.q b/ql/src/test/queries/clientpositive/windowing_navfn.q
index a172578a24..21caf1c623 100644
--- a/ql/src/test/queries/clientpositive/windowing_navfn.q
+++ b/ql/src/test/queries/clientpositive/windowing_navfn.q
@@ -1,7 +1,7 @@
--! qt:dataset:src
-drop table over10k;
+drop table over10k_n19;
-create table over10k(
+create table over10k_n19(
t tinyint,
si smallint,
i int,
@@ -16,27 +16,27 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n19;
explain select row_number() over() from src where key = '238';
select row_number() over() from src where key = '238';
-select s, row_number() over (partition by d order by `dec`) from over10k limit 100;
+select s, row_number() over (partition by d order by `dec`) from over10k_n19 limit 100;
-select i, lead(s) over (partition by bin order by d,i desc) from over10k limit 100;
+select i, lead(s) over (partition by bin order by d,i desc) from over10k_n19 limit 100;
-select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k limit 100;
+select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k_n19 limit 100;
-select s, last_value(t) over (partition by d order by f) from over10k limit 100;
+select s, last_value(t) over (partition by d order by f) from over10k_n19 limit 100;
-select s, first_value(s) over (partition by bo order by s) from over10k limit 100;
+select s, first_value(s) over (partition by bo order by s) from over10k_n19 limit 100;
select t, s, i, last_value(i) over (partition by t order by s)
-from over10k where (s = 'oscar allen' or s = 'oscar carson') and t = 10;
+from over10k_n19 where (s = 'oscar allen' or s = 'oscar carson') and t = 10;
-drop table if exists wtest;
-create table wtest as
+drop table if exists wtest_n0;
+create table wtest_n0 as
select a, b
from
(
@@ -54,7 +54,7 @@ first_value(b) over (partition by a order by b rows between 1 preceding and 1 fo
first_value(b, true) over (partition by a order by b rows between 1 preceding and 1 following ) ,
first_value(b) over (partition by a order by b rows between unbounded preceding and 1 following ) ,
first_value(b, true) over (partition by a order by b rows between unbounded preceding and 1 following )
-from wtest;
+from wtest_n0;
select a, b,
@@ -62,18 +62,18 @@ first_value(b) over (partition by a order by b desc rows between 1 preceding an
first_value(b, true) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
first_value(b) over (partition by a order by b desc rows between unbounded preceding and 1 following ) ,
first_value(b, true) over (partition by a order by b desc rows between unbounded preceding and 1 following )
-from wtest;
+from wtest_n0;
select a, b,
last_value(b) over (partition by a order by b rows between 1 preceding and 1 following ) ,
last_value(b, true) over (partition by a order by b rows between 1 preceding and 1 following ) ,
last_value(b) over (partition by a order by b rows between unbounded preceding and 1 following ) ,
last_value(b, true) over (partition by a order by b rows between unbounded preceding and 1 following )
-from wtest;
+from wtest_n0;
select a, b,
last_value(b) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
last_value(b, true) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
last_value(b) over (partition by a order by b desc rows between unbounded preceding and 1 following ) ,
last_value(b, true) over (partition by a order by b desc rows between unbounded preceding and 1 following )
-from wtest;
+from wtest_n0;
diff --git a/ql/src/test/queries/clientpositive/windowing_ntile.q b/ql/src/test/queries/clientpositive/windowing_ntile.q
index 2382ca809c..abb68589b9 100644
--- a/ql/src/test/queries/clientpositive/windowing_ntile.q
+++ b/ql/src/test/queries/clientpositive/windowing_ntile.q
@@ -1,6 +1,6 @@
-drop table over10k;
+drop table over10k_n13;
-create table over10k(
+create table over10k_n13(
t tinyint,
si smallint,
i int,
@@ -15,14 +15,14 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n13;
-select i, ntile(10) over (partition by s order by i) from over10k limit 100;
+select i, ntile(10) over (partition by s order by i) from over10k_n13 limit 100;
-select s, ntile(100) over (partition by i order by s) from over10k limit 100;
+select s, ntile(100) over (partition by i order by s) from over10k_n13 limit 100;
-select f, ntile(4) over (partition by d order by f) from over10k limit 100;
+select f, ntile(4) over (partition by d order by f) from over10k_n13 limit 100;
-select d, ntile(1000) over (partition by `dec` order by d) from over10k limit 100;
+select d, ntile(1000) over (partition by `dec` order by d) from over10k_n13 limit 100;
diff --git a/ql/src/test/queries/clientpositive/windowing_range_multiorder.q b/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
index e727d27ec8..a09c717881 100644
--- a/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
+++ b/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
@@ -1,6 +1,6 @@
-drop table over10k;
+drop table over10k_n17;
-create table over10k(
+create table over10k_n17(
t tinyint,
si smallint,
i int,
@@ -15,26 +15,26 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n17;
-select first_value(t) over ( partition by si order by i, b ) from over10k limit 100;
+select first_value(t) over ( partition by si order by i, b ) from over10k_n17 limit 100;
-select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k limit 100;
+select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k_n17 limit 100;
-select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k limit 100;
+select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k_n17 limit 100;
-select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k limit 100;
+select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k_n17 limit 100;
-select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100;
+select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n17 limit 100;
-select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100;
+select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n17 limit 100;
-select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k limit 100;
+select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k_n17 limit 100;
-select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k limit 100;
+select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k_n17 limit 100;
-select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k limit 100;
+select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100;
-select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100;
+select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100;
-select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100;
+select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100;
diff --git a/ql/src/test/queries/clientpositive/windowing_rank.q b/ql/src/test/queries/clientpositive/windowing_rank.q
index 9812037e92..5a9255723b 100644
--- a/ql/src/test/queries/clientpositive/windowing_rank.q
+++ b/ql/src/test/queries/clientpositive/windowing_rank.q
@@ -1,6 +1,6 @@
-drop table over10k;
+drop table over10k_n10;
-create table over10k(
+create table over10k_n10(
t tinyint,
si smallint,
i int,
@@ -15,15 +15,15 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n10;
-select s, rank() over (partition by f order by t) from over10k limit 100;
+select s, rank() over (partition by f order by t) from over10k_n10 limit 100;
-select s, dense_rank() over (partition by ts order by i,s desc) from over10k limit 100;
+select s, dense_rank() over (partition by ts order by i,s desc) from over10k_n10 limit 100;
-select s, cume_dist() over (partition by bo order by b,s) from over10k limit 100;
+select s, cume_dist() over (partition by bo order by b,s) from over10k_n10 limit 100;
-select s, percent_rank() over (partition by `dec` order by f) from over10k limit 100;
+select s, percent_rank() over (partition by `dec` order by f) from over10k_n10 limit 100;
-- If following tests fail, look for the comments in class PTFPPD::process()
@@ -33,8 +33,8 @@ from
rank() over (partition by ts order by `dec`) as rnk
from
(select other.ts, other.`dec`
-from over10k other
-join over10k on (other.b = over10k.b)
+from over10k_n10 other
+join over10k_n10 on (other.b = over10k_n10.b)
) joined
) ranked
where rnk = 1 limit 10;
@@ -45,8 +45,8 @@ from
rank() over (partition by ts) as rnk
from
(select other.ts, other.`dec`
-from over10k other
-join over10k on (other.b = over10k.b)
+from over10k_n10 other
+join over10k_n10 on (other.b = over10k_n10.b)
) joined
) ranked
where `dec` = 89.5 limit 10;
@@ -57,8 +57,8 @@ from
rank() over (partition by ts order by `dec`) as rnk
from
(select other.ts, other.`dec`
-from over10k other
-join over10k on (other.b = over10k.b)
+from over10k_n10 other
+join over10k_n10 on (other.b = over10k_n10.b)
where other.t < 10
) joined
) ranked
diff --git a/ql/src/test/queries/clientpositive/windowing_streaming.q b/ql/src/test/queries/clientpositive/windowing_streaming.q
index 30f0c1454c..2fd161b983 100644
--- a/ql/src/test/queries/clientpositive/windowing_streaming.q
+++ b/ql/src/test/queries/clientpositive/windowing_streaming.q
@@ -1,8 +1,8 @@
--! qt:dataset:part
--! qt:dataset:alltypesorc
-drop table over10k;
+drop table over10k_n20;
-create table over10k(
+create table over10k_n20(
t tinyint,
si smallint,
i int,
@@ -17,7 +17,7 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n20;
set hive.limit.pushdown.memory.usage=.8;
@@ -40,13 +40,13 @@ select * from (
select p_mfgr, rank() over(partition by p_mfgr order by p_name) r from part) a
where r < 2;
--- over10k tests
+-- over10k_n20 tests
select *
-from (select t, f, rank() over(partition by t order by f) r from over10k) a
+from (select t, f, rank() over(partition by t order by f) r from over10k_n20) a
where r < 6 and t < 5;
select *
-from (select t, f, row_number() over(partition by t order by f) r from over10k) a
+from (select t, f, row_number() over(partition by t order by f) r from over10k_n20) a
where r < 8 and t < 0;
set hive.vectorized.execution.enabled=false;
@@ -55,18 +55,18 @@ set hive.limit.pushdown.memory.usage=0.8;
explain
select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5;
-drop table if exists sB;
-create table sB ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as
+drop table if exists sB_n0;
+create table sB_n0 ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as
select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5;
-select * from sB
+select * from sB_n0
where ctinyint is null;
set hive.vectorized.execution.enabled=true;
set hive.limit.pushdown.memory.usage=0.8;
-drop table if exists sD;
-create table sD ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as
+drop table if exists sD_n0;
+create table sD_n0 ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as
select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5;
-select * from sD
+select * from sD_n0
where ctinyint is null;
diff --git a/ql/src/test/queries/clientpositive/windowing_udaf.q b/ql/src/test/queries/clientpositive/windowing_udaf.q
index e71bf8c123..2e7da0d2e9 100644
--- a/ql/src/test/queries/clientpositive/windowing_udaf.q
+++ b/ql/src/test/queries/clientpositive/windowing_udaf.q
@@ -1,7 +1,7 @@
--! qt:dataset:src1
-drop table over10k;
+drop table over10k_n4;
-create table over10k(
+create table over10k_n4(
t tinyint,
si smallint,
i int,
@@ -16,17 +16,17 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n4;
-select s, min(i) over (partition by s) from over10k limit 100;
+select s, min(i) over (partition by s) from over10k_n4 limit 100;
-select s, avg(f) over (partition by si order by s) from over10k limit 100;
+select s, avg(f) over (partition by si order by s) from over10k_n4 limit 100;
-select s, avg(i) over (partition by t, b order by s) from over10k limit 100;
+select s, avg(i) over (partition by t, b order by s) from over10k_n4 limit 100;
-select max(i) over w from over10k window w as (partition by f) limit 100;
+select max(i) over w from over10k_n4 window w as (partition by f) limit 100;
-select s, avg(d) over (partition by t order by f) from over10k limit 100;
+select s, avg(d) over (partition by t order by f) from over10k_n4 limit 100;
select key, max(value) over (order by key rows between 10 preceding and 20 following)
diff --git a/ql/src/test/queries/clientpositive/windowing_windowspec.q b/ql/src/test/queries/clientpositive/windowing_windowspec.q
index 7c5cc2cf7e..73388efcf8 100644
--- a/ql/src/test/queries/clientpositive/windowing_windowspec.q
+++ b/ql/src/test/queries/clientpositive/windowing_windowspec.q
@@ -1,6 +1,6 @@
-drop table over10k;
+drop table over10k_n18;
-create table over10k(
+create table over10k_n18(
t tinyint,
si smallint,
i int,
@@ -15,36 +15,36 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n18;
-select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k limit 100;
+select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k_n18 limit 100;
-select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k limit 100;
+select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k_n18 limit 100;
-select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k limit 100;
+select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k_n18 limit 100;
-select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k limit 100;
+select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k_n18 limit 100;
-select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k limit 100;
+select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k_n18 limit 100;
-select s, sum(i) over(partition by ts order by s) from over10k limit 100;
+select s, sum(i) over(partition by ts order by s) from over10k_n18 limit 100;
-select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k limit 100;
+select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k_n18 limit 100;
-select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100;
+select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n18 limit 100;
-select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k limit 7;
+select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k_n18 limit 7;
-select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i) limit 7;
+select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n18 window w1 as (partition by s order by i) limit 7;
set hive.cbo.enable=false;
-- HIVE-9228
-select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i)) X limit 7;
+select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n18 window w1 as (partition by s order by i)) X limit 7;
create table over10k_2 as select t, si, i, b, f, d, bo, s, ts, cast(ts as timestamp with local time zone) as tstz, `dec`, bin
-from over10k;
+from over10k_n18;
select ts, i, sum(f) over (partition by i order by ts)
from over10k_2
diff --git a/ql/src/test/queries/clientpositive/windowing_windowspec2.q b/ql/src/test/queries/clientpositive/windowing_windowspec2.q
index 24e3b320de..9650fd4237 100644
--- a/ql/src/test/queries/clientpositive/windowing_windowspec2.q
+++ b/ql/src/test/queries/clientpositive/windowing_windowspec2.q
@@ -1,6 +1,6 @@
-drop table over10k;
+drop table over10k_n16;
-create table over10k(
+create table over10k_n16(
t tinyint,
si smallint,
i int,
@@ -15,46 +15,46 @@ create table over10k(
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n16;
-- sum
-select ts, f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100;
-select ts, f, sum(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k limit 100;
-select ts, f, sum(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k limit 100;
-select ts, f, sum(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k limit 100;
+select ts, f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, sum(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, sum(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k_n16 limit 100;
+select ts, f, sum(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k_n16 limit 100;
-- avg
-select ts, f, avg(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100;
-select ts, f, avg(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k limit 100;
-select ts, f, avg(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k limit 100;
-select ts, f, avg(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k limit 100;
+select ts, f, avg(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, avg(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, avg(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k_n16 limit 100;
+select ts, f, avg(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k_n16 limit 100;
-- count
-select ts, f, count(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100;
-select ts, f, count(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k limit 100;
-select ts, f, count(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k limit 100;
-select ts, f, count(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k limit 100;
+select ts, f, count(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, count(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, count(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k_n16 limit 100;
+select ts, f, count(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k_n16 limit 100;
-- max
-select ts, f, max(f) over (partition by ts order by t,f rows between 2 preceding and 1 preceding) from over10k limit 100;
-select ts, f, max(f) over (partition by ts order by t,f rows between unbounded preceding and 1 preceding) from over10k limit 100;
-select ts, f, max(f) over (partition by ts order by t,f rows between 1 following and 2 following) from over10k limit 100;
-select ts, f, max(f) over (partition by ts order by t,f rows between unbounded preceding and 1 following) from over10k limit 100;
+select ts, f, max(f) over (partition by ts order by t,f rows between 2 preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, max(f) over (partition by ts order by t,f rows between unbounded preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, max(f) over (partition by ts order by t,f rows between 1 following and 2 following) from over10k_n16 limit 100;
+select ts, f, max(f) over (partition by ts order by t,f rows between unbounded preceding and 1 following) from over10k_n16 limit 100;
-- min
-select ts, f, min(f) over (partition by ts order by t,f rows between 2 preceding and 1 preceding) from over10k limit 100;
-select ts, f, min(f) over (partition by ts order by t,f rows between unbounded preceding and 1 preceding) from over10k limit 100;
-select ts, f, min(f) over (partition by ts order by t,f rows between 1 following and 2 following) from over10k limit 100;
-select ts, f, min(f) over (partition by ts order by t,f rows between unbounded preceding and 1 following) from over10k limit 100;
+select ts, f, min(f) over (partition by ts order by t,f rows between 2 preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, min(f) over (partition by ts order by t,f rows between unbounded preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, min(f) over (partition by ts order by t,f rows between 1 following and 2 following) from over10k_n16 limit 100;
+select ts, f, min(f) over (partition by ts order by t,f rows between unbounded preceding and 1 following) from over10k_n16 limit 100;
-- first_value
-select ts, f, first_value(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100;
-select ts, f, first_value(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k limit 100;
-select ts, f, first_value(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k limit 100;
-select ts, f, first_value(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k limit 100;
+select ts, f, first_value(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, first_value(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, first_value(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k_n16 limit 100;
+select ts, f, first_value(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k_n16 limit 100;
-- last_value
-select ts, f, last_value(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100;
-select ts, f, last_value(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k limit 100;
-select ts, f, last_value(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k limit 100;
-select ts, f, last_value(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k limit 100;
+select ts, f, last_value(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, last_value(f) over (partition by ts order by f rows between unbounded preceding and 1 preceding) from over10k_n16 limit 100;
+select ts, f, last_value(f) over (partition by ts order by f rows between 1 following and 2 following) from over10k_n16 limit 100;
+select ts, f, last_value(f) over (partition by ts order by f rows between unbounded preceding and 1 following) from over10k_n16 limit 100;
diff --git a/ql/src/test/queries/clientpositive/windowing_windowspec3.q b/ql/src/test/queries/clientpositive/windowing_windowspec3.q
index aee007664f..debd076b72 100644
--- a/ql/src/test/queries/clientpositive/windowing_windowspec3.q
+++ b/ql/src/test/queries/clientpositive/windowing_windowspec3.q
@@ -1,8 +1,8 @@
-- Test value based windowing spec
-drop table if exists emp;
+drop table if exists emp_n0;
-create table emp(empno smallint,
+create table emp_n0(empno smallint,
ename varchar(10),
job varchar(10),
manager smallint,
@@ -15,10 +15,10 @@ create table emp(empno smallint,
row format delimited fields terminated by '|';
-load data local inpath '../../data/files/emp2.txt' into table emp;
+load data local inpath '../../data/files/emp2.txt' into table emp_n0;
-- No order by
-select hirets, salary, sum(salary) over (partition by hirets range between current row and unbounded following) from emp;
+select hirets, salary, sum(salary) over (partition by hirets range between current row and unbounded following) from emp_n0;
-- Support date datatype
@@ -29,7 +29,7 @@ select deptno, empno, hiredate, salary,
sum(salary) over (partition by deptno order by hiredate range between 10 following and 90 following),
sum(salary) over (partition by deptno order by hiredate range between 10 following and unbounded following),
sum(salary) over (partition by deptno order by hiredate range between unbounded preceding and 10 following)
-from emp;
+from emp_n0;
-- Support timestamp datatype. Value in seconds (90days = 90 * 24 * 3600 seconds)
select deptno, empno, hirets, salary,
@@ -39,7 +39,7 @@ select deptno, empno, hirets, salary,
sum(salary) over (partition by deptno order by hirets range between 864000 following and 7776000 following),
sum(salary) over (partition by deptno order by hirets range between 864000 following and unbounded following),
sum(salary) over (partition by deptno order by hirets range between unbounded preceding and 864000 following)
-from emp;
+from emp_n0;
-- Support double datatype
select deptno, empno, bonus,
@@ -49,7 +49,7 @@ select deptno, empno, bonus,
avg(bonus) over (partition by deptno order by bonus range between 100 following and 200 following),
avg(bonus) over (partition by deptno order by bonus range between 200 following and unbounded following),
avg(bonus) over (partition by deptno order by bonus range between unbounded preceding and 200 following)
-from emp;
+from emp_n0;
-- Support Decimal datatype
select deptno, empno, stock, salary,
@@ -59,4 +59,4 @@ select deptno, empno, stock, salary,
avg(salary) over (partition by deptno order by stock range between 100 following and 200 following),
avg(salary) over (partition by deptno order by stock range between 200 following and unbounded following),
avg(salary) over (partition by deptno order by stock range between unbounded preceding and 200 following)
-from emp;
+from emp_n0;
diff --git a/ql/src/test/results/clientpositive/acid_nullscan.q.out b/ql/src/test/results/clientpositive/acid_nullscan.q.out
index 902471971f..6dad4974ae 100644
--- a/ql/src/test/results/clientpositive/acid_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/acid_nullscan.q.out
@@ -1,36 +1,36 @@
-PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: query: CREATE TABLE acid_vectorized_n1(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_vectorized
-POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: Output: default@acid_vectorized_n1
+POSTHOOK: query: CREATE TABLE acid_vectorized_n1(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_vectorized
-PREHOOK: query: insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: Output: default@acid_vectorized_n1
+PREHOOK: query: insert into table acid_vectorized_n1 select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
PREHOOK: type: QUERY
PREHOOK: Input: default@alltypesorc
-PREHOOK: Output: default@acid_vectorized
-POSTHOOK: query: insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: Output: default@acid_vectorized_n1
+POSTHOOK: query: insert into table acid_vectorized_n1 select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alltypesorc
-POSTHOOK: Output: default@acid_vectorized
-POSTHOOK: Lineage: acid_vectorized.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: acid_vectorized.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-PREHOOK: query: insert into table acid_vectorized values (1, 'bar')
+POSTHOOK: Output: default@acid_vectorized_n1
+POSTHOOK: Lineage: acid_vectorized_n1.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized_n1.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: insert into table acid_vectorized_n1 values (1, 'bar')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@acid_vectorized
-POSTHOOK: query: insert into table acid_vectorized values (1, 'bar')
+PREHOOK: Output: default@acid_vectorized_n1
+POSTHOOK: query: insert into table acid_vectorized_n1 values (1, 'bar')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@acid_vectorized
-POSTHOOK: Lineage: acid_vectorized.a SCRIPT []
-POSTHOOK: Lineage: acid_vectorized.b SCRIPT []
+POSTHOOK: Output: default@acid_vectorized_n1
+POSTHOOK: Lineage: acid_vectorized_n1.a SCRIPT []
+POSTHOOK: Lineage: acid_vectorized_n1.b SCRIPT []
PREHOOK: query: explain extended
-select sum(a) from acid_vectorized where false
+select sum(a) from acid_vectorized_n1 where false
PREHOOK: type: QUERY
POSTHOOK: query: explain extended
-select sum(a) from acid_vectorized where false
+select sum(a) from acid_vectorized_n1 where false
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -41,7 +41,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
-alias: acid_vectorized
+alias: acid_vectorized_n1
Statistics: Num rows: 88 Data size: 25400 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -62,9 +62,9 @@ STAGE PLANS:
auto parallelism: false
Execution mode: vectorized
Path -> Alias:
-nullscan://null/default.acid_vectorized/part_ [acid_vectorized]
+nullscan://null/default.acid_vectorized_n1/part_ [acid_vectorized_n1]
Path -> Partition:
-nullscan://null/default.acid_vectorized/part_
+nullscan://null/default.acid_vectorized_n1/part_
Partition
input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -77,9 +77,9 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
-name default.acid_vectorized
+name default.acid_vectorized_n1
numFiles 3
-serialization.ddl struct acid_vectorized { i32 a, string b}
+serialization.ddl struct acid_vectorized_n1 { i32 a, string b}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
totalSize 2540
@@ -99,9 +99,9 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
-name default.acid_vectorized
+name default.acid_vectorized_n1
numFiles 3
-serialization.ddl struct acid_vectorized { i32 a, string b}
+serialization.ddl struct acid_vectorized_n1 { i32 a, string b}
serialization.format 1
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
totalSize 2540
@@ -109,10 +109,10 @@ STAGE PLANS:
transactional_properties default
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-name: default.acid_vectorized
-name: default.acid_vectorized
+name: default.acid_vectorized_n1
+name: default.acid_vectorized_n1
Truncated Path -> Alias:
-nullscan://null/default.acid_vectorized/part_ [acid_vectorized]
+nullscan://null/default.acid_vectorized_n1/part_ [acid_vectorized_n1]
Needs Tagging: false
Reduce Operator Tree:
Group By Operator
@@ -149,12 +149,12 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select sum(a) from acid_vectorized where false
+PREHOOK: query: select sum(a) from acid_vectorized_n1 where false
PREHOOK: type: QUERY
-PREHOOK: Input: default@acid_vectorized
+PREHOOK: Input: default@acid_vectorized_n1
#### A masked pattern was here ####
-POSTHOOK: query: select sum(a) from acid_vectorized where false
+POSTHOOK: query: select sum(a) from acid_vectorized_n1 where false
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@acid_vectorized
+POSTHOOK: Input: default@acid_vectorized_n1
#### A masked pattern was here ####
NULL
diff --git a/ql/src/test/results/clientpositive/acid_vectorization_project.q.out b/ql/src/test/results/clientpositive/acid_vectorization_project.q.out
index 1bdacb9a76..b706e65f06 100644
--- a/ql/src/test/results/clientpositive/acid_vectorization_project.q.out
+++ b/ql/src/test/results/clientpositive/acid_vectorization_project.q.out
@@ -1,29 +1,29 @@
-PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: query: CREATE TABLE acid_vectorized_n2(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_vectorized
-POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: Output: default@acid_vectorized_n2
+POSTHOOK: query: CREATE TABLE acid_vectorized_n2(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_vectorized
-PREHOOK: query: insert into table acid_vectorized select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: Output: default@acid_vectorized_n2
+PREHOOK: query: insert into table acid_vectorized_n2 select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10
PREHOOK: type: QUERY
PREHOOK: Input: default@alltypesorc
-PREHOOK: Output: default@acid_vectorized
-POSTHOOK: query: insert into table acid_vectorized select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: Output: default@acid_vectorized_n2
+POSTHOOK: query: insert into table acid_vectorized_n2 select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alltypesorc
-POSTHOOK: Output: default@acid_vectorized
-POSTHOOK: Lineage: acid_vectorized.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: acid_vectorized.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: acid_vectorized.c SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-PREHOOK: query: select a,b from acid_vectorized order by a
+POSTHOOK: Output: default@acid_vectorized_n2
+POSTHOOK: Lineage: acid_vectorized_n2.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized_n2.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized_n2.c SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+PREHOOK: query: select a,b from acid_vectorized_n2 order by a
PREHOOK: type: QUERY
-PREHOOK: Input: default@acid_vectorized
+PREHOOK: Input: default@acid_vectorized_n2
#### A masked pattern was here ####
-POSTHOOK: query: select a,b from acid_vectorized order by a
+POSTHOOK: query: select a,b from acid_vectorized_n2 order by a
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@acid_vectorized
+POSTHOOK: Input: default@acid_vectorized_n2
#### A masked pattern was here ####
-1073279343 oj1YrV5Wa
-1073051226 A34p7oRr2WvUJNf
@@ -35,13 +35,13 @@ POSTHOOK: Input: default@acid_vectorized
-1070883071 0ruyd6Y50JpdGRf6HqD
-1070551679 iUR3Q
-1069736047 k17Am8uPHWk02cEf1jet
-PREHOOK: query: select a,c from acid_vectorized order by a
+PREHOOK: query: select a,c from acid_vectorized_n2 order by a
PREHOOK: type: QUERY
-PREHOOK: Input: default@acid_vectorized
+PREHOOK: Input: default@acid_vectorized_n2
#### A masked pattern was here ####
-POSTHOOK: query: select a,c from acid_vectorized order by a
+POSTHOOK: query: select a,c from acid_vectorized_n2 order by a
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@acid_vectorized
+POSTHOOK: Input: default@acid_vectorized_n2
#### A masked pattern was here ####
-1073279343 11.0
-1073051226 NULL
@@ -53,13 +53,13 @@ POSTHOOK: Input: default@acid_vectorized
-1070883071 NULL
-1070551679 NULL
-1069736047 11.0
-PREHOOK: query: select b,c from acid_vectorized order by b
+PREHOOK: query: select b,c from acid_vectorized_n2 order by b
PREHOOK: type: QUERY
-PREHOOK: Input: default@acid_vectorized
+PREHOOK: Input: default@acid_vectorized_n2
#### A masked pattern was here ####
-POSTHOOK: query: select b,c from acid_vectorized order by b
+POSTHOOK: query: select b,c from acid_vectorized_n2 order by b
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@acid_vectorized
+POSTHOOK: Input: default@acid_vectorized_n2
#### A masked pattern was here ####
0iqrc5 11.0
0ruyd6Y50JpdGRf6HqD NULL
diff --git a/ql/src/test/results/clientpositive/add_part_multiple.q.out b/ql/src/test/results/clientpositive/add_part_multiple.q.out
index 1e01b26743..5c9282d983 100644
--- a/ql/src/test/results/clientpositive/add_part_multiple.q.out
+++ b/ql/src/test/results/clientpositive/add_part_multiple.q.out
@@ -1,20 +1,20 @@
-PREHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE add_part_test_n1 (key STRING, value STRING) PARTITIONED BY (ds STRING)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@add_part_test
-POSTHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@add_part_test_n1
+POSTHOOK: query: CREATE TABLE add_part_test_n1 (key STRING, value STRING) PARTITIONED BY (ds STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@add_part_test
+POSTHOOK: Output: default@add_part_test_n1
PREHOOK: query: explain
-ALTER TABLE add_part_test ADD IF NOT EXISTS
+ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS
PARTITION (ds='2010-01-01') location 'A'
PARTITION (ds='2010-02-01') location 'B'
PARTITION (ds='2010-03-01')
PARTITION (ds='2010-04-01') location 'C'
PREHOOK: type: ALTERTABLE_ADDPARTS
POSTHOOK: query: explain
-ALTER TABLE add_part_test ADD IF NOT EXISTS
+ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS
PARTITION (ds='2010-01-01') location 'A'
PARTITION (ds='2010-02-01') location 'B'
PARTITION (ds='2010-03-01')
@@ -29,71 +29,71 @@ STAGE PLANS:
#### A masked pattern was here ####
Spec: {ds=2010-01-01}, {ds=2010-02-01}, {ds=2010-03-01}, {ds=2010-04-01}
-PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS
+PREHOOK: query: ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS
PARTITION (ds='2010-01-01') location 'A'
PARTITION (ds='2010-02-01') location 'B'
PARTITION (ds='2010-03-01')
PARTITION (ds='2010-04-01') location 'C'
PREHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-PREHOOK: Output: default@add_part_test
-POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS
+PREHOOK: Output: default@add_part_test_n1
+POSTHOOK: query: ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS
PARTITION (ds='2010-01-01') location 'A'
PARTITION (ds='2010-02-01') location 'B'
PARTITION (ds='2010-03-01')
PARTITION (ds='2010-04-01') location 'C'
POSTHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-POSTHOOK: Output: default@add_part_test
-POSTHOOK: Output: default@add_part_test@ds=2010-01-01
-POSTHOOK: Output: default@add_part_test@ds=2010-02-01
-POSTHOOK: Output: default@add_part_test@ds=2010-03-01
-POSTHOOK: Output: default@add_part_test@ds=2010-04-01
+POSTHOOK: Output: default@add_part_test_n1
+POSTHOOK: Output: default@add_part_test_n1@ds=2010-01-01
+POSTHOOK: Output: default@add_part_test_n1@ds=2010-02-01
+POSTHOOK: Output: default@add_part_test_n1@ds=2010-03-01
+POSTHOOK: Output: default@add_part_test_n1@ds=2010-04-01
PREHOOK: query: from src TABLESAMPLE (1 ROWS)
-insert into table add_part_test PARTITION (ds='2010-01-01') select 100,100
-insert into table add_part_test PARTITION (ds='2010-02-01') select 200,200
-insert into table add_part_test PARTITION (ds='2010-03-01') select 400,300
-insert into table add_part_test PARTITION (ds='2010-04-01') select 500,400
+insert into table add_part_test_n1 PARTITION (ds='2010-01-01') select 100,100
+insert into table add_part_test_n1 PARTITION (ds='2010-02-01') select 200,200
+insert into table add_part_test_n1 PARTITION (ds='2010-03-01') select 400,300
+insert into table add_part_test_n1 PARTITION (ds='2010-04-01') select 500,400
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@add_part_test@ds=2010-01-01
-PREHOOK: Output: default@add_part_test@ds=2010-02-01
-PREHOOK: Output: default@add_part_test@ds=2010-03-01
-PREHOOK: Output: default@add_part_test@ds=2010-04-01
+PREHOOK: Output: default@add_part_test_n1@ds=2010-01-01
+PREHOOK: Output: default@add_part_test_n1@ds=2010-02-01
+PREHOOK: Output: default@add_part_test_n1@ds=2010-03-01
+PREHOOK: Output: default@add_part_test_n1@ds=2010-04-01
POSTHOOK: query: from src TABLESAMPLE (1 ROWS)
-insert into table add_part_test PARTITION (ds='2010-01-01') select 100,100
-insert into table add_part_test PARTITION (ds='2010-02-01') select 200,200
-insert into table add_part_test PARTITION (ds='2010-03-01') select 400,300
-insert into table add_part_test PARTITION (ds='2010-04-01') select 500,400
+insert into table add_part_test_n1 PARTITION (ds='2010-01-01') select 100,100
+insert into table add_part_test_n1 PARTITION (ds='2010-02-01') select 200,200
+insert into table add_part_test_n1 PARTITION (ds='2010-03-01') select 400,300
+insert into table add_part_test_n1 PARTITION (ds='2010-04-01') select 500,400
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@add_part_test@ds=2010-01-01
-POSTHOOK: Output: default@add_part_test@ds=2010-02-01
-POSTHOOK: Output: default@add_part_test@ds=2010-03-01
-POSTHOOK: Output: default@add_part_test@ds=2010-04-01
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-01-01).key EXPRESSION []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-01-01).value EXPRESSION []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-02-01).key EXPRESSION []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-02-01).value EXPRESSION []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-03-01).key EXPRESSION []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-03-01).value EXPRESSION []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-04-01).key EXPRESSION []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-04-01).value EXPRESSION []
-PREHOOK: query: select * from add_part_test
+POSTHOOK: Output: default@add_part_test_n1@ds=2010-01-01
+POSTHOOK: Output: default@add_part_test_n1@ds=2010-02-01
+POSTHOOK: Output: default@add_part_test_n1@ds=2010-03-01
+POSTHOOK: Output: default@add_part_test_n1@ds=2010-04-01
+POSTHOOK: Lineage: add_part_test_n1 PARTITION(ds=2010-01-01).key EXPRESSION []
+POSTHOOK: Lineage: add_part_test_n1 PARTITION(ds=2010-01-01).value EXPRESSION []
+POSTHOOK: Lineage: add_part_test_n1 PARTITION(ds=2010-02-01).key EXPRESSION []
+POSTHOOK: Lineage: add_part_test_n1 PARTITION(ds=2010-02-01).value EXPRESSION []
+POSTHOOK: Lineage: add_part_test_n1 PARTITION(ds=2010-03-01).key EXPRESSION []
+POSTHOOK: Lineage: add_part_test_n1 PARTITION(ds=2010-03-01).value EXPRESSION []
+POSTHOOK: Lineage: add_part_test_n1 PARTITION(ds=2010-04-01).key EXPRESSION []
+POSTHOOK: Lineage: add_part_test_n1 PARTITION(ds=2010-04-01).value EXPRESSION []
+PREHOOK: query: select * from add_part_test_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@add_part_test
-PREHOOK: Input: default@add_part_test@ds=2010-01-01
-PREHOOK: Input: default@add_part_test@ds=2010-02-01
-PREHOOK: Input: default@add_part_test@ds=2010-03-01
-PREHOOK: Input: default@add_part_test@ds=2010-04-01
+PREHOOK: Input: default@add_part_test_n1
+PREHOOK: Input: default@add_part_test_n1@ds=2010-01-01
+PREHOOK: Input: default@add_part_test_n1@ds=2010-02-01
+PREHOOK: Input: default@add_part_test_n1@ds=2010-03-01
+PREHOOK: Input: default@add_part_test_n1@ds=2010-04-01
#### A masked pattern was here ####
-POSTHOOK: query: select * from add_part_test
+POSTHOOK: query: select * from add_part_test_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@add_part_test
-POSTHOOK: Input: default@add_part_test@ds=2010-01-01
-POSTHOOK: Input: default@add_part_test@ds=2010-02-01
-POSTHOOK: Input: default@add_part_test@ds=2010-03-01
-POSTHOOK: Input: default@add_part_test@ds=2010-04-01
+POSTHOOK: Input: default@add_part_test_n1
+POSTHOOK: Input: default@add_part_test_n1@ds=2010-01-01
+POSTHOOK: Input: default@add_part_test_n1@ds=2010-02-01
+POSTHOOK: Input: default@add_part_test_n1@ds=2010-03-01
+POSTHOOK: Input: default@add_part_test_n1@ds=2010-04-01
#### A masked pattern was here ####
100 100 2010-01-01
200 200 2010-02-01
diff --git a/ql/src/test/results/clientpositive/allow_change_col_type_par.q.out b/ql/src/test/results/clientpositive/allow_change_col_type_par.q.out
index d24dfdb551..7f6bc4c768 100644
--- a/ql/src/test/results/clientpositive/allow_change_col_type_par.q.out
+++ b/ql/src/test/results/clientpositive/allow_change_col_type_par.q.out
@@ -1,20 +1,20 @@
-PREHOOK: query: create table t1 (c1 int)
+PREHOOK: query: create table t1_n5 (c1 int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 (c1 int)
+PREHOOK: Output: default@t1_n5
+POSTHOOK: query: create table t1_n5 (c1 int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
+POSTHOOK: Output: default@t1_n5
hive.metastore.disallow.incompatible.col.type.changes=true
metaconf:hive.metastore.disallow.incompatible.col.type.changes=true
hive.metastore.disallow.incompatible.col.type.changes=true
metaconf:hive.metastore.disallow.incompatible.col.type.changes=false
-PREHOOK: query: alter table t1 change column c1 c1 smallint
+PREHOOK: query: alter table t1_n5 change column c1 c1 smallint
PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: alter table t1 change column c1 c1 smallint
+PREHOOK: Input: default@t1_n5
+PREHOOK: Output: default@t1_n5
+POSTHOOK: query: alter table t1_n5 change column c1 c1 smallint
POSTHOOK: type: ALTERTABLE_RENAMECOL
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
+POSTHOOK: Input: default@t1_n5
+POSTHOOK: Output: default@t1_n5
diff --git a/ql/src/test/results/clientpositive/alter5.q.out b/ql/src/test/results/clientpositive/alter5.q.out
index 5874f9f55e..61a04f2702 100644
--- a/ql/src/test/results/clientpositive/alter5.q.out
+++ b/ql/src/test/results/clientpositive/alter5.q.out
@@ -14,29 +14,29 @@ POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite in
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@alter5_src
-PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: query: create table alter5_n1 ( col1 string ) partitioned by (dt string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@alter5
-POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: Output: default@alter5_n1
+POSTHOOK: query: create table alter5_n1 ( col1 string ) partitioned by (dt string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@alter5
+POSTHOOK: Output: default@alter5_n1
-PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+PREHOOK: query: alter table alter5_n1 add partition (dt='a') location 'parta'
PREHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-PREHOOK: Output: default@alter5
-POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+PREHOOK: Output: default@alter5_n1
+POSTHOOK: query: alter table alter5_n1 add partition (dt='a') location 'parta'
POSTHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-POSTHOOK: Output: default@alter5
-POSTHOOK: Output: default@alter5@dt=a
-PREHOOK: query: describe extended alter5 partition (dt='a')
+POSTHOOK: Output: default@alter5_n1
+POSTHOOK: Output: default@alter5_n1@dt=a
+PREHOOK: query: describe extended alter5_n1 partition (dt='a')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@alter5
-POSTHOOK: query: describe extended alter5 partition (dt='a')
+PREHOOK: Input: default@alter5_n1
+POSTHOOK: query: describe extended alter5_n1 partition (dt='a')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@alter5
+POSTHOOK: Input: default@alter5_n1
col1 string
dt string
@@ -45,24 +45,24 @@ dt string
dt string
#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table alter5 partition (dt='a') select col1 from alter5_src
+PREHOOK: query: insert overwrite table alter5_n1 partition (dt='a') select col1 from alter5_src
PREHOOK: type: QUERY
PREHOOK: Input: default@alter5_src
-PREHOOK: Output: default@alter5@dt=a
-POSTHOOK: query: insert overwrite table alter5 partition (dt='a') select col1 from alter5_src
+PREHOOK: Output: default@alter5_n1@dt=a
+POSTHOOK: query: insert overwrite table alter5_n1 partition (dt='a') select col1 from alter5_src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alter5_src
-POSTHOOK: Output: default@alter5@dt=a
-POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(alter5_src)alter5_src.FieldSchema(name:col1, type:string, comment:null), ]
-PREHOOK: query: select * from alter5 where dt='a'
+POSTHOOK: Output: default@alter5_n1@dt=a
+POSTHOOK: Lineage: alter5_n1 PARTITION(dt=a).col1 SIMPLE [(alter5_src)alter5_src.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: select * from alter5_n1 where dt='a'
PREHOOK: type: QUERY
-PREHOOK: Input: default@alter5
-PREHOOK: Input: default@alter5@dt=a
+PREHOOK: Input: default@alter5_n1
+PREHOOK: Input: default@alter5_n1@dt=a
#### A masked pattern was here ####
-POSTHOOK: query: select * from alter5 where dt='a'
+POSTHOOK: query: select * from alter5_n1 where dt='a'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alter5
-POSTHOOK: Input: default@alter5@dt=a
+POSTHOOK: Input: default@alter5_n1
+POSTHOOK: Input: default@alter5_n1@dt=a
#### A masked pattern was here ####
1 a
2 a
@@ -70,12 +70,12 @@ POSTHOOK: Input: default@alter5@dt=a
4 a
5 a
6 a
-PREHOOK: query: describe extended alter5 partition (dt='a')
+PREHOOK: query: describe extended alter5_n1 partition (dt='a')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@alter5
-POSTHOOK: query: describe extended alter5 partition (dt='a')
+PREHOOK: Input: default@alter5_n1
+POSTHOOK: query: describe extended alter5_n1 partition (dt='a')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@alter5
+POSTHOOK: Input: default@alter5_n1
col1 string
dt string
@@ -92,14 +92,14 @@ POSTHOOK: query: DROP TABLE alter5_src
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@alter5_src
POSTHOOK: Output: default@alter5_src
-PREHOOK: query: DROP TABLE alter5
+PREHOOK: query: DROP TABLE alter5_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@alter5
-PREHOOK: Output: default@alter5
-POSTHOOK: query: DROP TABLE alter5
+PREHOOK: Input: default@alter5_n1
+PREHOOK: Output: default@alter5_n1
+POSTHOOK: query: DROP TABLE alter5_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@alter5
-POSTHOOK: Output: default@alter5
+POSTHOOK: Input: default@alter5_n1
+POSTHOOK: Output: default@alter5_n1
PREHOOK: query: SHOW TABLES LIKE "alter*"
PREHOOK: type: SHOWTABLES
PREHOOK: Input: database:default
@@ -140,29 +140,29 @@ POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite in
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: alter5_db@alter5_src
-PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: query: create table alter5_n1 ( col1 string ) partitioned by (dt string)
PREHOOK: type: CREATETABLE
-PREHOOK: Output: alter5_db@alter5
+PREHOOK: Output: alter5_db@alter5_n1
PREHOOK: Output: database:alter5_db
-POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+POSTHOOK: query: create table alter5_n1 ( col1 string ) partitioned by (dt string)
POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: alter5_db@alter5
+POSTHOOK: Output: alter5_db@alter5_n1
POSTHOOK: Output: database:alter5_db
-PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+PREHOOK: query: alter table alter5_n1 add partition (dt='a')
location 'parta' PREHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### -PREHOOK: Output: alter5_db@alter5 -POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta' +PREHOOK: Output: alter5_db@alter5_n1 +POSTHOOK: query: alter table alter5_n1 add partition (dt='a') location 'parta' POSTHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### -POSTHOOK: Output: alter5_db@alter5 -POSTHOOK: Output: alter5_db@alter5@dt=a -PREHOOK: query: describe extended alter5 partition (dt='a') +POSTHOOK: Output: alter5_db@alter5_n1 +POSTHOOK: Output: alter5_db@alter5_n1@dt=a +PREHOOK: query: describe extended alter5_n1 partition (dt='a') PREHOOK: type: DESCTABLE -PREHOOK: Input: alter5_db@alter5 -POSTHOOK: query: describe extended alter5 partition (dt='a') +PREHOOK: Input: alter5_db@alter5_n1 +POSTHOOK: query: describe extended alter5_n1 partition (dt='a') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: alter5_db@alter5 +POSTHOOK: Input: alter5_db@alter5_n1 col1 string dt string @@ -171,24 +171,24 @@ dt string dt string #### A masked pattern was here #### -PREHOOK: query: insert overwrite table alter5 partition (dt='a') select col1 from alter5_src +PREHOOK: query: insert overwrite table alter5_n1 partition (dt='a') select col1 from alter5_src PREHOOK: type: QUERY PREHOOK: Input: alter5_db@alter5_src -PREHOOK: Output: alter5_db@alter5@dt=a -POSTHOOK: query: insert overwrite table alter5 partition (dt='a') select col1 from alter5_src +PREHOOK: Output: alter5_db@alter5_n1@dt=a +POSTHOOK: query: insert overwrite table alter5_n1 partition (dt='a') select col1 from alter5_src POSTHOOK: type: QUERY POSTHOOK: Input: alter5_db@alter5_src -POSTHOOK: Output: alter5_db@alter5@dt=a -POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(alter5_src)alter5_src.FieldSchema(name:col1, type:string, comment:null), ] -PREHOOK: query: select * from alter5 where dt='a' +POSTHOOK: Output: alter5_db@alter5_n1@dt=a +POSTHOOK: Lineage: alter5_n1 PARTITION(dt=a).col1 SIMPLE [(alter5_src)alter5_src.FieldSchema(name:col1, type:string, comment:null), ] +PREHOOK: query: select * from alter5_n1 where dt='a' PREHOOK: type: QUERY -PREHOOK: Input: alter5_db@alter5 -PREHOOK: Input: alter5_db@alter5@dt=a +PREHOOK: Input: alter5_db@alter5_n1 +PREHOOK: Input: alter5_db@alter5_n1@dt=a #### A masked pattern was here #### -POSTHOOK: query: select * from alter5 where dt='a' +POSTHOOK: query: select * from alter5_n1 where dt='a' POSTHOOK: type: QUERY -POSTHOOK: Input: alter5_db@alter5 -POSTHOOK: Input: alter5_db@alter5@dt=a +POSTHOOK: Input: alter5_db@alter5_n1 +POSTHOOK: Input: alter5_db@alter5_n1@dt=a #### A masked pattern was here #### 1 a 2 a @@ -196,12 +196,12 @@ POSTHOOK: Input: alter5_db@alter5@dt=a 4 a 5 a 6 a -PREHOOK: query: describe extended alter5 partition (dt='a') +PREHOOK: query: describe extended alter5_n1 partition (dt='a') PREHOOK: type: DESCTABLE -PREHOOK: Input: alter5_db@alter5 -POSTHOOK: query: describe extended alter5 partition (dt='a') +PREHOOK: Input: alter5_db@alter5_n1 +POSTHOOK: query: describe extended alter5_n1 partition (dt='a') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: alter5_db@alter5 +POSTHOOK: Input: alter5_db@alter5_n1 col1 string dt string diff --git a/ql/src/test/results/clientpositive/alter_change_db_location.q.out b/ql/src/test/results/clientpositive/alter_change_db_location.q.out index d511b16cb8..93e46782e5 100644 --- a/ql/src/test/results/clientpositive/alter_change_db_location.q.out +++ b/ql/src/test/results/clientpositive/alter_change_db_location.q.out @@ 
-18,20 +18,20 @@ PREHOOK: Input: database:newdb POSTHOOK: query: use newDB POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:newdb -PREHOOK: query: create table tab (name string) +PREHOOK: query: create table tab_n13 (name string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:newdb -PREHOOK: Output: newDB@tab -POSTHOOK: query: create table tab (name string) +PREHOOK: Output: newDB@tab_n13 +POSTHOOK: query: create table tab_n13 (name string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:newdb -POSTHOOK: Output: newDB@tab -PREHOOK: query: alter table tab rename to newName +POSTHOOK: Output: newDB@tab_n13 +PREHOOK: query: alter table tab_n13 rename to newName PREHOOK: type: ALTERTABLE_RENAME -PREHOOK: Input: newdb@tab -PREHOOK: Output: newdb@tab -POSTHOOK: query: alter table tab rename to newName +PREHOOK: Input: newdb@tab_n13 +PREHOOK: Output: newdb@tab_n13 +POSTHOOK: query: alter table tab_n13 rename to newName POSTHOOK: type: ALTERTABLE_RENAME -POSTHOOK: Input: newdb@tab +POSTHOOK: Input: newdb@tab_n13 POSTHOOK: Output: newDB@newName -POSTHOOK: Output: newdb@tab +POSTHOOK: Output: newdb@tab_n13 diff --git a/ql/src/test/results/clientpositive/alter_file_format.q.out b/ql/src/test/results/clientpositive/alter_file_format.q.out index e1a75a0889..7623e70416 100644 --- a/ql/src/test/results/clientpositive/alter_file_format.q.out +++ b/ql/src/test/results/clientpositive/alter_file_format.q.out @@ -305,31 +305,31 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table alter_partition_format_test +PREHOOK: query: drop table alter_partition_format_test_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table alter_partition_format_test +POSTHOOK: query: drop table alter_partition_format_test_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table alter_partition_format_test (key int, value string) partitioned by (ds string) +PREHOOK: query: create table alter_partition_format_test_n0 (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@alter_partition_format_test -POSTHOOK: query: create table alter_partition_format_test (key int, value string) partitioned by (ds string) +PREHOOK: Output: default@alter_partition_format_test_n0 +POSTHOOK: query: create table alter_partition_format_test_n0 (key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@alter_partition_format_test -PREHOOK: query: alter table alter_partition_format_test add partition(ds='2010') +POSTHOOK: Output: default@alter_partition_format_test_n0 +PREHOOK: query: alter table alter_partition_format_test_n0 add partition(ds='2010') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@alter_partition_format_test -POSTHOOK: query: alter table alter_partition_format_test add partition(ds='2010') +PREHOOK: Output: default@alter_partition_format_test_n0 +POSTHOOK: query: alter table alter_partition_format_test_n0 add partition(ds='2010') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@alter_partition_format_test -POSTHOOK: Output: default@alter_partition_format_test@ds=2010 -PREHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +POSTHOOK: Output: default@alter_partition_format_test_n0 +POSTHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +PREHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') PREHOOK: type: 
DESCTABLE -PREHOOK: Input: default@alter_partition_format_test -POSTHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +PREHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Input: default@alter_partition_format_test_n0 # col_name data_type comment key int value string @@ -341,7 +341,7 @@ ds string # Detailed Partition Information Partition Value: [2010] Database: default -Table: alter_partition_format_test +Table: alter_partition_format_test_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -361,21 +361,21 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile +PREHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat rcfile PREHOOK: type: ALTERPARTITION_FILEFORMAT -PREHOOK: Input: default@alter_partition_format_test -PREHOOK: Output: default@alter_partition_format_test@ds=2010 -POSTHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile +PREHOOK: Input: default@alter_partition_format_test_n0 +PREHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat rcfile POSTHOOK: type: ALTERPARTITION_FILEFORMAT -POSTHOOK: Input: default@alter_partition_format_test -POSTHOOK: Input: default@alter_partition_format_test@ds=2010 -POSTHOOK: Output: default@alter_partition_format_test@ds=2010 -PREHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +POSTHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: Input: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +PREHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@alter_partition_format_test -POSTHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +PREHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Input: default@alter_partition_format_test_n0 # col_name data_type comment key int value string @@ -387,7 +387,7 @@ ds string # Detailed Partition Information Partition Value: [2010] Database: default -Table: alter_partition_format_test +Table: alter_partition_format_test_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -408,21 +408,21 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat textfile +PREHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat textfile PREHOOK: type: ALTERPARTITION_FILEFORMAT -PREHOOK: Input: default@alter_partition_format_test -PREHOOK: Output: default@alter_partition_format_test@ds=2010 -POSTHOOK: query: alter table alter_partition_format_test partition(ds='2010') set 
fileformat textfile +PREHOOK: Input: default@alter_partition_format_test_n0 +PREHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat textfile POSTHOOK: type: ALTERPARTITION_FILEFORMAT -POSTHOOK: Input: default@alter_partition_format_test -POSTHOOK: Input: default@alter_partition_format_test@ds=2010 -POSTHOOK: Output: default@alter_partition_format_test@ds=2010 -PREHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +POSTHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: Input: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +PREHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@alter_partition_format_test -POSTHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +PREHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Input: default@alter_partition_format_test_n0 # col_name data_type comment key int value string @@ -434,7 +434,7 @@ ds string # Detailed Partition Information Partition Value: [2010] Database: default -Table: alter_partition_format_test +Table: alter_partition_format_test_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -455,21 +455,21 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile +PREHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat rcfile PREHOOK: type: ALTERPARTITION_FILEFORMAT -PREHOOK: Input: default@alter_partition_format_test -PREHOOK: Output: default@alter_partition_format_test@ds=2010 -POSTHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile +PREHOOK: Input: default@alter_partition_format_test_n0 +PREHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat rcfile POSTHOOK: type: ALTERPARTITION_FILEFORMAT -POSTHOOK: Input: default@alter_partition_format_test -POSTHOOK: Input: default@alter_partition_format_test@ds=2010 -POSTHOOK: Output: default@alter_partition_format_test@ds=2010 -PREHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +POSTHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: Input: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +PREHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@alter_partition_format_test -POSTHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +PREHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Input: default@alter_partition_format_test_n0 # col_name data_type comment key int value string @@ -481,7 +481,7 @@ ds string # Detailed Partition 
Information Partition Value: [2010] Database: default -Table: alter_partition_format_test +Table: alter_partition_format_test_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -502,21 +502,21 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat sequencefile +PREHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat sequencefile PREHOOK: type: ALTERPARTITION_FILEFORMAT -PREHOOK: Input: default@alter_partition_format_test -PREHOOK: Output: default@alter_partition_format_test@ds=2010 -POSTHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat sequencefile +PREHOOK: Input: default@alter_partition_format_test_n0 +PREHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat sequencefile POSTHOOK: type: ALTERPARTITION_FILEFORMAT -POSTHOOK: Input: default@alter_partition_format_test -POSTHOOK: Input: default@alter_partition_format_test@ds=2010 -POSTHOOK: Output: default@alter_partition_format_test@ds=2010 -PREHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +POSTHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: Input: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +PREHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@alter_partition_format_test -POSTHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +PREHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Input: default@alter_partition_format_test_n0 # col_name data_type comment key int value string @@ -528,7 +528,7 @@ ds string # Detailed Partition Information Partition Value: [2010] Database: default -Table: alter_partition_format_test +Table: alter_partition_format_test_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -549,21 +549,21 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat parquet +PREHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat parquet PREHOOK: type: ALTERPARTITION_FILEFORMAT -PREHOOK: Input: default@alter_partition_format_test -PREHOOK: Output: default@alter_partition_format_test@ds=2010 -POSTHOOK: query: alter table alter_partition_format_test partition(ds='2010') set fileformat parquet +PREHOOK: Input: default@alter_partition_format_test_n0 +PREHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: query: alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat parquet POSTHOOK: type: ALTERPARTITION_FILEFORMAT -POSTHOOK: Input: default@alter_partition_format_test -POSTHOOK: Input: default@alter_partition_format_test@ds=2010 -POSTHOOK: Output: default@alter_partition_format_test@ds=2010 -PREHOOK: query: desc FORMATTED 
alter_partition_format_test partition(ds='2010') +POSTHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: Input: default@alter_partition_format_test_n0@ds=2010 +POSTHOOK: Output: default@alter_partition_format_test_n0@ds=2010 +PREHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@alter_partition_format_test -POSTHOOK: query: desc FORMATTED alter_partition_format_test partition(ds='2010') +PREHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: query: desc FORMATTED alter_partition_format_test_n0 partition(ds='2010') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@alter_partition_format_test +POSTHOOK: Input: default@alter_partition_format_test_n0 # col_name data_type comment key int value string @@ -575,7 +575,7 @@ ds string # Detailed Partition Information Partition Value: [2010] Database: default -Table: alter_partition_format_test +Table: alter_partition_format_test_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -596,11 +596,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table alter_partition_format_test +PREHOOK: query: drop table alter_partition_format_test_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@alter_partition_format_test -PREHOOK: Output: default@alter_partition_format_test -POSTHOOK: query: drop table alter_partition_format_test +PREHOOK: Input: default@alter_partition_format_test_n0 +PREHOOK: Output: default@alter_partition_format_test_n0 +POSTHOOK: query: drop table alter_partition_format_test_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@alter_partition_format_test -POSTHOOK: Output: default@alter_partition_format_test +POSTHOOK: Input: default@alter_partition_format_test_n0 +POSTHOOK: Output: default@alter_partition_format_test_n0 diff --git a/ql/src/test/results/clientpositive/alter_merge.q.out b/ql/src/test/results/clientpositive/alter_merge.q.out index 17d86b83ba..3e924b3ec9 100644 --- a/ql/src/test/results/clientpositive/alter_merge.q.out +++ b/ql/src/test/results/clientpositive/alter_merge.q.out @@ -1,40 +1,40 @@ -PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile +PREHOOK: query: create table src_rc_merge_test_n2(key int, value string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_rc_merge_test -POSTHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile +PREHOOK: Output: default@src_rc_merge_test_n2 +POSTHOOK: query: create table src_rc_merge_test_n2(key int, value string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_rc_merge_test -PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test +POSTHOOK: Output: default@src_rc_merge_test_n2 +PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test +PREHOOK: Output: default@src_rc_merge_test_n2 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n2 POSTHOOK: type: LOAD #### A masked pattern was 
here #### -POSTHOOK: Output: default@src_rc_merge_test -PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test +POSTHOOK: Output: default@src_rc_merge_test_n2 +PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test +PREHOOK: Output: default@src_rc_merge_test_n2 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_rc_merge_test -PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test +POSTHOOK: Output: default@src_rc_merge_test_n2 +PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test +PREHOOK: Output: default@src_rc_merge_test_n2 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_rc_merge_test -PREHOOK: query: show table extended like `src_rc_merge_test` +POSTHOOK: Output: default@src_rc_merge_test_n2 +PREHOOK: query: show table extended like `src_rc_merge_test_n2` PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like `src_rc_merge_test` +POSTHOOK: query: show table extended like `src_rc_merge_test_n2` POSTHOOK: type: SHOW_TABLESTATUS -tableName:src_rc_merge_test +tableName:src_rc_merge_test_n2 #### A masked pattern was here #### inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat @@ -47,37 +47,37 @@ maxFileSize:222 minFileSize:206 #### A masked pattern was here #### -PREHOOK: query: select count(1) from src_rc_merge_test +PREHOOK: query: select count(1) from src_rc_merge_test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test +PREHOOK: Input: default@src_rc_merge_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from src_rc_merge_test +POSTHOOK: query: select count(1) from src_rc_merge_test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test +POSTHOOK: Input: default@src_rc_merge_test_n2 #### A masked pattern was here #### 15 -PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test +PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test +PREHOOK: Input: default@src_rc_merge_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test +POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test +POSTHOOK: Input: default@src_rc_merge_test_n2 #### A masked pattern was here #### 214 -7678496319 -PREHOOK: query: alter table src_rc_merge_test concatenate +PREHOOK: query: alter table src_rc_merge_test_n2 concatenate PREHOOK: type: ALTER_TABLE_MERGE -PREHOOK: Input: default@src_rc_merge_test -PREHOOK: Output: 
default@src_rc_merge_test -POSTHOOK: query: alter table src_rc_merge_test concatenate +PREHOOK: Input: default@src_rc_merge_test_n2 +PREHOOK: Output: default@src_rc_merge_test_n2 +POSTHOOK: query: alter table src_rc_merge_test_n2 concatenate POSTHOOK: type: ALTER_TABLE_MERGE -POSTHOOK: Input: default@src_rc_merge_test -POSTHOOK: Output: default@src_rc_merge_test -PREHOOK: query: show table extended like `src_rc_merge_test` +POSTHOOK: Input: default@src_rc_merge_test_n2 +POSTHOOK: Output: default@src_rc_merge_test_n2 +PREHOOK: query: show table extended like `src_rc_merge_test_n2` PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like `src_rc_merge_test` +POSTHOOK: query: show table extended like `src_rc_merge_test_n2` POSTHOOK: type: SHOW_TABLESTATUS -tableName:src_rc_merge_test +tableName:src_rc_merge_test_n2 #### A masked pattern was here #### inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat @@ -90,68 +90,68 @@ maxFileSize:239 minFileSize:239 #### A masked pattern was here #### -PREHOOK: query: select count(1) from src_rc_merge_test +PREHOOK: query: select count(1) from src_rc_merge_test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test +PREHOOK: Input: default@src_rc_merge_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from src_rc_merge_test +POSTHOOK: query: select count(1) from src_rc_merge_test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test +POSTHOOK: Input: default@src_rc_merge_test_n2 #### A masked pattern was here #### 15 -PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test +PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test +PREHOOK: Input: default@src_rc_merge_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test +POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test +POSTHOOK: Input: default@src_rc_merge_test_n2 #### A masked pattern was here #### 214 -7678496319 -PREHOOK: query: create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table src_rc_merge_test_part_n0(key int, value string) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_rc_merge_test_part -POSTHOOK: query: create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile +PREHOOK: Output: default@src_rc_merge_test_part_n0 +POSTHOOK: query: create table src_rc_merge_test_part_n0(key int, value string) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_rc_merge_test_part -PREHOOK: query: alter table src_rc_merge_test_part add partition (ds='2011') +POSTHOOK: Output: default@src_rc_merge_test_part_n0 +PREHOOK: query: alter table src_rc_merge_test_part_n0 add partition (ds='2011') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@src_rc_merge_test_part -POSTHOOK: query: alter table src_rc_merge_test_part add partition (ds='2011') +PREHOOK: Output: default@src_rc_merge_test_part_n0 +POSTHOOK: query: alter table src_rc_merge_test_part_n0 add partition (ds='2011') POSTHOOK: type: 
ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@src_rc_merge_test_part -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011 -PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011') +POSTHOOK: Output: default@src_rc_merge_test_part_n0 +POSTHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_n0 partition (ds='2011') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test_part@ds=2011 -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011') +PREHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_n0 partition (ds='2011') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011 -PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011') +POSTHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_n0 partition (ds='2011') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test_part@ds=2011 -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011') +PREHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_n0 partition (ds='2011') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011 -PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011') +POSTHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_n0 partition (ds='2011') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test_part@ds=2011 -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011') +PREHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_n0 partition (ds='2011') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011 -PREHOOK: query: show table extended like `src_rc_merge_test_part` partition (ds='2011') +POSTHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +PREHOOK: query: show table extended like `src_rc_merge_test_part_n0` partition (ds='2011') PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like `src_rc_merge_test_part` partition (ds='2011') +POSTHOOK: query: show table extended like `src_rc_merge_test_part_n0` partition (ds='2011') POSTHOOK: type: SHOW_TABLESTATUS -tableName:src_rc_merge_test_part +tableName:src_rc_merge_test_part_n0 #### A masked pattern was here #### inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat @@ -164,41 +164,41 @@ 
maxFileSize:222 minFileSize:206 #### A masked pattern was here #### -PREHOOK: query: select count(1) from src_rc_merge_test_part +PREHOOK: query: select count(1) from src_rc_merge_test_part_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Input: default@src_rc_merge_test_part@ds=2011 +PREHOOK: Input: default@src_rc_merge_test_part_n0 +PREHOOK: Input: default@src_rc_merge_test_part_n0@ds=2011 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from src_rc_merge_test_part +POSTHOOK: query: select count(1) from src_rc_merge_test_part_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011 +POSTHOOK: Input: default@src_rc_merge_test_part_n0 +POSTHOOK: Input: default@src_rc_merge_test_part_n0@ds=2011 #### A masked pattern was here #### 15 -PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part +PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Input: default@src_rc_merge_test_part@ds=2011 +PREHOOK: Input: default@src_rc_merge_test_part_n0 +PREHOOK: Input: default@src_rc_merge_test_part_n0@ds=2011 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part +POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011 +POSTHOOK: Input: default@src_rc_merge_test_part_n0 +POSTHOOK: Input: default@src_rc_merge_test_part_n0@ds=2011 #### A masked pattern was here #### 214 -7678496319 -PREHOOK: query: alter table src_rc_merge_test_part partition (ds='2011') concatenate +PREHOOK: query: alter table src_rc_merge_test_part_n0 partition (ds='2011') concatenate PREHOOK: type: ALTER_PARTITION_MERGE -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Output: default@src_rc_merge_test_part@ds=2011 -POSTHOOK: query: alter table src_rc_merge_test_part partition (ds='2011') concatenate +PREHOOK: Input: default@src_rc_merge_test_part_n0 +PREHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +POSTHOOK: query: alter table src_rc_merge_test_part_n0 partition (ds='2011') concatenate POSTHOOK: type: ALTER_PARTITION_MERGE -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2011 -PREHOOK: query: show table extended like `src_rc_merge_test_part` partition (ds='2011') +POSTHOOK: Input: default@src_rc_merge_test_part_n0 +POSTHOOK: Output: default@src_rc_merge_test_part_n0@ds=2011 +PREHOOK: query: show table extended like `src_rc_merge_test_part_n0` partition (ds='2011') PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like `src_rc_merge_test_part` partition (ds='2011') +POSTHOOK: query: show table extended like `src_rc_merge_test_part_n0` partition (ds='2011') POSTHOOK: type: SHOW_TABLESTATUS -tableName:src_rc_merge_test_part +tableName:src_rc_merge_test_part_n0 #### A masked pattern was here #### inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat @@ -211,41 +211,41 @@ maxFileSize:239 minFileSize:239 #### A masked pattern was here #### -PREHOOK: query: select count(1) from src_rc_merge_test_part +PREHOOK: query: select count(1) from src_rc_merge_test_part_n0 PREHOOK: type: QUERY -PREHOOK: Input: 
default@src_rc_merge_test_part -PREHOOK: Input: default@src_rc_merge_test_part@ds=2011 +PREHOOK: Input: default@src_rc_merge_test_part_n0 +PREHOOK: Input: default@src_rc_merge_test_part_n0@ds=2011 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from src_rc_merge_test_part +POSTHOOK: query: select count(1) from src_rc_merge_test_part_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011 +POSTHOOK: Input: default@src_rc_merge_test_part_n0 +POSTHOOK: Input: default@src_rc_merge_test_part_n0@ds=2011 #### A masked pattern was here #### 15 -PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part +PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Input: default@src_rc_merge_test_part@ds=2011 +PREHOOK: Input: default@src_rc_merge_test_part_n0 +PREHOOK: Input: default@src_rc_merge_test_part_n0@ds=2011 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part +POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Input: default@src_rc_merge_test_part@ds=2011 +POSTHOOK: Input: default@src_rc_merge_test_part_n0 +POSTHOOK: Input: default@src_rc_merge_test_part_n0@ds=2011 #### A masked pattern was here #### 214 -7678496319 -PREHOOK: query: drop table src_rc_merge_test +PREHOOK: query: drop table src_rc_merge_test_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@src_rc_merge_test -PREHOOK: Output: default@src_rc_merge_test -POSTHOOK: query: drop table src_rc_merge_test +PREHOOK: Input: default@src_rc_merge_test_n2 +PREHOOK: Output: default@src_rc_merge_test_n2 +POSTHOOK: query: drop table src_rc_merge_test_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@src_rc_merge_test -POSTHOOK: Output: default@src_rc_merge_test -PREHOOK: query: drop table src_rc_merge_test_part +POSTHOOK: Input: default@src_rc_merge_test_n2 +POSTHOOK: Output: default@src_rc_merge_test_n2 +PREHOOK: query: drop table src_rc_merge_test_part_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Output: default@src_rc_merge_test_part -POSTHOOK: query: drop table src_rc_merge_test_part +PREHOOK: Input: default@src_rc_merge_test_part_n0 +PREHOOK: Output: default@src_rc_merge_test_part_n0 +POSTHOOK: query: drop table src_rc_merge_test_part_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Output: default@src_rc_merge_test_part +POSTHOOK: Input: default@src_rc_merge_test_part_n0 +POSTHOOK: Output: default@src_rc_merge_test_part_n0 diff --git a/ql/src/test/results/clientpositive/alter_merge_2.q.out b/ql/src/test/results/clientpositive/alter_merge_2.q.out index 5f5778ebbe..d9ff5369a7 100644 --- a/ql/src/test/results/clientpositive/alter_merge_2.q.out +++ b/ql/src/test/results/clientpositive/alter_merge_2.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: create table src_rc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as rcfile +PREHOOK: query: create table src_rc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_rc_merge_test_part -POSTHOOK: query: create table 
src_rc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as rcfile +PREHOOK: Output: default@src_rc_merge_test_part_n1 +POSTHOOK: query: create table src_rc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_rc_merge_test_part -PREHOOK: query: alter table src_rc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +POSTHOOK: Output: default@src_rc_merge_test_part_n1 +PREHOOK: query: alter table src_rc_merge_test_part_n1 add partition (ds='2012-01-03', ts='2012-01-03+14:46:31') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@src_rc_merge_test_part -POSTHOOK: query: alter table src_rc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +PREHOOK: Output: default@src_rc_merge_test_part_n1 +POSTHOOK: query: alter table src_rc_merge_test_part_n1 add partition (ds='2012-01-03', ts='2012-01-03+14:46:31') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@src_rc_merge_test_part -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -PREHOOK: query: desc extended src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +POSTHOOK: Output: default@src_rc_merge_test_part_n1 +POSTHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: query: desc extended src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: query: desc extended src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +PREHOOK: Input: default@src_rc_merge_test_part_n1 +POSTHOOK: query: desc extended src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_rc_merge_test_part +POSTHOOK: Input: default@src_rc_merge_test_part_n1 key int value string ds string @@ -30,87 +30,87 @@ ds string ts string #### A masked pattern was here #### -PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +PREHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +POSTHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table 
src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +PREHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +POSTHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') +PREHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -PREHOOK: query: select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' +POSTHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: query: select count(1) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31' PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Input: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: Input: default@src_rc_merge_test_part_n1 +PREHOOK: Input: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' +POSTHOOK: query: select count(1) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31' POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Input: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +POSTHOOK: Input: default@src_rc_merge_test_part_n1 +POSTHOOK: Input: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 #### A masked pattern was here #### 15 -PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' +PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31' PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Input: 
default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: Input: default@src_rc_merge_test_part_n1 +PREHOOK: Input: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' +POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31' POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Input: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +POSTHOOK: Input: default@src_rc_merge_test_part_n1 +POSTHOOK: Input: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 #### A masked pattern was here #### 214 -7678496319 -PREHOOK: query: alter table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate +PREHOOK: query: alter table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate PREHOOK: type: ALTER_PARTITION_MERGE -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -POSTHOOK: query: alter table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate +PREHOOK: Input: default@src_rc_merge_test_part_n1 +PREHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +POSTHOOK: query: alter table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate POSTHOOK: type: ALTER_PARTITION_MERGE -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Output: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 -PREHOOK: query: select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' +POSTHOOK: Input: default@src_rc_merge_test_part_n1 +POSTHOOK: Output: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: query: select count(1) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31' PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Input: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: Input: default@src_rc_merge_test_part_n1 +PREHOOK: Input: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' +POSTHOOK: query: select count(1) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31' POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Input: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +POSTHOOK: Input: default@src_rc_merge_test_part_n1 +POSTHOOK: Input: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 #### A masked pattern was here #### 15 -PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' +PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31' PREHOOK: type: QUERY -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Input: 
default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +PREHOOK: Input: default@src_rc_merge_test_part_n1 +PREHOOK: Input: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' +POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31' POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Input: default@src_rc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 +POSTHOOK: Input: default@src_rc_merge_test_part_n1 +POSTHOOK: Input: default@src_rc_merge_test_part_n1@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31 #### A masked pattern was here #### 214 -7678496319 -PREHOOK: query: drop table src_rc_merge_test_part +PREHOOK: query: drop table src_rc_merge_test_part_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@src_rc_merge_test_part -PREHOOK: Output: default@src_rc_merge_test_part -POSTHOOK: query: drop table src_rc_merge_test_part +PREHOOK: Input: default@src_rc_merge_test_part_n1 +PREHOOK: Output: default@src_rc_merge_test_part_n1 +POSTHOOK: query: drop table src_rc_merge_test_part_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@src_rc_merge_test_part -POSTHOOK: Output: default@src_rc_merge_test_part +POSTHOOK: Input: default@src_rc_merge_test_part_n1 +POSTHOOK: Output: default@src_rc_merge_test_part_n1 diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out index 58d803fd0b..e6de784399 100644 --- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE tst1_n0(key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tst1 -POSTHOOK: query: CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: CREATE TABLE tst1_n0(key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -46,22 +46,22 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') 
SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -73,7 +73,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -93,20 +93,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS +PREHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (key) INTO 8 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS +PREHOOK: Input: default@tst1_n0 +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (key) INTO 8 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: default@tst1_n0 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -142,22 +142,22 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: 
default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -169,7 +169,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -189,20 +189,20 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS +PREHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS +PREHOOK: Input: default@tst1_n0 +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: default@tst1_n0 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -238,22 +238,22 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION 
(ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -265,7 +265,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -285,20 +285,20 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS +PREHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS +PREHOOK: Input: default@tst1_n0 +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: default@tst1_n0 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -334,22 +334,22 @@ Bucket Columns: [value] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 
+POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -361,7 +361,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -381,20 +381,20 @@ Bucket Columns: [value] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS +PREHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS +PREHOOK: Input: default@tst1_n0 +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: default@tst1_n0 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -430,22 +430,22 @@ Bucket Columns: [value] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -457,7 +457,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: 
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -477,20 +477,20 @@ Bucket Columns: [value] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS +PREHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS +PREHOOK: Input: default@tst1_n0 +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: default@tst1_n0 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -526,22 +526,22 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -553,7 +553,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -573,20 +573,20 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 
CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS +PREHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS +PREHOOK: Input: default@tst1_n0 +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: default@tst1_n0 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -622,22 +622,22 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -649,7 +649,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -669,20 +669,20 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS +PREHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) INTO 4 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 
BUCKETS +PREHOOK: Input: default@tst1_n0 +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: ALTER TABLE tst1_n0 CLUSTERED BY (value) INTO 4 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: default@tst1_n0 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -718,22 +718,22 @@ Bucket Columns: [value] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -745,7 +745,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -765,20 +765,20 @@ Bucket Columns: [value] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 NOT CLUSTERED +PREHOOK: query: ALTER TABLE tst1_n0 NOT CLUSTERED PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 NOT CLUSTERED +PREHOOK: Input: default@tst1_n0 +PREHOOK: Output: default@tst1_n0 +POSTHOOK: query: ALTER TABLE tst1_n0 NOT CLUSTERED POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: default@tst1_n0 +POSTHOOK: Output: default@tst1_n0 +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 
+PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -814,22 +814,22 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n0@ds=1 +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n0 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n0 # col_name data_type comment key string value string @@ -841,7 +841,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out index c0861e553b..c59a5fc939 100644 --- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out @@ -1,25 +1,25 @@ -PREHOOK: query: create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets +PREHOOK: query: create table tst1_n1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tst1 -POSTHOOK: query: create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets +PREHOOK: Output: default@tst1_n1 +POSTHOOK: query: create table tst1_n1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tst1 -PREHOOK: query: alter table tst1 clustered by (key) into 8 buckets +POSTHOOK: Output: default@tst1_n1 +PREHOOK: query: alter table tst1_n1 clustered by (key) into 8 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT 
-PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: alter table tst1 clustered by (key) into 8 buckets +PREHOOK: Input: default@tst1_n1 +PREHOOK: Output: default@tst1_n1 +POSTHOOK: query: alter table tst1_n1 clustered by (key) into 8 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: describe formatted tst1 +POSTHOOK: Input: default@tst1_n1 +POSTHOOK: Output: default@tst1_n1 +PREHOOK: query: describe formatted tst1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -55,22 +55,22 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src +PREHOOK: query: insert overwrite table tst1_n1 partition (ds='1') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src +PREHOOK: Output: default@tst1_n1@ds=1 +POSTHOOK: query: insert overwrite table tst1_n1 partition (ds='1') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted tst1 partition (ds = '1') +POSTHOOK: Output: default@tst1_n1@ds=1 +POSTHOOK: Lineage: tst1_n1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted tst1_n1 partition (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 partition (ds = '1') +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 partition (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -82,7 +82,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -102,30 +102,30 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table tst1 clustered by (key) into 12 buckets +PREHOOK: query: alter table tst1_n1 clustered by (key) into 12 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: alter table tst1 clustered by (key) into 12 buckets +PREHOOK: Input: default@tst1_n1 +PREHOOK: Output: default@tst1_n1 +POSTHOOK: query: alter table tst1_n1 clustered by (key) into 12 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: insert overwrite table tst1 partition 
(ds='1') select key, value from src +POSTHOOK: Input: default@tst1_n1 +POSTHOOK: Output: default@tst1_n1 +PREHOOK: query: insert overwrite table tst1_n1 partition (ds='1') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src +PREHOOK: Output: default@tst1_n1@ds=1 +POSTHOOK: query: insert overwrite table tst1_n1 partition (ds='1') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted tst1 partition (ds = '1') +POSTHOOK: Output: default@tst1_n1@ds=1 +POSTHOOK: Lineage: tst1_n1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted tst1_n1 partition (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 partition (ds = '1') +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 partition (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -137,7 +137,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -157,12 +157,12 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted tst1 +PREHOOK: query: describe formatted tst1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -198,19 +198,19 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table tst1 into 4 buckets +PREHOOK: query: alter table tst1_n1 into 4 buckets PREHOOK: type: ALTERTABLE_BUCKETNUM -PREHOOK: Input: default@tst1 -POSTHOOK: query: alter table tst1 into 4 buckets +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: alter table tst1_n1 into 4 buckets POSTHOOK: type: ALTERTABLE_BUCKETNUM -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: describe formatted tst1 +POSTHOOK: Input: default@tst1_n1 +POSTHOOK: Output: default@tst1_n1 +PREHOOK: query: describe formatted tst1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -246,12 +246,12 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted tst1 partition (ds = 
'1') +PREHOOK: query: describe formatted tst1_n1 partition (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 partition (ds = '1') +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 partition (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -263,7 +263,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -283,20 +283,20 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets +PREHOOK: query: alter table tst1_n1 partition (ds = '1') into 6 buckets PREHOOK: type: ALTERPARTITION_BUCKETNUM -PREHOOK: Input: default@tst1 -POSTHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: alter table tst1_n1 partition (ds = '1') into 6 buckets POSTHOOK: type: ALTERPARTITION_BUCKETNUM -POSTHOOK: Input: default@tst1 -POSTHOOK: Input: default@tst1@ds=1 -POSTHOOK: Output: default@tst1@ds=1 -PREHOOK: query: describe formatted tst1 +POSTHOOK: Input: default@tst1_n1 +POSTHOOK: Input: default@tst1_n1@ds=1 +POSTHOOK: Output: default@tst1_n1@ds=1 +PREHOOK: query: describe formatted tst1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -332,12 +332,12 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted tst1 partition (ds = '1') +PREHOOK: query: describe formatted tst1_n1 partition (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 partition (ds = '1') +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 partition (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -349,7 +349,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -370,20 +370,20 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets +PREHOOK: query: alter table tst1_n1 clustered by (key) sorted by (key asc) into 12 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets +PREHOOK: Input: default@tst1_n1 +PREHOOK: Output: default@tst1_n1 +POSTHOOK: query: alter table tst1_n1 clustered by (key) sorted by (key asc) into 12 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: describe formatted tst1 
+POSTHOOK: Input: default@tst1_n1 +POSTHOOK: Output: default@tst1_n1 +PREHOOK: query: describe formatted tst1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -419,20 +419,20 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets +PREHOOK: query: alter table tst1_n1 clustered by (key) sorted by (value desc) into 12 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets +PREHOOK: Input: default@tst1_n1 +PREHOOK: Output: default@tst1_n1 +POSTHOOK: query: alter table tst1_n1 clustered by (key) sorted by (value desc) into 12 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: describe formatted tst1 +POSTHOOK: Input: default@tst1_n1 +POSTHOOK: Output: default@tst1_n1 +PREHOOK: query: describe formatted tst1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -468,20 +468,20 @@ Bucket Columns: [key] Sort Columns: [Order(col:value, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table tst1 clustered by (value) into 12 buckets +PREHOOK: query: alter table tst1_n1 clustered by (value) into 12 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: alter table tst1 clustered by (value) into 12 buckets +PREHOOK: Input: default@tst1_n1 +PREHOOK: Output: default@tst1_n1 +POSTHOOK: query: alter table tst1_n1 clustered by (value) into 12 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: describe formatted tst1 +POSTHOOK: Input: default@tst1_n1 +POSTHOOK: Output: default@tst1_n1 +PREHOOK: query: describe formatted tst1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string @@ -517,20 +517,20 @@ Bucket Columns: [value] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table tst1 not clustered +PREHOOK: query: alter table tst1_n1 not clustered PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: alter table tst1 not clustered +PREHOOK: Input: default@tst1_n1 +PREHOOK: Output: default@tst1_n1 +POSTHOOK: query: alter table tst1_n1 not clustered POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: describe formatted tst1 +POSTHOOK: Input: default@tst1_n1 +POSTHOOK: Output: default@tst1_n1 +PREHOOK: query: 
describe formatted tst1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: describe formatted tst1 +PREHOOK: Input: default@tst1_n1 +POSTHOOK: query: describe formatted tst1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n1 # col_name data_type comment key string value string diff --git a/ql/src/test/results/clientpositive/alter_partition_with_whitelist.q.out b/ql/src/test/results/clientpositive/alter_partition_with_whitelist.q.out index ff79150e11..4e7499d9c7 100644 --- a/ql/src/test/results/clientpositive/alter_partition_with_whitelist.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_with_whitelist.q.out @@ -1,31 +1,31 @@ -PREHOOK: query: CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE part_whitelist_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@part_whitelist_test -POSTHOOK: query: CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@part_whitelist_test_n0 +POSTHOOK: query: CREATE TABLE part_whitelist_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_whitelist_test -PREHOOK: query: SHOW PARTITIONS part_whitelist_test +POSTHOOK: Output: default@part_whitelist_test_n0 +PREHOOK: query: SHOW PARTITIONS part_whitelist_test_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@part_whitelist_test -POSTHOOK: query: SHOW PARTITIONS part_whitelist_test +PREHOOK: Input: default@part_whitelist_test_n0 +POSTHOOK: query: SHOW PARTITIONS part_whitelist_test_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@part_whitelist_test -PREHOOK: query: ALTER TABLE part_whitelist_test ADD PARTITION (ds='Part') +POSTHOOK: Input: default@part_whitelist_test_n0 +PREHOOK: query: ALTER TABLE part_whitelist_test_n0 ADD PARTITION (ds='Part') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@part_whitelist_test -POSTHOOK: query: ALTER TABLE part_whitelist_test ADD PARTITION (ds='Part') +PREHOOK: Output: default@part_whitelist_test_n0 +POSTHOOK: query: ALTER TABLE part_whitelist_test_n0 ADD PARTITION (ds='Part') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@part_whitelist_test -POSTHOOK: Output: default@part_whitelist_test@ds=Part -PREHOOK: query: ALTER TABLE part_whitelist_test PARTITION (ds='Part') rename to partition (ds='Apart') +POSTHOOK: Output: default@part_whitelist_test_n0 +POSTHOOK: Output: default@part_whitelist_test_n0@ds=Part +PREHOOK: query: ALTER TABLE part_whitelist_test_n0 PARTITION (ds='Part') rename to partition (ds='Apart') PREHOOK: type: ALTERTABLE_RENAMEPART -PREHOOK: Input: default@part_whitelist_test -PREHOOK: Output: default@part_whitelist_test@ds=Part -POSTHOOK: query: ALTER TABLE part_whitelist_test PARTITION (ds='Part') rename to partition (ds='Apart') +PREHOOK: Input: default@part_whitelist_test_n0 +PREHOOK: Output: default@part_whitelist_test_n0@ds=Part +POSTHOOK: query: ALTER TABLE part_whitelist_test_n0 PARTITION (ds='Part') rename to partition (ds='Apart') POSTHOOK: type: ALTERTABLE_RENAMEPART -POSTHOOK: Input: default@part_whitelist_test -POSTHOOK: Input: default@part_whitelist_test@ds=Part -POSTHOOK: Output: default@part_whitelist_test@ds=Apart -POSTHOOK: Output: default@part_whitelist_test@ds=Part +POSTHOOK: Input: 
default@part_whitelist_test_n0 +POSTHOOK: Input: default@part_whitelist_test_n0@ds=Part +POSTHOOK: Output: default@part_whitelist_test_n0@ds=Apart +POSTHOOK: Output: default@part_whitelist_test_n0@ds=Part diff --git a/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out b/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out index b2e327785a..d36fcc42a8 100644 --- a/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out +++ b/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out @@ -1,103 +1,103 @@ -PREHOOK: query: create table src_auth_tmp as select * from src +PREHOOK: query: create table src_auth_tmp_n1 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_auth_tmp -POSTHOOK: query: create table src_auth_tmp as select * from src +PREHOOK: Output: default@src_auth_tmp_n1 +POSTHOOK: query: create table src_auth_tmp_n1 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_auth_tmp -POSTHOOK: Lineage: src_auth_tmp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_auth_tmp.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +POSTHOOK: Output: default@src_auth_tmp_n1 +POSTHOOK: Lineage: src_auth_tmp_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_auth_tmp_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table authorization_part_n1 (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@authorization_part -POSTHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +PREHOOK: Output: default@authorization_part_n1 +POSTHOOK: query: create table authorization_part_n1 (key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@authorization_part -PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +POSTHOOK: Output: default@authorization_part_n1 +PREHOOK: query: ALTER TABLE authorization_part_n1 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@authorization_part -PREHOOK: Output: default@authorization_part -POSTHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +PREHOOK: Input: default@authorization_part_n1 +PREHOOK: Output: default@authorization_part_n1 +POSTHOOK: query: ALTER TABLE authorization_part_n1 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@authorization_part -POSTHOOK: Output: default@authorization_part -PREHOOK: query: grant select on table src_auth_tmp to user hive_test_user +POSTHOOK: Input: default@authorization_part_n1 +POSTHOOK: Output: default@authorization_part_n1 +PREHOOK: query: grant select on table src_auth_tmp_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_auth_tmp -POSTHOOK: query: grant select on table src_auth_tmp to user 
hive_test_user +PREHOOK: Output: default@src_auth_tmp_n1 +POSTHOOK: query: grant select on table src_auth_tmp_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_auth_tmp -PREHOOK: query: grant Create on table authorization_part to user hive_test_user +POSTHOOK: Output: default@src_auth_tmp_n1 +PREHOOK: query: grant Create on table authorization_part_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant Create on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n1 +POSTHOOK: query: grant Create on table authorization_part_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: grant Update on table authorization_part to user hive_test_user +POSTHOOK: Output: default@authorization_part_n1 +PREHOOK: query: grant Update on table authorization_part_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant Update on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n1 +POSTHOOK: query: grant Update on table authorization_part_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: grant Drop on table authorization_part to user hive_test_user +POSTHOOK: Output: default@authorization_part_n1 +PREHOOK: query: grant Drop on table authorization_part_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant Drop on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n1 +POSTHOOK: query: grant Drop on table authorization_part_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: Output: default@authorization_part_n1 +PREHOOK: query: show grant user hive_test_user on table authorization_part_n1 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n1 POSTHOOK: type: SHOW_GRANT -default authorization_part hive_test_user USER CREATE false -1 hive_test_user -default authorization_part hive_test_user USER DROP false -1 hive_test_user -default authorization_part hive_test_user USER UPDATE false -1 hive_test_user -PREHOOK: query: grant select(key) on table authorization_part to user hive_test_user +default authorization_part_n1 hive_test_user USER CREATE false -1 hive_test_user +default authorization_part_n1 hive_test_user USER DROP false -1 hive_test_user +default authorization_part_n1 hive_test_user USER UPDATE false -1 hive_test_user +PREHOOK: query: grant select(key) on table authorization_part_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant select(key) on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n1 +POSTHOOK: query: grant select(key) on table authorization_part_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +POSTHOOK: Output: 
default@authorization_part_n1 +PREHOOK: query: insert overwrite table authorization_part_n1 partition (ds='2010') select key, value from src_auth_tmp_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@src_auth_tmp -PREHOOK: Output: default@authorization_part@ds=2010 -POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +PREHOOK: Input: default@src_auth_tmp_n1 +PREHOOK: Output: default@authorization_part_n1@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part_n1 partition (ds='2010') select key, value from src_auth_tmp_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_auth_tmp -POSTHOOK: Output: default@authorization_part@ds=2010 -POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: Input: default@src_auth_tmp_n1 +POSTHOOK: Output: default@authorization_part_n1@ds=2010 +POSTHOOK: Lineage: authorization_part_n1 PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp_n1)src_auth_tmp_n1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part_n1 PARTITION(ds=2010).value SIMPLE [(src_auth_tmp_n1)src_auth_tmp_n1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part_n1(key) partition (ds='2010') PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n1(key) partition (ds='2010') POSTHOOK: type: SHOW_GRANT -default authorization_part [2010] [key] hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp') +default authorization_part_n1 [2010] [key] hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: alter table authorization_part_n1 partition (ds='2010') rename to partition (ds='2010_tmp') PREHOOK: type: ALTERTABLE_RENAMEPART -PREHOOK: Input: default@authorization_part -PREHOOK: Output: default@authorization_part@ds=2010 -POSTHOOK: query: alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp') +PREHOOK: Input: default@authorization_part_n1 +PREHOOK: Output: default@authorization_part_n1@ds=2010 +POSTHOOK: query: alter table authorization_part_n1 partition (ds='2010') rename to partition (ds='2010_tmp') POSTHOOK: type: ALTERTABLE_RENAMEPART -POSTHOOK: Input: default@authorization_part -POSTHOOK: Input: default@authorization_part@ds=2010 -POSTHOOK: Output: default@authorization_part@ds=2010 -POSTHOOK: Output: default@authorization_part@ds=2010_tmp -PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp') +POSTHOOK: Input: default@authorization_part_n1 +POSTHOOK: Input: default@authorization_part_n1@ds=2010 +POSTHOOK: Output: default@authorization_part_n1@ds=2010 +POSTHOOK: Output: default@authorization_part_n1@ds=2010_tmp +PREHOOK: query: show grant user hive_test_user on table authorization_part_n1(key) partition (ds='2010_tmp') PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table 
authorization_part(key) partition (ds='2010_tmp') +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n1(key) partition (ds='2010_tmp') POSTHOOK: type: SHOW_GRANT -default authorization_part [2010_tmp] [key] hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: drop table authorization_part +default authorization_part_n1 [2010_tmp] [key] hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: drop table authorization_part_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@authorization_part -PREHOOK: Output: default@authorization_part -POSTHOOK: query: drop table authorization_part +PREHOOK: Input: default@authorization_part_n1 +PREHOOK: Output: default@authorization_part_n1 +POSTHOOK: query: drop table authorization_part_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@authorization_part -POSTHOOK: Output: default@authorization_part +POSTHOOK: Input: default@authorization_part_n1 +POSTHOOK: Output: default@authorization_part_n1 diff --git a/ql/src/test/results/clientpositive/alter_table_serde.q.out b/ql/src/test/results/clientpositive/alter_table_serde.q.out index 1370625404..17c3cccf29 100644 --- a/ql/src/test/results/clientpositive/alter_table_serde.q.out +++ b/ql/src/test/results/clientpositive/alter_table_serde.q.out @@ -1,89 +1,89 @@ -PREHOOK: query: create table test_table (id int, query string, name string) +PREHOOK: query: create table test_table_n1 (id int, query string, name string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table -POSTHOOK: query: create table test_table (id int, query string, name string) +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: create table test_table_n1 (id int, query string, name string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table -PREHOOK: query: describe extended test_table +POSTHOOK: Output: default@test_table_n1 +PREHOOK: query: describe extended test_table_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: describe extended test_table +PREHOOK: Input: default@test_table_n1 +POSTHOOK: query: describe extended test_table_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n1 id int query string name string #### A masked pattern was here #### -PREHOOK: query: alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: query: alter table test_table_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' PREHOOK: type: ALTERTABLE_SERIALIZER -PREHOOK: Input: default@test_table -PREHOOK: Output: default@test_table -POSTHOOK: query: alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: Input: default@test_table_n1 +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: alter table test_table_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER -POSTHOOK: Input: default@test_table -POSTHOOK: Output: default@test_table -PREHOOK: query: describe extended test_table +POSTHOOK: Input: default@test_table_n1 +POSTHOOK: Output: default@test_table_n1 +PREHOOK: query: describe extended test_table_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: describe extended test_table +PREHOOK: Input: default@test_table_n1 +POSTHOOK: query: describe extended test_table_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: 
Input: default@test_table_n1 id int query string name string #### A masked pattern was here #### -PREHOOK: query: alter table test_table set serdeproperties ('field.delim' = ',') +PREHOOK: query: alter table test_table_n1 set serdeproperties ('field.delim' = ',') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES -PREHOOK: Input: default@test_table -PREHOOK: Output: default@test_table -POSTHOOK: query: alter table test_table set serdeproperties ('field.delim' = ',') +PREHOOK: Input: default@test_table_n1 +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: alter table test_table_n1 set serdeproperties ('field.delim' = ',') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES -POSTHOOK: Input: default@test_table -POSTHOOK: Output: default@test_table -PREHOOK: query: describe extended test_table +POSTHOOK: Input: default@test_table_n1 +POSTHOOK: Output: default@test_table_n1 +PREHOOK: query: describe extended test_table_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: describe extended test_table +PREHOOK: Input: default@test_table_n1 +POSTHOOK: query: describe extended test_table_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n1 id int query string name string #### A masked pattern was here #### -PREHOOK: query: drop table test_table +PREHOOK: query: drop table test_table_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@test_table -PREHOOK: Output: default@test_table -POSTHOOK: query: drop table test_table +PREHOOK: Input: default@test_table_n1 +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: drop table test_table_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@test_table -POSTHOOK: Output: default@test_table -PREHOOK: query: create table test_table (id int, query string, name string) partitioned by (dt string) +POSTHOOK: Input: default@test_table_n1 +POSTHOOK: Output: default@test_table_n1 +PREHOOK: query: create table test_table_n1 (id int, query string, name string) partitioned by (dt string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table -POSTHOOK: query: create table test_table (id int, query string, name string) partitioned by (dt string) +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: create table test_table_n1 (id int, query string, name string) partitioned by (dt string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table -PREHOOK: query: alter table test_table add partition (dt = '2011') +POSTHOOK: Output: default@test_table_n1 +PREHOOK: query: alter table test_table_n1 add partition (dt = '2011') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@test_table -POSTHOOK: query: alter table test_table add partition (dt = '2011') +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: alter table test_table_n1 add partition (dt = '2011') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@test_table -POSTHOOK: Output: default@test_table@dt=2011 -PREHOOK: query: describe extended test_table partition (dt='2011') +POSTHOOK: Output: default@test_table_n1 +POSTHOOK: Output: default@test_table_n1@dt=2011 +PREHOOK: query: describe extended test_table_n1 partition (dt='2011') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: describe extended test_table partition (dt='2011') +PREHOOK: Input: default@test_table_n1 +POSTHOOK: query: describe extended test_table_n1 partition (dt='2011') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: 
default@test_table +POSTHOOK: Input: default@test_table_n1 id int query string name string @@ -94,20 +94,20 @@ dt string dt string #### A masked pattern was here #### -PREHOOK: query: alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: query: alter table test_table_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' PREHOOK: type: ALTERTABLE_SERIALIZER -PREHOOK: Input: default@test_table -PREHOOK: Output: default@test_table -POSTHOOK: query: alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: Input: default@test_table_n1 +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: alter table test_table_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER -POSTHOOK: Input: default@test_table -POSTHOOK: Output: default@test_table -PREHOOK: query: describe extended test_table partition (dt='2011') +POSTHOOK: Input: default@test_table_n1 +POSTHOOK: Output: default@test_table_n1 +PREHOOK: query: describe extended test_table_n1 partition (dt='2011') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: describe extended test_table partition (dt='2011') +PREHOOK: Input: default@test_table_n1 +POSTHOOK: query: describe extended test_table_n1 partition (dt='2011') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n1 id int query string name string @@ -118,20 +118,20 @@ dt string dt string #### A masked pattern was here #### -PREHOOK: query: alter table test_table set serdeproperties ('field.delim' = ',') +PREHOOK: query: alter table test_table_n1 set serdeproperties ('field.delim' = ',') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES -PREHOOK: Input: default@test_table -PREHOOK: Output: default@test_table -POSTHOOK: query: alter table test_table set serdeproperties ('field.delim' = ',') +PREHOOK: Input: default@test_table_n1 +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: alter table test_table_n1 set serdeproperties ('field.delim' = ',') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES -POSTHOOK: Input: default@test_table -POSTHOOK: Output: default@test_table -PREHOOK: query: describe extended test_table partition (dt='2011') +POSTHOOK: Input: default@test_table_n1 +POSTHOOK: Output: default@test_table_n1 +PREHOOK: query: describe extended test_table_n1 partition (dt='2011') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: describe extended test_table partition (dt='2011') +PREHOOK: Input: default@test_table_n1 +POSTHOOK: query: describe extended test_table_n1 partition (dt='2011') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n1 id int query string name string @@ -142,21 +142,21 @@ dt string dt string #### A masked pattern was here #### -PREHOOK: query: alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: query: alter table test_table_n1 partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' PREHOOK: type: ALTERPARTITION_SERIALIZER -PREHOOK: Input: default@test_table -PREHOOK: Output: default@test_table@dt=2011 -POSTHOOK: query: alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: Input: default@test_table_n1 +PREHOOK: Output: default@test_table_n1@dt=2011 +POSTHOOK: query: alter table test_table_n1 partition(dt='2011') set 
serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' POSTHOOK: type: ALTERPARTITION_SERIALIZER -POSTHOOK: Input: default@test_table -POSTHOOK: Input: default@test_table@dt=2011 -POSTHOOK: Output: default@test_table@dt=2011 -PREHOOK: query: describe extended test_table partition (dt='2011') +POSTHOOK: Input: default@test_table_n1 +POSTHOOK: Input: default@test_table_n1@dt=2011 +POSTHOOK: Output: default@test_table_n1@dt=2011 +PREHOOK: query: describe extended test_table_n1 partition (dt='2011') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: describe extended test_table partition (dt='2011') +PREHOOK: Input: default@test_table_n1 +POSTHOOK: query: describe extended test_table_n1 partition (dt='2011') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n1 id int query string name string @@ -167,21 +167,21 @@ dt string dt string #### A masked pattern was here #### -PREHOOK: query: alter table test_table partition(dt='2011') set serdeproperties ('field.delim' = ',') +PREHOOK: query: alter table test_table_n1 partition(dt='2011') set serdeproperties ('field.delim' = ',') PREHOOK: type: ALTERPARTITION_SERDEPROPERTIES -PREHOOK: Input: default@test_table -PREHOOK: Output: default@test_table@dt=2011 -POSTHOOK: query: alter table test_table partition(dt='2011') set serdeproperties ('field.delim' = ',') +PREHOOK: Input: default@test_table_n1 +PREHOOK: Output: default@test_table_n1@dt=2011 +POSTHOOK: query: alter table test_table_n1 partition(dt='2011') set serdeproperties ('field.delim' = ',') POSTHOOK: type: ALTERPARTITION_SERDEPROPERTIES -POSTHOOK: Input: default@test_table -POSTHOOK: Input: default@test_table@dt=2011 -POSTHOOK: Output: default@test_table@dt=2011 -PREHOOK: query: describe extended test_table partition (dt='2011') +POSTHOOK: Input: default@test_table_n1 +POSTHOOK: Input: default@test_table_n1@dt=2011 +POSTHOOK: Output: default@test_table_n1@dt=2011 +PREHOOK: query: describe extended test_table_n1 partition (dt='2011') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: describe extended test_table partition (dt='2011') +PREHOOK: Input: default@test_table_n1 +POSTHOOK: query: describe extended test_table_n1 partition (dt='2011') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n1 id int query string name string @@ -192,11 +192,11 @@ dt string dt string #### A masked pattern was here #### -PREHOOK: query: drop table test_table +PREHOOK: query: drop table test_table_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@test_table -PREHOOK: Output: default@test_table -POSTHOOK: query: drop table test_table +PREHOOK: Input: default@test_table_n1 +PREHOOK: Output: default@test_table_n1 +POSTHOOK: query: drop table test_table_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@test_table -POSTHOOK: Output: default@test_table +POSTHOOK: Input: default@test_table_n1 +POSTHOOK: Output: default@test_table_n1 diff --git a/ql/src/test/results/clientpositive/alter_table_serde2.q.out b/ql/src/test/results/clientpositive/alter_table_serde2.q.out index fc82acf7d6..c328004cc4 100644 --- a/ql/src/test/results/clientpositive/alter_table_serde2.q.out +++ b/ql/src/test/results/clientpositive/alter_table_serde2.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE tst1_n5(key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE 
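The alter_table_serde.q.out hunks above replay one fixed sequence, first against the whole table and then against a single partition: create, swap the SerDe, set a serde property, describe. A minimal sketch of the driving statements, using the illustrative name serde_demo rather than the test's own table:

    CREATE TABLE serde_demo (id INT, query STRING, name STRING) PARTITIONED BY (dt STRING);
    ALTER TABLE serde_demo ADD PARTITION (dt = '2011');
    -- table-level change: updates the table descriptor and partitions created afterwards
    ALTER TABLE serde_demo SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
    ALTER TABLE serde_demo SET SERDEPROPERTIES ('field.delim' = ',');
    -- partition-level change: only dt=2011 picks up the new SerDe and property
    ALTER TABLE serde_demo PARTITION (dt = '2011') SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
    ALTER TABLE serde_demo PARTITION (dt = '2011') SET SERDEPROPERTIES ('field.delim' = ',');
    DESCRIBE EXTENDED serde_demo PARTITION (dt = '2011');

The golden files assert only the PREHOOK/POSTHOOK entities and the masked descriptor output, so the rename to test_table_n1 touches every line of the .q.out without changing what is tested.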
PREHOOK: Output: database:default -PREHOOK: Output: default@tst1 -POSTHOOK: query: CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@tst1_n5 +POSTHOOK: query: CREATE TABLE tst1_n5(key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Output: default@tst1_n5 +PREHOOK: query: DESCRIBE FORMATTED tst1_n5 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n5 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n5 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n5 # col_name data_type comment key string value string @@ -46,22 +46,22 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n5 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n5@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n5 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n5@ds=1 +POSTHOOK: Lineage: tst1_n5 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n5 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n5 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n5 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n5 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n5 # col_name data_type comment key string value string @@ -73,7 +73,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n5 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -93,20 +93,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE tst1 SET SERDEPROPERTIES ('field.delim' = ',') +PREHOOK: query: ALTER TABLE tst1_n5 SET SERDEPROPERTIES ('field.delim' = ',') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: ALTER TABLE tst1 SET SERDEPROPERTIES ('field.delim' = ',') +PREHOOK: Input: default@tst1_n5 +PREHOOK: Output: default@tst1_n5 +POSTHOOK: query: ALTER TABLE tst1_n5 SET SERDEPROPERTIES ('field.delim' = ',') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 -PREHOOK: query: DESCRIBE FORMATTED tst1 +POSTHOOK: Input: 
default@tst1_n5 +POSTHOOK: Output: default@tst1_n5 +PREHOOK: query: DESCRIBE FORMATTED tst1_n5 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 +PREHOOK: Input: default@tst1_n5 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n5 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n5 # col_name data_type comment key string value string @@ -143,22 +143,22 @@ Sort Columns: [] Storage Desc Params: field.delim , serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n5 PARTITION (ds = '1') SELECT key, value FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src +PREHOOK: Output: default@tst1_n5@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n5 PARTITION (ds = '1') SELECT key, value FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1@ds=1 -POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +POSTHOOK: Output: default@tst1_n5@ds=1 +POSTHOOK: Lineage: tst1_n5 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n5 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: DESCRIBE FORMATTED tst1_n5 PARTITION (ds = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tst1 -POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1') +PREHOOK: Input: default@tst1_n5 +POSTHOOK: query: DESCRIBE FORMATTED tst1_n5 PARTITION (ds = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n5 # col_name data_type comment key string value string @@ -170,7 +170,7 @@ ds string # Detailed Partition Information Partition Value: [1] Database: default -Table: tst1 +Table: tst1_n5 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} diff --git a/ql/src/test/results/clientpositive/alter_table_update_status.q.out b/ql/src/test/results/clientpositive/alter_table_update_status.q.out index 3d63e13aa5..ce3ff4c592 100644 --- a/ql/src/test/results/clientpositive/alter_table_update_status.q.out +++ b/ql/src/test/results/clientpositive/alter_table_update_status.q.out @@ -1,53 +1,53 @@ -PREHOOK: query: create table src_stat as select * from src1 +PREHOOK: query: create table src_stat_n0 as select * from src1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src1 PREHOOK: Output: database:default -PREHOOK: Output: default@src_stat -POSTHOOK: query: create table src_stat as select * from src1 +PREHOOK: Output: default@src_stat_n0 +POSTHOOK: query: create table src_stat_n0 as select * from src1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src1 POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_stat -POSTHOOK: Lineage: src_stat.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_stat.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: 
create table src_stat_int ( +POSTHOOK: Output: default@src_stat_n0 +POSTHOOK: Lineage: src_stat_n0.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_n0.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table src_stat_int_n0 ( key double, value string ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_stat_int -POSTHOOK: query: create table src_stat_int ( +PREHOOK: Output: default@src_stat_int_n0 +POSTHOOK: query: create table src_stat_int_n0 ( key double, value string ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_stat_int -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE src_stat_int +POSTHOOK: Output: default@src_stat_int_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE src_stat_int_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@src_stat_int -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE src_stat_int +PREHOOK: Output: default@src_stat_int_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE src_stat_int_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@src_stat_int -PREHOOK: query: ANALYZE TABLE src_stat COMPUTE STATISTICS for columns key +POSTHOOK: Output: default@src_stat_int_n0 +PREHOOK: query: ANALYZE TABLE src_stat_n0 COMPUTE STATISTICS for columns key PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@src_stat -PREHOOK: Output: default@src_stat +PREHOOK: Input: default@src_stat_n0 +PREHOOK: Output: default@src_stat_n0 #### A masked pattern was here #### -POSTHOOK: query: ANALYZE TABLE src_stat COMPUTE STATISTICS for columns key +POSTHOOK: query: ANALYZE TABLE src_stat_n0 COMPUTE STATISTICS for columns key POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@src_stat -POSTHOOK: Output: default@src_stat +POSTHOOK: Input: default@src_stat_n0 +POSTHOOK: Output: default@src_stat_n0 #### A masked pattern was here #### -PREHOOK: query: describe formatted src_stat key +PREHOOK: query: describe formatted src_stat_n0 key PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_stat -POSTHOOK: query: describe formatted src_stat key +PREHOOK: Input: default@src_stat_n0 +POSTHOOK: query: describe formatted src_stat_n0 key POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_stat +POSTHOOK: Input: default@src_stat_n0 col_name key data_type string min @@ -61,16 +61,16 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} -PREHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111') +PREHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111') +POSTHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: describe formatted src_stat key +PREHOOK: query: describe formatted src_stat_n0 key PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_stat -POSTHOOK: query: describe formatted src_stat key +PREHOOK: Input: default@src_stat_n0 +POSTHOOK: 
query: describe formatted src_stat_n0 key POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_stat +POSTHOOK: Input: default@src_stat_n0 col_name key data_type string min @@ -84,16 +84,16 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} -PREHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124') +PREHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124') +POSTHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: describe formatted src_stat value +PREHOOK: query: describe formatted src_stat_n0 value PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_stat -POSTHOOK: query: describe formatted src_stat value +PREHOOK: Input: default@src_stat_n0 +POSTHOOK: query: describe formatted src_stat_n0 value POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_stat +POSTHOOK: Input: default@src_stat_n0 col_name value data_type string min @@ -107,22 +107,22 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} -PREHOOK: query: ANALYZE TABLE src_stat_int COMPUTE STATISTICS for columns key +PREHOOK: query: ANALYZE TABLE src_stat_int_n0 COMPUTE STATISTICS for columns key PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@src_stat_int -PREHOOK: Output: default@src_stat_int +PREHOOK: Input: default@src_stat_int_n0 +PREHOOK: Output: default@src_stat_int_n0 #### A masked pattern was here #### -POSTHOOK: query: ANALYZE TABLE src_stat_int COMPUTE STATISTICS for columns key +POSTHOOK: query: ANALYZE TABLE src_stat_int_n0 COMPUTE STATISTICS for columns key POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@src_stat_int -POSTHOOK: Output: default@src_stat_int +POSTHOOK: Input: default@src_stat_int_n0 +POSTHOOK: Output: default@src_stat_int_n0 #### A masked pattern was here #### -PREHOOK: query: describe formatted src_stat_int key +PREHOOK: query: describe formatted src_stat_int_n0 key PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_stat_int -POSTHOOK: query: describe formatted src_stat_int key +PREHOOK: Input: default@src_stat_int_n0 +POSTHOOK: query: describe formatted src_stat_int_n0 key POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_stat_int +POSTHOOK: Input: default@src_stat_int_n0 col_name key data_type double min 66.0 @@ -136,16 +136,16 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}} -PREHOOK: query: ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22') +PREHOOK: query: ALTER TABLE src_stat_int_n0 UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22') +POSTHOOK: query: ALTER TABLE src_stat_int_n0 UPDATE 
STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: describe formatted src_stat_int key +PREHOOK: query: describe formatted src_stat_int_n0 key PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_stat_int -POSTHOOK: query: describe formatted src_stat_int key +PREHOOK: Input: default@src_stat_int_n0 +POSTHOOK: query: describe formatted src_stat_int_n0 key POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_stat_int +POSTHOOK: Input: default@src_stat_int_n0 col_name key data_type double min 333.22 @@ -171,16 +171,16 @@ PREHOOK: Input: database:dummydb POSTHOOK: query: use dummydb POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:dummydb -PREHOOK: query: ALTER TABLE default.src_stat UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222') +PREHOOK: query: ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE default.src_stat UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222') +POSTHOOK: query: ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: describe formatted default.src_stat key +PREHOOK: query: describe formatted default.src_stat_n0 key PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_stat -POSTHOOK: query: describe formatted default.src_stat key +PREHOOK: Input: default@src_stat_n0 +POSTHOOK: query: describe formatted default.src_stat_n0 key POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_stat +POSTHOOK: Input: default@src_stat_n0 col_name key data_type string min @@ -194,16 +194,16 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} -PREHOOK: query: ALTER TABLE default.src_stat UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235') +PREHOOK: query: ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE default.src_stat UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235') +POSTHOOK: query: ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: describe formatted default.src_stat value +PREHOOK: query: describe formatted default.src_stat_n0 value PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_stat -POSTHOOK: query: describe formatted default.src_stat value +PREHOOK: Input: default@src_stat_n0 +POSTHOOK: query: describe formatted default.src_stat_n0 value POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_stat +POSTHOOK: Input: default@src_stat_n0 col_name value data_type string min @@ -231,7 +231,7 @@ POSTHOOK: query: drop database dummydb POSTHOOK: type: DROPDATABASE POSTHOOK: Input: database:dummydb POSTHOOK: Output: database:dummydb -PREHOOK: query: create table datatype_stats( +PREHOOK: query: create table datatype_stats_n0( t TINYINT, s SMALLINT, i INT, @@ -248,8 +248,8 @@ PREHOOK: query: create table datatype_stats( bin BINARY) PREHOOK: type: CREATETABLE 
PREHOOK: Output: database:default -PREHOOK: Output: default@datatype_stats -POSTHOOK: query: create table datatype_stats( +PREHOOK: Output: default@datatype_stats_n0 +POSTHOOK: query: create table datatype_stats_n0( t TINYINT, s SMALLINT, i INT, @@ -266,57 +266,57 @@ POSTHOOK: query: create table datatype_stats( bin BINARY) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@datatype_stats -PREHOOK: query: INSERT INTO datatype_stats values(2, 3, 45, 456, 45454.4, 454.6565, 2355, '2012-01-01 01:02:03', '2012-01-01', 'update_statistics', 'stats', 'hive', 'true', 'bin') +POSTHOOK: Output: default@datatype_stats_n0 +PREHOOK: query: INSERT INTO datatype_stats_n0 values(2, 3, 45, 456, 45454.4, 454.6565, 2355, '2012-01-01 01:02:03', '2012-01-01', 'update_statistics', 'stats', 'hive', 'true', 'bin') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@datatype_stats -POSTHOOK: query: INSERT INTO datatype_stats values(2, 3, 45, 456, 45454.4, 454.6565, 2355, '2012-01-01 01:02:03', '2012-01-01', 'update_statistics', 'stats', 'hive', 'true', 'bin') +PREHOOK: Output: default@datatype_stats_n0 +POSTHOOK: query: INSERT INTO datatype_stats_n0 values(2, 3, 45, 456, 45454.4, 454.6565, 2355, '2012-01-01 01:02:03', '2012-01-01', 'update_statistics', 'stats', 'hive', 'true', 'bin') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@datatype_stats -POSTHOOK: Lineage: datatype_stats.b SCRIPT [] -POSTHOOK: Lineage: datatype_stats.bin SCRIPT [] -POSTHOOK: Lineage: datatype_stats.bl SCRIPT [] -POSTHOOK: Lineage: datatype_stats.c SCRIPT [] -POSTHOOK: Lineage: datatype_stats.d SCRIPT [] -POSTHOOK: Lineage: datatype_stats.dem SCRIPT [] -POSTHOOK: Lineage: datatype_stats.dt SCRIPT [] -POSTHOOK: Lineage: datatype_stats.f SCRIPT [] -POSTHOOK: Lineage: datatype_stats.i SCRIPT [] -POSTHOOK: Lineage: datatype_stats.s SCRIPT [] -POSTHOOK: Lineage: datatype_stats.str SCRIPT [] -POSTHOOK: Lineage: datatype_stats.t SCRIPT [] -POSTHOOK: Lineage: datatype_stats.ts SCRIPT [] -POSTHOOK: Lineage: datatype_stats.v SCRIPT [] -PREHOOK: query: INSERT INTO datatype_stats values(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) +POSTHOOK: Output: default@datatype_stats_n0 +POSTHOOK: Lineage: datatype_stats_n0.b SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.bin SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.bl SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.c SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.d SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.dem SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.dt SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.f SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.i SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.s SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.str SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.t SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.ts SCRIPT [] +POSTHOOK: Lineage: datatype_stats_n0.v SCRIPT [] +PREHOOK: query: INSERT INTO datatype_stats_n0 values(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@datatype_stats -POSTHOOK: query: INSERT INTO datatype_stats values(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) +PREHOOK: Output: default@datatype_stats_n0 +POSTHOOK: query: INSERT INTO datatype_stats_n0 values(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@datatype_stats -POSTHOOK: Lineage: datatype_stats.b EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.bin EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.bl EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.c EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.d EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.dem EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.dt EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.f EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.i EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.s EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.str EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.t EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.ts EXPRESSION [] -POSTHOOK: Lineage: datatype_stats.v EXPRESSION [] -PREHOOK: query: DESC FORMATTED datatype_stats s +POSTHOOK: Output: default@datatype_stats_n0 +POSTHOOK: Lineage: datatype_stats_n0.b EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.bin EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.bl EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.c EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.d EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.dem EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.dt EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.f EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.i EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.s EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.str EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.t EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.ts EXPRESSION [] +POSTHOOK: Lineage: datatype_stats_n0.v EXPRESSION [] +PREHOOK: query: DESC FORMATTED datatype_stats_n0 s PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats s +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 s POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name s data_type smallint min @@ -330,12 +330,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats i +PREHOOK: query: DESC FORMATTED datatype_stats_n0 i PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats i +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 i POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name i data_type int min @@ -349,12 +349,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats b +PREHOOK: query: DESC FORMATTED datatype_stats_n0 b PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats b +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 b POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name b data_type bigint min @@ -368,12 +368,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats f +PREHOOK: query: DESC FORMATTED datatype_stats_n0 f PREHOOK: type: DESCTABLE -PREHOOK: Input: 
default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats f +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 f POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name f data_type float min @@ -387,12 +387,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats d +PREHOOK: query: DESC FORMATTED datatype_stats_n0 d PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats d +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 d POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name d data_type double min @@ -406,12 +406,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats dem +PREHOOK: query: DESC FORMATTED datatype_stats_n0 dem PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats dem +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 dem POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name dem data_type decimal(10,0) min @@ -425,12 +425,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats ts +PREHOOK: query: DESC FORMATTED datatype_stats_n0 ts PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats ts +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 ts POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name ts data_type timestamp min @@ -444,12 +444,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats dt +PREHOOK: query: DESC FORMATTED datatype_stats_n0 dt PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats dt +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 dt POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name dt data_type date min @@ -463,12 +463,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats str +PREHOOK: query: DESC FORMATTED datatype_stats_n0 str PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats str +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 str POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name str data_type string min @@ -482,12 +482,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats v +PREHOOK: query: DESC FORMATTED datatype_stats_n0 v PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats v 
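The alter_table_update_status.q.out hunks run the same loop once per column type: DESC FORMATTED <table> <column> to show the empty statistics, ALTER TABLE ... UPDATE STATISTICS FOR COLUMN ... SET (...) to write values by hand, then DESC FORMATTED again to confirm the new min/max/numDVs and the entry added to the COLUMN_STATS_ACCURATE map. A sketch of one round trip per stat family, with the illustrative table name stats_demo:

    CREATE TABLE stats_demo (i INT, str STRING);
    -- numeric columns take lowValue/highValue ...
    ALTER TABLE stats_demo UPDATE STATISTICS FOR COLUMN i
      SET ('numDVs'='59','numNulls'='1','lowValue'='5','highValue'='889');
    -- ... while string-like columns take avgColLen/maxColLen instead
    ALTER TABLE stats_demo UPDATE STATISTICS FOR COLUMN str
      SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');
    DESC FORMATTED stats_demo i;
    DESC FORMATTED stats_demo str;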
+PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 v POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name v data_type varchar(12) min @@ -501,12 +501,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats c +PREHOOK: query: DESC FORMATTED datatype_stats_n0 c PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats c +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 c POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name c data_type char(5) min @@ -520,12 +520,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats bl +PREHOOK: query: DESC FORMATTED datatype_stats_n0 bl PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats bl +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 bl POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name bl data_type boolean min @@ -539,12 +539,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats bin +PREHOOK: query: DESC FORMATTED datatype_stats_n0 bin PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats bin +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 bin POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name bin data_type binary min @@ -558,12 +558,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: DESC FORMATTED datatype_stats t +PREHOOK: query: DESC FORMATTED datatype_stats_n0 t PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats t +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 t POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name t data_type tinyint min @@ -577,16 +577,16 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} -PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35') +PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35') +POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: DESC FORMATTED datatype_stats t +PREHOOK: query: DESC FORMATTED datatype_stats_n0 t PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats 
-POSTHOOK: query: DESC FORMATTED datatype_stats t +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 t POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name t data_type tinyint min 35 @@ -600,12 +600,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"t\":\"true\"}} -PREHOOK: query: DESC FORMATTED datatype_stats s +PREHOOK: query: DESC FORMATTED datatype_stats_n0 s PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats s +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 s POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name s data_type smallint min @@ -619,16 +619,16 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"t\":\"true\"}} -PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25') +PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25') +POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: DESC FORMATTED datatype_stats s +PREHOOK: query: DESC FORMATTED datatype_stats_n0 s PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats s +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 s POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name s data_type smallint min 25 @@ -642,12 +642,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: DESC FORMATTED datatype_stats i +PREHOOK: query: DESC FORMATTED datatype_stats_n0 i PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats i +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 i POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name i data_type int min @@ -661,16 +661,16 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5') +PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5') +POSTHOOK: query: ALTER TABLE 
default.datatype_stats_n0 UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: DESC FORMATTED datatype_stats i +PREHOOK: query: DESC FORMATTED datatype_stats_n0 i PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats i +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 i POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name i data_type int min 5 @@ -684,12 +684,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: DESC FORMATTED datatype_stats b +PREHOOK: query: DESC FORMATTED datatype_stats_n0 b PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats b +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 b POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name b data_type bigint min @@ -703,16 +703,16 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8') +PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8') +POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: DESC FORMATTED datatype_stats b +PREHOOK: query: DESC FORMATTED datatype_stats_n0 b PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats b +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 b POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name b data_type bigint min 8 @@ -726,12 +726,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: DESC FORMATTED datatype_stats f +PREHOOK: query: DESC FORMATTED datatype_stats_n0 f PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats f +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 f POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name f data_type float min @@ -745,16 +745,16 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column f SET 
('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00') +PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00') +POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: DESC FORMATTED datatype_stats f +PREHOOK: query: DESC FORMATTED datatype_stats_n0 f PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats f +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 f POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name f data_type float min 8.0 @@ -768,12 +768,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: DESC FORMATTED datatype_stats d +PREHOOK: query: DESC FORMATTED datatype_stats_n0 d PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats d +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 d POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name d data_type double min @@ -787,16 +787,16 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455') +PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455') +POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: query: DESC FORMATTED datatype_stats d +PREHOOK: query: DESC FORMATTED datatype_stats_n0 d PREHOOK: type: DESCTABLE -PREHOOK: Input: default@datatype_stats -POSTHOOK: query: DESC FORMATTED datatype_stats d +PREHOOK: Input: default@datatype_stats_n0 +POSTHOOK: query: DESC FORMATTED datatype_stats_n0 d POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@datatype_stats +POSTHOOK: Input: default@datatype_stats_n0 col_name d data_type double min 0.00455 @@ -810,12 +810,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}} -PREHOOK: query: DESC FORMATTED datatype_stats dem +PREHOOK: query: DESC FORMATTED datatype_stats_n0 dem PREHOOK: type: 
DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats dem
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 dem
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name dem
data_type decimal(10,0)
min
@@ -829,16 +829,16 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}
-PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0')
+PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0')
PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
-POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0')
+POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0')
POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
-PREHOOK: query: DESC FORMATTED datatype_stats dem
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 dem
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats dem
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 dem
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name dem
data_type decimal(10,0)
min 0
@@ -852,12 +852,12 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}
-PREHOOK: query: DESC FORMATTED datatype_stats ts
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 ts
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats ts
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 ts
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name ts
data_type timestamp
min
@@ -871,16 +871,16 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\"}}
-PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924')
+PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924')
PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
-POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924')
+POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924')
POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
-PREHOOK: query: DESC FORMATTED datatype_stats ts
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 ts
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats ts
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 ts
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name ts
data_type timestamp
min 1357030924
@@ -894,12 +894,12 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}
-PREHOOK: query: DESC FORMATTED datatype_stats dt
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 dt
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats dt
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 dt
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name dt
data_type date
min
@@ -913,16 +913,16 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}
-PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04')
+PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04')
PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
-POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04')
+POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04')
POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
-PREHOOK: query: DESC FORMATTED datatype_stats dt
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 dt
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats dt
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 dt
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name dt
data_type date
min 2001-02-04
@@ -936,12 +936,12 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}
-PREHOOK: query: DESC FORMATTED datatype_stats str
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 str
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats str
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 str
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name str
data_type string
min
@@ -955,16 +955,16 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}
-PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
+PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
-POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
+POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235')
POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
-PREHOOK: query: DESC FORMATTED datatype_stats str
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 str
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats str
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 str
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name str
data_type string
min
@@ -978,12 +978,12 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}
-PREHOOK: query: DESC FORMATTED datatype_stats v
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 v
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats v
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 v
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name v
data_type varchar(12)
min
@@ -997,16 +997,16 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\"}}
-PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25')
+PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25')
PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
-POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25')
+POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25')
POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
-PREHOOK: query: DESC FORMATTED datatype_stats v
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 v
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats v
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 v
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name v
data_type varchar(12)
min
@@ -1020,12 +1020,12 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}
-PREHOOK: query: DESC FORMATTED datatype_stats c
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 c
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats c
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 c
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name c
data_type char(5)
min
@@ -1039,16 +1039,16 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}
-PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58')
+PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58')
PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
-POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58')
+POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58')
POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
-PREHOOK: query: DESC FORMATTED datatype_stats c
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 c
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats c
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 c
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name c
data_type char(5)
min
@@ -1062,12 +1062,12 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"c\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}
-PREHOOK: query: DESC FORMATTED datatype_stats bl
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 bl
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats bl
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 bl
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name bl
data_type boolean
min
@@ -1081,16 +1081,16 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"c\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}
-PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8')
+PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8')
PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
-POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8')
+POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8')
POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
-PREHOOK: query: DESC FORMATTED datatype_stats bl
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 bl
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats bl
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 bl
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name bl
data_type boolean
min
@@ -1104,12 +1104,12 @@ num_falses 8
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"bl\":\"true\",\"c\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}
-PREHOOK: query: DESC FORMATTED datatype_stats bin
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 bin
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats bin
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 bin
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name bin
data_type binary
min
@@ -1123,16 +1123,16 @@ num_falses
bitVector
comment from deserializer
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"bl\":\"true\",\"c\":\"true\",\"d\":\"true\",\"dem\":\"true\",\"dt\":\"true\",\"f\":\"true\",\"i\":\"true\",\"s\":\"true\",\"str\":\"true\",\"t\":\"true\",\"ts\":\"true\",\"v\":\"true\"}}
-PREHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8')
+PREHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8')
PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
-POSTHOOK: query: ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8')
+POSTHOOK: query: ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8')
POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
-PREHOOK: query: DESC FORMATTED datatype_stats bin
+PREHOOK: query: DESC FORMATTED datatype_stats_n0 bin
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@datatype_stats
-POSTHOOK: query: DESC FORMATTED datatype_stats bin
+PREHOOK: Input: default@datatype_stats_n0
+POSTHOOK: query: DESC FORMATTED datatype_stats_n0 bin
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@datatype_stats
+POSTHOOK: Input: default@datatype_stats_n0
col_name bin
data_type binary
min
diff --git a/ql/src/test/results/clientpositive/alter_view_rename.q.out b/ql/src/test/results/clientpositive/alter_view_rename.q.out
index 307c0f6b12..9937e37d83 100644
--- a/ql/src/test/results/clientpositive/alter_view_rename.q.out
+++ b/ql/src/test/results/clientpositive/alter_view_rename.q.out
@@ -10,27 +10,27 @@ PREHOOK: Output: database:tv2
POSTHOOK: query: CREATE DATABASE tv2
POSTHOOK: type: CREATEDATABASE
POSTHOOK: Output: database:tv2
-PREHOOK: query: CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE invites_n1 (foo INT, bar STRING) PARTITIONED BY (ds STRING)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@invites
-POSTHOOK: query: CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@invites_n1
+POSTHOOK: query: CREATE TABLE invites_n1 (foo INT, bar STRING) PARTITIONED BY (ds STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@invites
-PREHOOK: query: CREATE VIEW tv1.view1 as SELECT * FROM invites
+POSTHOOK: Output: default@invites_n1
+PREHOOK: query: CREATE VIEW tv1.view1 as SELECT * FROM invites_n1
PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@invites
+PREHOOK: Input: default@invites_n1
PREHOOK: Output: database:tv1
PREHOOK: Output: tv1@view1
-POSTHOOK: query: CREATE VIEW tv1.view1 as SELECT * FROM invites
+POSTHOOK: query: CREATE VIEW tv1.view1 as SELECT * FROM invites_n1
POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@invites
+POSTHOOK: Input: default@invites_n1
POSTHOOK: Output: database:tv1
POSTHOOK: Output: tv1@view1
-POSTHOOK: Lineage: view1.bar SIMPLE [(invites)invites.FieldSchema(name:bar, type:string, comment:null), ]
-POSTHOOK: Lineage: view1.ds SIMPLE [(invites)invites.FieldSchema(name:ds, type:string, comment:null), ]
-POSTHOOK: Lineage: view1.foo SIMPLE [(invites)invites.FieldSchema(name:foo, type:int, comment:null), ]
+POSTHOOK: Lineage: view1.bar SIMPLE [(invites_n1)invites_n1.FieldSchema(name:bar, type:string, comment:null), ]
+POSTHOOK: Lineage: view1.ds SIMPLE [(invites_n1)invites_n1.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: view1.foo SIMPLE [(invites_n1)invites_n1.FieldSchema(name:foo, type:int, comment:null), ]
PREHOOK: query: DESCRIBE EXTENDED tv1.view1
PREHOOK: type: DESCTABLE
PREHOOK: Input: tv1@view1
@@ -64,22 +64,22 @@ ds string
#### A masked pattern was here ####
PREHOOK: query: SELECT * FROM tv2.view2
PREHOOK: type: QUERY
-PREHOOK: Input: default@invites
+PREHOOK: Input: default@invites_n1
PREHOOK: Input: tv2@view2
#### A masked pattern was here ####
POSTHOOK: query: SELECT * FROM tv2.view2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@invites
+POSTHOOK: Input: default@invites_n1
POSTHOOK: Input: tv2@view2
#### A masked pattern was here ####
-PREHOOK: query: DROP TABLE invites
+PREHOOK: query: DROP TABLE invites_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@invites
-PREHOOK: Output: default@invites
-POSTHOOK: query: DROP TABLE invites
+PREHOOK: Input: default@invites_n1
+PREHOOK: Output: default@invites_n1
+POSTHOOK: query: DROP TABLE invites_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@invites
-POSTHOOK: Output: default@invites
+POSTHOOK: Input: default@invites_n1
+POSTHOOK: Output: default@invites_n1
PREHOOK: query: DROP VIEW tv2.view2
PREHOOK: type: DROPVIEW
PREHOOK: Input: tv2@view2
diff --git a/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out b/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
index 6be0ac996e..3207f1e128 100644
--- a/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
+++ b/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
@@ -1,80 +1,80 @@
-PREHOOK: query: DROP TABLE IF EXISTS test1
+PREHOOK: query: DROP TABLE IF EXISTS test1_n8
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS test1
+POSTHOOK: query: DROP TABLE IF EXISTS test1_n8
POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS test2
+PREHOOK: query: DROP TABLE IF EXISTS test2_n6
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS test2
+POSTHOOK: query: DROP TABLE IF EXISTS test2_n6
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE test1(name string, age int)
+PREHOOK: query: CREATE TABLE test1_n8(name string, age int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test1
-POSTHOOK: query: CREATE TABLE test1(name string, age int)
+PREHOOK: Output: default@test1_n8
+POSTHOOK: query: CREATE TABLE test1_n8(name string, age int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test1
-PREHOOK: query: CREATE TABLE test2(name string) PARTITIONED by (age int)
+POSTHOOK: Output: default@test1_n8
+PREHOOK: query: CREATE TABLE test2_n6(name string) PARTITIONED by (age int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test2
-POSTHOOK: query: CREATE TABLE test2(name string) PARTITIONED by (age int)
+PREHOOK: Output: default@test2_n6
+POSTHOOK: query: CREATE TABLE test2_n6(name string) PARTITIONED by (age int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1
+POSTHOOK: Output: default@test2_n6
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1_n8
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@test1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1
+PREHOOK: Output: default@test1_n8
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1_n8
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@test1
-PREHOOK: query: FROM test1 INSERT OVERWRITE TABLE test2 PARTITION(age) SELECT test1.name, test1.age
+POSTHOOK: Output: default@test1_n8
+PREHOOK: query: FROM test1_n8 INSERT OVERWRITE TABLE test2_n6 PARTITION(age) SELECT test1_n8.name, test1_n8.age
PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Output: default@test2
-POSTHOOK: query: FROM test1 INSERT OVERWRITE TABLE test2 PARTITION(age) SELECT test1.name, test1.age
+PREHOOK: Input: default@test1_n8
+PREHOOK: Output: default@test2_n6
+POSTHOOK: query: FROM test1_n8 INSERT OVERWRITE TABLE test2_n6 PARTITION(age) SELECT test1_n8.name, test1_n8.age
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Output: default@test2@age=15
-POSTHOOK: Output: default@test2@age=30
-POSTHOOK: Output: default@test2@age=40
-POSTHOOK: Output: default@test2@age=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: test2 PARTITION(age=15).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
-POSTHOOK: Lineage: test2 PARTITION(age=30).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
-POSTHOOK: Lineage: test2 PARTITION(age=40).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
-POSTHOOK: Lineage: test2 PARTITION(age=__HIVE_DEFAULT_PARTITION__).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: ANALYZE TABLE test2 PARTITION(age) COMPUTE STATISTICS
+POSTHOOK: Input: default@test1_n8
+POSTHOOK: Output: default@test2_n6@age=15
+POSTHOOK: Output: default@test2_n6@age=30
+POSTHOOK: Output: default@test2_n6@age=40
+POSTHOOK: Output: default@test2_n6@age=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: test2_n6 PARTITION(age=15).name SIMPLE [(test1_n8)test1_n8.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2_n6 PARTITION(age=30).name SIMPLE [(test1_n8)test1_n8.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2_n6 PARTITION(age=40).name SIMPLE [(test1_n8)test1_n8.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2_n6 PARTITION(age=__HIVE_DEFAULT_PARTITION__).name SIMPLE [(test1_n8)test1_n8.FieldSchema(name:name, type:string, comment:null), ]
+PREHOOK: query: ANALYZE TABLE test2_n6 PARTITION(age) COMPUTE STATISTICS
PREHOOK: type: QUERY
-PREHOOK: Input: default@test2
-PREHOOK: Input: default@test2@age=15
-PREHOOK: Input: default@test2@age=30
-PREHOOK: Input: default@test2@age=40
-PREHOOK: Input: default@test2@age=__HIVE_DEFAULT_PARTITION__
-PREHOOK: Output: default@test2
-PREHOOK: Output: default@test2@age=15
-PREHOOK: Output: default@test2@age=30
-PREHOOK: Output: default@test2@age=40
-PREHOOK: Output: default@test2@age=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: query: ANALYZE TABLE test2 PARTITION(age) COMPUTE STATISTICS
+PREHOOK: Input: default@test2_n6
+PREHOOK: Input: default@test2_n6@age=15
+PREHOOK: Input: default@test2_n6@age=30
+PREHOOK: Input: default@test2_n6@age=40
+PREHOOK: Input: default@test2_n6@age=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: default@test2_n6
+PREHOOK: Output: default@test2_n6@age=15
+PREHOOK: Output: default@test2_n6@age=30
+PREHOOK: Output: default@test2_n6@age=40
+PREHOOK: Output: default@test2_n6@age=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: query: ANALYZE TABLE test2_n6 PARTITION(age) COMPUTE STATISTICS
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test2
-POSTHOOK: Input: default@test2@age=15
-POSTHOOK: Input: default@test2@age=30
-POSTHOOK: Input: default@test2@age=40
-POSTHOOK: Input: default@test2@age=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Output: default@test2
-POSTHOOK: Output: default@test2@age=15
-POSTHOOK: Output: default@test2@age=30
-POSTHOOK: Output: default@test2@age=40
-POSTHOOK: Output: default@test2@age=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: DESC EXTENDED test2
+POSTHOOK: Input: default@test2_n6
+POSTHOOK: Input: default@test2_n6@age=15
+POSTHOOK: Input: default@test2_n6@age=30
+POSTHOOK: Input: default@test2_n6@age=40
+POSTHOOK: Input: default@test2_n6@age=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: default@test2_n6
+POSTHOOK: Output: default@test2_n6@age=15
+POSTHOOK: Output: default@test2_n6@age=30
+POSTHOOK: Output: default@test2_n6@age=40
+POSTHOOK: Output: default@test2_n6@age=__HIVE_DEFAULT_PARTITION__
+PREHOOK: query: DESC EXTENDED test2_n6
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test2
-POSTHOOK: query: DESC EXTENDED test2
+PREHOOK: Input: default@test2_n6
+POSTHOOK: query: DESC EXTENDED test2_n6
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test2_n6
name string
age int
@@ -83,9 +83,9 @@ age int
age int
#### A masked pattern was here ####
-PREHOOK: query: EXPLAIN EXTENDED select * from test2
+PREHOOK: query: EXPLAIN EXTENDED select * from test2_n6
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED select * from test2
+POSTHOOK: query: EXPLAIN EXTENDED select * from test2_n6
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -108,13 +108,13 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.test2
+ name default.test2_n6
numFiles 1
numRows 1
partition_columns age
partition_columns.types int
rawDataSize 3
- serialization.ddl struct test2 { string name}
+ serialization.ddl struct test2_n6 { string name}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 4
@@ -131,16 +131,16 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.test2
+ name default.test2_n6
partition_columns age
partition_columns.types int
- serialization.ddl struct test2 { string name}
+ serialization.ddl struct test2_n6 { string name}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.test2
- name: default.test2
+ name: default.test2_n6
+ name: default.test2_n6
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -154,13 +154,13 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.test2
+ name default.test2_n6
numFiles 1
numRows 1
partition_columns age
partition_columns.types int
rawDataSize 0
- serialization.ddl struct test2 { string name}
+ serialization.ddl struct test2_n6 { string name}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1
@@ -177,16 +177,16 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.test2
+ name default.test2_n6
partition_columns age
partition_columns.types int
- serialization.ddl struct test2 { string name}
+ serialization.ddl struct test2_n6 { string name}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.test2
- name: default.test2
+ name: default.test2_n6
+ name: default.test2_n6
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -200,13 +200,13 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.test2
+ name default.test2_n6
numFiles 1
numRows 1
partition_columns age
partition_columns.types int
rawDataSize 4
- serialization.ddl struct test2 { string name}
+ serialization.ddl struct test2_n6 { string name}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 5
@@ -223,16 +223,16 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.test2
+ name default.test2_n6
partition_columns age
partition_columns.types int
- serialization.ddl struct test2 { string name}
+ serialization.ddl struct test2_n6 { string name}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.test2
- name: default.test2
+ name: default.test2_n6
+ name: default.test2_n6
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -246,13 +246,13 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.test2
+ name default.test2_n6
numFiles 1
numRows 2
partition_columns age
partition_columns.types int
rawDataSize 4
- serialization.ddl struct test2 { string name}
+ serialization.ddl struct test2_n6 { string name}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 6
@@ -269,19 +269,19 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.test2
+ name default.test2_n6
partition_columns age
partition_columns.types int
- serialization.ddl struct test2 { string name}
+ serialization.ddl struct test2_n6 { string name}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.test2
- name: default.test2
+ name: default.test2_n6
+ name: default.test2_n6
Processor Tree:
TableScan
- alias: test2
+ alias: test2_n6
Statistics: Num rows: 5 Data size: 299 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Select Operator
@@ -290,19 +290,19 @@ STAGE PLANS:
Statistics: Num rows: 5 Data size: 299 Basic stats: COMPLETE Column stats: NONE
ListSink
-PREHOOK: query: DROP TABLE test1
+PREHOOK: query: DROP TABLE test1_n8
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test1
-PREHOOK: Output: default@test1
-POSTHOOK: query: DROP TABLE test1
+PREHOOK: Input: default@test1_n8
+PREHOOK: Output: default@test1_n8
+POSTHOOK: query: DROP TABLE test1_n8
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test1
-POSTHOOK: Output: default@test1
-PREHOOK: query: DROP TABLE test2
+POSTHOOK: Input: default@test1_n8
+POSTHOOK: Output: default@test1_n8
+PREHOOK: query: DROP TABLE test2_n6
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test2
-PREHOOK: Output: default@test2
-POSTHOOK: query: DROP TABLE test2
+PREHOOK: Input: default@test2_n6
+PREHOOK: Output: default@test2_n6
+POSTHOOK: query: DROP TABLE test2_n6
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test2
-POSTHOOK: Output: default@test2
+POSTHOOK: Input: default@test2_n6
+POSTHOOK: Output: default@test2_n6
diff --git a/ql/src/test/results/clientpositive/analyze_tbl_date.q.out b/ql/src/test/results/clientpositive/analyze_tbl_date.q.out
index bd8db9f1c6..c2294ab6a6 100644
--- a/ql/src/test/results/clientpositive/analyze_tbl_date.q.out
+++ b/ql/src/test/results/clientpositive/analyze_tbl_date.q.out
@@ -1,36 +1,36 @@
-PREHOOK: query: create table test_table(d date)
+PREHOOK: query: create table test_table_n7(d date)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table
-POSTHOOK: query: create table test_table(d date)
+PREHOOK: Output: default@test_table_n7
+POSTHOOK: query: create table test_table_n7(d date)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table
-PREHOOK: query: insert into test_table values(null), (null), (null)
+POSTHOOK: Output: default@test_table_n7
+PREHOOK: query: insert into test_table_n7 values(null), (null), (null)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_table
-POSTHOOK: query: insert into test_table values(null), (null), (null)
+PREHOOK: Output: default@test_table_n7
+POSTHOOK: query: insert into test_table_n7 values(null), (null), (null)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_table
-POSTHOOK: Lineage: test_table.d EXPRESSION []
-PREHOOK: query: analyze table test_table compute statistics for columns
+POSTHOOK: Output: default@test_table_n7
+POSTHOOK: Lineage: test_table_n7.d EXPRESSION []
+PREHOOK: query: analyze table test_table_n7 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@test_table
-PREHOOK: Output: default@test_table
+PREHOOK: Input: default@test_table_n7
+PREHOOK: Output: default@test_table_n7
#### A masked pattern was here ####
-POSTHOOK: query: analyze table test_table compute statistics for columns
+POSTHOOK: query: analyze table test_table_n7 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@test_table
-POSTHOOK: Output: default@test_table
+POSTHOOK: Input: default@test_table_n7
+POSTHOOK: Output: default@test_table_n7
#### A masked pattern was here ####
-PREHOOK: query: describe formatted test_table
+PREHOOK: query: describe formatted test_table_n7
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: describe formatted test_table
+PREHOOK: Input: default@test_table_n7
+POSTHOOK: query: describe formatted test_table_n7
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n7
# col_name data_type comment
d date
@@ -59,9 +59,9 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: explain select * from test_table where d is not null
+PREHOOK: query: explain select * from test_table_n7 where d is not null
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from test_table where d is not null
+POSTHOOK: query: explain select * from test_table_n7 where d is not null
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -72,7 +72,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: test_table
+ alias: test_table_n7
Statistics: Num rows: 3 Data size: 6 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: d is not null (type: boolean)
@@ -96,11 +96,11 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select * from test_table where d is not null
+PREHOOK: query: select * from test_table_n7 where d is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table
+PREHOOK: Input: default@test_table_n7
#### A masked pattern was here ####
-POSTHOOK: query: select * from test_table where d is not null
+POSTHOOK: query: select * from test_table_n7 where d is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n7
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
index 74c8beee61..734811175e 100644
--- a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
+++ b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
@@ -1,59 +1,59 @@
-PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+PREHOOK: query: create table src_stat_part_n1(key string, value string) partitioned by (partitionId int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@src_stat_part
-POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+PREHOOK: Output: default@src_stat_part_n1
+POSTHOOK: query: create table src_stat_part_n1(key string, value string) partitioned by (partitionId int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_stat_part
-PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=1)
+POSTHOOK: Output: default@src_stat_part_n1
+PREHOOK: query: insert overwrite table src_stat_part_n1 partition (partitionId=1)
select * from src1
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
-PREHOOK: Output: default@src_stat_part@partitionid=1
-POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=1)
+PREHOOK: Output: default@src_stat_part_n1@partitionid=1
+POSTHOOK: query: insert overwrite table src_stat_part_n1 partition (partitionId=1)
select * from src1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@src_stat_part@partitionid=1
-POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=2)
+POSTHOOK: Output: default@src_stat_part_n1@partitionid=1
+POSTHOOK: Lineage: src_stat_part_n1 PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part_n1 PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table src_stat_part_n1 partition (partitionId=2)
select * from src1
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
-PREHOOK: Output: default@src_stat_part@partitionid=2
-POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=2)
+PREHOOK: Output: default@src_stat_part_n1@partitionid=2
+POSTHOOK: query: insert overwrite table src_stat_part_n1 partition (partitionId=2)
select * from src1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@src_stat_part@partitionid=2
-POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key
+POSTHOOK: Output: default@src_stat_part_n1@partitionid=2
+POSTHOOK: Lineage: src_stat_part_n1 PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part_n1 PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: ANALYZE TABLE src_stat_part_n1 partition (partitionId) COMPUTE STATISTICS for columns key
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@src_stat_part
-PREHOOK: Input: default@src_stat_part@partitionid=1
-PREHOOK: Input: default@src_stat_part@partitionid=2
-PREHOOK: Output: default@src_stat_part
-PREHOOK: Output: default@src_stat_part@partitionid=1
-PREHOOK: Output: default@src_stat_part@partitionid=2
+PREHOOK: Input: default@src_stat_part_n1
+PREHOOK: Input: default@src_stat_part_n1@partitionid=1
+PREHOOK: Input: default@src_stat_part_n1@partitionid=2
+PREHOOK: Output: default@src_stat_part_n1
+PREHOOK: Output: default@src_stat_part_n1@partitionid=1
+PREHOOK: Output: default@src_stat_part_n1@partitionid=2
#### A masked pattern was here ####
-POSTHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key
+POSTHOOK: query: ANALYZE TABLE src_stat_part_n1 partition (partitionId) COMPUTE STATISTICS for columns key
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@src_stat_part
-POSTHOOK: Input: default@src_stat_part@partitionid=1
-POSTHOOK: Input: default@src_stat_part@partitionid=2
-POSTHOOK: Output: default@src_stat_part
-POSTHOOK: Output: default@src_stat_part@partitionid=1
-POSTHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: Input: default@src_stat_part_n1
+POSTHOOK: Input: default@src_stat_part_n1@partitionid=1
+POSTHOOK: Input: default@src_stat_part_n1@partitionid=2
+POSTHOOK: Output: default@src_stat_part_n1
+POSTHOOK: Output: default@src_stat_part_n1@partitionid=1
+POSTHOOK: Output: default@src_stat_part_n1@partitionid=2
#### A masked pattern was here ####
-PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) key
+PREHOOK: query: describe formatted src_stat_part_n1 PARTITION(partitionId=1) key
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@src_stat_part
-POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) key
+PREHOOK: Input: default@src_stat_part_n1
+POSTHOOK: query: describe formatted src_stat_part_n1 PARTITION(partitionId=1) key
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@src_stat_part
+POSTHOOK: Input: default@src_stat_part_n1
col_name key
data_type string
min
@@ -66,30 +66,30 @@ num_trues
num_falses
bitVector HL
comment from deserializer
-PREHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value
+PREHOOK: query: ANALYZE TABLE src_stat_part_n1 partition (partitionId) COMPUTE STATISTICS for columns key, value
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@src_stat_part
-PREHOOK: Input: default@src_stat_part@partitionid=1
-PREHOOK: Input: default@src_stat_part@partitionid=2
-PREHOOK: Output: default@src_stat_part
-PREHOOK: Output: default@src_stat_part@partitionid=1
-PREHOOK: Output: default@src_stat_part@partitionid=2
+PREHOOK: Input: default@src_stat_part_n1
+PREHOOK: Input: default@src_stat_part_n1@partitionid=1
+PREHOOK: Input: default@src_stat_part_n1@partitionid=2
+PREHOOK: Output: default@src_stat_part_n1
+PREHOOK: Output: default@src_stat_part_n1@partitionid=1
+PREHOOK: Output: default@src_stat_part_n1@partitionid=2
#### A masked pattern was here ####
-POSTHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value
+POSTHOOK: query: ANALYZE TABLE src_stat_part_n1 partition (partitionId) COMPUTE STATISTICS for columns key, value
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@src_stat_part
-POSTHOOK: Input: default@src_stat_part@partitionid=1
-POSTHOOK: Input: default@src_stat_part@partitionid=2
-POSTHOOK: Output: default@src_stat_part
-POSTHOOK: Output: default@src_stat_part@partitionid=1
-POSTHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: Input: default@src_stat_part_n1
+POSTHOOK: Input: default@src_stat_part_n1@partitionid=1
+POSTHOOK: Input: default@src_stat_part_n1@partitionid=2
+POSTHOOK: Output: default@src_stat_part_n1
+POSTHOOK: Output: default@src_stat_part_n1@partitionid=1
+POSTHOOK: Output: default@src_stat_part_n1@partitionid=2
#### A masked pattern was here ####
-PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) key
+PREHOOK: query: describe formatted src_stat_part_n1 PARTITION(partitionId=1) key
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@src_stat_part
-POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) key
+PREHOOK: Input: default@src_stat_part_n1
+POSTHOOK: query: describe formatted src_stat_part_n1 PARTITION(partitionId=1) key
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@src_stat_part
+POSTHOOK: Input: default@src_stat_part_n1
col_name key
data_type string
min
@@ -102,12 +102,12 @@ num_trues
num_falses
bitVector HL
comment from deserializer
-PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2) value
+PREHOOK: query: describe formatted src_stat_part_n1 PARTITION(partitionId=2) value
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@src_stat_part
-POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2) value
+PREHOOK: Input: default@src_stat_part_n1
+POSTHOOK: query: describe formatted src_stat_part_n1 PARTITION(partitionId=2) value
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@src_stat_part
+POSTHOOK: Input: default@src_stat_part_n1
col_name value
data_type string
min
diff --git a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
index 2cde0f912c..dfdfe3928e 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table over1k(
+PREHOOK: query: create table over1k_n4(
t tinyint,
si smallint,
i int,
@@ -14,8 +14,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: create table over1k(
+PREHOOK: Output: default@over1k_n4
+POSTHOOK: query: create table over1k_n4(
t tinyint,
si smallint,
i int,
@@ -31,42 +31,42 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: load data local inpath '../../data/files/over1k' overwrite into table over1k
+POSTHOOK: Output: default@over1k_n4
+PREHOOK: query: load data local inpath '../../data/files/over1k' overwrite into table over1k_n4
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: load data local inpath '../../data/files/over1k' overwrite into table over1k
+PREHOOK: Output: default@over1k_n4
+POSTHOOK: query: load data local inpath '../../data/files/over1k' overwrite into table over1k_n4
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: load data local inpath '../../data/files/over1k' into table over1k
+POSTHOOK: Output: default@over1k_n4
+PREHOOK: query: load data local inpath '../../data/files/over1k' into table over1k_n4
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: load data local inpath '../../data/files/over1k' into table over1k
+PREHOOK: Output: default@over1k_n4
+POSTHOOK: query: load data local inpath '../../data/files/over1k' into table over1k_n4
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: analyze table over1k compute statistics
+POSTHOOK: Output: default@over1k_n4
+PREHOOK: query: analyze table over1k_n4 compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1k
-POSTHOOK: query: analyze table over1k compute statistics
+PREHOOK: Input: default@over1k_n4
+PREHOOK: Output: default@over1k_n4
+POSTHOOK: query: analyze table over1k_n4 compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1k
-PREHOOK: query: analyze table over1k compute statistics for columns
+POSTHOOK: Input: default@over1k_n4
+POSTHOOK: Output: default@over1k_n4
+PREHOOK: query: analyze table over1k_n4 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1k
+PREHOOK: Input: default@over1k_n4
+PREHOOK: Output: default@over1k_n4
#### A masked pattern was here ####
-POSTHOOK: query: analyze table over1k compute statistics for columns
+POSTHOOK: query: analyze table over1k_n4 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1k
+POSTHOOK: Input: default@over1k_n4
+POSTHOOK: Output: default@over1k_n4
#### A masked pattern was here ####
-PREHOOK: query: explain select count(*) from over1k where (
+PREHOOK: query: explain select count(*) from over1k_n4 where (
(t=1 and si=2)
or (t=2 and si=3)
or (t=3 and si=4)
@@ -88,7 +88,7 @@ or (t=37 and si=38)
or (t=47 and si=48)
or (t=52 and si=53))
PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from over1k where (
+POSTHOOK: query: explain select count(*) from over1k_n4 where (
(t=1 and si=2)
or (t=2 and si=3)
or (t=3 and si=4)
@@ -119,7 +119,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: over1k
+ alias: over1k_n4
Statistics: Num rows: 2098 Data size: 16744 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (((t = 10Y) and (si = 11S)) or ((t = 11Y) and (si = 12S)) or ((t = 12Y) and (si = 13S)) or ((t = 13Y) and (si = 14S)) or ((t = 14Y) and (si = 15S)) or ((t = 15Y) and (si = 16S)) or ((t = 16Y) and (si = 17S)) or ((t = 17Y) and (si = 18S)) or ((t = 1Y) and (si = 2S)) or ((t = 27Y) and (si = 28S)) or ((t = 2Y) and (si = 3S)) or ((t = 37Y) and (si = 38S)) or ((t = 3Y) and (si = 4S)) or ((t = 47Y) and (si = 48S)) or ((t = 4Y) and (si = 5S)) or ((t = 52Y) and (si = 53S)) or ((t = 5Y) and (si = 6S)) or ((t = 6Y) and (si = 7S)) or ((t = 7Y) and (si = 8S)) or ((t = 9Y) and (si = 10S))) (type: boolean)
@@ -156,7 +156,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select count(*) from over1k where (
+PREHOOK: query: explain select count(*) from over1k_n4 where (
(t=1 and si=2)
or (t=2 and si=3)
or (t=3 and si=4)
@@ -178,7 +178,7 @@ or (t=37 and si=38)
or (t=47 and si=48)
or (t=52 and si=53))
PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from over1k where (
+POSTHOOK: query: explain select count(*) from over1k_n4 where (
(t=1 and si=2)
or (t=2 and si=3)
or (t=3 and si=4)
@@ -209,7 +209,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: over1k
+ alias: over1k_n4
Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (((t = 10Y) and (si = 11S)) or ((t = 11Y) and (si = 12S)) or ((t = 12Y) and (si = 13S)) or ((t = 13Y) and (si = 14S)) or ((t = 14Y) and (si = 15S)) or ((t = 15Y) and (si = 16S)) or ((t = 16Y) and (si = 17S)) or ((t = 17Y) and (si = 18S)) or ((t = 1Y) and (si = 2S)) or ((t = 27Y) and (si = 28S)) or ((t = 2Y) and (si = 3S)) or ((t = 37Y) and (si = 38S)) or ((t = 3Y) and (si = 4S)) or ((t = 47Y) and (si = 48S)) or ((t = 4Y) and (si = 5S)) or ((t = 52Y) and (si = 53S)) or ((t = 5Y) and (si = 6S)) or ((t = 6Y) and (si = 7S)) or ((t = 7Y) and (si = 8S)) or ((t = 9Y) and (si = 10S))) (type: boolean)
diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
index 7748ae057a..18c103bc15 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table if not exists loc_staging (
+PREHOOK: query: create table if not exists loc_staging_n2 (
state string,
locid int,
zip bigint,
@@ -6,8 +6,8 @@ PREHOOK: query: create table if not exists loc_staging (
) row format delimited fields terminated by '|' stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: create table if not exists loc_staging (
+PREHOOK: Output: default@loc_staging_n2
+POSTHOOK: query: create table if not exists loc_staging_n2 (
state string,
locid int,
zip bigint,
@@ -15,46 +15,46 @@ POSTHOOK: query: create table if not exists loc_staging (
) row format delimited fields terminated by '|' stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: create table loc_orc like loc_staging
+POSTHOOK: Output: default@loc_staging_n2
+PREHOOK: query: create table loc_orc_n2 like loc_staging_n2
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: create table loc_orc like loc_staging
+PREHOOK: Output: default@loc_orc_n2
+POSTHOOK: query: create table loc_orc_n2 like loc_staging_n2
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: alter table loc_orc set fileformat orc
+POSTHOOK: Output: default@loc_orc_n2
+PREHOOK: query: alter table loc_orc_n2 set fileformat orc
PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: alter table loc_orc set fileformat orc
+PREHOOK: Input: default@loc_orc_n2
+PREHOOK: Output: default@loc_orc_n2
+POSTHOOK: query: alter table loc_orc_n2 set fileformat orc
POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+POSTHOOK: Input: default@loc_orc_n2
+POSTHOOK: Output: default@loc_orc_n2
+PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n2
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+PREHOOK: Output: default@loc_staging_n2
+POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n2
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: insert overwrite table loc_orc select * from loc_staging
+POSTHOOK: Output: default@loc_staging_n2
+PREHOOK: query: insert overwrite table loc_orc_n2 select * from loc_staging_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@loc_staging
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: insert overwrite table loc_orc select * from loc_staging
+PREHOOK: Input: default@loc_staging_n2
+PREHOOK: Output: default@loc_orc_n2
+POSTHOOK: query: insert overwrite table loc_orc_n2 select * from loc_staging_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@loc_staging
-POSTHOOK: Output: default@loc_orc
-POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
-POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
-PREHOOK: query: explain select * from loc_orc
+POSTHOOK: Input: default@loc_staging_n2
+POSTHOOK: Output: default@loc_orc_n2
+POSTHOOK: Lineage: loc_orc_n2.locid SIMPLE [(loc_staging_n2)loc_staging_n2.FieldSchema(name:locid, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n2.state SIMPLE [(loc_staging_n2)loc_staging_n2.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n2.year SIMPLE [(loc_staging_n2)loc_staging_n2.FieldSchema(name:year, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n2.zip SIMPLE [(loc_staging_n2)loc_staging_n2.FieldSchema(name:zip, type:bigint, comment:null), ]
+PREHOOK: query: explain select * from loc_orc_n2
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc
+POSTHOOK: query: explain select * from loc_orc_n2
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -65,7 +65,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -73,26 +73,26 @@ STAGE PLANS:
Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
ListSink
-PREHOOK: query: analyze table loc_orc compute statistics for columns state
+PREHOOK: query: analyze table loc_orc_n2 compute statistics for columns state
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
+PREHOOK: Input: default@loc_orc_n2
+PREHOOK: Output: default@loc_orc_n2
#### A masked pattern was here ####
-POSTHOOK: query: analyze table loc_orc compute statistics for columns state
+POSTHOOK: query: analyze table loc_orc_n2 compute statistics for columns state
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
+POSTHOOK: Input: default@loc_orc_n2
+POSTHOOK: Output: default@loc_orc_n2
#### A masked pattern was here ####
PREHOOK: query: explain select a, c, min(b) from (
select state as a, locid as b, count(*) as c
- from loc_orc
+ from loc_orc_n2
group by state,locid
) sq1
group by a,c
PREHOOK: type: QUERY
POSTHOOK: query: explain select a, c, min(b) from (
select state as a, locid as b, count(*) as c
- from loc_orc
+ from loc_orc_n2
group by state,locid
) sq1
group by a,c
@@ -107,7 +107,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -178,19 +178,19 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year
+PREHOOK: query: analyze table loc_orc_n2 compute statistics for columns state,locid,year
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
+PREHOOK: Input: default@loc_orc_n2
+PREHOOK: Output: default@loc_orc_n2
#### A masked pattern was here ####
-POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year
+POSTHOOK: query: analyze table loc_orc_n2 compute statistics for columns state,locid,year
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
+POSTHOOK: Input: default@loc_orc_n2
+POSTHOOK: Output: default@loc_orc_n2
#### A masked pattern was here ####
-PREHOOK: query: explain select year from loc_orc group by year
+PREHOOK: query: explain select year from loc_orc_n2 group by year
PREHOOK: type: QUERY
-POSTHOOK: query: explain select year from loc_orc group by year
+POSTHOOK: query: explain select year from loc_orc_n2 group by year
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -201,7 +201,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: year (type: int)
@@ -238,9 +238,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -251,7 +251,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -288,9 +288,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with cube
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with cube
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -301,7 +301,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -339,9 +339,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with rollup
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with rollup
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -352,7 +352,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -390,9 +390,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by rollup( state,locid )
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by rollup( state,locid )
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by rollup( state,locid )
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by rollup( state,locid )
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -403,7 +403,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -441,9 +441,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state))
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state))
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state))
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state))
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -454,7 +454,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -492,9 +492,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid))
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid))
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -505,7 +505,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -543,9 +543,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid),())
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid),())
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -556,7 +556,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -594,9 +594,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),())
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state,locid),(state),(locid),())
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),())
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state,locid),(state),(locid),())
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -607,7 +607,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -645,9 +645,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select year from loc_orc group by year
+PREHOOK: query: explain select year from loc_orc_n2 group by year
PREHOOK: type: QUERY
-POSTHOOK: query: explain select year from loc_orc group by year
+POSTHOOK: query: explain select year from loc_orc_n2 group by year
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -658,7 +658,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: year (type: int)
@@ -695,9 +695,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with cube
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with cube
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -708,7 +708,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -746,9 +746,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,zip from loc_orc group by state,zip
+PREHOOK: query: explain select state,zip from loc_orc_n2 group by state,zip
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,zip from loc_orc group by state,zip
+POSTHOOK: query: explain select state,zip from loc_orc_n2 group by state,zip
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -759,7 +759,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 752 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), zip (type: bigint)
@@ -796,9 +796,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with cube
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with cube
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -809,7 +809,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -847,9 +847,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with rollup
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with rollup
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -860,7 +860,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -898,9 +898,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by rollup (state,locid)
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by rollup (state,locid)
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by rollup (state,locid)
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by rollup (state,locid)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -911,7 +911,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -949,9 +949,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state))
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state))
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state))
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state))
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -962,7 +962,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -1000,9 +1000,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid))
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid))
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid))
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -1013,7 +1013,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -1051,9 +1051,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid),())
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),())
+POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid),())
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -1064,7 +1064,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n2
Statistics: Num rows: 8 Data size: 796 Basic stats:
COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) @@ -1102,9 +1102,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1115,7 +1115,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n2 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) @@ -1153,9 +1153,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc_n2 group by year PREHOOK: type: QUERY -POSTHOOK: query: explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc_n2 group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1166,7 +1166,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n2 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: year (type: int) @@ -1203,9 +1203,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc_n2 group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1216,7 +1216,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n2 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) diff --git a/ql/src/test/results/clientpositive/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/annotate_stats_join.q.out index 5eed9bf103..b7b429c5e7 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_join.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_join.q.out @@ -1,33 +1,33 @@ -PREHOOK: query: create table if not exists emp ( +PREHOOK: query: create table if not exists emp_n2 ( lastname string, deptid int, locid int ) row format delimited fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@emp -POSTHOOK: query: create table if not exists emp ( +PREHOOK: Output: default@emp_n2 +POSTHOOK: query: create table if not exists emp_n2 ( lastname string, deptid int, locid int ) row format delimited fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@emp -PREHOOK: query: create table if not exists dept ( +POSTHOOK: Output: default@emp_n2 +PREHOOK: query: create table if not exists dept_n1 ( deptid int, 
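Editor's note: the grouping-set plans above all share one TableScan shape because CUBE and ROLLUP are shorthand for explicit GROUPING SETS. A minimal HiveQL sketch of that equivalence, reusing the loc_orc_n2 fixture from the diff above (illustration only, not part of the golden file):

-- CUBE expands to every subset of the grouping keys, so these two
-- statements should produce the same plan and the same row estimate:
explain select state,locid from loc_orc_n2 group by state,locid with cube;
explain select state,locid from loc_orc_n2
group by state,locid grouping sets ((state,locid),(state),(locid),());
-- ROLLUP keeps only the key prefixes (state,locid), (state), ():
explain select state,locid from loc_orc_n2 group by rollup (state,locid);
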
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/annotate_stats_join.q.out
index 5eed9bf103..b7b429c5e7 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join.q.out
@@ -1,33 +1,33 @@
-PREHOOK: query: create table if not exists emp (
+PREHOOK: query: create table if not exists emp_n2 (
   lastname string,
   deptid int,
   locid int
 ) row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@emp
-POSTHOOK: query: create table if not exists emp (
+PREHOOK: Output: default@emp_n2
+POSTHOOK: query: create table if not exists emp_n2 (
   lastname string,
   deptid int,
   locid int
 ) row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@emp
-PREHOOK: query: create table if not exists dept (
+POSTHOOK: Output: default@emp_n2
+PREHOOK: query: create table if not exists dept_n1 (
   deptid int,
   deptname string
 ) row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dept
-POSTHOOK: query: create table if not exists dept (
+PREHOOK: Output: default@dept_n1
+POSTHOOK: query: create table if not exists dept_n1 (
   deptid int,
   deptname string
 ) row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dept
+POSTHOOK: Output: default@dept_n1
 PREHOOK: query: create table if not exists loc (
   state string,
   locid int,
@@ -46,22 +46,22 @@ POSTHOOK: query: create table if not exists loc (
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@loc
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp_n2
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@emp
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp
+PREHOOK: Output: default@emp_n2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp_n2
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@emp
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept
+POSTHOOK: Output: default@emp_n2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@dept
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept
+PREHOOK: Output: default@dept_n1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@dept
+POSTHOOK: Output: default@dept_n1
 PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
@@ -70,22 +70,22 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INT
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@loc
-PREHOOK: query: analyze table emp compute statistics
+PREHOOK: query: analyze table emp_n2 compute statistics
 PREHOOK: type: QUERY
-PREHOOK: Input: default@emp
-PREHOOK: Output: default@emp
-POSTHOOK: query: analyze table emp compute statistics
+PREHOOK: Input: default@emp_n2
+PREHOOK: Output: default@emp_n2
+POSTHOOK: query: analyze table emp_n2 compute statistics
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@emp
-POSTHOOK: Output: default@emp
-PREHOOK: query: analyze table dept compute statistics
+POSTHOOK: Input: default@emp_n2
+POSTHOOK: Output: default@emp_n2
+PREHOOK: query: analyze table dept_n1 compute statistics
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dept
-PREHOOK: Output: default@dept
-POSTHOOK: query: analyze table dept compute statistics
+PREHOOK: Input: default@dept_n1
+PREHOOK: Output: default@dept_n1
+POSTHOOK: query: analyze table dept_n1 compute statistics
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dept
-POSTHOOK: Output: default@dept
+POSTHOOK: Input: default@dept_n1
+POSTHOOK: Output: default@dept_n1
 PREHOOK: query: analyze table loc compute statistics
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc
@@ -94,25 +94,25 @@ POSTHOOK: query: analyze table loc compute statistics
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc
 POSTHOOK: Output: default@loc
-PREHOOK: query: analyze table emp compute statistics for columns lastname,deptid,locid
+PREHOOK: query: analyze table emp_n2 compute statistics for columns lastname,deptid,locid
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@emp
-PREHOOK: Output: default@emp
+PREHOOK: Input: default@emp_n2
+PREHOOK: Output: default@emp_n2
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table emp compute statistics for columns lastname,deptid,locid
+POSTHOOK: query: analyze table emp_n2 compute statistics for columns lastname,deptid,locid
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@emp
-POSTHOOK: Output: default@emp
+POSTHOOK: Input: default@emp_n2
+POSTHOOK: Output: default@emp_n2
 #### A masked pattern was here ####
-PREHOOK: query: analyze table dept compute statistics for columns deptname,deptid
+PREHOOK: query: analyze table dept_n1 compute statistics for columns deptname,deptid
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@dept
-PREHOOK: Output: default@dept
+PREHOOK: Input: default@dept_n1
+PREHOOK: Output: default@dept_n1
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table dept compute statistics for columns deptname,deptid
+POSTHOOK: query: analyze table dept_n1 compute statistics for columns deptname,deptid
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@dept
-POSTHOOK: Output: default@dept
+POSTHOOK: Input: default@dept_n1
+POSTHOOK: Output: default@dept_n1
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc compute statistics for columns state,locid,zip,year
 PREHOOK: type: ANALYZE_TABLE
@@ -124,9 +124,9 @@ POSTHOOK: type: ANALYZE_TABLE
 POSTHOOK: Input: default@loc
 POSTHOOK: Output: default@loc
 #### A masked pattern was here ####
-PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid)
+PREHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid)
+POSTHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -191,9 +191,9 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname
+PREHOOK: query: explain select * from emp_n2,dept_n1 where emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname
+POSTHOOK: query: explain select * from emp_n2,dept_n1 where emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -204,7 +204,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: emp
+            alias: emp_n2
             Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and lastname is not null) (type: boolean)
@@ -220,7 +220,7 @@ STAGE PLANS:
                 Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                 value expressions: _col2 (type: int)
          TableScan
-            alias: dept
+            alias: dept_n1
             Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and deptname is not null) (type: boolean)
@@ -257,9 +257,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname)
+PREHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid and e.lastname = d.deptname)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname)
+POSTHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid and e.lastname = d.deptname)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -323,9 +323,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname
+PREHOOK: query: explain select * from emp_n2,dept_n1 where emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname
+POSTHOOK: query: explain select * from emp_n2,dept_n1 where emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -336,7 +336,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: emp
+            alias: emp_n2
             Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and lastname is not null) (type: boolean)
@@ -352,7 +352,7 @@ STAGE PLANS:
                 Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                 value expressions: _col2 (type: int)
          TableScan
-            alias: dept
+            alias: dept_n1
             Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and deptname is not null) (type: boolean)
@@ -389,9 +389,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid)
+PREHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid) join emp_n2 e1 on (e.deptid = e1.deptid)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid)
+POSTHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid) join emp_n2 e1 on (e.deptid = e1.deptid)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -474,9 +474,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid)
+PREHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid)
+POSTHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -559,9 +559,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state)
+PREHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state)
+POSTHOOK: query: explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -643,9 +643,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname
+PREHOOK: query: explain select * from emp_n2 left outer join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname
+POSTHOOK: query: explain select * from emp_n2 left outer join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -656,7 +656,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: emp
+            alias: emp_n2
             Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: lastname (type: string), deptid (type: int), locid (type: int)
@@ -669,7 +669,7 @@ STAGE PLANS:
                 Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                 value expressions: _col2 (type: int)
          TableScan
-            alias: dept
+            alias: dept_n1
             Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: deptid (type: int), deptname (type: string)
@@ -703,9 +703,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname
+PREHOOK: query: explain select * from emp_n2 left semi join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname
+POSTHOOK: query: explain select * from emp_n2 left semi join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -716,7 +716,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: emp
+            alias: emp_n2
             Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and lastname is not null) (type: boolean)
@@ -732,7 +732,7 @@ STAGE PLANS:
                 Statistics: Num rows: 48 Data size: 4752 Basic stats: COMPLETE Column stats: COMPLETE
                 value expressions: _col2 (type: int)
          TableScan
-            alias: dept
+            alias: dept_n1
             Statistics: Num rows: 6 Data size: 570 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (deptid is not null and deptname is not null) (type: boolean)
@@ -774,9 +774,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
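Editor's note: a hedged sketch of why the emp_n2/dept_n1 plans above carry Column stats: COMPLETE. Both basic and column statistics must exist before the optimizer can size the join from distinct-value counts on the deptid keys; hive.stats.fetch.column.stats controls whether column stats are fetched from the metastore at planning time (session setup assumed here, not part of the golden file):

set hive.stats.fetch.column.stats=true;
analyze table emp_n2 compute statistics for columns lastname,deptid,locid;
analyze table dept_n1 compute statistics for columns deptname,deptid;
-- With 48 emp_n2 rows and 6 dept_n1 rows, the Reduce Sink estimates above
-- follow directly from these column statistics:
explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid);
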
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
index 8778b83126..aa5dba0eaf 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: drop table store_sales
+PREHOOK: query: drop table store_sales_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table store_sales
+POSTHOOK: query: drop table store_sales_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table store
+PREHOOK: query: drop table store_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table store
+POSTHOOK: query: drop table store_n0
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: drop table customer_address
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table customer_address
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table store_sales
+PREHOOK: query: create table store_sales_n0
 (
   ss_sold_date_sk int,
   ss_sold_time_sk int,
@@ -39,8 +39,8 @@ PREHOOK: query: create table store_sales
 row format delimited fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@store_sales
-POSTHOOK: query: create table store_sales
+PREHOOK: Output: default@store_sales_n0
+POSTHOOK: query: create table store_sales_n0
 (
   ss_sold_date_sk int,
   ss_sold_time_sk int,
@@ -69,8 +69,8 @@ POSTHOOK: query: create table store_sales
 row format delimited fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@store_sales
-PREHOOK: query: create table store
+POSTHOOK: Output: default@store_sales_n0
+PREHOOK: query: create table store_n0
 (
   s_store_sk int,
   s_store_id string,
@@ -105,8 +105,8 @@ PREHOOK: query: create table store
 row format delimited fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@store
-POSTHOOK: query: create table store
+PREHOOK: Output: default@store_n0
+POSTHOOK: query: create table store_n0
 (
   s_store_sk int,
   s_store_id string,
@@ -141,7 +141,7 @@ POSTHOOK: query: create table store
 row format delimited fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@store
+POSTHOOK: Output: default@store_n0
 PREHOOK: query: create table store_bigint
 (
   s_store_sk bigint,
@@ -254,14 +254,14 @@ row format delimited fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@customer_address
-PREHOOK: query: load data local inpath '../../data/files/store.txt' overwrite into table store
+PREHOOK: query: load data local inpath '../../data/files/store.txt' overwrite into table store_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@store
-POSTHOOK: query: load data local inpath '../../data/files/store.txt' overwrite into table store
+PREHOOK: Output: default@store_n0
+POSTHOOK: query: load data local inpath '../../data/files/store.txt' overwrite into table store_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@store
+POSTHOOK: Output: default@store_n0
 PREHOOK: query: load data local inpath '../../data/files/store.txt' overwrite into table store_bigint
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
@@ -270,14 +270,14 @@ POSTHOOK: query: load data local inpath '../../data/files/store.txt' overwrite i
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@store_bigint
-PREHOOK: query: load data local inpath '../../data/files/store_sales.txt' overwrite into table store_sales
+PREHOOK: query: load data local inpath '../../data/files/store_sales.txt' overwrite into table store_sales_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@store_sales
-POSTHOOK: query: load data local inpath '../../data/files/store_sales.txt' overwrite into table store_sales
+PREHOOK: Output: default@store_sales_n0
+POSTHOOK: query: load data local inpath '../../data/files/store_sales.txt' overwrite into table store_sales_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@store_sales
+POSTHOOK: Output: default@store_sales_n0
 PREHOOK: query: load data local inpath '../../data/files/customer_address.txt' overwrite into table customer_address
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
@@ -286,23 +286,23 @@ POSTHOOK: query: load data local inpath '../../data/files/customer_address.txt'
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@customer_address
-PREHOOK: query: analyze table store compute statistics
+PREHOOK: query: analyze table store_n0 compute statistics
 PREHOOK: type: QUERY
-PREHOOK: Input: default@store
-PREHOOK: Output: default@store
-POSTHOOK: query: analyze table store compute statistics
+PREHOOK: Input: default@store_n0
+PREHOOK: Output: default@store_n0
+POSTHOOK: query: analyze table store_n0 compute statistics
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@store
-POSTHOOK: Output: default@store
-PREHOOK: query: analyze table store compute statistics for columns s_store_sk, s_floor_space
+POSTHOOK: Input: default@store_n0
+POSTHOOK: Output: default@store_n0
+PREHOOK: query: analyze table store_n0 compute statistics for columns s_store_sk, s_floor_space
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@store
-PREHOOK: Output: default@store
+PREHOOK: Input: default@store_n0
+PREHOOK: Output: default@store_n0
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table store compute statistics for columns s_store_sk, s_floor_space
+POSTHOOK: query: analyze table store_n0 compute statistics for columns s_store_sk, s_floor_space
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@store
-POSTHOOK: Output: default@store
+POSTHOOK: Input: default@store_n0
+POSTHOOK: Output: default@store_n0
 #### A masked pattern was here ####
 PREHOOK: query: analyze table store_bigint compute statistics
 PREHOOK: type: QUERY
@@ -322,23 +322,23 @@ POSTHOOK: type: ANALYZE_TABLE
 POSTHOOK: Input: default@store_bigint
 POSTHOOK: Output: default@store_bigint
 #### A masked pattern was here ####
-PREHOOK: query: analyze table store_sales compute statistics
+PREHOOK: query: analyze table store_sales_n0 compute statistics
 PREHOOK: type: QUERY
-PREHOOK: Input: default@store_sales
-PREHOOK: Output: default@store_sales
-POSTHOOK: query: analyze table store_sales compute statistics
+PREHOOK: Input: default@store_sales_n0
+PREHOOK: Output: default@store_sales_n0
+POSTHOOK: query: analyze table store_sales_n0 compute statistics
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@store_sales
-POSTHOOK: Output: default@store_sales
-PREHOOK: query: analyze table store_sales compute statistics for columns ss_store_sk, ss_addr_sk, ss_quantity
+POSTHOOK: Input: default@store_sales_n0
+POSTHOOK: Output: default@store_sales_n0
+PREHOOK: query: analyze table store_sales_n0 compute statistics for columns ss_store_sk, ss_addr_sk, ss_quantity
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@store_sales
-PREHOOK: Output: default@store_sales
+PREHOOK: Input: default@store_sales_n0
+PREHOOK: Output: default@store_sales_n0
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table store_sales compute statistics for columns ss_store_sk, ss_addr_sk, ss_quantity
+POSTHOOK: query: analyze table store_sales_n0 compute statistics for columns ss_store_sk, ss_addr_sk, ss_quantity
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@store_sales
-POSTHOOK: Output: default@store_sales
+POSTHOOK: Input: default@store_sales_n0
+POSTHOOK: Output: default@store_sales_n0
 #### A masked pattern was here ####
 PREHOOK: query: analyze table customer_address compute statistics
 PREHOOK: type: QUERY
@@ -358,9 +358,9 @@ POSTHOOK: type: ANALYZE_TABLE
 POSTHOOK: Input: default@customer_address
 POSTHOOK: Output: default@customer_address
 #### A masked pattern was here ####
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk)
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk)
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -423,9 +423,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store_bigint s join store_sales ss on (s.s_store_sk = ss.ss_store_sk)
+PREHOOK: query: explain select s.s_store_sk from store_bigint s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store_bigint s join store_sales ss on (s.s_store_sk = ss.ss_store_sk)
+POSTHOOK: query: explain select s.s_store_sk from store_bigint s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -488,9 +488,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 0
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 0
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -553,9 +553,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_company_id > 0 and ss.ss_quantity > 10
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_company_id > 0 and ss.ss_quantity > 10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_company_id > 0 and ss.ss_quantity > 10
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_company_id > 0 and ss.ss_quantity > 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -618,9 +618,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 0
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 0
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -683,9 +683,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -748,9 +748,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk)
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk)
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -834,9 +834,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 1000
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 1000
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 1000
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 1000
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -920,9 +920,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 1000
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 1000
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 1000
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 1000
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1006,9 +1006,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1092,9 +1092,9 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join customer_address ca on (ca.ca_address_sk = ss.ss_addr_sk)
+PREHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join customer_address ca on (ca.ca_address_sk = ss.ss_addr_sk)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk) join customer_address ca on (ca.ca_address_sk = ss.ss_addr_sk)
+POSTHOOK: query: explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk) join customer_address ca on (ca.ca_address_sk = ss.ss_addr_sk)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1204,22 +1204,22 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: drop table store_sales
+PREHOOK: query: drop table store_sales_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@store_sales
-PREHOOK: Output: default@store_sales
-POSTHOOK: query: drop table store_sales
+PREHOOK: Input: default@store_sales_n0
+PREHOOK: Output: default@store_sales_n0
+POSTHOOK: query: drop table store_sales_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@store_sales
-POSTHOOK: Output: default@store_sales
-PREHOOK: query: drop table store
+POSTHOOK: Input: default@store_sales_n0
+POSTHOOK: Output: default@store_sales_n0
+PREHOOK: query: drop table store_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@store
-PREHOOK: Output: default@store
-POSTHOOK: query: drop table store
+PREHOOK: Input: default@store_n0
+PREHOOK: Output: default@store_n0
+POSTHOOK: query: drop table store_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@store
-POSTHOOK: Output: default@store
+POSTHOOK: Input: default@store_n0
+POSTHOOK: Output: default@store_n0
 PREHOOK: query: drop table store_bigint
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@store_bigint
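Editor's note: the pkfk plans above rest on primary-key/foreign-key inference. store_n0.s_store_sk is treated as the key side because its distinct-value count is close to its row count, so a filter on the store side scales the join estimate down proportionally, while a filter on the fact side (ss_quantity) does not. A small sketch for inspecting what the estimator sees, using standard Hive commands against the renamed fixtures above (illustration, not part of the golden file):

-- Show the stored column statistics (NDV, nulls, min/max) for each key:
describe formatted store_n0 s_store_sk;
describe formatted store_sales_n0 ss_store_sk;
-- A key-side filter like this should shrink the estimated join output:
explain select s.s_store_sk
from store_n0 s join store_sales_n0 ss on (s.s_store_sk = ss.ss_store_sk)
where s.s_floor_space > 1000;
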
diff --git a/ql/src/test/results/clientpositive/annotate_stats_limit.q.out b/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
index 98da1404af..16f8a4f710 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table if not exists loc_staging (
+PREHOOK: query: create table if not exists loc_staging_n5 (
   state string,
   locid int,
   zip bigint,
@@ -6,8 +6,8 @@ PREHOOK: query: create table if not exists loc_staging (
 ) row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: create table if not exists loc_staging (
+PREHOOK: Output: default@loc_staging_n5
+POSTHOOK: query: create table if not exists loc_staging_n5 (
   state string,
   locid int,
   zip bigint,
@@ -15,56 +15,56 @@ POSTHOOK: query: create table if not exists loc_staging (
 ) row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: create table loc_orc like loc_staging
+POSTHOOK: Output: default@loc_staging_n5
+PREHOOK: query: create table loc_orc_n5 like loc_staging_n5
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: create table loc_orc like loc_staging
+PREHOOK: Output: default@loc_orc_n5
+POSTHOOK: query: create table loc_orc_n5 like loc_staging_n5
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: alter table loc_orc set fileformat orc
+POSTHOOK: Output: default@loc_orc_n5
+PREHOOK: query: alter table loc_orc_n5 set fileformat orc
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: alter table loc_orc set fileformat orc
+PREHOOK: Input: default@loc_orc_n5
+PREHOOK: Output: default@loc_orc_n5
+POSTHOOK: query: alter table loc_orc_n5 set fileformat orc
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+POSTHOOK: Input: default@loc_orc_n5
+POSTHOOK: Output: default@loc_orc_n5
+PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n5
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+PREHOOK: Output: default@loc_staging_n5
+POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n5
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: insert overwrite table loc_orc select * from loc_staging
+POSTHOOK: Output: default@loc_staging_n5
+PREHOOK: query: insert overwrite table loc_orc_n5 select * from loc_staging_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@loc_staging
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: insert overwrite table loc_orc select * from loc_staging
+PREHOOK: Input: default@loc_staging_n5
+PREHOOK: Output: default@loc_orc_n5
+POSTHOOK: query: insert overwrite table loc_orc_n5 select * from loc_staging_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@loc_staging
-POSTHOOK: Output: default@loc_orc
-POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
-POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
-PREHOOK: query: analyze table loc_orc compute statistics for columns state, locid, zip, year
+POSTHOOK: Input: default@loc_staging_n5
+POSTHOOK: Output: default@loc_orc_n5
+POSTHOOK: Lineage: loc_orc_n5.locid SIMPLE [(loc_staging_n5)loc_staging_n5.FieldSchema(name:locid, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n5.state SIMPLE [(loc_staging_n5)loc_staging_n5.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n5.year SIMPLE [(loc_staging_n5)loc_staging_n5.FieldSchema(name:year, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n5.zip SIMPLE [(loc_staging_n5)loc_staging_n5.FieldSchema(name:zip, type:bigint, comment:null), ]
+PREHOOK: query: analyze table loc_orc_n5 compute statistics for columns state, locid, zip, year
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
+PREHOOK: Input: default@loc_orc_n5
+PREHOOK: Output: default@loc_orc_n5
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table loc_orc compute statistics for columns state, locid, zip, year
+POSTHOOK: query: analyze table loc_orc_n5 compute statistics for columns state, locid, zip, year
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
+POSTHOOK: Input: default@loc_orc_n5
+POSTHOOK: Output: default@loc_orc_n5
 #### A masked pattern was here ####
-PREHOOK: query: explain select * from loc_orc
+PREHOOK: query: explain select * from loc_orc_n5
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc
+POSTHOOK: query: explain select * from loc_orc_n5
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -75,7 +75,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: loc_orc
+          alias: loc_orc_n5
           Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -83,9 +83,9 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
-PREHOOK: query: explain select * from loc_orc limit 4
+PREHOOK: query: explain select * from loc_orc_n5 limit 4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc limit 4
+POSTHOOK: query: explain select * from loc_orc_n5 limit 4
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -96,7 +96,7 @@ STAGE PLANS:
       limit: 4
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n5
          Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -107,9 +107,9 @@ STAGE PLANS:
            Statistics: Num rows: 4 Data size: 408 Basic stats: COMPLETE Column stats: COMPLETE
            ListSink
 
-PREHOOK: query: explain select * from loc_orc limit 16
+PREHOOK: query: explain select * from loc_orc_n5 limit 16
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc limit 16
+POSTHOOK: query: explain select * from loc_orc_n5 limit 16
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -120,7 +120,7 @@ STAGE PLANS:
       limit: 16
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n5
          Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -131,9 +131,9 @@ STAGE PLANS:
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            ListSink
 
-PREHOOK: query: explain select * from loc_orc limit 0
+PREHOOK: query: explain select * from loc_orc_n5 limit 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc limit 0
+POSTHOOK: query: explain select * from loc_orc_n5 limit 0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
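Editor's note: the limit plans above all follow one rule: the annotated row count is min(limit, input rows), with data size scaled proportionally. A sketch against the 8-row loc_orc_n5 fixture from the diff (expected estimates in comments, taken from the plans above):

explain select * from loc_orc_n5 limit 4;   -- Num rows: 4, Data size: 408
explain select * from loc_orc_n5 limit 16;  -- clamped to Num rows: 8
explain select * from loc_orc_n5 limit 0;   -- no rows ever produced
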
diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
index a65bde5570..9e45101fe6 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table if not exists loc_staging (
+PREHOOK: query: create table if not exists loc_staging_n4 (
   state string,
   locid int,
   zip bigint,
@@ -6,8 +6,8 @@ PREHOOK: query: create table if not exists loc_staging (
 ) row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: create table if not exists loc_staging (
+PREHOOK: Output: default@loc_staging_n4
+POSTHOOK: query: create table if not exists loc_staging_n4 (
   state string,
   locid int,
   zip bigint,
@@ -15,34 +15,34 @@ POSTHOOK: query: create table if not exists loc_staging (
 ) row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging
+POSTHOOK: Output: default@loc_staging_n4
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging_n4
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging
+PREHOOK: Output: default@loc_staging_n4
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging_n4
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: create table if not exists loc_orc (
+POSTHOOK: Output: default@loc_staging_n4
+PREHOOK: query: create table if not exists loc_orc_n4 (
   state string,
   locid int,
   zip bigint
 ) partitioned by(year string) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: create table if not exists loc_orc (
+PREHOOK: Output: default@loc_orc_n4
+POSTHOOK: query: create table if not exists loc_orc_n4 (
   state string,
   locid int,
   zip bigint
 ) partitioned by(year string) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: explain select * from loc_orc
+POSTHOOK: Output: default@loc_orc_n4
+PREHOOK: query: explain select * from loc_orc_n4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc
+POSTHOOK: query: explain select * from loc_orc_n4
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -53,7 +53,7 @@ STAGE PLANS:
       limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n4
          Statistics: Num rows: 1 Data size: 380 Basic stats: COMPLETE Column stats: PARTIAL
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
@@ -61,24 +61,24 @@ STAGE PLANS:
            Statistics: Num rows: 1 Data size: 380 Basic stats: COMPLETE Column stats: PARTIAL
            ListSink
 
-PREHOOK: query: insert overwrite table loc_orc partition(year) select * from loc_staging
+PREHOOK: query: insert overwrite table loc_orc_n4 partition(year) select * from loc_staging_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@loc_staging
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: insert overwrite table loc_orc partition(year) select * from loc_staging
+PREHOOK: Input: default@loc_staging_n4
+PREHOOK: Output: default@loc_orc_n4
+POSTHOOK: query: insert overwrite table loc_orc_n4 partition(year) select * from loc_staging_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@loc_staging
-POSTHOOK: Output: default@loc_orc@year=2001
-POSTHOOK: Output: default@loc_orc@year=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: loc_orc PARTITION(year=2001).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc PARTITION(year=2001).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
-POSTHOOK: Lineage: loc_orc PARTITION(year=2001).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
-POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
-POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
-PREHOOK: query: explain select * from loc_orc
+POSTHOOK: Input: default@loc_staging_n4
+POSTHOOK: Output: default@loc_orc_n4@year=2001
+POSTHOOK: Output: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: loc_orc_n4 PARTITION(year=2001).locid SIMPLE [(loc_staging_n4)loc_staging_n4.FieldSchema(name:locid, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n4 PARTITION(year=2001).state SIMPLE [(loc_staging_n4)loc_staging_n4.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n4 PARTITION(year=2001).zip SIMPLE [(loc_staging_n4)loc_staging_n4.FieldSchema(name:zip, type:bigint, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n4 PARTITION(year=__HIVE_DEFAULT_PARTITION__).locid SIMPLE [(loc_staging_n4)loc_staging_n4.FieldSchema(name:locid, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n4 PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging_n4)loc_staging_n4.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n4 PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging_n4)loc_staging_n4.FieldSchema(name:zip, type:bigint, comment:null), ]
+PREHOOK: query: explain select * from loc_orc_n4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc
+POSTHOOK: query: explain select * from loc_orc_n4
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -89,7 +89,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n4
          Statistics: Num rows: 18 Data size: 14640 Basic stats: COMPLETE Column stats: PARTIAL
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
@@ -97,19 +97,19 @@ STAGE PLANS:
            Statistics: Num rows: 18 Data size: 6840 Basic stats: COMPLETE Column stats: PARTIAL
            ListSink
 
-PREHOOK: query: analyze table loc_orc partition(year='2001') compute statistics
+PREHOOK: query: analyze table loc_orc_n4 partition(year='2001') compute statistics
 PREHOOK: type: QUERY
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
-PREHOOK: Output: default@loc_orc@year=2001
-POSTHOOK: query: analyze table loc_orc partition(year='2001') compute statistics
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Output: default@loc_orc_n4
+PREHOOK: Output: default@loc_orc_n4@year=2001
+POSTHOOK: query: analyze table loc_orc_n4 partition(year='2001') compute statistics
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
-POSTHOOK: Output: default@loc_orc@year=2001
-PREHOOK: query: explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Output: default@loc_orc_n4
+POSTHOOK: Output: default@loc_orc_n4@year=2001
+PREHOOK: query: explain select * from loc_orc_n4 where year='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: query: explain select * from loc_orc_n4 where year='__HIVE_DEFAULT_PARTITION__'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -120,7 +120,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n4
          Statistics: Num rows: 8 Data size: 5048 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), '__HIVE_DEFAULT_PARTITION__' (type: string)
@@ -128,9 +128,9 @@ STAGE PLANS:
            Statistics: Num rows: 8 Data size: 5048 Basic stats: COMPLETE Column stats: NONE
            ListSink
 
-PREHOOK: query: explain select * from loc_orc
+PREHOOK: query: explain select * from loc_orc_n4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc
+POSTHOOK: query: explain select * from loc_orc_n4
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -141,7 +141,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n4
          Statistics: Num rows: 7 Data size: 3338 Basic stats: PARTIAL Column stats: PARTIAL
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
@@ -149,9 +149,9 @@ STAGE PLANS:
            Statistics: Num rows: 7 Data size: 3338 Basic stats: PARTIAL Column stats: PARTIAL
            ListSink
 
-PREHOOK: query: explain select * from loc_orc where year='2001'
+PREHOOK: query: explain select * from loc_orc_n4 where year='2001'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where year='2001'
+POSTHOOK: query: explain select * from loc_orc_n4 where year='2001'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -162,7 +162,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n4
          Statistics: Num rows: 7 Data size: 2050 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), '2001' (type: string)
@@ -170,21 +170,21 @@ STAGE PLANS:
            Statistics: Num rows: 7 Data size: 2050 Basic stats: COMPLETE Column stats: NONE
            ListSink
 
-PREHOOK: query: analyze table loc_orc partition(year) compute statistics
+PREHOOK: query: analyze table loc_orc_n4 partition(year) compute statistics
 PREHOOK: type: QUERY
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
-PREHOOK: Output: default@loc_orc@year=2001
-PREHOOK: Output: default@loc_orc@year=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: query: analyze table loc_orc partition(year) compute statistics
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Output: default@loc_orc_n4
+PREHOOK: Output: default@loc_orc_n4@year=2001
+PREHOOK: Output: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: query: analyze table loc_orc_n4 partition(year) compute statistics
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
-POSTHOOK: Output: default@loc_orc@year=2001
-POSTHOOK: Output: default@loc_orc@year=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Output: default@loc_orc_n4
+POSTHOOK: Output: default@loc_orc_n4@year=2001
+POSTHOOK: Output: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+PREHOOK: query: explain select * from loc_orc_n4 where year='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: query: explain select * from loc_orc_n4 where year='__HIVE_DEFAULT_PARTITION__'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -195,7 +195,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n4
          Statistics: Num rows: 1 Data size: 292 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), '__HIVE_DEFAULT_PARTITION__' (type: string)
@@ -203,9 +203,9 @@ STAGE PLANS:
            Statistics: Num rows: 1 Data size: 292 Basic stats: COMPLETE Column stats: NONE
            ListSink
 
-PREHOOK: query: explain select * from loc_orc
+PREHOOK: query: explain select * from loc_orc_n4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc
+POSTHOOK: query: explain select * from loc_orc_n4
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -216,7 +216,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n4
          Statistics: Num rows: 8 Data size: 3814 Basic stats: COMPLETE Column stats: PARTIAL
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
@@ -224,9 +224,9 @@ STAGE PLANS:
            Statistics: Num rows: 8 Data size: 3040 Basic stats: COMPLETE Column stats: PARTIAL
            ListSink
 
-PREHOOK: query: explain select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__'
+PREHOOK: query: explain select * from loc_orc_n4 where year='2001' or year='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: query: explain select * from loc_orc_n4 where year='2001' or year='__HIVE_DEFAULT_PARTITION__'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -237,7 +237,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n4
          Statistics: Num rows: 8 Data size: 3814 Basic stats: COMPLETE Column stats: PARTIAL
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
@@ -245,9 +245,9 @@ STAGE PLANS:
            Statistics: Num rows: 8 Data size: 3040 Basic stats: COMPLETE Column stats: PARTIAL
            ListSink
 
-PREHOOK: query: explain select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__'
+PREHOOK: query: explain select * from loc_orc_n4 where year='2001' and year='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__'
+POSTHOOK: query: explain select * from loc_orc_n4 where year='2001' and year='__HIVE_DEFAULT_PARTITION__'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -258,7 +258,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 1 Data size: 380 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: false (type: boolean)
@@ -269,23 +269,23 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 380 Basic stats: COMPLETE Column stats: PARTIAL
ListSink
-PREHOOK: query: analyze table loc_orc partition(year='2001') compute statistics for columns state,locid
+PREHOOK: query: analyze table loc_orc_n4 partition(year='2001') compute statistics for columns state,locid
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@loc_orc
-PREHOOK: Input: default@loc_orc@year=2001
-PREHOOK: Output: default@loc_orc
-PREHOOK: Output: default@loc_orc@year=2001
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Input: default@loc_orc_n4@year=2001
+PREHOOK: Output: default@loc_orc_n4
+PREHOOK: Output: default@loc_orc_n4@year=2001
#### A masked pattern was here ####
-POSTHOOK: query: analyze table loc_orc partition(year='2001') compute statistics for columns state,locid
+POSTHOOK: query: analyze table loc_orc_n4 partition(year='2001') compute statistics for columns state,locid
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Input: default@loc_orc@year=2001
-POSTHOOK: Output: default@loc_orc
-POSTHOOK: Output: default@loc_orc@year=2001
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Input: default@loc_orc_n4@year=2001
+POSTHOOK: Output: default@loc_orc_n4
+POSTHOOK: Output: default@loc_orc_n4@year=2001
#### A masked pattern was here ####
-PREHOOK: query: explain select zip from loc_orc
+PREHOOK: query: explain select zip from loc_orc_n4
PREHOOK: type: QUERY
-POSTHOOK: query: explain select zip from loc_orc
+POSTHOOK: query: explain select zip from loc_orc_n4
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -296,7 +296,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 8 Data size: 838 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: zip (type: bigint)
@@ -304,9 +304,9 @@ STAGE PLANS:
Statistics: Num rows: 8 Data size: 838 Basic stats: COMPLETE Column stats: NONE
ListSink
-PREHOOK: query: explain select state from loc_orc
+PREHOOK: query: explain select state from loc_orc_n4
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state from loc_orc
+POSTHOOK: query: explain select state from loc_orc_n4
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -317,7 +317,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: state (type: string)
@@ -325,9 +325,9 @@ STAGE PLANS:
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
ListSink
-PREHOOK: query: explain select year from loc_orc
+PREHOOK: query: explain select year from loc_orc_n4
PREHOOK: type: QUERY
-POSTHOOK: query: explain select year from loc_orc
+POSTHOOK: query: explain select year from loc_orc_n4
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -338,7 +338,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 8 Data size: 2246 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: year (type: string)
@@ -346,9 +346,9 @@ STAGE PLANS:
Statistics: Num rows: 8 Data size: 1472 Basic stats: COMPLETE Column stats: COMPLETE
ListSink
-PREHOOK: query: explain select state,locid from loc_orc
+PREHOOK: query: explain select state,locid from loc_orc_n4
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc
+POSTHOOK: query: explain select state,locid from loc_orc_n4
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -359,7 +359,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: state (type: string), locid (type: int)
@@ -367,9 +367,9 @@ STAGE PLANS:
Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL
ListSink
-PREHOOK: query: explain select state,locid from loc_orc where year='2001'
+PREHOOK: query: explain select state,locid from loc_orc_n4 where year='2001'
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc where year='2001'
+POSTHOOK: query: explain select state,locid from loc_orc_n4 where year='2001'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -380,7 +380,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 7 Data size: 630 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -388,9 +388,9 @@ STAGE PLANS:
Statistics: Num rows: 7 Data size: 630 Basic stats: COMPLETE Column stats: COMPLETE
ListSink
-PREHOOK: query: explain select state,locid from loc_orc where year!='2001'
+PREHOOK: query: explain select state,locid from loc_orc_n4 where year!='2001'
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state,locid from loc_orc where year!='2001'
+POSTHOOK: query: explain select state,locid from loc_orc_n4 where year!='2001'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -401,7 +401,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 1 Data size: 284 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: state (type: string), locid (type: int)
@@ -409,9 +409,9 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 284 Basic stats: COMPLETE Column stats: NONE
ListSink
-PREHOOK: query: explain select * from loc_orc
+PREHOOK: query: explain select * from loc_orc_n4
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc
+POSTHOOK: query: explain select * from loc_orc_n4
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -422,7 +422,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 8 Data size: 2192 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string)
@@ -430,9 +430,9 @@ STAGE PLANS:
Statistics: Num rows: 8 Data size: 2192 Basic stats: COMPLETE Column stats: PARTIAL
ListSink
-PREHOOK: query: explain select locid from loc_orc where locid>0 and year='2001'
+PREHOOK: query: explain select locid from loc_orc_n4 where locid>0 and year='2001'
PREHOOK: type: QUERY
-POSTHOOK: query: explain select locid from loc_orc where locid>0 and year='2001'
+POSTHOOK: query: explain select locid from loc_orc_n4 where locid>0 and year='2001'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -443,7 +443,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (locid > 0) (type: boolean)
@@ -454,9 +454,9 @@ STAGE PLANS:
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
ListSink
-PREHOOK: query: explain select locid,year from loc_orc where locid>0 and year='2001'
+PREHOOK: query: explain select locid,year from loc_orc_n4 where locid>0 and year='2001'
PREHOOK: type: QUERY
-POSTHOOK: query: explain select locid,year from loc_orc where locid>0 and year='2001'
+POSTHOOK: query: explain select locid,year from loc_orc_n4 where locid>0 and year='2001'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -467,7 +467,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (locid > 0) (type: boolean)
@@ -478,9 +478,9 @@ STAGE PLANS:
Statistics: Num rows: 7 Data size: 644 Basic stats: COMPLETE Column stats: COMPLETE
ListSink
-PREHOOK: query: explain select * from (select locid,year from loc_orc) test where locid>0 and year='2001'
+PREHOOK: query: explain select * from (select locid,year from loc_orc_n4) test where locid>0 and year='2001'
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from (select locid,year from loc_orc) test where locid>0 and year='2001'
+POSTHOOK: query: explain select * from (select locid,year from loc_orc_n4) test where locid>0 and year='2001'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -491,7 +491,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n4
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (locid > 0) (type: boolean)
diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
index a457312379..b502957e96 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
@@ -263,20 +263,20 @@ STAGE PLANS:
Statistics: Num rows: 48 Data size: 4560 Basic stats: COMPLETE Column stats: COMPLETE
ListSink
-PREHOOK: query: create table tmp as select 1
+PREHOOK: query: create table tmp_n0 as select 1
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: database:default
-PREHOOK: Output: default@tmp
-POSTHOOK: query: create table tmp as select 1
+PREHOOK: Output: default@tmp_n0
+POSTHOOK: query: create table tmp_n0 as select 1
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmp
-POSTHOOK: Lineage: tmp._c0 SIMPLE []
-PREHOOK: query: explain create table tmp as select 1
+POSTHOOK: Output: default@tmp_n0
+POSTHOOK: Lineage: tmp_n0._c0 SIMPLE []
+PREHOOK: query: explain create table tmp_n0 as select 1
PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: explain create table tmp as select 1
+POSTHOOK: query: explain create table tmp_n0 as select 1
POSTHOOK: type: CREATETABLE_AS_SELECT
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -307,7 +307,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.tmp
+ name: default.tmp_n0
Stage: Stage-6
Conditional Operator
@@ -331,7 +331,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.tmp
+ name: default.tmp_n0
Stage: Stage-2
Map Reduce
@@ -343,7 +343,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.tmp
+ name: default.tmp_n0
Stage: Stage-4
Map Reduce
@@ -355,7 +355,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.tmp
+ name: default.tmp_n0
Stage: Stage-5
Move Operator
diff --git a/ql/src/test/results/clientpositive/annotate_stats_union.q.out b/ql/src/test/results/clientpositive/annotate_stats_union.q.out
index cf77fc06a6..11f8015c28 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_union.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_union.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table if not exists loc_staging (
+PREHOOK: query: create table if not exists loc_staging_n3 (
state string,
locid int,
zip bigint,
@@ -6,8 +6,8 @@ PREHOOK: query: create table if not exists loc_staging (
) row format delimited fields terminated by '|' stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: create table if not exists loc_staging (
+PREHOOK: Output: default@loc_staging_n3
+POSTHOOK: query: create table if not exists loc_staging_n3 (
state string,
locid int,
zip bigint,
@@ -15,56 +15,56 @@ POSTHOOK: query: create table if not exists loc_staging (
) row format delimited fields terminated by '|' stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: create table loc_orc like loc_staging
+POSTHOOK: Output: default@loc_staging_n3
+PREHOOK: query: create table loc_orc_n3 like loc_staging_n3
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: create table loc_orc like loc_staging
+PREHOOK: Output: default@loc_orc_n3
+POSTHOOK: query: create table loc_orc_n3 like loc_staging_n3
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: alter table loc_orc set fileformat orc
+POSTHOOK: Output: default@loc_orc_n3
+PREHOOK: query: alter table loc_orc_n3 set fileformat orc
PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: alter table loc_orc set fileformat orc
+PREHOOK: Input: default@loc_orc_n3
+PREHOOK: Output: default@loc_orc_n3
+POSTHOOK: query: alter table loc_orc_n3 set fileformat orc
POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+POSTHOOK: Input: default@loc_orc_n3
+POSTHOOK: Output: default@loc_orc_n3
+PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n3
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+PREHOOK: Output: default@loc_staging_n3
+POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n3
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: insert overwrite table loc_orc select * from loc_staging
+POSTHOOK: Output: default@loc_staging_n3
+PREHOOK: query: insert overwrite table loc_orc_n3 select * from loc_staging_n3
PREHOOK: type: QUERY
-PREHOOK: Input: default@loc_staging
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: insert overwrite table loc_orc select * from loc_staging
+PREHOOK: Input: default@loc_staging_n3
+PREHOOK: Output: default@loc_orc_n3
+POSTHOOK: query: insert overwrite table loc_orc_n3 select * from loc_staging_n3
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@loc_staging
-POSTHOOK: Output: default@loc_orc
-POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
-POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
-PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
+POSTHOOK: Input: default@loc_staging_n3
+POSTHOOK: Output: default@loc_orc_n3
+POSTHOOK: Lineage: loc_orc_n3.locid SIMPLE [(loc_staging_n3)loc_staging_n3.FieldSchema(name:locid, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n3.state SIMPLE [(loc_staging_n3)loc_staging_n3.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n3.year SIMPLE [(loc_staging_n3)loc_staging_n3.FieldSchema(name:year, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n3.zip SIMPLE [(loc_staging_n3)loc_staging_n3.FieldSchema(name:zip, type:bigint, comment:null), ]
+PREHOOK: query: analyze table loc_orc_n3 compute statistics for columns state,locid,zip,year
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
+PREHOOK: Input: default@loc_orc_n3
+PREHOOK: Output: default@loc_orc_n3
#### A masked pattern was here ####
-POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
+POSTHOOK: query: analyze table loc_orc_n3 compute statistics for columns state,locid,zip,year
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
+POSTHOOK: Input: default@loc_orc_n3
+POSTHOOK: Output: default@loc_orc_n3
#### A masked pattern was here ####
-PREHOOK: query: explain select state from loc_orc
+PREHOOK: query: explain select state from loc_orc_n3
PREHOOK: type: QUERY
-POSTHOOK: query: explain select state from loc_orc
+POSTHOOK: query: explain select state from loc_orc_n3
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -75,7 +75,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string)
@@ -83,9 +83,9 @@ STAGE PLANS:
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
ListSink
-PREHOOK: query: explain select * from (select state from loc_orc union all select state from loc_orc) tmp
+PREHOOK: query: explain select * from (select state from loc_orc_n3 union all select state from loc_orc_n3) tmp
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from (select state from loc_orc union all select state from loc_orc) tmp
+POSTHOOK: query: explain select * from (select state from loc_orc_n3 union all select state from loc_orc_n3) tmp
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -96,7 +96,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string)
@@ -112,7 +112,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string)
@@ -134,9 +134,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select * from loc_orc
+PREHOOK: query: explain select * from loc_orc_n3
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc
+POSTHOOK: query: explain select * from loc_orc_n3
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -147,7 +147,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -155,9 +155,9 @@ STAGE PLANS:
Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
ListSink
-PREHOOK: query: explain select * from (select * from loc_orc union all select * from loc_orc) tmp
+PREHOOK: query: explain select * from (select * from loc_orc_n3 union all select * from loc_orc_n3) tmp
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from (select * from loc_orc union all select * from loc_orc) tmp
+POSTHOOK: query: explain select * from (select * from loc_orc_n3 union all select * from loc_orc_n3) tmp
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -168,7 +168,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -184,7 +184,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -218,7 +218,7 @@ PREHOOK: Input: database:test
POSTHOOK: query: use test
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:test
-PREHOOK: query: create table if not exists loc_staging (
+PREHOOK: query: create table if not exists loc_staging_n3 (
state string,
locid int,
zip bigint,
@@ -226,8 +226,8 @@ PREHOOK: query: create table if not exists loc_staging (
) row format delimited fields terminated by '|' stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:test
-PREHOOK: Output: test@loc_staging
-POSTHOOK: query: create table if not exists loc_staging (
+PREHOOK: Output: test@loc_staging_n3
+POSTHOOK: query: create table if not exists loc_staging_n3 (
state string,
locid int,
zip bigint,
@@ -235,74 +235,74 @@ POSTHOOK: query: create table if not exists loc_staging (
) row format delimited fields terminated by '|' stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:test
-POSTHOOK: Output: test@loc_staging
-PREHOOK: query: create table loc_orc like loc_staging
+POSTHOOK: Output: test@loc_staging_n3
+PREHOOK: query: create table loc_orc_n3 like loc_staging_n3
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:test
-PREHOOK: Output: test@loc_orc
-POSTHOOK: query: create table loc_orc like loc_staging
+PREHOOK: Output: test@loc_orc_n3
+POSTHOOK: query: create table loc_orc_n3 like loc_staging_n3
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:test
-POSTHOOK: Output: test@loc_orc
-PREHOOK: query: alter table loc_orc set fileformat orc
+POSTHOOK: Output: test@loc_orc_n3
+PREHOOK: query: alter table loc_orc_n3 set fileformat orc
PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: test@loc_orc
-PREHOOK: Output: test@loc_orc
-POSTHOOK: query: alter table loc_orc set fileformat orc
+PREHOOK: Input: test@loc_orc_n3
+PREHOOK: Output: test@loc_orc_n3
+POSTHOOK: query: alter table loc_orc_n3 set fileformat orc
POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: test@loc_orc
-POSTHOOK: Output: test@loc_orc
-PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+POSTHOOK: Input: test@loc_orc_n3
+POSTHOOK: Output: test@loc_orc_n3
+PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n3
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: test@loc_staging
-POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+PREHOOK: Output: test@loc_staging_n3
+POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n3
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: test@loc_staging
-PREHOOK: query: insert overwrite table loc_orc select * from loc_staging
+POSTHOOK: Output: test@loc_staging_n3
+PREHOOK: query: insert overwrite table loc_orc_n3 select * from loc_staging_n3
PREHOOK: type: QUERY
-PREHOOK: Input: test@loc_staging
-PREHOOK: Output: test@loc_orc
-POSTHOOK: query: insert overwrite table loc_orc select * from loc_staging
+PREHOOK: Input: test@loc_staging_n3
+PREHOOK: Output: test@loc_orc_n3
+POSTHOOK: query: insert overwrite table loc_orc_n3 select * from loc_staging_n3
POSTHOOK: type: QUERY
-POSTHOOK: Input: test@loc_staging
-POSTHOOK: Output: test@loc_orc
-POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
-POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
-PREHOOK: query: analyze table loc_staging compute statistics
+POSTHOOK: Input: test@loc_staging_n3
+POSTHOOK: Output: test@loc_orc_n3
+POSTHOOK: Lineage: loc_orc_n3.locid SIMPLE [(loc_staging_n3)loc_staging_n3.FieldSchema(name:locid, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n3.state SIMPLE [(loc_staging_n3)loc_staging_n3.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n3.year SIMPLE [(loc_staging_n3)loc_staging_n3.FieldSchema(name:year, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n3.zip SIMPLE [(loc_staging_n3)loc_staging_n3.FieldSchema(name:zip, type:bigint, comment:null), ]
+PREHOOK: query: analyze table loc_staging_n3 compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: test@loc_staging
-PREHOOK: Output: test@loc_staging
-POSTHOOK: query: analyze table loc_staging compute statistics
+PREHOOK: Input: test@loc_staging_n3
+PREHOOK: Output: test@loc_staging_n3
+POSTHOOK: query: analyze table loc_staging_n3 compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: test@loc_staging
-POSTHOOK: Output: test@loc_staging
-PREHOOK: query: analyze table loc_staging compute statistics for columns state,locid,zip,year
+POSTHOOK: Input: test@loc_staging_n3
+POSTHOOK: Output: test@loc_staging_n3
+PREHOOK: query: analyze table loc_staging_n3 compute statistics for columns state,locid,zip,year
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: test@loc_staging
+PREHOOK: Input: test@loc_staging_n3
#### A masked pattern was here ####
-PREHOOK: Output: test@loc_staging
-POSTHOOK: query: analyze table loc_staging compute statistics for columns state,locid,zip,year
+PREHOOK: Output: test@loc_staging_n3
+POSTHOOK: query: analyze table loc_staging_n3 compute statistics for columns state,locid,zip,year
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: test@loc_staging
+POSTHOOK: Input: test@loc_staging_n3
#### A masked pattern was here ####
-POSTHOOK: Output: test@loc_staging
-PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
+POSTHOOK: Output: test@loc_staging_n3
+PREHOOK: query: analyze table loc_orc_n3 compute statistics for columns state,locid,zip,year
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: test@loc_orc
+PREHOOK: Input: test@loc_orc_n3
#### A masked pattern was here ####
-PREHOOK: Output: test@loc_orc
-POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
+PREHOOK: Output: test@loc_orc_n3
+POSTHOOK: query: analyze table loc_orc_n3 compute statistics for columns state,locid,zip,year
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: test@loc_orc
+POSTHOOK: Input: test@loc_orc_n3
#### A masked pattern was here ####
-POSTHOOK: Output: test@loc_orc
-PREHOOK: query: explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp
+POSTHOOK: Output: test@loc_orc_n3
+PREHOOK: query: explain select * from (select state from default.loc_orc_n3 union all select state from test.loc_orc_n3) temp
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp
+POSTHOOK: query: explain select * from (select state from default.loc_orc_n3 union all select state from test.loc_orc_n3) temp
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -313,7 +313,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string)
@@ -329,7 +329,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string)
@@ -351,9 +351,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: explain select * from (select state from test.loc_staging union all select state from test.loc_orc) temp
+PREHOOK: query: explain select * from (select state from test.loc_staging_n3 union all select state from test.loc_orc_n3) temp
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from (select state from test.loc_staging union all select state from test.loc_orc) temp
+POSTHOOK: query: explain select * from (select state from test.loc_staging_n3 union all select state from test.loc_orc_n3) temp
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -364,7 +364,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: loc_staging
+ alias: loc_staging_n3
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string)
@@ -380,7 +380,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
TableScan
- alias: loc_orc
+ alias: loc_orc_n3
Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: state (type: string)
diff --git a/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out b/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
index 65b0cdcb5b..e4b390c9cd 100644
--- a/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
+++ b/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
@@ -1,156 +1,156 @@
-PREHOOK: query: drop table tstsrc
+PREHOOK: query: drop table tstsrc_n2
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table tstsrc
+POSTHOOK: query: drop table tstsrc_n2
POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table tstsrcpart
+PREHOOK: query: drop table tstsrcpart_n2
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table tstsrcpart
+POSTHOOK: query: drop table tstsrcpart_n2
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table tstsrc like src
+PREHOOK: query: create table tstsrc_n2 like src
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@tstsrc
-POSTHOOK: query: create table tstsrc like src
+PREHOOK: Output: default@tstsrc_n2
+POSTHOOK: query: create table tstsrc_n2 like src
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tstsrc
-PREHOOK: query: insert overwrite table tstsrc select key, value from src
+POSTHOOK: Output: default@tstsrc_n2
+PREHOOK: query: insert overwrite table tstsrc_n2 select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@tstsrc
-POSTHOOK: query: insert overwrite table tstsrc select key, value from src
+PREHOOK: Output: default@tstsrc_n2
+POSTHOOK: query: insert overwrite table tstsrc_n2 select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tstsrc
-POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table tstsrcpart (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets
+POSTHOOK: Output: default@tstsrc_n2
+POSTHOOK: Lineage: tstsrc_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tstsrc_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table tstsrcpart_n2 (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@tstsrcpart
-POSTHOOK: query: create table tstsrcpart (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets
+PREHOOK: Output: default@tstsrcpart_n2
+POSTHOOK: query: create table tstsrcpart_n2 (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tstsrcpart
-PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11')
+POSTHOOK: Output: default@tstsrcpart_n2
+PREHOOK: query: insert overwrite table tstsrcpart_n2 partition (ds='2008-04-08', hr='11')
select key, value from srcpart where ds='2008-04-08' and hr='11'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11')
+PREHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table tstsrcpart_n2 partition (ds='2008-04-08', hr='11')
select key, value from srcpart where ds='2008-04-08' and hr='11'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12')
+POSTHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: tstsrcpart_n2 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tstsrcpart_n2 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tstsrcpart_n2 partition (ds='2008-04-08', hr='12')
select key, value from srcpart where ds='2008-04-08' and hr='12'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12')
+PREHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: insert overwrite table tstsrcpart_n2 partition (ds='2008-04-08', hr='12')
select key, value from srcpart where ds='2008-04-08' and hr='12'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='11')
+POSTHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: Lineage: tstsrcpart_n2 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tstsrcpart_n2 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tstsrcpart_n2 partition (ds='2008-04-09', hr='11')
select key, value from srcpart where ds='2008-04-09' and hr='11'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=11
-POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='11')
+PREHOOK: Output: default@tstsrcpart_n2@ds=2008-04-09/hr=11
+POSTHOOK: query: insert overwrite table tstsrcpart_n2 partition (ds='2008-04-09', hr='11')
select key, value from srcpart where ds='2008-04-09' and hr='11'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=11
-POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='12')
+POSTHOOK: Output: default@tstsrcpart_n2@ds=2008-04-09/hr=11
+POSTHOOK: Lineage: tstsrcpart_n2 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tstsrcpart_n2 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tstsrcpart_n2 partition (ds='2008-04-09', hr='12')
select key, value from srcpart where ds='2008-04-09' and hr='12'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=12
-POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='12')
+PREHOOK: Output: default@tstsrcpart_n2@ds=2008-04-09/hr=12
+POSTHOOK: query: insert overwrite table tstsrcpart_n2 partition (ds='2008-04-09', hr='12')
select key, value from srcpart where ds='2008-04-09' and hr='12'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@tstsrcpart_n2@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: tstsrcpart_n2 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tstsrcpart_n2 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2
+FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=11
+PREHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2
+FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
48479881068
-PREHOOK: query: ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12')
+PREHOOK: query: ALTER TABLE tstsrcpart_n2 ARCHIVE PARTITION (ds='2008-04-08', hr='12')
PREHOOK: type: ALTERTABLE_ARCHIVE
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-POSTHOOK: query: ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12')
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: ALTER TABLE tstsrcpart_n2 ARCHIVE PARTITION (ds='2008-04-08', hr='12')
POSTHOOK: type: ALTERTABLE_ARCHIVE
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
PREHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2
+FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=11
+PREHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2
+FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
48479881068
-PREHOOK: query: SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key
+PREHOOK: query: SELECT key, count(1) FROM tstsrcpart_n2 WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key
+POSTHOOK: query: SELECT key, count(1) FROM tstsrcpart_n2 WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
0 3
Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key
+PREHOOK: query: SELECT * FROM tstsrcpart_n2 a JOIN tstsrc_n2 b ON a.key=b.key
WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0'
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstsrc
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@tstsrc_n2
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key
+POSTHOOK: query: SELECT * FROM tstsrcpart_n2 a JOIN tstsrc_n2 b ON a.key=b.key
WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstsrc
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrc_n2
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
0 val_0 2008-04-08 12 0 val_0
0 val_0 2008-04-08 12 0 val_0
@@ -161,27 +161,27 @@ POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
0 val_0 2008-04-08 12 0 val_0
0 val_0 2008-04-08 12 0 val_0
0 val_0 2008-04-08 12 0 val_0
-PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
+PREHOOK: query: ALTER TABLE tstsrcpart_n2 UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
PREHOOK: type: ALTERTABLE_UNARCHIVE
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-POSTHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: ALTER TABLE tstsrcpart_n2 UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
POSTHOOK: type: ALTERTABLE_UNARCHIVE
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
PREHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2
+FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=11
+PREHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2
+FROM (SELECT * FROM tstsrcpart_n2 WHERE ds='2008-04-08') subq1) subq2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@tstsrcpart_n2@ds=2008-04-08/hr=12
#### A masked pattern was here ####
48479881068
PREHOOK: query: CREATE TABLE harbucket(key INT)
@@ -196,15 +196,15 @@ CLUSTERED BY (key) INTO 10 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@harbucket
-PREHOOK: query: INSERT OVERWRITE TABLE harbucket PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key > 50
+PREHOOK: query: INSERT OVERWRITE TABLE harbucket PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc_n2 WHERE key > 50
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstsrc
+PREHOOK: Input: default@tstsrc_n2
PREHOOK: Output: default@harbucket@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE harbucket PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key > 50
+POSTHOOK: query: INSERT OVERWRITE TABLE harbucket PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc_n2 WHERE key > 50
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstsrc
+POSTHOOK: Input: default@tstsrc_n2
POSTHOOK: Output: default@harbucket@ds=1
-POSTHOOK: Lineage: harbucket PARTITION(ds=1).key EXPRESSION [(tstsrc)tstsrc.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: harbucket PARTITION(ds=1).key EXPRESSION [(tstsrc_n2)tstsrc_n2.FieldSchema(name:key, type:string, comment:default), ]
PREHOOK: query: SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key
PREHOOK: type: QUERY
PREHOOK: Input: default@harbucket
@@ -216,14 +216,14 @@ POSTHOOK: Input: default@harbucket
POSTHOOK: Input: default@harbucket@ds=1
#### A masked pattern was here ####
260
-PREHOOK: query: ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12')
+PREHOOK: query: ALTER TABLE tstsrcpart_n2 ARCHIVE PARTITION (ds='2008-04-08', hr='12')
PREHOOK: type: ALTERTABLE_ARCHIVE
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-POSTHOOK: query: ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12')
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: ALTER TABLE tstsrcpart_n2 ARCHIVE PARTITION (ds='2008-04-08', hr='12')
POSTHOOK: type: ALTERTABLE_ARCHIVE
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
PREHOOK: query: SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key
PREHOOK: type: QUERY
PREHOOK: Input: default@harbucket
@@ -235,14 +235,14 @@ POSTHOOK: Input: default@harbucket
POSTHOOK: Input: default@harbucket@ds=1
#### A masked pattern was here ####
260
-PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
+PREHOOK: query: ALTER TABLE tstsrcpart_n2 UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
PREHOOK: type: ALTERTABLE_UNARCHIVE
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-POSTHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: ALTER TABLE tstsrcpart_n2 UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
POSTHOOK: type: ALTERTABLE_UNARCHIVE
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Output: default@tstsrcpart_n2@ds=2008-04-08/hr=12
PREHOOK: query: SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key
PREHOOK: type: QUERY
PREHOOK: Input: default@harbucket
@@ -264,15 +264,15 @@ PARTITIONED by (ds STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@old_name
-PREHOOK: query: INSERT OVERWRITE TABLE old_name PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key > 50
+PREHOOK: query: INSERT OVERWRITE TABLE old_name PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc_n2 WHERE key > 50
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstsrc
+PREHOOK: Input: default@tstsrc_n2
PREHOOK: Output: default@old_name@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE old_name PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key > 50
+POSTHOOK: query: INSERT OVERWRITE TABLE old_name PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc_n2 WHERE key > 50
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstsrc
+POSTHOOK: Input: default@tstsrc_n2
POSTHOOK: Output: default@old_name@ds=1
-POSTHOOK: Lineage: old_name PARTITION(ds=1).key EXPRESSION [(tstsrc)tstsrc.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: old_name PARTITION(ds=1).key EXPRESSION [(tstsrc_n2)tstsrc_n2.FieldSchema(name:key, type:string, comment:default), ]
PREHOOK: query: ALTER TABLE old_name ARCHIVE PARTITION (ds='1')
PREHOOK: type: ALTERTABLE_ARCHIVE
PREHOOK: Input: default@old_name
@@ -316,19 +316,19 @@ POSTHOOK: Input: default@new_name
POSTHOOK: Input: default@new_name@ds=1
#### A masked pattern was here ####
20158186837
-PREHOOK: query: drop table tstsrc
+PREHOOK: query: drop table tstsrc_n2
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tstsrc
-PREHOOK: Output: default@tstsrc
-POSTHOOK: query: drop table tstsrc
+PREHOOK: Input: default@tstsrc_n2
+PREHOOK: Output: default@tstsrc_n2
+POSTHOOK: query: drop table tstsrc_n2
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tstsrc
-POSTHOOK: Output: default@tstsrc
-PREHOOK: query: drop table tstsrcpart
+POSTHOOK: Input: default@tstsrc_n2
+POSTHOOK: Output: default@tstsrc_n2
+PREHOOK: query: drop table tstsrcpart_n2
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tstsrcpart
-PREHOOK: Output: default@tstsrcpart
-POSTHOOK: query: drop table tstsrcpart
+PREHOOK: Input: default@tstsrcpart_n2
+PREHOOK: Output: default@tstsrcpart_n2
+POSTHOOK: query: drop table tstsrcpart_n2
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tstsrcpart
-POSTHOOK: Output: default@tstsrcpart
+POSTHOOK: Input: default@tstsrcpart_n2
+POSTHOOK: Output: default@tstsrcpart_n2
diff --git a/ql/src/test/results/clientpositive/authorization_1.q.out b/ql/src/test/results/clientpositive/authorization_1.q.out
index 90cca5a839..0a3e19aeca 100644
--- a/ql/src/test/results/clientpositive/authorization_1.q.out
+++ b/ql/src/test/results/clientpositive/authorization_1.q.out
@@ -1,37 +1,37 @@
-PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: query: create table src_autho_test_n11 as select * from src
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: create table src_autho_test as select * from src
+PREHOOK: Output: default@src_autho_test_n11
+POSTHOOK: query: create table src_autho_test_n11 as select * from src
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_autho_test
-POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: grant select on table src_autho_test to user hive_test_user
+POSTHOOK: Output: default@src_autho_test_n11
+POSTHOOK: Lineage: src_autho_test_n11.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test_n11.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: grant select on table src_autho_test_n11 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: grant select on table src_autho_test to user hive_test_user
+PREHOOK: Output: default@src_autho_test_n11
+POSTHOOK: query: grant select on table src_autho_test_n11 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
-PREHOOK: query: show grant user hive_test_user on table src_autho_test
+POSTHOOK: Output: default@src_autho_test_n11
+PREHOOK: query: show grant user hive_test_user on table src_autho_test_n11
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table src_autho_test
+POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n11
POSTHOOK: type: SHOW_GRANT
-default src_autho_test hive_test_user USER SELECT false -1 hive_test_user
+default src_autho_test_n11 hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: show grant user hive_test_user on table src_autho_test(key)
+PREHOOK: query: show grant user hive_test_user on table src_autho_test_n11(key)
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key)
+POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n11(key)
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: select key from src_autho_test order by key limit 20
+PREHOOK: query: select key from src_autho_test_n11 order by key limit 20
PREHOOK: type: QUERY
-PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@src_autho_test_n11
#### A masked pattern was here ####
-POSTHOOK: query: select key from src_autho_test order by key limit 20
+POSTHOOK: query: select key from src_autho_test_n11 order by key limit 20
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@src_autho_test_n11
#### A masked pattern was here ####
0
0
@@ -53,42 +53,42 @@ POSTHOOK: Input: default@src_autho_test
118
118
119
-PREHOOK: query: revoke select on table src_autho_test from user hive_test_user
+PREHOOK: query: revoke select on table src_autho_test_n11 from user hive_test_user
PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: revoke select on table src_autho_test from user hive_test_user
+PREHOOK: Output: default@src_autho_test_n11
+POSTHOOK: query: revoke select on table src_autho_test_n11 from user hive_test_user
POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
-PREHOOK: query: show grant user hive_test_user on table src_autho_test
+POSTHOOK: Output: default@src_autho_test_n11
+PREHOOK: query: show grant user hive_test_user on table src_autho_test_n11
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table src_autho_test
+POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n11
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: show grant user hive_test_user on table src_autho_test(key)
+PREHOOK: query: show grant user hive_test_user on table src_autho_test_n11(key)
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key)
+POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n11(key)
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+PREHOOK: query: grant select(key) on table src_autho_test_n11 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+PREHOOK: Output: default@src_autho_test_n11
+POSTHOOK: query: grant select(key) on table src_autho_test_n11 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
-PREHOOK: query: show grant user hive_test_user on table src_autho_test
+POSTHOOK: Output: default@src_autho_test_n11
+PREHOOK: query: show grant user hive_test_user on table src_autho_test_n11
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table src_autho_test
+POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n11
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: show grant user hive_test_user on table src_autho_test(key)
+PREHOOK: query: show grant user hive_test_user on table src_autho_test_n11(key)
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key)
+POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n11(key)
POSTHOOK: type: SHOW_GRANT
-default src_autho_test [key] hive_test_user USER SELECT false -1 hive_test_user
+default src_autho_test_n11 [key] hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: select key from src_autho_test order by key limit 20
+PREHOOK: query: select key from src_autho_test_n11 order by key limit 20
PREHOOK: type: QUERY
-PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@src_autho_test_n11
#### A masked pattern was here ####
-POSTHOOK: query: select key from src_autho_test order by key limit 20
+POSTHOOK: query: select key from src_autho_test_n11 order by key limit 20
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@src_autho_test_n11
#### A masked pattern was here ####
0
0
@@ -110,42 +110,42 @@ POSTHOOK: Input: default@src_autho_test
118
118
119
-PREHOOK: query: revoke select(key) on table src_autho_test from user hive_test_user
+PREHOOK: query: revoke select(key) on table src_autho_test_n11 from user hive_test_user
PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: revoke select(key) on table src_autho_test from user hive_test_user
+PREHOOK: Output: default@src_autho_test_n11
+POSTHOOK: query: revoke select(key) on table src_autho_test_n11 from user hive_test_user
POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
-PREHOOK: query: show grant user hive_test_user on table src_autho_test
+POSTHOOK: Output: default@src_autho_test_n11
+PREHOOK: query: show grant user hive_test_user on table src_autho_test_n11
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table src_autho_test
+POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n11
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: show grant user hive_test_user on table src_autho_test(key)
+PREHOOK: query: show grant user hive_test_user on table
src_autho_test_n11(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n11(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: grant select on table src_autho_test to group hive_test_group1 +PREHOOK: query: grant select on table src_autho_test_n11 to group hive_test_group1 PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select on table src_autho_test to group hive_test_group1 +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: grant select on table src_autho_test_n11 to group hive_test_group1 POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n11 +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11 POSTHOOK: type: SHOW_GRANT -default src_autho_test hive_test_group1 GROUP SELECT false -1 hive_test_user -PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +default src_autho_test_n11 hive_test_group1 GROUP SELECT false -1 hive_test_user +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: query: select key from src_autho_test_n11 order by key limit 20 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n11 #### A masked pattern was here #### -POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: query: select key from src_autho_test_n11 order by key limit 20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n11 #### A masked pattern was here #### 0 0 @@ -167,42 +167,42 @@ POSTHOOK: Input: default@src_autho_test 118 118 119 -PREHOOK: query: revoke select on table src_autho_test from group hive_test_group1 +PREHOOK: query: revoke select on table src_autho_test_n11 from group hive_test_group1 PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke select on table src_autho_test from group hive_test_group1 +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: revoke select on table src_autho_test_n11 from group hive_test_group1 POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n11 +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11 POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant group hive_test_group1 on 
table src_autho_test(key) +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: grant select(key) on table src_autho_test to group hive_test_group1 +PREHOOK: query: grant select(key) on table src_autho_test_n11 to group hive_test_group1 PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select(key) on table src_autho_test to group hive_test_group1 +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: grant select(key) on table src_autho_test_n11 to group hive_test_group1 POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n11 +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11 POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11(key) POSTHOOK: type: SHOW_GRANT -default src_autho_test [key] hive_test_group1 GROUP SELECT false -1 hive_test_user -PREHOOK: query: select key from src_autho_test order by key limit 20 +default src_autho_test_n11 [key] hive_test_group1 GROUP SELECT false -1 hive_test_user +PREHOOK: query: select key from src_autho_test_n11 order by key limit 20 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n11 #### A masked pattern was here #### -POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: query: select key from src_autho_test_n11 order by key limit 20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n11 #### A masked pattern was here #### 0 0 @@ -224,19 +224,19 @@ POSTHOOK: Input: default@src_autho_test 118 118 119 -PREHOOK: query: revoke select(key) on table src_autho_test from group hive_test_group1 +PREHOOK: query: revoke select(key) on table src_autho_test_n11 from group hive_test_group1 PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke select(key) on table src_autho_test from group hive_test_group1 +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: revoke select(key) on table src_autho_test_n11 from group hive_test_group1 POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n11 +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11 POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test_n11(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: query: show 
grant group hive_test_group1 on table src_autho_test_n11(key) POSTHOOK: type: SHOW_GRANT PREHOOK: query: create role sRc_roLE PREHOOK: type: CREATEROLE @@ -252,28 +252,28 @@ POSTHOOK: query: show role grant user hive_test_user POSTHOOK: type: SHOW_ROLE_GRANT public false -1 sRc_roLE false -1 hive_test_user -PREHOOK: query: grant select(key) on table src_autho_test to role sRc_roLE +PREHOOK: query: grant select(key) on table src_autho_test_n11 to role sRc_roLE PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select(key) on table src_autho_test to role sRc_roLE +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: grant select(key) on table src_autho_test_n11 to role sRc_roLE POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant role sRc_roLE on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n11 +PREHOOK: query: show grant role sRc_roLE on table src_autho_test_n11 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant role sRc_roLE on table src_autho_test +POSTHOOK: query: show grant role sRc_roLE on table src_autho_test_n11 POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant role sRc_roLE on table src_autho_test(key) +PREHOOK: query: show grant role sRc_roLE on table src_autho_test_n11(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant role sRc_roLE on table src_autho_test(key) +POSTHOOK: query: show grant role sRc_roLE on table src_autho_test_n11(key) POSTHOOK: type: SHOW_GRANT -default src_autho_test [key] sRc_roLE ROLE SELECT false -1 hive_test_user -PREHOOK: query: select key from src_autho_test order by key limit 20 +default src_autho_test_n11 [key] sRc_roLE ROLE SELECT false -1 hive_test_user +PREHOOK: query: select key from src_autho_test_n11 order by key limit 20 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n11 #### A masked pattern was here #### -POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: query: select key from src_autho_test_n11 order by key limit 20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n11 #### A masked pattern was here #### 0 0 @@ -295,25 +295,25 @@ POSTHOOK: Input: default@src_autho_test 118 118 119 -PREHOOK: query: revoke select(key) on table src_autho_test from role sRc_roLE +PREHOOK: query: revoke select(key) on table src_autho_test_n11 from role sRc_roLE PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke select(key) on table src_autho_test from role sRc_roLE +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: revoke select(key) on table src_autho_test_n11 from role sRc_roLE POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: grant select on table src_autho_test to role sRc_roLE +POSTHOOK: Output: default@src_autho_test_n11 +PREHOOK: query: grant select on table src_autho_test_n11 to role sRc_roLE PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select on table src_autho_test to role sRc_roLE +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: grant select on table src_autho_test_n11 to role sRc_roLE POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: Output: default@src_autho_test_n11 +PREHOOK: query: select key from 
src_autho_test_n11 order by key limit 20 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n11 #### A masked pattern was here #### -POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: query: select key from src_autho_test_n11 order by key limit 20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n11 #### A masked pattern was here #### 0 0 @@ -335,30 +335,30 @@ POSTHOOK: Input: default@src_autho_test 118 118 119 -PREHOOK: query: show grant role sRc_roLE on table src_autho_test +PREHOOK: query: show grant role sRc_roLE on table src_autho_test_n11 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant role sRc_roLE on table src_autho_test +POSTHOOK: query: show grant role sRc_roLE on table src_autho_test_n11 POSTHOOK: type: SHOW_GRANT -default src_autho_test sRc_roLE ROLE SELECT false -1 hive_test_user -PREHOOK: query: show grant role sRc_roLE on table src_autho_test(key) +default src_autho_test_n11 sRc_roLE ROLE SELECT false -1 hive_test_user +PREHOOK: query: show grant role sRc_roLE on table src_autho_test_n11(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant role sRc_roLE on table src_autho_test(key) +POSTHOOK: query: show grant role sRc_roLE on table src_autho_test_n11(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: revoke select on table src_autho_test from role sRc_roLE +PREHOOK: query: revoke select on table src_autho_test_n11 from role sRc_roLE PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke select on table src_autho_test from role sRc_roLE +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: revoke select on table src_autho_test_n11 from role sRc_roLE POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test +POSTHOOK: Output: default@src_autho_test_n11 PREHOOK: query: drop role sRc_roLE PREHOOK: type: DROPROLE POSTHOOK: query: drop role sRc_roLE POSTHOOK: type: DROPROLE -PREHOOK: query: drop table src_autho_test +PREHOOK: query: drop table src_autho_test_n11 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@src_autho_test -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: drop table src_autho_test +PREHOOK: Input: default@src_autho_test_n11 +PREHOOK: Output: default@src_autho_test_n11 +POSTHOOK: query: drop table src_autho_test_n11 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Output: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n11 +POSTHOOK: Output: default@src_autho_test_n11 diff --git a/ql/src/test/results/clientpositive/authorization_3.q.out b/ql/src/test/results/clientpositive/authorization_3.q.out index 181a512b88..fc4139fa0f 100644 --- a/ql/src/test/results/clientpositive/authorization_3.q.out +++ b/ql/src/test/results/clientpositive/authorization_3.q.out @@ -1,77 +1,77 @@ -PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test_n5 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: create table src_autho_test as select * from src +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: create table src_autho_test_n5 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_autho_test -POSTHOOK: Lineage: 
src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: grant drop on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@src_autho_test_n5 +POSTHOOK: Lineage: src_autho_test_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_autho_test_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: grant drop on table src_autho_test_n5 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant drop on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: grant drop on table src_autho_test_n5 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: grant select on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@src_autho_test_n5 +PREHOOK: query: grant select on table src_autho_test_n5 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: grant select on table src_autho_test_n5 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n5 +PREHOOK: query: show grant user hive_test_user on table src_autho_test_n5 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n5 POSTHOOK: type: SHOW_GRANT -default src_autho_test hive_test_user USER DROP false -1 hive_test_user -default src_autho_test hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: revoke select on table src_autho_test from user hive_test_user +default src_autho_test_n5 hive_test_user USER DROP false -1 hive_test_user +default src_autho_test_n5 hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: revoke select on table src_autho_test_n5 from user hive_test_user PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke select on table src_autho_test from user hive_test_user +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: revoke select on table src_autho_test_n5 from user hive_test_user POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: revoke drop on table src_autho_test from user hive_test_user +POSTHOOK: Output: default@src_autho_test_n5 +PREHOOK: query: revoke drop on table src_autho_test_n5 from user hive_test_user PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke drop on table src_autho_test from user hive_test_user +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: revoke drop on table src_autho_test_n5 from user hive_test_user POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: grant drop,select on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@src_autho_test_n5 +PREHOOK: query: grant drop,select on table src_autho_test_n5 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE 
-PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant drop,select on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: grant drop,select on table src_autho_test_n5 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n5 +PREHOOK: query: show grant user hive_test_user on table src_autho_test_n5 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n5 POSTHOOK: type: SHOW_GRANT -default src_autho_test hive_test_user USER DROP false -1 hive_test_user -default src_autho_test hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: revoke drop,select on table src_autho_test from user hive_test_user +default src_autho_test_n5 hive_test_user USER DROP false -1 hive_test_user +default src_autho_test_n5 hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: revoke drop,select on table src_autho_test_n5 from user hive_test_user PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke drop,select on table src_autho_test from user hive_test_user +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: revoke drop,select on table src_autho_test_n5 from user hive_test_user POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: grant drop,select(key), select(value) on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@src_autho_test_n5 +PREHOOK: query: grant drop,select(key), select(value) on table src_autho_test_n5 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant drop,select(key), select(value) on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: grant drop,select(key), select(value) on table src_autho_test_n5 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n5 +PREHOOK: query: show grant user hive_test_user on table src_autho_test_n5 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n5 POSTHOOK: type: SHOW_GRANT -default src_autho_test hive_test_user USER DROP false -1 hive_test_user -PREHOOK: query: revoke drop,select(key), select(value) on table src_autho_test from user hive_test_user +default src_autho_test_n5 hive_test_user USER DROP false -1 hive_test_user +PREHOOK: query: revoke drop,select(key), select(value) on table src_autho_test_n5 from user hive_test_user PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke drop,select(key), select(value) on table src_autho_test from user hive_test_user +PREHOOK: Output: default@src_autho_test_n5 +POSTHOOK: query: revoke drop,select(key), select(value) on table src_autho_test_n5 from user hive_test_user POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test +POSTHOOK: Output: default@src_autho_test_n5 diff --git a/ql/src/test/results/clientpositive/authorization_4.q.out b/ql/src/test/results/clientpositive/authorization_4.q.out 
index aade134eb5..b5adc1a19d 100644 --- a/ql/src/test/results/clientpositive/authorization_4.q.out +++ b/ql/src/test/results/clientpositive/authorization_4.q.out @@ -1,33 +1,33 @@ -PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test_n2 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: create table src_autho_test as select * from src +PREHOOK: Output: default@src_autho_test_n2 +POSTHOOK: query: create table src_autho_test_n2 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_autho_test -POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: grant All on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@src_autho_test_n2 +POSTHOOK: Lineage: src_autho_test_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_autho_test_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: grant All on table src_autho_test_n2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant All on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n2 +POSTHOOK: query: grant All on table src_autho_test_n2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: Output: default@src_autho_test_n2 +PREHOOK: query: show grant user hive_test_user on table src_autho_test_n2 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: query: show grant user hive_test_user on table src_autho_test_n2 POSTHOOK: type: SHOW_GRANT -default src_autho_test hive_test_user USER ALL false -1 hive_test_user -PREHOOK: query: select key from src_autho_test order by key limit 20 +default src_autho_test_n2 hive_test_user USER ALL false -1 hive_test_user +PREHOOK: query: select key from src_autho_test_n2 order by key limit 20 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: query: select key from src_autho_test_n2 order by key limit 20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n2 #### A masked pattern was here #### 0 0 @@ -49,11 +49,11 @@ POSTHOOK: Input: default@src_autho_test 118 118 119 -PREHOOK: query: drop table src_autho_test +PREHOOK: query: drop table src_autho_test_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@src_autho_test -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: drop table src_autho_test +PREHOOK: Input: default@src_autho_test_n2 +PREHOOK: Output: default@src_autho_test_n2 +POSTHOOK: query: drop table src_autho_test_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Output: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n2 +POSTHOOK: Output: default@src_autho_test_n2 diff --git 
a/ql/src/test/results/clientpositive/authorization_6.q.out b/ql/src/test/results/clientpositive/authorization_6.q.out index cbd218fb49..2687779279 100644 --- a/ql/src/test/results/clientpositive/authorization_6.q.out +++ b/ql/src/test/results/clientpositive/authorization_6.q.out @@ -1,114 +1,114 @@ -PREHOOK: query: create table src_auth_tmp as select * from src +PREHOOK: query: create table src_auth_tmp_n0 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_auth_tmp -POSTHOOK: query: create table src_auth_tmp as select * from src +PREHOOK: Output: default@src_auth_tmp_n0 +POSTHOOK: query: create table src_auth_tmp_n0 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_auth_tmp -POSTHOOK: Lineage: src_auth_tmp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_auth_tmp.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +POSTHOOK: Output: default@src_auth_tmp_n0 +POSTHOOK: Lineage: src_auth_tmp_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_auth_tmp_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table authorization_part_n0 (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@authorization_part -POSTHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: create table authorization_part_n0 (key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@authorization_part -PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: ALTER TABLE authorization_part_n0 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@authorization_part -PREHOOK: Output: default@authorization_part -POSTHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +PREHOOK: Input: default@authorization_part_n0 +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: ALTER TABLE authorization_part_n0 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@authorization_part -POSTHOOK: Output: default@authorization_part -PREHOOK: query: grant select on table src_auth_tmp to user hive_test_user +POSTHOOK: Input: default@authorization_part_n0 +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: grant select on table src_auth_tmp_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_auth_tmp -POSTHOOK: query: grant select on table src_auth_tmp to user hive_test_user +PREHOOK: Output: default@src_auth_tmp_n0 +POSTHOOK: query: grant select on table src_auth_tmp_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_auth_tmp -PREHOOK: query: grant Create on table authorization_part to user hive_test_user +POSTHOOK: 
Output: default@src_auth_tmp_n0 +PREHOOK: query: grant Create on table authorization_part_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant Create on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: grant Create on table authorization_part_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: grant Update on table authorization_part to user hive_test_user +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: grant Update on table authorization_part_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant Update on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: grant Update on table authorization_part_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: grant Drop on table authorization_part to user hive_test_user +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: grant Drop on table authorization_part_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant Drop on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: grant Drop on table authorization_part_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: show grant user hive_test_user on table authorization_part_n0 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n0 POSTHOOK: type: SHOW_GRANT -default authorization_part hive_test_user USER CREATE false -1 hive_test_user -default authorization_part hive_test_user USER DROP false -1 hive_test_user -default authorization_part hive_test_user USER UPDATE false -1 hive_test_user -PREHOOK: query: grant select(key) on table authorization_part to user hive_test_user +default authorization_part_n0 hive_test_user USER CREATE false -1 hive_test_user +default authorization_part_n0 hive_test_user USER DROP false -1 hive_test_user +default authorization_part_n0 hive_test_user USER UPDATE false -1 hive_test_user +PREHOOK: query: grant select(key) on table authorization_part_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant select(key) on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: grant select(key) on table authorization_part_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: insert overwrite table authorization_part_n0 partition (ds='2010') select key, value from src_auth_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_auth_tmp -PREHOOK: Output: default@authorization_part@ds=2010 -POSTHOOK: query: insert overwrite table authorization_part 
partition (ds='2010') select key, value from src_auth_tmp +PREHOOK: Input: default@src_auth_tmp_n0 +PREHOOK: Output: default@authorization_part_n0@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part_n0 partition (ds='2010') select key, value from src_auth_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_auth_tmp -POSTHOOK: Output: default@authorization_part@ds=2010 -POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: insert overwrite table authorization_part partition (ds='2011') select key, value from src_auth_tmp +POSTHOOK: Input: default@src_auth_tmp_n0 +POSTHOOK: Output: default@authorization_part_n0@ds=2010 +POSTHOOK: Lineage: authorization_part_n0 PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp_n0)src_auth_tmp_n0.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part_n0 PARTITION(ds=2010).value SIMPLE [(src_auth_tmp_n0)src_auth_tmp_n0.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: insert overwrite table authorization_part_n0 partition (ds='2011') select key, value from src_auth_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_auth_tmp -PREHOOK: Output: default@authorization_part@ds=2011 -POSTHOOK: query: insert overwrite table authorization_part partition (ds='2011') select key, value from src_auth_tmp +PREHOOK: Input: default@src_auth_tmp_n0 +PREHOOK: Output: default@authorization_part_n0@ds=2011 +POSTHOOK: query: insert overwrite table authorization_part_n0 partition (ds='2011') select key, value from src_auth_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_auth_tmp -POSTHOOK: Output: default@authorization_part@ds=2011 -POSTHOOK: Lineage: authorization_part PARTITION(ds=2011).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: authorization_part PARTITION(ds=2011).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: Input: default@src_auth_tmp_n0 +POSTHOOK: Output: default@authorization_part_n0@ds=2011 +POSTHOOK: Lineage: authorization_part_n0 PARTITION(ds=2011).key EXPRESSION [(src_auth_tmp_n0)src_auth_tmp_n0.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part_n0 PARTITION(ds=2011).value SIMPLE [(src_auth_tmp_n0)src_auth_tmp_n0.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2010') PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2010') POSTHOOK: type: SHOW_GRANT -default authorization_part [2010] [key] hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2011') +default authorization_part_n0 [2010] [key] hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2011') PREHOOK: type: SHOW_GRANT 
-POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2011') +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2011') POSTHOOK: type: SHOW_GRANT -default authorization_part [2011] [key] hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: show grant user hive_test_user on table authorization_part(key) +default authorization_part_n0 [2011] [key] hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) POSTHOOK: type: SHOW_GRANT -default authorization_part [key] hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: select key from authorization_part where ds>='2010' order by key limit 20 +default authorization_part_n0 [key] hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: select key from authorization_part_n0 where ds>='2010' order by key limit 20 PREHOOK: type: QUERY -PREHOOK: Input: default@authorization_part -PREHOOK: Input: default@authorization_part@ds=2010 -PREHOOK: Input: default@authorization_part@ds=2011 +PREHOOK: Input: default@authorization_part_n0 +PREHOOK: Input: default@authorization_part_n0@ds=2010 +PREHOOK: Input: default@authorization_part_n0@ds=2011 #### A masked pattern was here #### -POSTHOOK: query: select key from authorization_part where ds>='2010' order by key limit 20 +POSTHOOK: query: select key from authorization_part_n0 where ds>='2010' order by key limit 20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@authorization_part -POSTHOOK: Input: default@authorization_part@ds=2010 -POSTHOOK: Input: default@authorization_part@ds=2011 +POSTHOOK: Input: default@authorization_part_n0 +POSTHOOK: Input: default@authorization_part_n0@ds=2010 +POSTHOOK: Input: default@authorization_part_n0@ds=2011 #### A masked pattern was here #### 0 0 @@ -130,98 +130,98 @@ POSTHOOK: Input: default@authorization_part@ds=2011 8 9 9 -PREHOOK: query: drop table authorization_part +PREHOOK: query: drop table authorization_part_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@authorization_part -PREHOOK: Output: default@authorization_part -POSTHOOK: query: drop table authorization_part +PREHOOK: Input: default@authorization_part_n0 +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: drop table authorization_part_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@authorization_part -POSTHOOK: Output: default@authorization_part -PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +POSTHOOK: Input: default@authorization_part_n0 +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: create table authorization_part_n0 (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@authorization_part -POSTHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: create table authorization_part_n0 (key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@authorization_part -PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES 
("PARTITION_LEVEL_PRIVILEGE"="FALSE") +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: ALTER TABLE authorization_part_n0 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="FALSE") PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@authorization_part -PREHOOK: Output: default@authorization_part -POSTHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="FALSE") +PREHOOK: Input: default@authorization_part_n0 +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: ALTER TABLE authorization_part_n0 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="FALSE") POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@authorization_part -POSTHOOK: Output: default@authorization_part -PREHOOK: query: grant Create on table authorization_part to user hive_test_user +POSTHOOK: Input: default@authorization_part_n0 +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: grant Create on table authorization_part_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant Create on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: grant Create on table authorization_part_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: grant Update on table authorization_part to user hive_test_user +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: grant Update on table authorization_part_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant Update on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: grant Update on table authorization_part_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: show grant user hive_test_user on table authorization_part_n0 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n0 POSTHOOK: type: SHOW_GRANT -default authorization_part hive_test_user USER CREATE false -1 hive_test_user -default authorization_part hive_test_user USER UPDATE false -1 hive_test_user -PREHOOK: query: grant select(key) on table authorization_part to user hive_test_user +default authorization_part_n0 hive_test_user USER CREATE false -1 hive_test_user +default authorization_part_n0 hive_test_user USER UPDATE false -1 hive_test_user +PREHOOK: query: grant select(key) on table authorization_part_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@authorization_part -POSTHOOK: query: grant select(key) on table authorization_part to user hive_test_user +PREHOOK: Output: default@authorization_part_n0 +POSTHOOK: query: grant select(key) on table authorization_part_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@authorization_part -PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +POSTHOOK: Output: default@authorization_part_n0 +PREHOOK: query: insert overwrite table authorization_part_n0 partition (ds='2010') select key, value from src_auth_tmp_n0 
PREHOOK: type: QUERY -PREHOOK: Input: default@src_auth_tmp -PREHOOK: Output: default@authorization_part@ds=2010 -POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp +PREHOOK: Input: default@src_auth_tmp_n0 +PREHOOK: Output: default@authorization_part_n0@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part_n0 partition (ds='2010') select key, value from src_auth_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_auth_tmp -POSTHOOK: Output: default@authorization_part@ds=2010 -POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: insert overwrite table authorization_part partition (ds='2011') select key, value from src_auth_tmp +POSTHOOK: Input: default@src_auth_tmp_n0 +POSTHOOK: Output: default@authorization_part_n0@ds=2010 +POSTHOOK: Lineage: authorization_part_n0 PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp_n0)src_auth_tmp_n0.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part_n0 PARTITION(ds=2010).value SIMPLE [(src_auth_tmp_n0)src_auth_tmp_n0.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: insert overwrite table authorization_part_n0 partition (ds='2011') select key, value from src_auth_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_auth_tmp -PREHOOK: Output: default@authorization_part@ds=2011 -POSTHOOK: query: insert overwrite table authorization_part partition (ds='2011') select key, value from src_auth_tmp +PREHOOK: Input: default@src_auth_tmp_n0 +PREHOOK: Output: default@authorization_part_n0@ds=2011 +POSTHOOK: query: insert overwrite table authorization_part_n0 partition (ds='2011') select key, value from src_auth_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_auth_tmp -POSTHOOK: Output: default@authorization_part@ds=2011 -POSTHOOK: Lineage: authorization_part PARTITION(ds=2011).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: authorization_part PARTITION(ds=2011).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: Input: default@src_auth_tmp_n0 +POSTHOOK: Output: default@authorization_part_n0@ds=2011 +POSTHOOK: Lineage: authorization_part_n0 PARTITION(ds=2011).key EXPRESSION [(src_auth_tmp_n0)src_auth_tmp_n0.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: authorization_part_n0 PARTITION(ds=2011).value SIMPLE [(src_auth_tmp_n0)src_auth_tmp_n0.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2010') PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2010') POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2011') +PREHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2011') PREHOOK: type: SHOW_GRANT -POSTHOOK: 
query: show grant user hive_test_user on table authorization_part(key) partition (ds='2011')
+POSTHOOK: query: show grant user hive_test_user on table authorization_part_n0(key) partition (ds='2011')
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: show grant user hive_test_user on table authorization_part(key)
+PREHOOK: query: show grant user hive_test_user on table authorization_part_n0(key)
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table authorization_part(key)
+POSTHOOK: query: show grant user hive_test_user on table authorization_part_n0(key)
POSTHOOK: type: SHOW_GRANT
-default authorization_part [key] hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: select key from authorization_part where ds>='2010' order by key limit 20
+default authorization_part_n0 [key] hive_test_user USER SELECT false -1 hive_test_user
+PREHOOK: query: select key from authorization_part_n0 where ds>='2010' order by key limit 20
PREHOOK: type: QUERY
-PREHOOK: Input: default@authorization_part
-PREHOOK: Input: default@authorization_part@ds=2010
-PREHOOK: Input: default@authorization_part@ds=2011
+PREHOOK: Input: default@authorization_part_n0
+PREHOOK: Input: default@authorization_part_n0@ds=2010
+PREHOOK: Input: default@authorization_part_n0@ds=2011
#### A masked pattern was here ####
-POSTHOOK: query: select key from authorization_part where ds>='2010' order by key limit 20
+POSTHOOK: query: select key from authorization_part_n0 where ds>='2010' order by key limit 20
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@authorization_part
-POSTHOOK: Input: default@authorization_part@ds=2010
-POSTHOOK: Input: default@authorization_part@ds=2011
+POSTHOOK: Input: default@authorization_part_n0
+POSTHOOK: Input: default@authorization_part_n0@ds=2010
+POSTHOOK: Input: default@authorization_part_n0@ds=2011
#### A masked pattern was here ####
0
0
diff --git a/ql/src/test/results/clientpositive/authorization_9.q.out b/ql/src/test/results/clientpositive/authorization_9.q.out
index ac3baf9edd..6cb28ab716 100644
--- a/ql/src/test/results/clientpositive/authorization_9.q.out
+++ b/ql/src/test/results/clientpositive/authorization_9.q.out
@@ -10,13 +10,13 @@ PREHOOK: Input: database:authorization_9
POSTHOOK: query: use authorization_9
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:authorization_9
-PREHOOK: query: create table dummy (key string, value string)
+PREHOOK: query: create table dummy_n1 (key string, value string)
PREHOOK: type: CREATETABLE
-PREHOOK: Output: authorization_9@dummy
+PREHOOK: Output: authorization_9@dummy_n1
PREHOOK: Output: database:authorization_9
-POSTHOOK: query: create table dummy (key string, value string)
+POSTHOOK: query: create table dummy_n1 (key string, value string)
POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: authorization_9@dummy
+POSTHOOK: Output: authorization_9@dummy_n1
POSTHOOK: Output: database:authorization_9
PREHOOK: query: grant select to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
@@ -26,37 +26,57 @@ PREHOOK: query: grant select on database authorization_9 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
POSTHOOK: query: grant select on database authorization_9 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-PREHOOK: query: grant select on table dummy to user hive_test_user
+PREHOOK: query: grant select on table dummy_n1 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: authorization_9@dummy
-POSTHOOK: query: grant select on table dummy to user hive_test_user
+PREHOOK: Output: authorization_9@dummy_n1
+POSTHOOK: query: grant select on table dummy_n1 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: authorization_9@dummy
-PREHOOK: query: grant select (key, value) on table dummy to user hive_test_user
+POSTHOOK: Output: authorization_9@dummy_n1
+PREHOOK: query: grant select (key, value) on table dummy_n1 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: authorization_9@dummy
-POSTHOOK: query: grant select (key, value) on table dummy to user hive_test_user
+PREHOOK: Output: authorization_9@dummy_n1
+POSTHOOK: query: grant select (key, value) on table dummy_n1 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: authorization_9@dummy
+POSTHOOK: Output: authorization_9@dummy_n1
PREHOOK: query: show grant user hive_test_user on database authorization_9
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user hive_test_user on database authorization_9
POSTHOOK: type: SHOW_GRANT
authorization_9 hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: show grant user hive_test_user on table dummy
+PREHOOK: query: show grant user hive_test_user on table dummy_n1
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table dummy
+POSTHOOK: query: show grant user hive_test_user on table dummy_n1
POSTHOOK: type: SHOW_GRANT
-authorization_9 dummy hive_test_user USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 hive_test_user USER SELECT false -1 hive_test_user
PREHOOK: query: show grant user hive_test_user on all
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user hive_test_user on all
POSTHOOK: type: SHOW_GRANT
hive_test_user USER SELECT false -1 hive_test_user
authorization_9 hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy [key] hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy [value] hive_test_user USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 hive_test_user USER SELECT false -1 hive_test_user
+default alltypesorc hive_test_user USER DELETE true -1 hive_test_user
+default alltypesorc hive_test_user USER INSERT true -1 hive_test_user
+default alltypesorc hive_test_user USER SELECT true -1 hive_test_user
+default alltypesorc hive_test_user USER UPDATE true -1 hive_test_user
+default part hive_test_user USER DELETE true -1 hive_test_user
+default part hive_test_user USER INSERT true -1 hive_test_user
+default part hive_test_user USER SELECT true -1 hive_test_user
+default part hive_test_user USER UPDATE true -1 hive_test_user
+default src hive_test_user USER DELETE true -1 hive_test_user
+default src hive_test_user USER INSERT true -1 hive_test_user
+default src hive_test_user USER SELECT true -1 hive_test_user
+default src hive_test_user USER UPDATE true -1 hive_test_user
+default src1 hive_test_user USER DELETE true -1 hive_test_user
+default src1 hive_test_user USER INSERT true -1 hive_test_user
+default src1 hive_test_user USER SELECT true -1 hive_test_user
+default src1 hive_test_user USER UPDATE true -1 hive_test_user
+default srcpart hive_test_user USER DELETE true -1 hive_test_user
+default srcpart hive_test_user USER INSERT true -1 hive_test_user
+default srcpart hive_test_user USER SELECT true -1 hive_test_user
+default srcpart hive_test_user USER UPDATE true -1 hive_test_user
+authorization_9 dummy_n1 [key] hive_test_user USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 [value] hive_test_user USER SELECT false -1 hive_test_user
PREHOOK: query: grant select to user hive_test_user2
PREHOOK: type: GRANT_PRIVILEGE
POSTHOOK: query: grant select to user hive_test_user2
POSTHOOK: type: GRANT_PRIVILEGE
@@ -65,18 +85,18 @@ PREHOOK: query: grant select on database authorization_9 to user hive_test_user2
PREHOOK: type: GRANT_PRIVILEGE
POSTHOOK: query: grant select on database authorization_9 to user hive_test_user2
POSTHOOK: type: GRANT_PRIVILEGE
-PREHOOK: query: grant select on table dummy to user hive_test_user2
+PREHOOK: query: grant select on table dummy_n1 to user hive_test_user2
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: authorization_9@dummy
-POSTHOOK: query: grant select on table dummy to user hive_test_user2
+PREHOOK: Output: authorization_9@dummy_n1
+POSTHOOK: query: grant select on table dummy_n1 to user hive_test_user2
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: authorization_9@dummy
-PREHOOK: query: grant select (key, value) on table dummy to user hive_test_user2
+POSTHOOK: Output: authorization_9@dummy_n1
+PREHOOK: query: grant select (key, value) on table dummy_n1 to user hive_test_user2
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: authorization_9@dummy
-POSTHOOK: query: grant select (key, value) on table dummy to user hive_test_user2
+PREHOOK: Output: authorization_9@dummy_n1
+POSTHOOK: query: grant select (key, value) on table dummy_n1 to user hive_test_user2
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: authorization_9@dummy
+POSTHOOK: Output: authorization_9@dummy_n1
PREHOOK: query: show grant on all
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant on all
@@ -86,30 +106,70 @@ POSTHOOK: type: SHOW_GRANT
hive_test_user2 USER SELECT false -1 hive_test_user
authorization_9 hive_test_user USER SELECT false -1 hive_test_user
authorization_9 hive_test_user2 USER SELECT false -1 hive_test_user
-authorization_9 dummy hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy hive_test_user2 USER SELECT false -1 hive_test_user
-authorization_9 dummy [key] hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy [key] hive_test_user2 USER SELECT false -1 hive_test_user
-authorization_9 dummy [value] hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy [value] hive_test_user2 USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 hive_test_user USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 hive_test_user2 USER SELECT false -1 hive_test_user
+default alltypesorc hive_test_user USER DELETE true -1 hive_test_user
+default alltypesorc hive_test_user USER INSERT true -1 hive_test_user
+default alltypesorc hive_test_user USER SELECT true -1 hive_test_user
+default alltypesorc hive_test_user USER UPDATE true -1 hive_test_user
+default part hive_test_user USER DELETE true -1 hive_test_user
+default part hive_test_user USER INSERT true -1 hive_test_user
+default part hive_test_user USER SELECT true -1 hive_test_user
+default part hive_test_user USER UPDATE true -1 hive_test_user
+default src hive_test_user USER DELETE true -1 hive_test_user
+default src hive_test_user USER INSERT true -1 hive_test_user
+default src hive_test_user USER SELECT true -1 hive_test_user
+default src hive_test_user USER UPDATE true -1 hive_test_user
+default src1 hive_test_user USER DELETE true -1 hive_test_user
+default src1 hive_test_user USER INSERT true -1 hive_test_user
+default src1 hive_test_user USER SELECT true -1 hive_test_user
+default src1 hive_test_user USER UPDATE true -1 hive_test_user
+default srcpart hive_test_user USER DELETE true -1 hive_test_user
+default srcpart hive_test_user USER INSERT true -1 hive_test_user
+default srcpart hive_test_user USER SELECT true -1 hive_test_user
+default srcpart hive_test_user USER UPDATE true -1 hive_test_user
+authorization_9 dummy_n1 [key] hive_test_user USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 [key] hive_test_user2 USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 [value] hive_test_user USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 [value] hive_test_user2 USER SELECT false -1 hive_test_user
PREHOOK: query: show grant user hive_test_user on all
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user hive_test_user on all
POSTHOOK: type: SHOW_GRANT
hive_test_user USER SELECT false -1 hive_test_user
authorization_9 hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy [key] hive_test_user USER SELECT false -1 hive_test_user
-authorization_9 dummy [value] hive_test_user USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 hive_test_user USER SELECT false -1 hive_test_user
+default alltypesorc hive_test_user USER DELETE true -1 hive_test_user
+default alltypesorc hive_test_user USER INSERT true -1 hive_test_user
+default alltypesorc hive_test_user USER SELECT true -1 hive_test_user
+default alltypesorc hive_test_user USER UPDATE true -1 hive_test_user
+default part hive_test_user USER DELETE true -1 hive_test_user
+default part hive_test_user USER INSERT true -1 hive_test_user
+default part hive_test_user USER SELECT true -1 hive_test_user
+default part hive_test_user USER UPDATE true -1 hive_test_user
+default src hive_test_user USER DELETE true -1 hive_test_user
+default src hive_test_user USER INSERT true -1 hive_test_user
+default src hive_test_user USER SELECT true -1 hive_test_user
+default src hive_test_user USER UPDATE true -1 hive_test_user
+default src1 hive_test_user USER DELETE true -1 hive_test_user
+default src1 hive_test_user USER INSERT true -1 hive_test_user
+default src1 hive_test_user USER SELECT true -1 hive_test_user
+default src1 hive_test_user USER UPDATE true -1 hive_test_user
+default srcpart hive_test_user USER DELETE true -1 hive_test_user
+default srcpart hive_test_user USER INSERT true -1 hive_test_user
+default srcpart hive_test_user USER SELECT true -1 hive_test_user
+default srcpart hive_test_user USER UPDATE true -1 hive_test_user
+authorization_9 dummy_n1 [key] hive_test_user USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 [value] hive_test_user USER SELECT false -1 hive_test_user
PREHOOK: query: show grant user hive_test_user2 on all
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user hive_test_user2 on all
POSTHOOK: type: SHOW_GRANT
hive_test_user2 USER SELECT false -1 hive_test_user
authorization_9 hive_test_user2 USER SELECT false -1 hive_test_user
-authorization_9 dummy hive_test_user2 USER SELECT false -1 hive_test_user
-authorization_9 dummy [key] hive_test_user2 USER SELECT false -1 hive_test_user
-authorization_9 dummy [value] hive_test_user2 USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 hive_test_user2 USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 [key] hive_test_user2 USER SELECT false -1 hive_test_user
+authorization_9 dummy_n1 [value] hive_test_user2 USER SELECT false -1 hive_test_user
PREHOOK: query: revoke select from user hive_test_user
PREHOOK: type: REVOKE_PRIVILEGE
POSTHOOK: query: revoke select from user hive_test_user
@@ -118,18 +178,18 @@ PREHOOK: query: revoke select on database authorization_9 from user hive_test_us
PREHOOK: type: REVOKE_PRIVILEGE
POSTHOOK: query: revoke select on database authorization_9 from user hive_test_user
POSTHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: query: revoke select on table dummy from user hive_test_user
+PREHOOK: query: revoke select on table dummy_n1 from user hive_test_user
PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: authorization_9@dummy
-POSTHOOK: query: revoke select on table dummy from user hive_test_user
+PREHOOK: Output: authorization_9@dummy_n1
+POSTHOOK: query: revoke select on table dummy_n1 from user hive_test_user
POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: authorization_9@dummy
-PREHOOK: query: revoke select (key, value) on table dummy from user hive_test_user
+POSTHOOK: Output: authorization_9@dummy_n1
+PREHOOK: query: revoke select (key, value) on table dummy_n1 from user hive_test_user
PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: authorization_9@dummy
-POSTHOOK: query: revoke select (key, value) on table dummy from user hive_test_user
+PREHOOK: Output: authorization_9@dummy_n1
+POSTHOOK: query: revoke select (key, value) on table dummy_n1 from user hive_test_user
POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: authorization_9@dummy
+POSTHOOK: Output: authorization_9@dummy_n1
PREHOOK: query: revoke select from user hive_test_user2
PREHOOK: type: REVOKE_PRIVILEGE
POSTHOOK: query: revoke select from user hive_test_user2
POSTHOOK: type: REVOKE_PRIVILEGE
@@ -138,15 +198,15 @@ PREHOOK: query: revoke select on database authorization_9 from user hive_test_us
PREHOOK: type: REVOKE_PRIVILEGE
POSTHOOK: query: revoke select on database authorization_9 from user hive_test_user2
POSTHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: query: revoke select on table dummy from user hive_test_user2
+PREHOOK: query: revoke select on table dummy_n1 from user hive_test_user2
PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: authorization_9@dummy
-POSTHOOK: query: revoke select on table dummy from user hive_test_user2
+PREHOOK: Output: authorization_9@dummy_n1
+POSTHOOK: query: revoke select on table dummy_n1 from user hive_test_user2
POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: authorization_9@dummy
-PREHOOK: query: revoke select (key, value) on table dummy from user hive_test_user2
+POSTHOOK: Output: authorization_9@dummy_n1
+PREHOOK: query: revoke select (key, value) on table dummy_n1 from user hive_test_user2
PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: authorization_9@dummy
-POSTHOOK: query: revoke select (key, value) on table dummy from user hive_test_user2
+PREHOOK: Output: authorization_9@dummy_n1
+POSTHOOK: query: revoke select (key, value) on table dummy_n1 from user hive_test_user2
POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: authorization_9@dummy
+POSTHOOK: Output: authorization_9@dummy_n1
diff --git a/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out b/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out
index c5b04e687b..7f0cdc1c7e 100644
--- a/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out
+++ b/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: create table t1(i int)
+PREHOOK: query: create table t1_n28(i int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(i int)
+PREHOOK: Output: default@t1_n28
+POSTHOOK: query: create table t1_n28(i int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
+POSTHOOK: Output: default@t1_n28
PREHOOK: query: show current roles
PREHOOK: type: SHOW_ROLES
POSTHOOK: query: show current roles
@@ -20,33 +20,33 @@ PREHOOK: type: SHOW_ROLES
POSTHOOK: query: show current roles
POSTHOOK: type: SHOW_ROLES
admin
-PREHOOK: query: select * from t1
+PREHOOK: query: select * from t1_n28
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n28
#### A masked pattern was here ####
-POSTHOOK: query: select * from t1
+POSTHOOK: query: select * from t1_n28
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n28
#### A masked pattern was here ####
-PREHOOK: query: grant all on table t1 to user user1
+PREHOOK: query: grant all on table t1_n28 to user user1
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t1
-POSTHOOK: query: grant all on table t1 to user user1
+PREHOOK: Output: default@t1_n28
+POSTHOOK: query: grant all on table t1_n28 to user user1
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t1
-PREHOOK: query: show grant user user1 on table t1
+POSTHOOK: Output: default@t1_n28
+PREHOOK: query: show grant user user1 on table t1_n28
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user user1 on table t1
+POSTHOOK: query: show grant user user1 on table t1_n28
POSTHOOK: type: SHOW_GRANT
-default t1 user1 USER DELETE false -1 hive_admin_user
-default t1 user1 USER INSERT false -1 hive_admin_user
-default t1 user1 USER SELECT false -1 hive_admin_user
-default t1 user1 USER UPDATE false -1 hive_admin_user
-PREHOOK: query: drop table t1
+default t1_n28 user1 USER DELETE false -1 hive_admin_user
+default t1_n28 user1 USER INSERT false -1 hive_admin_user
+default t1_n28 user1 USER SELECT false -1 hive_admin_user
+default t1_n28 user1 USER UPDATE false -1 hive_admin_user
+PREHOOK: query: drop table t1_n28
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: drop table t1
+PREHOOK: Input: default@t1_n28
+PREHOOK: Output: default@t1_n28
+POSTHOOK: query: drop table t1_n28
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
+POSTHOOK: Input: default@t1_n28
+POSTHOOK: Output: default@t1_n28
diff --git a/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out b/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out
index d5a79c656e..aae2115516 100644
--- a/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out
+++ b/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out
@@ -1,19 +1,19 @@
-PREHOOK: query: create table t_cli(i int)
+PREHOOK: query: create table t_cli_n1(i int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t_cli
-POSTHOOK: query: create table t_cli(i int)
+PREHOOK: Output: default@t_cli_n1
+POSTHOOK: query: create table t_cli_n1(i int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t_cli
-PREHOOK: query: create view v_cli (i) as select i from t_cli
+POSTHOOK: Output: default@t_cli_n1
+PREHOOK: query: create view v_cli_n0 (i) as select i from t_cli_n1
PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@t_cli
+PREHOOK: Input: default@t_cli_n1
PREHOOK: Output: database:default
-PREHOOK: Output: default@v_cli
-POSTHOOK: query: create view v_cli (i) as select i from t_cli
+PREHOOK: Output: default@v_cli_n0
+POSTHOOK: query: create view v_cli_n0 (i) as select i from t_cli_n1
POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t_cli
+POSTHOOK: Input: default@t_cli_n1
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v_cli
-POSTHOOK: Lineage: v_cli.i SIMPLE [(t_cli)t_cli.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Output: default@v_cli_n0
+POSTHOOK: Lineage: v_cli_n0.i SIMPLE [(t_cli_n1)t_cli_n1.FieldSchema(name:i, type:int, comment:null), ]
diff --git a/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out b/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out
index 92e7b2b48f..2a48676af6 100644
--- a/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out
+++ b/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out
@@ -4,22 +4,22 @@ PREHOOK: Input: database:default
POSTHOOK: query: use default
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:default
-PREHOOK: query: create table a_table1(a int, b int)
+PREHOOK: query: create table a_table1_n0(a int, b int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@a_table1
-POSTHOOK: query: create table a_table1(a int, b int)
+PREHOOK: Output: default@a_table1_n0
+POSTHOOK: query: create table a_table1_n0(a int, b int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a_table1
-PREHOOK: query: alter table a_table1 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9')
+POSTHOOK: Output: default@a_table1_n0
+PREHOOK: query: alter table a_table1_n0 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9')
PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@a_table1
-PREHOOK: Output: default@a_table1
-POSTHOOK: query: alter table a_table1 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9')
+PREHOOK: Input: default@a_table1_n0
+PREHOOK: Output: default@a_table1_n0
+POSTHOOK: query: alter table a_table1_n0 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9')
POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@a_table1
-POSTHOOK: Output: default@a_table1
+POSTHOOK: Input: default@a_table1_n0
+POSTHOOK: Output: default@a_table1_n0
PREHOOK: query: drop table a_table
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table a_table
diff --git a/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out b/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out
index 78482f6685..7fc4e9ed84 100644
--- a/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out
+++ b/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out
@@ -1,15 +1,15 @@
-PREHOOK: query: create table t_cli(i int)
+PREHOOK: query: create table t_cli_n0(i int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t_cli
-POSTHOOK: query: create table t_cli(i int)
+PREHOOK: Output: default@t_cli_n0
+POSTHOOK: query: create table t_cli_n0(i int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t_cli
-PREHOOK: query: describe t_cli
+POSTHOOK: Output: default@t_cli_n0
+PREHOOK: query: describe t_cli_n0
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t_cli
-POSTHOOK: query: describe t_cli
+PREHOOK: Input: default@t_cli_n0
+POSTHOOK: query: describe t_cli_n0
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t_cli
+POSTHOOK: Input: default@t_cli_n0
i int
diff --git a/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out b/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out
index 63f2ab4478..6986b051ae 100644
--- a/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out
+++ b/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out
@@ -10,59 +10,59 @@ PREHOOK: query: grant role r1 to user r1user
PREHOOK: type: GRANT_ROLE
POSTHOOK: query: grant role r1 to user r1user
POSTHOOK: type: GRANT_ROLE
-PREHOOK: query: CREATE TABLE t1(i int)
+PREHOOK: query: CREATE TABLE t1_n43(i int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: CREATE TABLE t1(i int)
+PREHOOK: Output: default@t1_n43
+POSTHOOK: query: CREATE TABLE t1_n43(i int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION
+POSTHOOK: Output: default@t1_n43
+PREHOOK: query: GRANT ALL ON t1_n43 TO ROLE r1 WITH GRANT OPTION
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t1
-POSTHOOK: query: GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION
+PREHOOK: Output: default@t1_n43
+POSTHOOK: query: GRANT ALL ON t1_n43 TO ROLE r1 WITH GRANT OPTION
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t1
-PREHOOK: query: GRANT ALL ON t1 TO USER user3
+POSTHOOK: Output: default@t1_n43
+PREHOOK: query: GRANT ALL ON t1_n43 TO USER user3
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t1
-POSTHOOK: query: GRANT ALL ON t1 TO USER user3
+PREHOOK: Output: default@t1_n43
+POSTHOOK: query: GRANT ALL ON t1_n43 TO USER user3
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t1
+POSTHOOK: Output: default@t1_n43
PREHOOK: query: set role admin
PREHOOK: type: SHOW_ROLES
POSTHOOK: query: set role admin
POSTHOOK: type: SHOW_ROLES
-PREHOOK: query: show grant on table t1
+PREHOOK: query: show grant on table t1_n43
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant on table t1
+POSTHOOK: query: show grant on table t1_n43
POSTHOOK: type: SHOW_GRANT
-default t1 r1 ROLE DELETE true -1 user1
-default t1 r1 ROLE INSERT true -1 user1
-default t1 r1 ROLE SELECT true -1 user1
-default t1 r1 ROLE UPDATE true -1 user1
-default t1 user1 USER DELETE true -1 hive_admin_user
-default t1 user1 USER INSERT true -1 hive_admin_user
-default t1 user1 USER SELECT true -1 hive_admin_user
-default t1 user1 USER UPDATE true -1 hive_admin_user
-default t1 user3 USER DELETE false -1 r1user
-default t1 user3 USER INSERT false -1 r1user
-default t1 user3 USER SELECT false -1 r1user
-default t1 user3 USER UPDATE false -1 r1user
+default t1_n43 r1 ROLE DELETE true -1 user1
+default t1_n43 r1 ROLE INSERT true -1 user1
+default t1_n43 r1 ROLE SELECT true -1 user1
+default t1_n43 r1 ROLE UPDATE true -1 user1
+default t1_n43 user1 USER DELETE true -1 hive_admin_user
+default t1_n43 user1 USER INSERT true -1 hive_admin_user
+default t1_n43 user1 USER SELECT true -1 hive_admin_user
+default t1_n43 user1 USER UPDATE true -1 hive_admin_user
+default t1_n43 user3 USER DELETE false -1 r1user
+default t1_n43 user3 USER INSERT false -1 r1user
+default t1_n43 user3 USER SELECT false -1 r1user
+default t1_n43 user3 USER UPDATE false -1 r1user
PREHOOK: query: drop role r1
PREHOOK: type: DROPROLE
POSTHOOK: query: drop role r1
POSTHOOK: type: DROPROLE
-PREHOOK: query: show grant on table t1
+PREHOOK: query: show grant on table t1_n43
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant on table t1
+POSTHOOK: query: show grant on table t1_n43
POSTHOOK: type: SHOW_GRANT
-default t1 user1 USER DELETE true -1 hive_admin_user
-default t1 user1 USER INSERT true -1 hive_admin_user
-default t1 user1 USER SELECT true -1 hive_admin_user
-default t1 user1 USER UPDATE true -1 hive_admin_user
-default t1 user3 USER DELETE false -1 r1user
-default t1 user3 USER INSERT false -1 r1user
-default t1 user3 USER SELECT false -1 r1user
-default t1 user3 USER UPDATE false -1 r1user
+default t1_n43 user1 USER DELETE true -1 hive_admin_user
+default t1_n43 user1 USER INSERT true -1 hive_admin_user
+default t1_n43 user1 USER SELECT true -1 hive_admin_user
+default t1_n43 user1 USER UPDATE true -1 hive_admin_user
+default t1_n43 user3 USER DELETE false -1 r1user
+default t1_n43 user3 USER INSERT false -1 r1user
+default t1_n43 user3 USER SELECT false -1 r1user
+default t1_n43 user3 USER UPDATE false -1 r1user
diff --git a/ql/src/test/results/clientpositive/authorization_non_id.q.out b/ql/src/test/results/clientpositive/authorization_non_id.q.out
index 9b81d22428..a51a91b387 100644
--- a/ql/src/test/results/clientpositive/authorization_non_id.q.out
+++ b/ql/src/test/results/clientpositive/authorization_non_id.q.out
@@ -2,18 +2,18 @@ PREHOOK: query: set role ADMIN
PREHOOK: type: SHOW_ROLES
POSTHOOK: query: set role ADMIN
POSTHOOK: type: SHOW_ROLES
-PREHOOK: query: drop table if exists src_autho_test
+PREHOOK: query: drop table if exists src_autho_test_n12
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists src_autho_test
+POSTHOOK: query: drop table if exists src_autho_test_n12
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table src_autho_test (id int)
+PREHOOK: query: create table src_autho_test_n12 (id int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: create table src_autho_test (id int)
+PREHOOK: Output: default@src_autho_test_n12
+POSTHOOK: query: create table src_autho_test_n12 (id int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Output: default@src_autho_test_n12
PREHOOK: query: create role src_role2
PREHOOK: type: CREATEROLE
POSTHOOK: query: create role src_role2
@@ -38,36 +38,36 @@ POSTHOOK: query: show role grant user `foo-1`
POSTHOOK: type: SHOW_ROLE_GRANT
public false -1
src_role2 false -1 hive_admin_user
-PREHOOK: query: grant select on table src_autho_test to user bar
+PREHOOK: query: grant select on table src_autho_test_n12 to user bar
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: grant select on table src_autho_test to user bar
+PREHOOK: Output: default@src_autho_test_n12
+POSTHOOK: query: grant select on table src_autho_test_n12 to user bar
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
-PREHOOK: query: grant select on table src_autho_test to user `foo-1`
+POSTHOOK: Output: default@src_autho_test_n12
+PREHOOK: query: grant select on table src_autho_test_n12 to user `foo-1`
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: grant select on table src_autho_test to user `foo-1`
+PREHOOK: Output: default@src_autho_test_n12
+POSTHOOK: query: grant select on table src_autho_test_n12 to user `foo-1`
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Output: default@src_autho_test_n12
PREHOOK: query: show grant user bar on all
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user bar on all
POSTHOOK: type: SHOW_GRANT
-default src_autho_test bar USER SELECT false -1 hive_admin_user
+default src_autho_test_n12 bar USER SELECT false -1 hive_admin_user
PREHOOK: query: show grant user `foo-1` on all
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user `foo-1` on all
POSTHOOK: type: SHOW_GRANT
-default src_autho_test foo-1 USER SELECT false -1 hive_admin_user
-PREHOOK: query: drop table src_autho_test
+default src_autho_test_n12 foo-1 USER SELECT false -1 hive_admin_user
+PREHOOK: query: drop table src_autho_test_n12
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@src_autho_test
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: drop table src_autho_test
+PREHOOK: Input: default@src_autho_test_n12
+PREHOOK: Output: default@src_autho_test_n12
+POSTHOOK: query: drop table src_autho_test_n12
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@src_autho_test
-POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Input: default@src_autho_test_n12
+POSTHOOK: Output: default@src_autho_test_n12
PREHOOK: query: drop role src_role2
PREHOOK: type: DROPROLE
POSTHOOK: query: drop role src_role2
diff --git a/ql/src/test/results/clientpositive/authorization_owner_actions.q.out b/ql/src/test/results/clientpositive/authorization_owner_actions.q.out
index 779c3babf8..37b224b287 100644
--- a/ql/src/test/results/clientpositive/authorization_owner_actions.q.out
+++ b/ql/src/test/results/clientpositive/authorization_owner_actions.q.out
@@ -1,60 +1,60 @@
-PREHOOK: query: create table t1(i int)
+PREHOOK: query: create table t1_n38(i int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(i int)
+PREHOOK: Output: default@t1_n38
+POSTHOOK: query: create table t1_n38(i int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: ALTER TABLE t1 SET SERDEPROPERTIES ('field.delim' = ',')
+POSTHOOK: Output: default@t1_n38
+PREHOOK: query: ALTER TABLE t1_n38 SET SERDEPROPERTIES ('field.delim' = ',')
PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: ALTER TABLE t1 SET SERDEPROPERTIES ('field.delim' = ',')
+PREHOOK: Input: default@t1_n38
+PREHOOK: Output: default@t1_n38
+POSTHOOK: query: ALTER TABLE t1_n38 SET SERDEPROPERTIES ('field.delim' = ',')
POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-PREHOOK: query: drop table t1
+POSTHOOK: Input: default@t1_n38
+POSTHOOK: Output: default@t1_n38
+PREHOOK: query: drop table t1_n38
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: drop table t1
+PREHOOK: Input: default@t1_n38
+PREHOOK: Output: default@t1_n38
+POSTHOOK: query: drop table t1_n38
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table t1(i int)
+POSTHOOK: Input: default@t1_n38
+POSTHOOK: Output: default@t1_n38
+PREHOOK: query: create table t1_n38(i int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(i int)
+PREHOOK: Output: default@t1_n38
+POSTHOOK: query: create table t1_n38(i int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create view vt1 as select * from t1
+POSTHOOK: Output: default@t1_n38
+PREHOOK: query: create view vt1_n0 as select * from t1_n38
PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n38
PREHOOK: Output: database:default
-PREHOOK: Output: default@vt1
-POSTHOOK: query: create view vt1 as select * from t1
+PREHOOK: Output: default@vt1_n0
+POSTHOOK: query: create view vt1_n0 as select * from t1_n38
POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n38
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@vt1
-POSTHOOK: Lineage: vt1.i SIMPLE [(t1)t1.FieldSchema(name:i, type:int, comment:null), ]
-PREHOOK: query: drop view vt1
+POSTHOOK: Output: default@vt1_n0
+POSTHOOK: Lineage: vt1_n0.i SIMPLE [(t1_n38)t1_n38.FieldSchema(name:i, type:int, comment:null), ]
+PREHOOK: query: drop view vt1_n0
PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@vt1
-PREHOOK: Output: default@vt1
-POSTHOOK: query: drop view vt1
+PREHOOK: Input: default@vt1_n0
+PREHOOK: Output: default@vt1_n0
+POSTHOOK: query: drop view vt1_n0
POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@vt1
-POSTHOOK: Output: default@vt1
-PREHOOK: query: alter table t1 rename to tnew1
+POSTHOOK: Input: default@vt1_n0
+POSTHOOK: Output: default@vt1_n0
+PREHOOK: query: alter table t1_n38 rename to tnew1
PREHOOK: type: ALTERTABLE_RENAME
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: alter table t1 rename to tnew1
+PREHOOK: Input: default@t1_n38
+PREHOOK: Output: default@t1_n38
+POSTHOOK: query: alter table t1_n38 rename to tnew1
POSTHOOK: type: ALTERTABLE_RENAME
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
+POSTHOOK: Input: default@t1_n38
+POSTHOOK: Output: default@t1_n38
POSTHOOK: Output: default@tnew1
diff --git a/ql/src/test/results/clientpositive/authorization_parts.q.out b/ql/src/test/results/clientpositive/authorization_parts.q.out
index bc600f38b7..bce595b7a0 100644
--- a/ql/src/test/results/clientpositive/authorization_parts.q.out
+++ b/ql/src/test/results/clientpositive/authorization_parts.q.out
@@ -1,55 +1,55 @@
-PREHOOK: query: create table tpart(i int, j int) partitioned by (k string)
+PREHOOK: query: create table tpart_n0(i int, j int) partitioned by (k string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@tpart
-POSTHOOK: query: create table tpart(i int, j int) partitioned by (k string)
+PREHOOK: Output: default@tpart_n0
+POSTHOOK: query: create table tpart_n0(i int, j int) partitioned by (k string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tpart
+POSTHOOK: Output: default@tpart_n0
#### A masked pattern was here ####
PREHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-PREHOOK: Output: default@tpart
+PREHOOK: Output: default@tpart_n0
#### A masked pattern was here ####
POSTHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-POSTHOOK: Output: default@tpart
-POSTHOOK: Output: default@tpart@k=1
+POSTHOOK: Output: default@tpart_n0
+POSTHOOK: Output: default@tpart_n0@k=1
#### A masked pattern was here ####
PREHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-PREHOOK: Output: default@tpart
+PREHOOK: Output: default@tpart_n0
#### A masked pattern was here ####
POSTHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-POSTHOOK: Output: default@tpart
-POSTHOOK: Output: default@tpart@k=2
-PREHOOK: query: select count(*) from tpart
+POSTHOOK: Output: default@tpart_n0
+POSTHOOK: Output: default@tpart_n0@k=2
+PREHOOK: query: select count(*) from tpart_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@tpart
-PREHOOK: Input: default@tpart@k=1
-PREHOOK: Input: default@tpart@k=2
+PREHOOK: Input: default@tpart_n0
+PREHOOK: Input: default@tpart_n0@k=1
+PREHOOK: Input: default@tpart_n0@k=2
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from tpart
+POSTHOOK: query: select count(*) from tpart_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tpart
-POSTHOOK: Input: default@tpart@k=1
-POSTHOOK: Input: default@tpart@k=2
+POSTHOOK: Input: default@tpart_n0
+POSTHOOK: Input: default@tpart_n0@k=1
+POSTHOOK: Input: default@tpart_n0@k=2
#### A masked pattern was here ####
0
-PREHOOK: query: analyze table tpart partition (k) compute statistics
+PREHOOK: query: analyze table tpart_n0 partition (k) compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@tpart
-PREHOOK: Input: default@tpart@k=1
-PREHOOK: Input: default@tpart@k=2
-PREHOOK: Output: default@tpart
-PREHOOK: Output: default@tpart@k=1
-PREHOOK: Output: default@tpart@k=2
-POSTHOOK: query: analyze table tpart partition (k) compute statistics
+PREHOOK: Input: default@tpart_n0
+PREHOOK: Input: default@tpart_n0@k=1
+PREHOOK: Input: default@tpart_n0@k=2
+PREHOOK: Output: default@tpart_n0
+PREHOOK: Output: default@tpart_n0@k=1
+PREHOOK: Output: default@tpart_n0@k=2
+POSTHOOK: query: analyze table tpart_n0 partition (k) compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tpart
-POSTHOOK: Input: default@tpart@k=1
-POSTHOOK: Input: default@tpart@k=2
-POSTHOOK: Output: default@tpart
-POSTHOOK: Output: default@tpart@k=1
-POSTHOOK: Output: default@tpart@k=2
+POSTHOOK: Input: default@tpart_n0
+POSTHOOK: Input: default@tpart_n0@k=1
+POSTHOOK: Input: default@tpart_n0@k=2
+POSTHOOK: Output: default@tpart_n0
+POSTHOOK: Output: default@tpart_n0@k=1
+POSTHOOK: Output: default@tpart_n0@k=2
diff --git a/ql/src/test/results/clientpositive/authorization_show_grant.q.out b/ql/src/test/results/clientpositive/authorization_show_grant.q.out
index 37c6555618..4dc667e21c 100644
--- a/ql/src/test/results/clientpositive/authorization_show_grant.q.out
+++ b/ql/src/test/results/clientpositive/authorization_show_grant.q.out
@@ -18,137 +18,157 @@ PREHOOK: query: grant role roleB to role roleA
PREHOOK: type: GRANT_ROLE
POSTHOOK: query: grant role roleB to role roleA
POSTHOOK: type: GRANT_ROLE
-PREHOOK: query: create table t1(i int, j int, k int)
+PREHOOK: query: create table t1_n6(i int, j int, k int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(i int, j int, k int)
+PREHOOK: Output: default@t1_n6
+POSTHOOK: query: create table t1_n6(i int, j int, k int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table t2(i int, j int, k int)
+POSTHOOK: Output: default@t1_n6
+PREHOOK: query: create table t2_n2(i int, j int, k int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2(i int, j int, k int)
+PREHOOK: Output: default@t2_n2
+POSTHOOK: query: create table t2_n2(i int, j int, k int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-PREHOOK: query: grant select on t1 to role roleA
+POSTHOOK: Output: default@t2_n2
+PREHOOK: query: grant select on t1_n6 to role roleA
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t1
-POSTHOOK: query: grant select on t1 to role roleA
+PREHOOK: Output: default@t1_n6
+POSTHOOK: query: grant select on t1_n6 to role roleA
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t1
-PREHOOK: query: grant insert on t2 to role roleA
+POSTHOOK: Output: default@t1_n6
+PREHOOK: query: grant insert on t2_n2 to role roleA
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t2
-POSTHOOK: query: grant insert on t2 to role roleA
+PREHOOK: Output: default@t2_n2
+POSTHOOK: query: grant insert on t2_n2 to role roleA
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t2
-PREHOOK: query: grant insert on t2 to role roleB
+POSTHOOK: Output: default@t2_n2
+PREHOOK: query: grant insert on t2_n2 to role roleB
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t2
-POSTHOOK: query: grant insert on t2 to role roleB
+PREHOOK: Output: default@t2_n2
+POSTHOOK: query: grant insert on t2_n2 to role roleB
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t2
-PREHOOK: query: grant insert,delete on t1 to user userA
+POSTHOOK: Output: default@t2_n2
+PREHOOK: query: grant insert,delete on t1_n6 to user userA
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t1
-POSTHOOK: query: grant insert,delete on t1 to user userA
+PREHOOK: Output: default@t1_n6
+POSTHOOK: query: grant insert,delete on t1_n6 to user userA
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t1
-PREHOOK: query: grant select,insert on t2 to user userA
+POSTHOOK: Output: default@t1_n6
+PREHOOK: query: grant select,insert on t2_n2 to user userA
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t2
-POSTHOOK: query: grant select,insert on t2 to user userA
+PREHOOK: Output: default@t2_n2
+POSTHOOK: query: grant select,insert on t2_n2 to user userA
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n2
PREHOOK: query: set role admin
PREHOOK: type: SHOW_ROLES
POSTHOOK: query: set role admin
POSTHOOK: type: SHOW_ROLES
-PREHOOK: query: show grant user user1 on table t1
+PREHOOK: query: show grant user user1 on table t1_n6
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user user1 on table t1
+POSTHOOK: query: show grant user user1 on table t1_n6
POSTHOOK: type: SHOW_GRANT
-default t1 user1 USER DELETE true -1 hive_admin_user
-default t1 user1 USER INSERT true -1 hive_admin_user
-default t1 user1 USER SELECT true -1 hive_admin_user
-default t1 user1 USER UPDATE true -1 hive_admin_user
+default t1_n6 user1 USER DELETE true -1 hive_admin_user
+default t1_n6 user1 USER INSERT true -1 hive_admin_user
+default t1_n6 user1 USER SELECT true -1 hive_admin_user
+default t1_n6 user1 USER UPDATE true -1 hive_admin_user
PREHOOK: query: show grant user user1
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user user1
POSTHOOK: type: SHOW_GRANT
-default t1 user1 USER DELETE true -1 hive_admin_user
-default t1 user1 USER INSERT true -1 hive_admin_user
-default t1 user1 USER SELECT true -1 hive_admin_user
-default t1 user1 USER UPDATE true -1 hive_admin_user
-default t2 user1 USER DELETE true -1 hive_admin_user
-default t2 user1 USER INSERT true -1 hive_admin_user
-default t2 user1 USER SELECT true -1 hive_admin_user
-default t2 user1 USER UPDATE true -1 hive_admin_user
-PREHOOK: query: show grant role roleA on table t1
+default t1_n6 user1 USER DELETE true -1 hive_admin_user
+default t1_n6 user1 USER INSERT true -1 hive_admin_user
+default t1_n6 user1 USER SELECT true -1 hive_admin_user
+default t1_n6 user1 USER UPDATE true -1 hive_admin_user
+default t2_n2 user1 USER DELETE true -1 hive_admin_user
+default t2_n2 user1 USER INSERT true -1 hive_admin_user
+default t2_n2 user1 USER SELECT true -1 hive_admin_user
+default t2_n2 user1 USER UPDATE true -1 hive_admin_user
+PREHOOK: query: show grant role roleA on table t1_n6
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant role roleA on table t1
+POSTHOOK: query: show grant role roleA on table t1_n6
POSTHOOK: type: SHOW_GRANT
-default t1 rolea ROLE SELECT false -1 user1
+default t1_n6 rolea ROLE SELECT false -1 user1
PREHOOK: query: show grant role roleA
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant role roleA
POSTHOOK: type: SHOW_GRANT
-default t1 rolea ROLE SELECT false -1 user1
-default t2 rolea ROLE INSERT false -1 user1
+default t1_n6 rolea ROLE SELECT false -1 user1
+default t2_n2 rolea ROLE INSERT false -1 user1
PREHOOK: query: show grant
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant
POSTHOOK: type: SHOW_GRANT
-default t1 rolea ROLE SELECT false -1 user1
-default t1 user1 USER DELETE true -1 hive_admin_user
-default t1 user1 USER INSERT true -1 hive_admin_user
-default t1 user1 USER SELECT true -1 hive_admin_user
-default t1 user1 USER UPDATE true -1 hive_admin_user
-default t1 userA USER DELETE false -1 user1
-default t1 userA USER INSERT false -1 user1
-default t2 rolea ROLE INSERT false -1 user1
-default t2 roleb ROLE INSERT false -1 user1
-default t2 user1 USER DELETE true -1 hive_admin_user
-default t2 user1 USER INSERT true -1 hive_admin_user
-default t2 user1 USER SELECT true -1 hive_admin_user
-default t2 user1 USER UPDATE true -1 hive_admin_user
-default t2 userA USER INSERT false -1 user1
-default t2 userA USER SELECT false -1 user1
-PREHOOK: query: show grant role roleA on table t1
+default alltypesorc hive_test_user USER DELETE true -1 hive_test_user
+default alltypesorc hive_test_user USER INSERT true -1 hive_test_user
+default alltypesorc hive_test_user USER SELECT true -1 hive_test_user
+default alltypesorc hive_test_user USER UPDATE true -1 hive_test_user
+default part hive_test_user USER DELETE true -1 hive_test_user
+default part hive_test_user USER INSERT true -1 hive_test_user
+default part hive_test_user USER SELECT true -1 hive_test_user
+default part hive_test_user USER UPDATE true -1 hive_test_user
+default src hive_test_user USER DELETE true -1 hive_test_user
+default src hive_test_user USER INSERT true -1 hive_test_user
+default src hive_test_user USER SELECT true -1 hive_test_user
+default src hive_test_user USER UPDATE true -1 hive_test_user
+default src1 hive_test_user USER DELETE true -1 hive_test_user
+default src1 hive_test_user USER INSERT true -1 hive_test_user
+default src1 hive_test_user USER SELECT true -1 hive_test_user
+default src1 hive_test_user USER UPDATE true -1 hive_test_user
+default srcpart hive_test_user USER DELETE true -1 hive_test_user
+default srcpart hive_test_user USER INSERT true -1 hive_test_user
+default srcpart hive_test_user USER SELECT true -1 hive_test_user
+default srcpart hive_test_user USER UPDATE true -1 hive_test_user
+default t1_n6 rolea ROLE SELECT false -1 user1
+default t1_n6 user1 USER DELETE true -1 hive_admin_user
+default t1_n6 user1 USER INSERT true -1 hive_admin_user
+default t1_n6 user1 USER SELECT true -1 hive_admin_user
+default t1_n6 user1 USER UPDATE true -1 hive_admin_user
+default t1_n6 userA USER DELETE false -1 user1
+default t1_n6 userA USER INSERT false -1 user1
+default t2_n2 rolea ROLE INSERT false -1 user1
+default t2_n2 roleb ROLE INSERT false -1 user1
+default t2_n2 user1 USER DELETE true -1 hive_admin_user
+default t2_n2 user1 USER INSERT true -1 hive_admin_user
+default t2_n2 user1 USER SELECT true -1 hive_admin_user
+default t2_n2 user1 USER UPDATE true -1 hive_admin_user
+default t2_n2 userA USER INSERT false -1 user1
+default t2_n2 userA USER SELECT false -1 user1
+PREHOOK: query: show grant role roleA on table t1_n6
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant role roleA on table t1
+POSTHOOK: query: show grant role roleA on table t1_n6
POSTHOOK: type: SHOW_GRANT
-default t1 rolea ROLE SELECT false -1 user1
+default t1_n6 rolea ROLE SELECT false -1 user1
PREHOOK: query: show grant role roleA
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant role roleA
POSTHOOK: type: SHOW_GRANT
-default t1 rolea ROLE SELECT false -1 user1
-default t2 rolea ROLE INSERT false -1 user1
-PREHOOK: query: show grant role roleB on table t1
+default t1_n6 rolea ROLE SELECT false -1 user1
+default t2_n2 rolea ROLE INSERT false -1 user1
+PREHOOK: query: show grant role roleB on table t1_n6
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant role roleB on table t1
+POSTHOOK: query: show grant role roleB on table t1_n6
POSTHOOK: type: SHOW_GRANT
PREHOOK: query: show grant role roleB
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant role roleB
POSTHOOK: type: SHOW_GRANT
-default t2 roleb ROLE INSERT false -1 user1
-PREHOOK: query: show grant user userA on table t1
+default t2_n2 roleb ROLE INSERT false -1 user1
+PREHOOK: query: show grant user userA on table t1_n6
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user userA on table t1
+POSTHOOK: query: show grant user userA on table t1_n6
POSTHOOK: type: SHOW_GRANT
-default t1 userA USER DELETE false -1 user1
-default t1 userA USER INSERT false -1 user1
+default t1_n6 userA USER DELETE false -1 user1
+default t1_n6 userA USER INSERT false -1 user1
PREHOOK: query: show grant user userA
PREHOOK: type: SHOW_GRANT
POSTHOOK: query: show grant user userA
POSTHOOK: type: SHOW_GRANT
-default t1 userA USER DELETE false -1 user1
-default t1 userA USER INSERT false -1 user1
-default t2 userA USER INSERT false -1 user1
-default t2 userA USER SELECT false -1 user1
+default t1_n6 userA USER DELETE false -1 user1
+default t1_n6 userA USER INSERT false -1 user1
+default t2_n2 userA USER INSERT false -1 user1
+default t2_n2 userA USER SELECT false -1 user1
diff --git a/ql/src/test/results/clientpositive/authorization_update.q.out b/ql/src/test/results/clientpositive/authorization_update.q.out
index e6f456f7ed..a22641ee33 100644
--- a/ql/src/test/results/clientpositive/authorization_update.q.out
+++ b/ql/src/test/results/clientpositive/authorization_update.q.out
@@ -6,20 +6,20 @@ POSTHOOK: query: CREATE TABLE t_auth_up(i int, j int) clustered by (i) into 2 bu
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@t_auth_up
-PREHOOK: query: CREATE TABLE t_select(i int)
+PREHOOK: query: CREATE TABLE t_select_n0(i int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t_select
-POSTHOOK: query: CREATE TABLE t_select(i int)
+PREHOOK: Output: default@t_select_n0
+POSTHOOK: query: CREATE TABLE t_select_n0(i int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t_select
-PREHOOK: query: GRANT ALL ON TABLE t_select TO ROLE public
+POSTHOOK: Output: default@t_select_n0
+PREHOOK: query: GRANT ALL ON TABLE t_select_n0 TO ROLE public
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t_select
-POSTHOOK: query: GRANT ALL ON TABLE t_select TO ROLE public
+PREHOOK: Output: default@t_select_n0
+POSTHOOK: query: GRANT ALL ON TABLE t_select_n0 TO ROLE public
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t_select
+POSTHOOK: Output: default@t_select_n0
PREHOOK: query: GRANT UPDATE ON t_auth_up TO USER userWIns
PREHOOK: type: GRANT_PRIVILEGE
PREHOOK: Output: default@t_auth_up
diff --git a/ql/src/test/results/clientpositive/authorization_update_own_table.q.out b/ql/src/test/results/clientpositive/authorization_update_own_table.q.out
index 2ef03ad56f..a39640917e 100644
--- a/ql/src/test/results/clientpositive/authorization_update_own_table.q.out
+++ b/ql/src/test/results/clientpositive/authorization_update_own_table.q.out
@@ -1,19 +1,19 @@
-PREHOOK: query: create table auth_noupd(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: query: create table auth_noupd_n0(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@auth_noupd
-POSTHOOK: query: create table auth_noupd(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: Output: default@auth_noupd_n0
+POSTHOOK: query: create table auth_noupd_n0(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@auth_noupd
-PREHOOK: query: update auth_noupd set j = 0 where i > 0
+POSTHOOK: Output: default@auth_noupd_n0
+PREHOOK: query: update auth_noupd_n0 set j = 0 where i > 0
PREHOOK: type: QUERY
-PREHOOK: Input: default@auth_noupd
-PREHOOK: Output: default@auth_noupd
-POSTHOOK: query: update auth_noupd set j = 0 where i > 0
+PREHOOK: Input: default@auth_noupd_n0
+PREHOOK: Output: default@auth_noupd_n0
+POSTHOOK: query: update auth_noupd_n0 set j = 0 where i > 0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@auth_noupd
-POSTHOOK: Output: default@auth_noupd
+POSTHOOK: Input: default@auth_noupd_n0
+POSTHOOK: Output: default@auth_noupd_n0
PREHOOK: query: set role admin
PREHOOK: type: SHOW_ROLES
POSTHOOK: query: set role admin
POSTHOOK: type: SHOW_ROLES
diff --git a/ql/src/test/results/clientpositive/authorization_view_1.q.out b/ql/src/test/results/clientpositive/authorization_view_1.q.out
index 31cc0c8888..4440c3febe 100644
--- a/ql/src/test/results/clientpositive/authorization_view_1.q.out
+++ b/ql/src/test/results/clientpositive/authorization_view_1.q.out
@@ -1,98 +1,98 @@
-PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: query: create table src_autho_test_n8 as select * from src
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: create table src_autho_test as select * from src
+PREHOOK: Output: default@src_autho_test_n8
+POSTHOOK: query: create table src_autho_test_n8 as select * from src
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_autho_test
-POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create view v as select * from src_autho_test
+POSTHOOK: Output: default@src_autho_test_n8
+POSTHOOK: Lineage: src_autho_test_n8.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test_n8.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v_n9 as select * from src_autho_test_n8
PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@src_autho_test_n8
PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as select * from src_autho_test
+PREHOOK: Output: default@v_n9
+POSTHOOK: query: create view v_n9 as select * from src_autho_test_n8
POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@src_autho_test_n8
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: v.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: Output: default@v_n9
+POSTHOOK: Lineage: v_n9.key SIMPLE [(src_autho_test_n8)src_autho_test_n8.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: v_n9.value SIMPLE [(src_autho_test_n8)src_autho_test_n8.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: create view v1_n8 as select * from src_autho_test_n8
PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@src_autho_test_n8
PREHOOK: Output: database:default
-PREHOOK: Output: default@v1
-POSTHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: Output: default@v1_n8
+POSTHOOK: query: create view v1_n8 as select * from src_autho_test_n8
POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@src_autho_test_n8
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v1
-POSTHOOK: Lineage: v1.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: v1.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: create view v2 as select * from src_autho_test
+POSTHOOK: Output: default@v1_n8
+POSTHOOK: Lineage: v1_n8.key SIMPLE [(src_autho_test_n8)src_autho_test_n8.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: v1_n8.value SIMPLE [(src_autho_test_n8)src_autho_test_n8.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: create view v2_n5 as select * from src_autho_test_n8
PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@src_autho_test_n8
PREHOOK: Output: database:default
-PREHOOK: Output: default@v2
-POSTHOOK: query: create view v2 as select * from src_autho_test
+PREHOOK: Output: default@v2_n5
+POSTHOOK: query: create view v2_n5 as select * from src_autho_test_n8
POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@src_autho_test_n8
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v2
-POSTHOOK: Lineage: v2.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: v2.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: grant select on table src_autho_test to user hive_test_user
+POSTHOOK: Output: default@v2_n5
+POSTHOOK: Lineage: v2_n5.key SIMPLE [(src_autho_test_n8)src_autho_test_n8.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: v2_n5.value SIMPLE [(src_autho_test_n8)src_autho_test_n8.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: grant select on table src_autho_test_n8 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: grant select on table src_autho_test to user hive_test_user
+PREHOOK: Output: default@src_autho_test_n8
+POSTHOOK: query: grant select on table src_autho_test_n8 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
-PREHOOK: query: grant select on table v to user hive_test_user
+POSTHOOK: Output: default@src_autho_test_n8
+PREHOOK: query: grant select on table v_n9 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@v
-POSTHOOK: query: grant select on table v to user hive_test_user
+PREHOOK: Output: default@v_n9
+POSTHOOK: query: grant select on table v_n9 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@v
-PREHOOK: query: grant select on table v1 to user hive_test_user
+POSTHOOK: Output: default@v_n9
+PREHOOK: query: grant select on table v1_n8 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@v1
-POSTHOOK: query: grant select on table v1 to user hive_test_user
+PREHOOK: Output: default@v1_n8
+POSTHOOK: query: grant select on table v1_n8 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@v1
-PREHOOK: query: grant select on table v2 to user hive_test_user
+POSTHOOK: Output: default@v1_n8
+PREHOOK: query: grant select on table v2_n5 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@v2
-POSTHOOK: query: grant select on table v2 to user hive_test_user
+PREHOOK: Output: default@v2_n5
+POSTHOOK: query: grant select on table v2_n5 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@v2
-PREHOOK: query: show grant user hive_test_user on table v
+POSTHOOK: Output: default@v2_n5
+PREHOOK: query: show grant user hive_test_user on table v_n9
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table v
+POSTHOOK: query: show grant user hive_test_user on table v_n9
POSTHOOK: type: SHOW_GRANT
-default v hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: show grant user hive_test_user on v
+default v_n9 hive_test_user USER SELECT false -1 hive_test_user
+PREHOOK: query: show grant user hive_test_user on v_n9
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on v
+POSTHOOK: query: show grant user hive_test_user on v_n9
POSTHOOK: type: SHOW_GRANT
-default v hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: show grant user hive_test_user on v(key)
+default v_n9 hive_test_user USER SELECT false -1 hive_test_user
+PREHOOK: query: show grant user hive_test_user on v_n9(key)
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on v(key)
+POSTHOOK: query: show grant user hive_test_user on v_n9(key)
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: select * from v order by key limit 10
+PREHOOK: query: select * from v_n9 order by key limit 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@src_autho_test
-PREHOOK: Input: default@v
+PREHOOK: Input: default@src_autho_test_n8
+PREHOOK: Input: default@v_n9
#### A masked pattern was here ####
-POSTHOOK: query: select * from v order by key limit 10
+POSTHOOK: query: select * from v_n9 order by key limit 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_autho_test
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@src_autho_test_n8
+POSTHOOK: Input: default@v_n9
#### A masked pattern was here ####
0 val_0
0 val_0
@@ -104,74 +104,74 @@ POSTHOOK: Input: default@v
103 val_103
104 val_104
104 val_104
-PREHOOK: query: revoke select on table src_autho_test from user hive_test_user
+PREHOOK: query: revoke select on table src_autho_test_n8 from user hive_test_user
PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: revoke select on table src_autho_test from user hive_test_user
+PREHOOK: Output: default@src_autho_test_n8
+POSTHOOK: query: revoke select on table src_autho_test_n8 from user hive_test_user
POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
-PREHOOK: query: show grant user hive_test_user on table v
+POSTHOOK: Output: default@src_autho_test_n8
+PREHOOK: query: show grant user hive_test_user on table v_n9
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table v
+POSTHOOK: query: show grant user hive_test_user on table v_n9
POSTHOOK: type: SHOW_GRANT
-default v hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: show grant user hive_test_user on v
+default v_n9 hive_test_user USER SELECT false -1 hive_test_user
+PREHOOK: query: show grant user hive_test_user on v_n9
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on v
+POSTHOOK: query: show grant user hive_test_user on v_n9
POSTHOOK: type: SHOW_GRANT
-default v hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: show grant user hive_test_user on v(key)
+default v_n9 hive_test_user USER SELECT false -1 hive_test_user
+PREHOOK: query: show grant user hive_test_user on v_n9(key)
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on v(key)
+POSTHOOK: query: show grant user hive_test_user on v_n9(key)
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: revoke select on table v from user hive_test_user
+PREHOOK: query: revoke select on table v_n9 from user hive_test_user
PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: default@v
-POSTHOOK: query: revoke select on table v from user hive_test_user
+PREHOOK: Output: default@v_n9
+POSTHOOK: query: revoke select on table v_n9 from user hive_test_user
POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: default@v
-PREHOOK: query: show grant user hive_test_user on table v
+POSTHOOK: Output: default@v_n9
+PREHOOK: query: show grant user hive_test_user on table v_n9
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table v
+POSTHOOK: query: show grant user hive_test_user on table v_n9
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: show grant user hive_test_user on v
+PREHOOK: query: show grant user hive_test_user on v_n9
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on v
+POSTHOOK: query: show grant user hive_test_user on v_n9
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: show grant user hive_test_user on v(key)
+PREHOOK: query: show grant user hive_test_user on v_n9(key)
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on v(key)
+POSTHOOK: query: show grant user hive_test_user on v_n9(key)
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: grant select on table src_autho_test to user hive_test_user
+PREHOOK: query: grant select on table src_autho_test_n8 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: grant select on table src_autho_test to user hive_test_user
+PREHOOK: Output: default@src_autho_test_n8
+POSTHOOK: query: grant select on table src_autho_test_n8 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@src_autho_test
-PREHOOK: query: grant select(key) on table v to user hive_test_user
+POSTHOOK: Output: default@src_autho_test_n8
+PREHOOK: query: grant select(key) on table v_n9 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@v
-POSTHOOK: query: grant select(key) on table v to user hive_test_user
+PREHOOK: Output: default@v_n9
+POSTHOOK: query: grant select(key) on table v_n9 to user hive_test_user
POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@v
-PREHOOK: query: show grant user hive_test_user on table v
+POSTHOOK: Output: default@v_n9
+PREHOOK: query: show grant user hive_test_user on table v_n9
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on table v
+POSTHOOK: query: show grant user hive_test_user on table v_n9
POSTHOOK: type: SHOW_GRANT
-PREHOOK: query: show grant user hive_test_user on v(key)
+PREHOOK: query: show grant user hive_test_user on v_n9(key)
PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user hive_test_user on v(key)
+POSTHOOK: query: show grant user hive_test_user on v_n9(key)
POSTHOOK: type: SHOW_GRANT
-default v [key] hive_test_user USER SELECT false -1 hive_test_user
-PREHOOK: query: select key from v order by key limit 10
+default v_n9 [key] hive_test_user USER SELECT false -1 hive_test_user
+PREHOOK: query: select key from v_n9 order by key limit 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@src_autho_test
-PREHOOK: Input: default@v
+PREHOOK: Input: default@src_autho_test_n8
+PREHOOK: Input: default@v_n9
#### A masked pattern was here ####
-POSTHOOK: query: select key from v order by key limit 10
+POSTHOOK: query: select key from v_n9 order by key limit 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_autho_test
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@src_autho_test_n8
+POSTHOOK: Input: default@v_n9
#### A masked pattern was here ####
0
0
@@ -184,18 +184,18 @@ POSTHOOK: Input: default@v
104
104
PREHOOK: query: select key from
-(select v.key from src_autho_test join v on src_autho_test.key=v.key)subq
+(select v_n9.key from src_autho_test_n8 join v_n9 on src_autho_test_n8.key=v_n9.key)subq
order by key limit 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@src_autho_test
-PREHOOK: Input: default@v
+PREHOOK: Input: default@src_autho_test_n8
+PREHOOK: Input: default@v_n9
#### A masked pattern was here ####
POSTHOOK: query: select key from
-(select v.key from src_autho_test join v on src_autho_test.key=v.key)subq
+(select v_n9.key from src_autho_test_n8 join v_n9 on src_autho_test_n8.key=v_n9.key)subq
order by key limit 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_autho_test
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@src_autho_test_n8
+POSTHOOK: Input: default@v_n9
#### A masked pattern was here ####
0
0
@@ -208,18 +208,18 @@ POSTHOOK: Input: default@v
0
10
PREHOOK: query: select key from
-(select key as key from src_autho_test union all select key from v)subq
+(select key as key from src_autho_test_n8 union all select key from v_n9)subq
limit 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@src_autho_test
-PREHOOK: Input: default@v
+PREHOOK: Input: default@src_autho_test_n8
+PREHOOK: Input: default@v_n9
#### A masked pattern was here ####
POSTHOOK: query: select key from
-(select key as key from src_autho_test union all select key from v)subq
+(select key as key from src_autho_test_n8 union all select key from v_n9)subq
limit 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_autho_test
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@src_autho_test_n8
+POSTHOOK: Input: default@v_n9
#### A masked pattern was here ####
238
238
@@ -232,22 +232,22 @@ POSTHOOK: Input: default@v
165
165
PREHOOK: query: select key from
-(select value as key from v2 union select value as key from v1 union all select key from v)subq
+(select value as key from v2_n5 union select value as key from v1_n8 union all select key from v_n9)subq
limit 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@src_autho_test
-PREHOOK: Input: default@v
-PREHOOK: Input: default@v1
-PREHOOK: Input: default@v2
+PREHOOK: Input: default@src_autho_test_n8
+PREHOOK: Input: default@v1_n8
+PREHOOK: Input: default@v2_n5
+PREHOOK: Input: default@v_n9
#### A masked pattern was here ####
POSTHOOK: query: select key from
-(select value as key from v2 union select value as key from v1 union all select key from v)subq
+(select value as key from v2_n5 union select value as key from v1_n8 union all select key from v_n9)subq
limit 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_autho_test
-POSTHOOK: Input: default@v
-POSTHOOK: Input: default@v1
-POSTHOOK: Input: default@v2
+POSTHOOK: Input: default@src_autho_test_n8
+POSTHOOK: Input: default@v1_n8
+POSTHOOK: Input: default@v2_n5
+POSTHOOK: Input: default@v_n9
#### A masked pattern was here ####
val_0
val_10
diff --git a/ql/src/test/results/clientpositive/authorization_view_2.q.out b/ql/src/test/results/clientpositive/authorization_view_2.q.out
index e4dd0597b0..984ed59b7a 100644
--- a/ql/src/test/results/clientpositive/authorization_view_2.q.out
+++ b/ql/src/test/results/clientpositive/authorization_view_2.q.out
@@ -1,58 +1,58 @@
-PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: query: create table src_autho_test_n13 as select * from src
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@src_autho_test
-POSTHOOK: query: create table src_autho_test as select * from src
+PREHOOK: Output: default@src_autho_test_n13
+POSTHOOK: query: create table src_autho_test_n13 as select * from src
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_autho_test
-POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: Output: default@src_autho_test_n13
+POSTHOOK: Lineage: src_autho_test_n13.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test_n13.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1_n14 as select * from src_autho_test_n13
PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@src_autho_test_n13
PREHOOK: Output: database:default
-PREHOOK: Output: default@v1
-POSTHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: Output: default@v1_n14
+POSTHOOK: query: create view v1_n14 as select * from src_autho_test_n13
POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@src_autho_test_n13
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v1
-POSTHOOK: Lineage: v1.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: v1.value SIMPLE
[(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: create view v2 as select * from v1 +POSTHOOK: Output: default@v1_n14 +POSTHOOK: Lineage: v1_n14.key SIMPLE [(src_autho_test_n13)src_autho_test_n13.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v1_n14.value SIMPLE [(src_autho_test_n13)src_autho_test_n13.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create view v2_n10 as select * from v1_n14 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 +PREHOOK: Input: default@src_autho_test_n13 +PREHOOK: Input: default@v1_n14 PREHOOK: Output: database:default -PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select * from v1 +PREHOOK: Output: default@v2_n10 +POSTHOOK: query: create view v2_n10 as select * from v1_n14 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@src_autho_test_n13 +POSTHOOK: Input: default@v1_n14 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v2 -POSTHOOK: Lineage: v2.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v2.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: grant select on table v2 to user hive_test_user +POSTHOOK: Output: default@v2_n10 +POSTHOOK: Lineage: v2_n10.key SIMPLE [(src_autho_test_n13)src_autho_test_n13.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v2_n10.value SIMPLE [(src_autho_test_n13)src_autho_test_n13.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: grant select on table v2_n10 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v2 -POSTHOOK: query: grant select on table v2 to user hive_test_user +PREHOOK: Output: default@v2_n10 +POSTHOOK: query: grant select on table v2_n10 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@v2 -PREHOOK: query: select * from v2 order by key limit 10 +POSTHOOK: Output: default@v2_n10 +PREHOOK: query: select * from v2_n10 order by key limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 +PREHOOK: Input: default@src_autho_test_n13 +PREHOOK: Input: default@v1_n14 +PREHOOK: Input: default@v2_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from v2 order by key limit 10 +POSTHOOK: query: select * from v2_n10 order by key limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v2 +POSTHOOK: Input: default@src_autho_test_n13 +POSTHOOK: Input: default@v1_n14 +POSTHOOK: Input: default@v2_n10 #### A masked pattern was here #### 0 val_0 0 val_0 diff --git a/ql/src/test/results/clientpositive/authorization_view_3.q.out b/ql/src/test/results/clientpositive/authorization_view_3.q.out index b2d3b1f9b1..aca910c6fc 100644 --- a/ql/src/test/results/clientpositive/authorization_view_3.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_3.q.out @@ -1,62 +1,62 @@ -PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test_n1 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: create table src_autho_test as select * 
from src +PREHOOK: Output: default@src_autho_test_n1 +POSTHOOK: query: create table src_autho_test_n1 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_autho_test -POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create view v1 as select * from src_autho_test +POSTHOOK: Output: default@src_autho_test_n1 +POSTHOOK: Lineage: src_autho_test_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_autho_test_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v1_n1 as select * from src_autho_test_n1 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n1 PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as select * from src_autho_test +PREHOOK: Output: default@v1_n1 +POSTHOOK: query: create view v1_n1 as select * from src_autho_test_n1 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n1 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v1.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: create view v2 as select * from v1 +POSTHOOK: Output: default@v1_n1 +POSTHOOK: Lineage: v1_n1.key SIMPLE [(src_autho_test_n1)src_autho_test_n1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v1_n1.value SIMPLE [(src_autho_test_n1)src_autho_test_n1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create view v2_n0 as select * from v1_n1 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 +PREHOOK: Input: default@src_autho_test_n1 +PREHOOK: Input: default@v1_n1 PREHOOK: Output: database:default -PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select * from v1 +PREHOOK: Output: default@v2_n0 +POSTHOOK: query: create view v2_n0 as select * from v1_n1 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@src_autho_test_n1 +POSTHOOK: Input: default@v1_n1 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v2 -POSTHOOK: Lineage: v2.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v2.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: grant select on table v2 to user hive_test_user +POSTHOOK: Output: default@v2_n0 +POSTHOOK: Lineage: v2_n0.key SIMPLE [(src_autho_test_n1)src_autho_test_n1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v2_n0.value SIMPLE [(src_autho_test_n1)src_autho_test_n1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: grant select on table v2_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v2 -POSTHOOK: query: grant select on table v2 to user hive_test_user +PREHOOK: Output: default@v2_n0 +POSTHOOK: query: grant select on table v2_n0 to user 
hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@v2 -PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@v2_n0 +PREHOOK: query: grant select(key) on table src_autho_test_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n1 +POSTHOOK: query: grant select(key) on table src_autho_test_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10 +POSTHOOK: Output: default@src_autho_test_n1 +PREHOOK: query: select v2_n0.key from v2_n0 join (select key from src_autho_test_n1)subq on v2_n0.value=subq.key order by key limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 +PREHOOK: Input: default@src_autho_test_n1 +PREHOOK: Input: default@v1_n1 +PREHOOK: Input: default@v2_n0 #### A masked pattern was here #### -POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10 +POSTHOOK: query: select v2_n0.key from v2_n0 join (select key from src_autho_test_n1)subq on v2_n0.value=subq.key order by key limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v2 +POSTHOOK: Input: default@src_autho_test_n1 +POSTHOOK: Input: default@v1_n1 +POSTHOOK: Input: default@v2_n0 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/authorization_view_4.q.out b/ql/src/test/results/clientpositive/authorization_view_4.q.out index c832f110a9..6c70a35fa7 100644 --- a/ql/src/test/results/clientpositive/authorization_view_4.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_4.q.out @@ -1,64 +1,64 @@ -PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test_n7 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: create table src_autho_test as select * from src +PREHOOK: Output: default@src_autho_test_n7 +POSTHOOK: query: create table src_autho_test_n7 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_autho_test -POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create view v1 as select * from src +POSTHOOK: Output: default@src_autho_test_n7 +POSTHOOK: Lineage: src_autho_test_n7.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_autho_test_n7.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v1_n6 as select * from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as select * from src +PREHOOK: Output: default@v1_n6 +POSTHOOK: query: create view v1_n6 as select * from src POSTHOOK: type: CREATEVIEW 
POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: v1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create view v2 as select * from v1 +POSTHOOK: Output: default@v1_n6 +POSTHOOK: Lineage: v1_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: v1_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v2_n3 as select * from v1_n6 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src -PREHOOK: Input: default@v1 +PREHOOK: Input: default@v1_n6 PREHOOK: Output: database:default -PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select * from v1 +PREHOOK: Output: default@v2_n3 +POSTHOOK: query: create view v2_n3 as select * from v1_n6 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v1_n6 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v2 -POSTHOOK: Lineage: v2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: v2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: grant select on table v2 to user hive_test_user +POSTHOOK: Output: default@v2_n3 +POSTHOOK: Lineage: v2_n3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: v2_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: grant select on table v2_n3 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v2 -POSTHOOK: query: grant select on table v2 to user hive_test_user +PREHOOK: Output: default@v2_n3 +POSTHOOK: query: grant select on table v2_n3 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@v2 -PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@v2_n3 +PREHOOK: query: grant select(key) on table src_autho_test_n7 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n7 +POSTHOOK: query: grant select(key) on table src_autho_test_n7 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10 +POSTHOOK: Output: default@src_autho_test_n7 +PREHOOK: query: select v2_n3.key from v2_n3 join (select key from src_autho_test_n7)subq on v2_n3.value=subq.key order by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 +PREHOOK: Input: default@src_autho_test_n7 +PREHOOK: Input: default@v1_n6 +PREHOOK: Input: default@v2_n3 #### A masked pattern was here #### -POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10 +POSTHOOK: query: select v2_n3.key from v2_n3 join (select key from src_autho_test_n7)subq on v2_n3.value=subq.key order by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 
-POSTHOOK: Input: default@v2 +POSTHOOK: Input: default@src_autho_test_n7 +POSTHOOK: Input: default@v1_n6 +POSTHOOK: Input: default@v2_n3 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out index edc0d89773..8e746098d2 100644 --- a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out @@ -1,98 +1,98 @@ -PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test_n9 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: create table src_autho_test as select * from src +PREHOOK: Output: default@src_autho_test_n9 +POSTHOOK: query: create table src_autho_test_n9 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_autho_test -POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create view v as select * from src_autho_test +POSTHOOK: Output: default@src_autho_test_n9 +POSTHOOK: Lineage: src_autho_test_n9.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_autho_test_n9.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v_n10 as select * from src_autho_test_n9 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n9 PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select * from src_autho_test +PREHOOK: Output: default@v_n10 +POSTHOOK: query: create view v_n10 as select * from src_autho_test_n9 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n9 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: create view v1 as select * from src_autho_test +POSTHOOK: Output: default@v_n10 +POSTHOOK: Lineage: v_n10.key SIMPLE [(src_autho_test_n9)src_autho_test_n9.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v_n10.value SIMPLE [(src_autho_test_n9)src_autho_test_n9.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create view v1_n9 as select * from src_autho_test_n9 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n9 PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as select * from src_autho_test +PREHOOK: Output: default@v1_n9 +POSTHOOK: query: create view v1_n9 as select * from src_autho_test_n9 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n9 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE 
[(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v1.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: create view v2 as select * from src_autho_test +POSTHOOK: Output: default@v1_n9 +POSTHOOK: Lineage: v1_n9.key SIMPLE [(src_autho_test_n9)src_autho_test_n9.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v1_n9.value SIMPLE [(src_autho_test_n9)src_autho_test_n9.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create view v2_n6 as select * from src_autho_test_n9 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n9 PREHOOK: Output: database:default -PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select * from src_autho_test +PREHOOK: Output: default@v2_n6 +POSTHOOK: query: create view v2_n6 as select * from src_autho_test_n9 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n9 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v2 -POSTHOOK: Lineage: v2.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v2.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: grant select on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@v2_n6 +POSTHOOK: Lineage: v2_n6.key SIMPLE [(src_autho_test_n9)src_autho_test_n9.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v2_n6.value SIMPLE [(src_autho_test_n9)src_autho_test_n9.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: grant select on table src_autho_test_n9 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n9 +POSTHOOK: query: grant select on table src_autho_test_n9 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: grant select on table v to user hive_test_user +POSTHOOK: Output: default@src_autho_test_n9 +PREHOOK: query: grant select on table v_n10 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v -POSTHOOK: query: grant select on table v to user hive_test_user +PREHOOK: Output: default@v_n10 +POSTHOOK: query: grant select on table v_n10 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@v -PREHOOK: query: grant select on table v1 to user hive_test_user +POSTHOOK: Output: default@v_n10 +PREHOOK: query: grant select on table v1_n9 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v1 -POSTHOOK: query: grant select on table v1 to user hive_test_user +PREHOOK: Output: default@v1_n9 +POSTHOOK: query: grant select on table v1_n9 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@v1 -PREHOOK: query: grant select on table v2 to user hive_test_user +POSTHOOK: Output: default@v1_n9 +PREHOOK: query: grant select on table v2_n6 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v2 -POSTHOOK: query: grant select on table v2 to user hive_test_user +PREHOOK: Output: default@v2_n6 +POSTHOOK: query: grant select on table v2_n6 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: 
default@v2 -PREHOOK: query: show grant user hive_test_user on table v +POSTHOOK: Output: default@v2_n6 +PREHOOK: query: show grant user hive_test_user on table v_n10 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table v +POSTHOOK: query: show grant user hive_test_user on table v_n10 POSTHOOK: type: SHOW_GRANT -default v hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: show grant user hive_test_user on v +default v_n10 hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: show grant user hive_test_user on v_n10 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on v +POSTHOOK: query: show grant user hive_test_user on v_n10 POSTHOOK: type: SHOW_GRANT -default v hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: show grant user hive_test_user on v(key) +default v_n10 hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: show grant user hive_test_user on v_n10(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on v(key) +POSTHOOK: query: show grant user hive_test_user on v_n10(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: select * from v order by key limit 10 +PREHOOK: query: select * from v_n10 order by key limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v +PREHOOK: Input: default@src_autho_test_n9 +PREHOOK: Input: default@v_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from v order by key limit 10 +POSTHOOK: query: select * from v_n10 order by key limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v +POSTHOOK: Input: default@src_autho_test_n9 +POSTHOOK: Input: default@v_n10 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -104,74 +104,74 @@ POSTHOOK: Input: default@v 103 val_103 104 val_104 104 val_104 -PREHOOK: query: revoke select on table src_autho_test from user hive_test_user +PREHOOK: query: revoke select on table src_autho_test_n9 from user hive_test_user PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke select on table src_autho_test from user hive_test_user +PREHOOK: Output: default@src_autho_test_n9 +POSTHOOK: query: revoke select on table src_autho_test_n9 from user hive_test_user POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant user hive_test_user on table v +POSTHOOK: Output: default@src_autho_test_n9 +PREHOOK: query: show grant user hive_test_user on table v_n10 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table v +POSTHOOK: query: show grant user hive_test_user on table v_n10 POSTHOOK: type: SHOW_GRANT -default v hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: show grant user hive_test_user on v +default v_n10 hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: show grant user hive_test_user on v_n10 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on v +POSTHOOK: query: show grant user hive_test_user on v_n10 POSTHOOK: type: SHOW_GRANT -default v hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: show grant user hive_test_user on v(key) +default v_n10 hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: show grant user hive_test_user on v_n10(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on v(key) +POSTHOOK: query: show grant user hive_test_user on 
v_n10(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: revoke select on table v from user hive_test_user +PREHOOK: query: revoke select on table v_n10 from user hive_test_user PREHOOK: type: REVOKE_PRIVILEGE -PREHOOK: Output: default@v -POSTHOOK: query: revoke select on table v from user hive_test_user +PREHOOK: Output: default@v_n10 +POSTHOOK: query: revoke select on table v_n10 from user hive_test_user POSTHOOK: type: REVOKE_PRIVILEGE -POSTHOOK: Output: default@v -PREHOOK: query: show grant user hive_test_user on table v +POSTHOOK: Output: default@v_n10 +PREHOOK: query: show grant user hive_test_user on table v_n10 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table v +POSTHOOK: query: show grant user hive_test_user on table v_n10 POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant user hive_test_user on v +PREHOOK: query: show grant user hive_test_user on v_n10 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on v +POSTHOOK: query: show grant user hive_test_user on v_n10 POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant user hive_test_user on v(key) +PREHOOK: query: show grant user hive_test_user on v_n10(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on v(key) +POSTHOOK: query: show grant user hive_test_user on v_n10(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: grant select on table src_autho_test to user hive_test_user +PREHOOK: query: grant select on table src_autho_test_n9 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n9 +POSTHOOK: query: grant select on table src_autho_test_n9 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: grant select(key) on table v to user hive_test_user +POSTHOOK: Output: default@src_autho_test_n9 +PREHOOK: query: grant select(key) on table v_n10 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v -POSTHOOK: query: grant select(key) on table v to user hive_test_user +PREHOOK: Output: default@v_n10 +POSTHOOK: query: grant select(key) on table v_n10 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@v -PREHOOK: query: show grant user hive_test_user on table v +POSTHOOK: Output: default@v_n10 +PREHOOK: query: show grant user hive_test_user on table v_n10 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on table v +POSTHOOK: query: show grant user hive_test_user on table v_n10 POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant user hive_test_user on v(key) +PREHOOK: query: show grant user hive_test_user on v_n10(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant user hive_test_user on v(key) +POSTHOOK: query: show grant user hive_test_user on v_n10(key) POSTHOOK: type: SHOW_GRANT -default v [key] hive_test_user USER SELECT false -1 hive_test_user -PREHOOK: query: select key from v order by key limit 10 +default v_n10 [key] hive_test_user USER SELECT false -1 hive_test_user +PREHOOK: query: select key from v_n10 order by key limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v +PREHOOK: Input: default@src_autho_test_n9 +PREHOOK: Input: default@v_n10 #### A masked pattern was here #### -POSTHOOK: query: select key from v order by key limit 10 +POSTHOOK: query: select key from v_n10 order by key 
limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v +POSTHOOK: Input: default@src_autho_test_n9 +POSTHOOK: Input: default@v_n10 #### A masked pattern was here #### 0 0 @@ -184,18 +184,18 @@ POSTHOOK: Input: default@v 104 104 PREHOOK: query: select key from -(select v.key from src_autho_test join v on src_autho_test.key=v.key)subq +(select v_n10.key from src_autho_test_n9 join v_n10 on src_autho_test_n9.key=v_n10.key)subq order by key limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v +PREHOOK: Input: default@src_autho_test_n9 +PREHOOK: Input: default@v_n10 #### A masked pattern was here #### POSTHOOK: query: select key from -(select v.key from src_autho_test join v on src_autho_test.key=v.key)subq +(select v_n10.key from src_autho_test_n9 join v_n10 on src_autho_test_n9.key=v_n10.key)subq order by key limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v +POSTHOOK: Input: default@src_autho_test_n9 +POSTHOOK: Input: default@v_n10 #### A masked pattern was here #### 0 0 @@ -208,18 +208,18 @@ POSTHOOK: Input: default@v 0 10 PREHOOK: query: select key from -(select key as key from src_autho_test union all select key from v)subq +(select key as key from src_autho_test_n9 union all select key from v_n10)subq limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v +PREHOOK: Input: default@src_autho_test_n9 +PREHOOK: Input: default@v_n10 #### A masked pattern was here #### POSTHOOK: query: select key from -(select key as key from src_autho_test union all select key from v)subq +(select key as key from src_autho_test_n9 union all select key from v_n10)subq limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v +POSTHOOK: Input: default@src_autho_test_n9 +POSTHOOK: Input: default@v_n10 #### A masked pattern was here #### 238 238 @@ -232,22 +232,22 @@ POSTHOOK: Input: default@v 165 165 PREHOOK: query: select key from -(select value as key from v2 union select value as key from v1 union all select key from v)subq +(select value as key from v2_n6 union select value as key from v1_n9 union all select key from v_n10)subq limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 +PREHOOK: Input: default@src_autho_test_n9 +PREHOOK: Input: default@v1_n9 +PREHOOK: Input: default@v2_n6 +PREHOOK: Input: default@v_n10 #### A masked pattern was here #### POSTHOOK: query: select key from -(select value as key from v2 union select value as key from v1 union all select key from v)subq +(select value as key from v2_n6 union select value as key from v1_n9 union all select key from v_n10)subq limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v2 +POSTHOOK: Input: default@src_autho_test_n9 +POSTHOOK: Input: default@v1_n9 +POSTHOOK: Input: default@v2_n6 +POSTHOOK: Input: default@v_n10 #### A masked pattern was here #### val_0 val_10 @@ -259,15 +259,15 @@ val_11 val_111 val_113 val_114 -PREHOOK: query: select key from v sort by key limit 10 +PREHOOK: query: select key from v_n10 sort by key limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v +PREHOOK: Input: default@src_autho_test_n9 +PREHOOK: Input: default@v_n10 #### A masked pattern was here #### -POSTHOOK: 
query: select key from v sort by key limit 10 +POSTHOOK: query: select key from v_n10 sort by key limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v +POSTHOOK: Input: default@src_autho_test_n9 +POSTHOOK: Input: default@v_n10 #### A masked pattern was here #### 0 0 @@ -280,18 +280,18 @@ POSTHOOK: Input: default@v 104 104 PREHOOK: query: select key from -(select key as key from src_autho_test union all select key from v cluster by key)subq +(select key as key from src_autho_test_n9 union all select key from v_n10 cluster by key)subq limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v +PREHOOK: Input: default@src_autho_test_n9 +PREHOOK: Input: default@v_n10 #### A masked pattern was here #### POSTHOOK: query: select key from -(select key as key from src_autho_test union all select key from v cluster by key)subq +(select key as key from src_autho_test_n9 union all select key from v_n10 cluster by key)subq limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v +POSTHOOK: Input: default@src_autho_test_n9 +POSTHOOK: Input: default@v_n10 #### A masked pattern was here #### 0 0 diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out index e4dd0597b0..ee94a71550 100644 --- a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out @@ -1,58 +1,58 @@ -PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test_n10 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: create table src_autho_test as select * from src +PREHOOK: Output: default@src_autho_test_n10 +POSTHOOK: query: create table src_autho_test_n10 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_autho_test -POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create view v1 as select * from src_autho_test +POSTHOOK: Output: default@src_autho_test_n10 +POSTHOOK: Lineage: src_autho_test_n10.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_autho_test_n10.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v1_n11 as select * from src_autho_test_n10 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n10 PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as select * from src_autho_test +PREHOOK: Output: default@v1_n11 +POSTHOOK: query: create view v1_n11 as select * from src_autho_test_n10 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n10 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v1.value SIMPLE 
[(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: create view v2 as select * from v1 +POSTHOOK: Output: default@v1_n11 +POSTHOOK: Lineage: v1_n11.key SIMPLE [(src_autho_test_n10)src_autho_test_n10.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v1_n11.value SIMPLE [(src_autho_test_n10)src_autho_test_n10.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create view v2_n7 as select * from v1_n11 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 +PREHOOK: Input: default@src_autho_test_n10 +PREHOOK: Input: default@v1_n11 PREHOOK: Output: database:default -PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select * from v1 +PREHOOK: Output: default@v2_n7 +POSTHOOK: query: create view v2_n7 as select * from v1_n11 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@src_autho_test_n10 +POSTHOOK: Input: default@v1_n11 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v2 -POSTHOOK: Lineage: v2.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v2.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: grant select on table v2 to user hive_test_user +POSTHOOK: Output: default@v2_n7 +POSTHOOK: Lineage: v2_n7.key SIMPLE [(src_autho_test_n10)src_autho_test_n10.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v2_n7.value SIMPLE [(src_autho_test_n10)src_autho_test_n10.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: grant select on table v2_n7 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v2 -POSTHOOK: query: grant select on table v2 to user hive_test_user +PREHOOK: Output: default@v2_n7 +POSTHOOK: query: grant select on table v2_n7 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@v2 -PREHOOK: query: select * from v2 order by key limit 10 +POSTHOOK: Output: default@v2_n7 +PREHOOK: query: select * from v2_n7 order by key limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 +PREHOOK: Input: default@src_autho_test_n10 +PREHOOK: Input: default@v1_n11 +PREHOOK: Input: default@v2_n7 #### A masked pattern was here #### -POSTHOOK: query: select * from v2 order by key limit 10 +POSTHOOK: query: select * from v2_n7 order by key limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v2 +POSTHOOK: Input: default@src_autho_test_n10 +POSTHOOK: Input: default@v1_n11 +POSTHOOK: Input: default@v2_n7 #### A masked pattern was here #### 0 val_0 0 val_0 diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out index b2d3b1f9b1..5b54e26da3 100644 --- a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out @@ -1,62 +1,62 @@ -PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test_n0 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: create 
table src_autho_test as select * from src +PREHOOK: Output: default@src_autho_test_n0 +POSTHOOK: query: create table src_autho_test_n0 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_autho_test -POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create view v1 as select * from src_autho_test +POSTHOOK: Output: default@src_autho_test_n0 +POSTHOOK: Lineage: src_autho_test_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_autho_test_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v1_n0 as select * from src_autho_test_n0 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test +PREHOOK: Input: default@src_autho_test_n0 PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as select * from src_autho_test +PREHOOK: Output: default@v1_n0 +POSTHOOK: query: create view v1_n0 as select * from src_autho_test_n0 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test +POSTHOOK: Input: default@src_autho_test_n0 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v1.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: create view v2 as select * from v1 +POSTHOOK: Output: default@v1_n0 +POSTHOOK: Lineage: v1_n0.key SIMPLE [(src_autho_test_n0)src_autho_test_n0.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v1_n0.value SIMPLE [(src_autho_test_n0)src_autho_test_n0.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create view v2 as select * from v1_n0 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 +PREHOOK: Input: default@src_autho_test_n0 +PREHOOK: Input: default@v1_n0 PREHOOK: Output: database:default PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select * from v1 +POSTHOOK: query: create view v2 as select * from v1_n0 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@src_autho_test_n0 +POSTHOOK: Input: default@v1_n0 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -POSTHOOK: Lineage: v2.key SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v2.value SIMPLE [(src_autho_test)src_autho_test.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: v2.key SIMPLE [(src_autho_test_n0)src_autho_test_n0.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v2.value SIMPLE [(src_autho_test_n0)src_autho_test_n0.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 -PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: query: grant select(key) on table src_autho_test_n0 
to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n0 +POSTHOOK: query: grant select(key) on table src_autho_test_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10 +POSTHOOK: Output: default@src_autho_test_n0 +PREHOOK: query: select v2.key from v2 join (select key from src_autho_test_n0)subq on v2.value=subq.key order by key limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 +PREHOOK: Input: default@src_autho_test_n0 +PREHOOK: Input: default@v1_n0 PREHOOK: Input: default@v2 #### A masked pattern was here #### -POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10 +POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test_n0)subq on v2.value=subq.key order by key limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@src_autho_test_n0 +POSTHOOK: Input: default@v1_n0 POSTHOOK: Input: default@v2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out index c832f110a9..302b5da038 100644 --- a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out @@ -1,64 +1,64 @@ -PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test_n6 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: create table src_autho_test as select * from src +PREHOOK: Output: default@src_autho_test_n6 +POSTHOOK: query: create table src_autho_test_n6 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_autho_test -POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create view v1 as select * from src +POSTHOOK: Output: default@src_autho_test_n6 +POSTHOOK: Lineage: src_autho_test_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_autho_test_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v1_n5 as select * from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as select * from src +PREHOOK: Output: default@v1_n5 +POSTHOOK: query: create view v1_n5 as select * from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: v1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] -PREHOOK: query: create view v2 as select * from v1 +POSTHOOK: Output: default@v1_n5 +POSTHOOK: Lineage: v1_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: v1_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v2_n2 as select * from v1_n5 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src -PREHOOK: Input: default@v1 +PREHOOK: Input: default@v1_n5 PREHOOK: Output: database:default -PREHOOK: Output: default@v2 -POSTHOOK: query: create view v2 as select * from v1 +PREHOOK: Output: default@v2_n2 +POSTHOOK: query: create view v2_n2 as select * from v1_n5 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v1_n5 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v2 -POSTHOOK: Lineage: v2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: v2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: grant select on table v2 to user hive_test_user +POSTHOOK: Output: default@v2_n2 +POSTHOOK: Lineage: v2_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: v2_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: grant select on table v2_n2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@v2 -POSTHOOK: query: grant select on table v2 to user hive_test_user +PREHOOK: Output: default@v2_n2 +POSTHOOK: query: grant select on table v2_n2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@v2 -PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: Output: default@v2_n2 +PREHOOK: query: grant select(key) on table src_autho_test_n6 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@src_autho_test -POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: Output: default@src_autho_test_n6 +POSTHOOK: query: grant select(key) on table src_autho_test_n6 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@src_autho_test -PREHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10 +POSTHOOK: Output: default@src_autho_test_n6 +PREHOOK: query: select v2_n2.key from v2_n2 join (select key from src_autho_test_n6)subq on v2_n2.value=subq.key order by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@src_autho_test -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 +PREHOOK: Input: default@src_autho_test_n6 +PREHOOK: Input: default@v1_n5 +PREHOOK: Input: default@v2_n2 #### A masked pattern was here #### -POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10 +POSTHOOK: query: select v2_n2.key from v2_n2 join (select key from src_autho_test_n6)subq on v2_n2.value=subq.key order by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@src_autho_test -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v2 +POSTHOOK: Input: default@src_autho_test_n6 +POSTHOOK: Input: default@v1_n5 +POSTHOOK: Input: default@v2_n2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/autoColumnStats_3.q.out 
b/ql/src/test/results/clientpositive/autoColumnStats_3.q.out index 73f6f8735f..777d165dc2 100644 --- a/ql/src/test/results/clientpositive/autoColumnStats_3.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_3.q.out @@ -1,31 +1,31 @@ -PREHOOK: query: drop table src_multi1 +PREHOOK: query: drop table src_multi1_n6 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table src_multi1 +POSTHOOK: query: drop table src_multi1_n6 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table src_multi1 like src +PREHOOK: query: create table src_multi1_n6 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_multi1 -POSTHOOK: query: create table src_multi1 like src +PREHOOK: Output: default@src_multi1_n6 +POSTHOOK: query: create table src_multi1_n6 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_multi1 -PREHOOK: query: analyze table src_multi1 compute statistics for columns key +POSTHOOK: Output: default@src_multi1_n6 +PREHOOK: query: analyze table src_multi1_n6 compute statistics for columns key PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@src_multi1 -PREHOOK: Output: default@src_multi1 +PREHOOK: Input: default@src_multi1_n6 +PREHOOK: Output: default@src_multi1_n6 #### A masked pattern was here #### -POSTHOOK: query: analyze table src_multi1 compute statistics for columns key +POSTHOOK: query: analyze table src_multi1_n6 compute statistics for columns key POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@src_multi1 -POSTHOOK: Output: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n6 +POSTHOOK: Output: default@src_multi1_n6 #### A masked pattern was here #### -PREHOOK: query: describe formatted src_multi1 +PREHOOK: query: describe formatted src_multi1_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_multi1 -POSTHOOK: query: describe formatted src_multi1 +PREHOOK: Input: default@src_multi1_n6 +POSTHOOK: query: describe formatted src_multi1_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n6 # col_name data_type comment key string default value string default @@ -54,22 +54,22 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: insert into table src_multi1 select * from src +PREHOOK: query: insert into table src_multi1_n6 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -POSTHOOK: query: insert into table src_multi1 select * from src +PREHOOK: Output: default@src_multi1_n6 +POSTHOOK: query: insert into table src_multi1_n6 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted src_multi1 +POSTHOOK: Output: default@src_multi1_n6 +POSTHOOK: Lineage: src_multi1_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted src_multi1_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@src_multi1 -POSTHOOK: query: describe formatted src_multi1 +PREHOOK: Input: default@src_multi1_n6 +POSTHOOK: query: describe formatted src_multi1_n6 POSTHOOK: type: DESCTABLE 
-POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n6 # col_name data_type comment key string default value string default @@ -98,26 +98,26 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table nzhang_part14 +PREHOOK: query: drop table nzhang_part14_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table nzhang_part14 +POSTHOOK: query: drop table nzhang_part14_n2 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table if not exists nzhang_part14 (key string, value string) +PREHOOK: query: create table if not exists nzhang_part14_n2 (key string, value string) partitioned by (ds string, hr string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@nzhang_part14 -POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string) +PREHOOK: Output: default@nzhang_part14_n2 +POSTHOOK: query: create table if not exists nzhang_part14_n2 (key string, value string) partitioned by (ds string, hr string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@nzhang_part14 -PREHOOK: query: describe formatted nzhang_part14 +POSTHOOK: Output: default@nzhang_part14_n2 +PREHOOK: query: describe formatted nzhang_part14_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part14 -POSTHOOK: query: describe formatted nzhang_part14 +PREHOOK: Input: default@nzhang_part14_n2 +POSTHOOK: query: describe formatted nzhang_part14_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14_n2 # col_name data_type comment key string value string @@ -153,7 +153,7 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: insert into table nzhang_part14 partition(ds, hr) +PREHOOK: query: insert into table nzhang_part14_n2 partition(ds, hr) select key, value, ds, hr from ( select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a union all @@ -163,8 +163,8 @@ select key, value, ds, hr from ( ) T PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@nzhang_part14 -POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr) +PREHOOK: Output: default@nzhang_part14_n2 +POSTHOOK: query: insert into table nzhang_part14_n2 partition(ds, hr) select key, value, ds, hr from ( select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a union all @@ -174,21 +174,21 @@ select key, value, ds, hr from ( ) T POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2 -POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3 -POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1 -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION [] -PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +POSTHOOK: Output: default@nzhang_part14_n2@ds=1/hr=2 +POSTHOOK: Output: default@nzhang_part14_n2@ds=1/hr=3 +POSTHOOK: Output: default@nzhang_part14_n2@ds=2/hr=1 +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=1,hr=2).key EXPRESSION [] 
+POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=1,hr=2).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=1,hr=3).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=1,hr=3).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=2,hr=1).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=2,hr=1).value EXPRESSION [] +PREHOOK: query: desc formatted nzhang_part14_n2 partition(ds='1', hr='3') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part14 -POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: Input: default@nzhang_part14_n2 +POSTHOOK: query: desc formatted nzhang_part14_n2 partition(ds='1', hr='3') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14_n2 # col_name data_type comment key string value string @@ -201,7 +201,7 @@ hr string # Detailed Partition Information Partition Value: [1, 3] Database: default -Table: nzhang_part14 +Table: nzhang_part14_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -221,26 +221,26 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value +PREHOOK: query: analyze table nzhang_part14_n2 partition(ds='1', hr='3') compute statistics for columns value PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@nzhang_part14 -PREHOOK: Input: default@nzhang_part14@ds=1/hr=3 -PREHOOK: Output: default@nzhang_part14 -PREHOOK: Output: default@nzhang_part14@ds=1/hr=3 +PREHOOK: Input: default@nzhang_part14_n2 +PREHOOK: Input: default@nzhang_part14_n2@ds=1/hr=3 +PREHOOK: Output: default@nzhang_part14_n2 +PREHOOK: Output: default@nzhang_part14_n2@ds=1/hr=3 #### A masked pattern was here #### -POSTHOOK: query: analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value +POSTHOOK: query: analyze table nzhang_part14_n2 partition(ds='1', hr='3') compute statistics for columns value POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@nzhang_part14 -POSTHOOK: Input: default@nzhang_part14@ds=1/hr=3 -POSTHOOK: Output: default@nzhang_part14 -POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3 +POSTHOOK: Input: default@nzhang_part14_n2 +POSTHOOK: Input: default@nzhang_part14_n2@ds=1/hr=3 +POSTHOOK: Output: default@nzhang_part14_n2 +POSTHOOK: Output: default@nzhang_part14_n2@ds=1/hr=3 #### A masked pattern was here #### -PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: query: desc formatted nzhang_part14_n2 partition(ds='1', hr='3') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part14 -POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: Input: default@nzhang_part14_n2 +POSTHOOK: query: desc formatted nzhang_part14_n2 partition(ds='1', hr='3') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14_n2 # col_name data_type comment key string value string @@ -253,7 +253,7 @@ hr string # Detailed Partition Information Partition Value: [1, 3] Database: default -Table: nzhang_part14 +Table: nzhang_part14_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"value\":\"true\"}} @@ -273,12 +273,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted 
nzhang_part14 partition(ds='2', hr='1') +PREHOOK: query: desc formatted nzhang_part14_n2 partition(ds='2', hr='1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part14 -POSTHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1') +PREHOOK: Input: default@nzhang_part14_n2 +POSTHOOK: query: desc formatted nzhang_part14_n2 partition(ds='2', hr='1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14_n2 # col_name data_type comment key string value string @@ -291,7 +291,7 @@ hr string # Detailed Partition Information Partition Value: [2, 1] Database: default -Table: nzhang_part14 +Table: nzhang_part14_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -311,7 +311,7 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: insert into table nzhang_part14 partition(ds, hr) +PREHOOK: query: insert into table nzhang_part14_n2 partition(ds, hr) select key, value, ds, hr from ( select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a union all @@ -321,8 +321,8 @@ select key, value, ds, hr from ( ) T PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@nzhang_part14 -POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr) +PREHOOK: Output: default@nzhang_part14_n2 +POSTHOOK: query: insert into table nzhang_part14_n2 partition(ds, hr) select key, value, ds, hr from ( select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a union all @@ -332,21 +332,21 @@ select key, value, ds, hr from ( ) T POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2 -POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3 -POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1 -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION [] -PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +POSTHOOK: Output: default@nzhang_part14_n2@ds=1/hr=2 +POSTHOOK: Output: default@nzhang_part14_n2@ds=1/hr=3 +POSTHOOK: Output: default@nzhang_part14_n2@ds=2/hr=1 +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=1,hr=2).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=1,hr=2).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=1,hr=3).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=1,hr=3).value EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=2,hr=1).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n2 PARTITION(ds=2,hr=1).value EXPRESSION [] +PREHOOK: query: desc formatted nzhang_part14_n2 partition(ds='1', hr='3') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part14 -POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3') +PREHOOK: Input: default@nzhang_part14_n2 +POSTHOOK: query: desc formatted nzhang_part14_n2 partition(ds='1', hr='3') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14_n2 # col_name data_type comment key string value string @@ 
-359,7 +359,7 @@ hr string # Detailed Partition Information Partition Value: [1, 3] Database: default -Table: nzhang_part14 +Table: nzhang_part14_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"value\":\"true\"}} @@ -379,12 +379,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1') +PREHOOK: query: desc formatted nzhang_part14_n2 partition(ds='2', hr='1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part14 -POSTHOOK: query: desc formatted nzhang_part14 partition(ds='2', hr='1') +PREHOOK: Input: default@nzhang_part14_n2 +POSTHOOK: query: desc formatted nzhang_part14_n2 partition(ds='2', hr='1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14_n2 # col_name data_type comment key string value string @@ -397,7 +397,7 @@ hr string # Detailed Partition Information Partition Value: [2, 1] Database: default -Table: nzhang_part14 +Table: nzhang_part14_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out index 6b29dad85f..5b5b45fa14 100644 --- a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE partitioned1_n1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@partitioned1 -POSTHOOK: query: CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE +PREHOOK: Output: default@partitioned1_n1 +POSTHOOK: query: CREATE TABLE partitioned1_n1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@partitioned1 -PREHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') +POSTHOOK: Output: default@partitioned1_n1 +PREHOOK: query: explain insert into table partitioned1_n1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') PREHOOK: type: QUERY -POSTHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') +POSTHOOK: query: explain insert into table partitioned1_n1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -47,7 +47,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Select Operator expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int) outputColumnNames: a, b, part @@ -102,7 +102,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: 
default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-2 Stats Work @@ -110,7 +110,7 @@ STAGE PLANS: Column Stats Desc: Columns: a, b Column Types: int, string - Table: default.partitioned1 + Table: default.partitioned1_n1 Stage: Stage-3 Map Reduce @@ -122,7 +122,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-5 Map Reduce @@ -134,7 +134,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-6 Move Operator @@ -142,23 +142,23 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') +PREHOOK: query: insert into table partitioned1_n1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@partitioned1@part=1 -POSTHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') +PREHOOK: Output: default@partitioned1_n1@part=1 +POSTHOOK: query: insert into table partitioned1_n1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@partitioned1@part=1 -POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SCRIPT [] +POSTHOOK: Output: default@partitioned1_n1@part=1 +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=1).a SCRIPT [] +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=1).b SCRIPT [] col1 col2 -PREHOOK: query: desc formatted partitioned1 partition(part=1) +PREHOOK: query: desc formatted partitioned1_n1 partition(part=1) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@partitioned1 -POSTHOOK: query: desc formatted partitioned1 partition(part=1) +PREHOOK: Input: default@partitioned1_n1 +POSTHOOK: query: desc formatted partitioned1_n1 partition(part=1) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@partitioned1 +POSTHOOK: Input: default@partitioned1_n1 col_name data_type comment # col_name data_type comment a int @@ -171,7 +171,7 @@ part int # Detailed Partition Information Partition Value: [1] Database: default -Table: partitioned1 +Table: partitioned1_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}} @@ -191,12 +191,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted partitioned1 partition(part=1) a +PREHOOK: query: desc formatted partitioned1_n1 partition(part=1) a PREHOOK: type: DESCTABLE -PREHOOK: Input: default@partitioned1 -POSTHOOK: query: desc formatted partitioned1 partition(part=1) a +PREHOOK: Input: default@partitioned1_n1 +POSTHOOK: query: desc formatted partitioned1_n1 partition(part=1) a POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@partitioned1 +POSTHOOK: Input: default@partitioned1_n1 col_name data_type 
min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses bitvector comment col_name a data_type int @@ -210,20 +210,20 @@ num_trues num_falses bitVector HL comment from deserializer -PREHOOK: query: alter table partitioned1 add columns(c int, d string) +PREHOOK: query: alter table partitioned1_n1 add columns(c int, d string) PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@partitioned1 -PREHOOK: Output: default@partitioned1 -POSTHOOK: query: alter table partitioned1 add columns(c int, d string) +PREHOOK: Input: default@partitioned1_n1 +PREHOOK: Output: default@partitioned1_n1 +POSTHOOK: query: alter table partitioned1_n1 add columns(c int, d string) POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@partitioned1 -POSTHOOK: Output: default@partitioned1 -PREHOOK: query: desc formatted partitioned1 partition(part=1) +POSTHOOK: Input: default@partitioned1_n1 +POSTHOOK: Output: default@partitioned1_n1 +PREHOOK: query: desc formatted partitioned1_n1 partition(part=1) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@partitioned1 -POSTHOOK: query: desc formatted partitioned1 partition(part=1) +PREHOOK: Input: default@partitioned1_n1 +POSTHOOK: query: desc formatted partitioned1_n1 partition(part=1) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@partitioned1 +POSTHOOK: Input: default@partitioned1_n1 col_name data_type comment # col_name data_type comment a int @@ -236,7 +236,7 @@ part int # Detailed Partition Information Partition Value: [1] Database: default -Table: partitioned1 +Table: partitioned1_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}} @@ -256,9 +256,9 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') +PREHOOK: query: explain insert into table partitioned1_n1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') PREHOOK: type: QUERY -POSTHOOK: query: explain insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') +POSTHOOK: query: explain insert into table partitioned1_n1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -297,7 +297,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), UDFToInteger('2') (type: int) outputColumnNames: a, b, c, d, part @@ -352,7 +352,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-2 Stats Work @@ -360,7 +360,7 @@ STAGE PLANS: Column Stats Desc: Columns: a, b, c, d Column Types: int, string, int, string - Table: default.partitioned1 + Table: default.partitioned1_n1 
Stage: Stage-3 Map Reduce @@ -372,7 +372,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-5 Map Reduce @@ -384,7 +384,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-6 Move Operator @@ -392,25 +392,25 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') +PREHOOK: query: insert into table partitioned1_n1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@partitioned1@part=2 -POSTHOOK: query: insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') +PREHOOK: Output: default@partitioned1_n1@part=2 +POSTHOOK: query: insert into table partitioned1_n1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@partitioned1@part=2 -POSTHOOK: Lineage: partitioned1 PARTITION(part=2).a SCRIPT [] -POSTHOOK: Lineage: partitioned1 PARTITION(part=2).b SCRIPT [] -POSTHOOK: Lineage: partitioned1 PARTITION(part=2).c SCRIPT [] -POSTHOOK: Lineage: partitioned1 PARTITION(part=2).d SCRIPT [] +POSTHOOK: Output: default@partitioned1_n1@part=2 +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=2).a SCRIPT [] +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=2).b SCRIPT [] +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=2).c SCRIPT [] +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=2).d SCRIPT [] col1 col2 col3 col4 -PREHOOK: query: desc formatted partitioned1 partition(part=2) +PREHOOK: query: desc formatted partitioned1_n1 partition(part=2) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@partitioned1 -POSTHOOK: query: desc formatted partitioned1 partition(part=2) +PREHOOK: Input: default@partitioned1_n1 +POSTHOOK: query: desc formatted partitioned1_n1 partition(part=2) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@partitioned1 +POSTHOOK: Input: default@partitioned1_n1 col_name data_type comment # col_name data_type comment a int @@ -425,7 +425,7 @@ part int # Detailed Partition Information Partition Value: [2] Database: default -Table: partitioned1 +Table: partitioned1_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}} @@ -445,12 +445,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted partitioned1 partition(part=2) c +PREHOOK: query: desc formatted partitioned1_n1 partition(part=2) c PREHOOK: type: DESCTABLE -PREHOOK: Input: default@partitioned1 -POSTHOOK: query: desc formatted partitioned1 partition(part=2) c +PREHOOK: Input: default@partitioned1_n1 
+POSTHOOK: query: desc formatted partitioned1_n1 partition(part=2) c POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@partitioned1 +POSTHOOK: Input: default@partitioned1_n1 col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses bitvector comment col_name c data_type int @@ -464,9 +464,9 @@ num_trues num_falses bitVector HL comment from deserializer -PREHOOK: query: explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') +PREHOOK: query: explain insert into table partitioned1_n1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') PREHOOK: type: QUERY -POSTHOOK: query: explain insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') +POSTHOOK: query: explain insert into table partitioned1_n1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -505,7 +505,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), UDFToInteger('1') (type: int) outputColumnNames: a, b, c, d, part @@ -560,7 +560,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-2 Stats Work @@ -568,7 +568,7 @@ STAGE PLANS: Column Stats Desc: Columns: a, b, c, d Column Types: int, string, int, string - Table: default.partitioned1 + Table: default.partitioned1_n1 Stage: Stage-3 Map Reduce @@ -580,7 +580,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-5 Map Reduce @@ -592,7 +592,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.partitioned1 + name: default.partitioned1_n1 Stage: Stage-6 Move Operator @@ -600,25 +600,25 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') +PREHOOK: query: insert into table partitioned1_n1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@partitioned1@part=1 -POSTHOOK: query: insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') +PREHOOK: Output: default@partitioned1_n1@part=1 +POSTHOOK: query: insert into table partitioned1_n1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@partitioned1@part=1 -POSTHOOK: Lineage: partitioned1 
PARTITION(part=1).a SCRIPT [] -POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SCRIPT [] -POSTHOOK: Lineage: partitioned1 PARTITION(part=1).c SCRIPT [] -POSTHOOK: Lineage: partitioned1 PARTITION(part=1).d SCRIPT [] +POSTHOOK: Output: default@partitioned1_n1@part=1 +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=1).a SCRIPT [] +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=1).b SCRIPT [] +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=1).c SCRIPT [] +POSTHOOK: Lineage: partitioned1_n1 PARTITION(part=1).d SCRIPT [] col1 col2 col3 col4 -PREHOOK: query: desc formatted partitioned1 partition(part=1) +PREHOOK: query: desc formatted partitioned1_n1 partition(part=1) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@partitioned1 -POSTHOOK: query: desc formatted partitioned1 partition(part=1) +PREHOOK: Input: default@partitioned1_n1 +POSTHOOK: query: desc formatted partitioned1_n1 partition(part=1) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@partitioned1 +POSTHOOK: Input: default@partitioned1_n1 col_name data_type comment # col_name data_type comment a int @@ -631,7 +631,7 @@ part int # Detailed Partition Information Partition Value: [1] Database: default -Table: partitioned1 +Table: partitioned1_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}} @@ -651,12 +651,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted partitioned1 partition(part=1) a +PREHOOK: query: desc formatted partitioned1_n1 partition(part=1) a PREHOOK: type: DESCTABLE -PREHOOK: Input: default@partitioned1 -POSTHOOK: query: desc formatted partitioned1 partition(part=1) a +PREHOOK: Input: default@partitioned1_n1 +POSTHOOK: query: desc formatted partitioned1_n1 partition(part=1) a POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@partitioned1 +POSTHOOK: Input: default@partitioned1_n1 col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses bitvector comment col_name a data_type int @@ -670,12 +670,12 @@ num_trues num_falses bitVector HL comment from deserializer -PREHOOK: query: desc formatted partitioned1 partition(part=1) c +PREHOOK: query: desc formatted partitioned1_n1 partition(part=1) c PREHOOK: type: DESCTABLE -PREHOOK: Input: default@partitioned1 -POSTHOOK: query: desc formatted partitioned1 partition(part=1) c +PREHOOK: Input: default@partitioned1_n1 +POSTHOOK: query: desc formatted partitioned1_n1 partition(part=1) c POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@partitioned1 +POSTHOOK: Input: default@partitioned1_n1 col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses bitvector comment # col_name data_type comment c int from deserializer diff --git a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out index 7f7b7a4cd1..8c07d61390 100644 --- a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2_n5(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_g2 -POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_g2_n5 
+POSTHOOK: query: CREATE TABLE dest_g2_n5(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_g2 +POSTHOOK: Output: default@dest_g2_n5 PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src @@ -17,10 +17,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@src_temp PREHOOK: query: explain FROM src_temp -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n5 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: explain FROM src_temp -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n5 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -90,7 +90,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_g2 + name: default.dest_g2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) outputColumnNames: key, c1, c2 @@ -110,7 +110,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_g2 + name: default.dest_g2_n5 Stage: Stage-3 Stats Work @@ -118,7 +118,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2 Column Types: string, int, string - Table: default.dest_g2 + Table: default.dest_g2_n5 Stage: Stage-4 Map Reduce @@ -167,25 +167,25 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src_temp -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n5 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src_temp -PREHOOK: Output: default@dest_g2 +PREHOOK: Output: default@dest_g2_n5 POSTHOOK: query: FROM src_temp -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n5 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src_temp -POSTHOOK: Output: default@dest_g2 -POSTHOOK: Lineage: 
dest_g2.c1 EXPRESSION [(src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ] -POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), (src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ] -POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: Output: default@dest_g2_n5 +POSTHOOK: Lineage: dest_g2_n5.c1 EXPRESSION [(src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest_g2_n5.c2 EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), (src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest_g2_n5.key EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT dest_g2_n5.* FROM dest_g2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_g2 +PREHOOK: Input: default@dest_g2_n5 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: query: SELECT dest_g2_n5.* FROM dest_g2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_g2 +POSTHOOK: Input: default@dest_g2_n5 #### A masked pattern was here #### 0 1 00.0 1 71 116414.0 @@ -197,14 +197,14 @@ POSTHOOK: Input: default@dest_g2 7 6 7735.0 8 8 8762.0 9 7 91047.0 -PREHOOK: query: DROP TABLE dest_g2 +PREHOOK: query: DROP TABLE dest_g2_n5 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest_g2 -PREHOOK: Output: default@dest_g2 -POSTHOOK: query: DROP TABLE dest_g2 +PREHOOK: Input: default@dest_g2_n5 +PREHOOK: Output: default@dest_g2_n5 +POSTHOOK: query: DROP TABLE dest_g2_n5 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest_g2 -POSTHOOK: Output: default@dest_g2 +POSTHOOK: Input: default@dest_g2_n5 +POSTHOOK: Output: default@dest_g2_n5 PREHOOK: query: DROP TABLE src_temp PREHOOK: type: DROPTABLE PREHOOK: Input: default@src_temp diff --git a/ql/src/test/results/clientpositive/autoColumnStats_9.q.out b/ql/src/test/results/clientpositive/autoColumnStats_9.q.out index a2554f1740..898598fc65 100644 --- a/ql/src/test/results/clientpositive/autoColumnStats_9.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_9.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n23(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n23 +POSTHOOK: query: CREATE TABLE dest_j1_n23(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n23 PREHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n23 SELECT src1.key, src2.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n23 SELECT src1.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -79,7 +79,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n23 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -135,7 +135,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n23 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -162,7 +162,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n23 Stage: Stage-2 Stats Work @@ -170,7 +170,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest_j1 + Table: default.dest_j1_n23 Stage: Stage-3 Map Reduce @@ -196,23 +196,23 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n23 SELECT src1.key, src2.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_j1 +PREHOOK: Output: default@dest_j1_n23 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n23 SELECT src1.key, src2.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc formatted dest_j1 +POSTHOOK: Output: default@dest_j1_n23 +POSTHOOK: Lineage: dest_j1_n23.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n23.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted dest_j1_n23 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@dest_j1 -POSTHOOK: query: desc formatted dest_j1 +PREHOOK: Input: default@dest_j1_n23 +POSTHOOK: query: desc formatted dest_j1_n23 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n23 # col_name data_type comment key int value string @@ -242,12 +242,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted dest_j1 key +PREHOOK: query: desc formatted dest_j1_n23 key PREHOOK: type: DESCTABLE -PREHOOK: Input: default@dest_j1 -POSTHOOK: query: desc formatted dest_j1 key +PREHOOK: Input: default@dest_j1_n23 +POSTHOOK: query: desc formatted dest_j1_n23 key POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n23 col_name key data_type int min 0 @@ -261,12 +261,12 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} -PREHOOK: query: desc formatted dest_j1 value +PREHOOK: query: desc formatted dest_j1_n23 value PREHOOK: type: DESCTABLE -PREHOOK: Input: default@dest_j1 -POSTHOOK: 
query: desc formatted dest_j1 value +PREHOOK: Input: default@dest_j1_n23 +POSTHOOK: query: desc formatted dest_j1_n23 value POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n23 col_name value data_type string min diff --git a/ql/src/test/results/clientpositive/auto_join1.q.out b/ql/src/test/results/clientpositive/auto_join1.q.out index 347e9f8871..2a11bc0421 100644 --- a/ql/src/test/results/clientpositive/auto_join1.q.out +++ b/ql/src/test/results/clientpositive/auto_join1.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n3(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n3 +POSTHOOK: query: CREATE TABLE dest_j1_n3(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n3 PREHOOK: query: explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n3 SELECT src1.key, src2.value PREHOOK: type: QUERY POSTHOOK: query: explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n3 SELECT src1.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -77,7 +77,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n3 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -104,7 +104,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n3 Stage: Stage-2 Stats Work @@ -112,7 +112,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest_j1 + Table: default.dest_j1_n3 Stage: Stage-3 Map Reduce @@ -138,23 +138,23 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n3 SELECT src1.key, src2.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_j1 +PREHOOK: Output: default@dest_j1_n3 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n3 SELECT src1.key, src2.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +POSTHOOK: Output: default@dest_j1_n3 +POSTHOOK: Lineage: dest_j1_n3.key EXPRESSION 
[(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n3.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest_j1_n3.key,dest_j1_n3.value)) FROM dest_j1_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +POSTHOOK: query: SELECT sum(hash(dest_j1_n3.key,dest_j1_n3.value)) FROM dest_j1_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n3 #### A masked pattern was here #### 101861029915 diff --git a/ql/src/test/results/clientpositive/auto_join14.q.out b/ql/src/test/results/clientpositive/auto_join14.q.out index 9b73c72f89..bcb3da0c3c 100644 --- a/ql/src/test/results/clientpositive/auto_join14.q.out +++ b/ql/src/test/results/clientpositive/auto_join14.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n70(c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n70 +POSTHOOK: query: CREATE TABLE dest1_n70(c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n70 PREHOOK: query: explain FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value +INSERT OVERWRITE TABLE dest1_n70 SELECT src.key, srcpart.value PREHOOK: type: QUERY POSTHOOK: query: explain FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value +INSERT OVERWRITE TABLE dest1_n70 SELECT src.key, srcpart.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -77,7 +77,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n70 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: c1, c2 @@ -104,7 +104,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n70 Stage: Stage-2 Stats Work @@ -112,7 +112,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2 Column Types: int, string - Table: default.dest1 + Table: default.dest1_n70 Stage: Stage-3 Map Reduce @@ -138,29 +138,29 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value +INSERT OVERWRITE TABLE dest1_n70 SELECT src.key, srcpart.value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n70 POSTHOOK: query: FROM src 
JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value +INSERT OVERWRITE TABLE dest1_n70 SELECT src.key, srcpart.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1 +POSTHOOK: Output: default@dest1_n70 +POSTHOOK: Lineage: dest1_n70.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n70.c2 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n70.c1,dest1_n70.c2)) FROM dest1_n70 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n70 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n70.c1,dest1_n70.c2)) FROM dest1_n70 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n70 #### A masked pattern was here #### 404554174174 diff --git a/ql/src/test/results/clientpositive/auto_join17.q.out b/ql/src/test/results/clientpositive/auto_join17.q.out index 743b72c7f4..f5d5abf391 100644 --- a/ql/src/test/results/clientpositive/auto_join17.q.out +++ b/ql/src/test/results/clientpositive/auto_join17.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n37(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n37 +POSTHOOK: query: CREATE TABLE dest1_n37(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n37 PREHOOK: query: explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n37 SELECT src1.*, src2.* PREHOOK: type: QUERY POSTHOOK: query: explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n37 SELECT src1.*, src2.* POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -77,7 +77,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n37 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) outputColumnNames: key1, value1, key2, value2 @@ -104,7 +104,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: 
default.dest1 + name: default.dest1_n37 Stage: Stage-2 Stats Work @@ -112,7 +112,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, value1, key2, value2 Column Types: int, string, int, string - Table: default.dest1 + Table: default.dest1_n37 Stage: Stage-3 Map Reduce @@ -138,25 +138,25 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n37 SELECT src1.*, src2.* PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n37 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n37 SELECT src1.*, src2.* POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1 +POSTHOOK: Output: default@dest1_n37 +POSTHOOK: Lineage: dest1_n37.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n37.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n37.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n37.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n37.key1,dest1_n37.value1,dest1_n37.key2,dest1_n37.value2)) FROM dest1_n37 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n37 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n37.key1,dest1_n37.value1,dest1_n37.key2,dest1_n37.value2)) FROM dest1_n37 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n37 #### A masked pattern was here #### -793937029770 diff --git a/ql/src/test/results/clientpositive/auto_join19.q.out b/ql/src/test/results/clientpositive/auto_join19.q.out index f371cc55e5..bae2cfd88f 100644 --- a/ql/src/test/results/clientpositive/auto_join19.q.out +++ b/ql/src/test/results/clientpositive/auto_join19.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n16(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n16 +POSTHOOK: query: CREATE TABLE dest1_n16(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n16 PREHOOK: query: explain FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE 
TABLE dest1_n16 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') PREHOOK: type: QUERY POSTHOOK: query: explain FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n16 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -79,7 +79,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n16 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -106,7 +106,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n16 Stage: Stage-2 Stats Work @@ -114,7 +114,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n16 Stage: Stage-3 Map Reduce @@ -140,7 +140,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n16 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -149,9 +149,9 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n16 POSTHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n16 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -160,15 +160,15 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: Output: default@dest1_n16 +POSTHOOK: Lineage: dest1_n16.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n16.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n16.key,dest1_n16.value)) FROM dest1_n16 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n16 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n16.key,dest1_n16.value)) 
FROM dest1_n16 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n16 #### A masked pattern was here #### 407444119660 diff --git a/ql/src/test/results/clientpositive/auto_join19_inclause.q.out b/ql/src/test/results/clientpositive/auto_join19_inclause.q.out index f371cc55e5..f816433567 100644 --- a/ql/src/test/results/clientpositive/auto_join19_inclause.q.out +++ b/ql/src/test/results/clientpositive/auto_join19_inclause.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n10(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n10 +POSTHOOK: query: CREATE TABLE dest1_n10(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n10 PREHOOK: query: explain FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n10 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') PREHOOK: type: QUERY POSTHOOK: query: explain FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n10 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -79,7 +79,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n10 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -106,7 +106,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n10 Stage: Stage-2 Stats Work @@ -114,7 +114,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n10 Stage: Stage-3 Map Reduce @@ -140,7 +140,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n10 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -149,9 +149,9 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n10 POSTHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n10 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' 
or src1.hr = '11') POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -160,15 +160,15 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: Output: default@dest1_n10 +POSTHOOK: Lineage: dest1_n10.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n10.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n10.key,dest1_n10.value)) FROM dest1_n10 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n10 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n10.key,dest1_n10.value)) FROM dest1_n10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n10 #### A masked pattern was here #### 407444119660 diff --git a/ql/src/test/results/clientpositive/auto_join24.q.out b/ql/src/test/results/clientpositive/auto_join24.q.out index 96abbf3501..2e6d8c2fba 100644 --- a/ql/src/test/results/clientpositive/auto_join24.q.out +++ b/ql/src/test/results/clientpositive/auto_join24.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: create table tst1(key STRING, cnt INT) +PREHOOK: query: create table tst1_n2(key STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tst1 -POSTHOOK: query: create table tst1(key STRING, cnt INT) +PREHOOK: Output: default@tst1_n2 +POSTHOOK: query: create table tst1_n2(key STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tst1 -PREHOOK: query: INSERT OVERWRITE TABLE tst1 +POSTHOOK: Output: default@tst1_n2 +PREHOOK: query: INSERT OVERWRITE TABLE tst1_n2 SELECT a.key, count(1) FROM src a group by a.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tst1 -POSTHOOK: query: INSERT OVERWRITE TABLE tst1 +PREHOOK: Output: default@tst1_n2 +POSTHOOK: query: INSERT OVERWRITE TABLE tst1_n2 SELECT a.key, count(1) FROM src a group by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tst1 -POSTHOOK: Lineage: tst1.cnt EXPRESSION [(src)a.null, ] -POSTHOOK: Lineage: tst1.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Output: default@tst1_n2 +POSTHOOK: Lineage: tst1_n2.cnt EXPRESSION [(src)a.null, ] +POSTHOOK: Lineage: tst1_n2.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ] PREHOOK: query: explain -SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key +SELECT sum(a.cnt) FROM tst1_n2 a JOIN tst1_n2 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key +SELECT sum(a.cnt) FROM tst1_n2 a JOIN tst1_n2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage @@ -106,12 +106,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key +PREHOOK: query: SELECT sum(a.cnt) 
FROM tst1_n2 a JOIN tst1_n2 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@tst1 +PREHOOK: Input: default@tst1_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key +POSTHOOK: query: SELECT sum(a.cnt) FROM tst1_n2 a JOIN tst1_n2 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tst1 +POSTHOOK: Input: default@tst1_n2 #### A masked pattern was here #### 500 diff --git a/ql/src/test/results/clientpositive/auto_join25.q.out b/ql/src/test/results/clientpositive/auto_join25.q.out index f903bd1312..d16af0059a 100644 --- a/ql/src/test/results/clientpositive/auto_join25.q.out +++ b/ql/src/test/results/clientpositive/auto_join25.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n53(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n53 +POSTHOOK: query: CREATE TABLE dest1_n53(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n53 RUN: Stage-0:DDL PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n53 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -17,12 +17,12 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n53 Hive Runtime Error: Map local work exhausted memory FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask POSTHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n53 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -31,39 +31,39 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@dest1_n53 +POSTHOOK: Lineage: dest1_n53.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n53.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] RUN: Stage-7:CONDITIONAL RUN: Stage-8:MAPREDLOCAL RUN: Stage-1:MAPRED RUN: Stage-0:MOVE RUN: Stage-3:MAPRED RUN: Stage-2:STATS -PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +PREHOOK: query: SELECT sum(hash(dest1_n53.key,dest1_n53.value)) FROM 
dest1_n53 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n53 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n53.key,dest1_n53.value)) FROM dest1_n53 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n53 #### A masked pattern was here #### RUN: Stage-1:MAPRED 407444119660 -PREHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j2_n0(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j2 -POSTHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j2_n0 +POSTHOOK: query: CREATE TABLE dest_j2_n0(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j2 +POSTHOOK: Output: default@dest_j2_n0 RUN: Stage-0:DDL PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest_j2_n0 SELECT src1.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_j2 +PREHOOK: Output: default@dest_j2_n0 Hive Runtime Error: Map local work exhausted memory FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask @@ -71,12 +71,12 @@ Hive Runtime Error: Map local work exhausted memory FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest_j2_n0 SELECT src1.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_j2 -POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@dest_j2_n0 +POSTHOOK: Lineage: dest_j2_n0.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2_n0.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] RUN: Stage-12:CONDITIONAL RUN: Stage-15:MAPREDLOCAL RUN: Stage-1:MAPRED @@ -86,53 +86,53 @@ RUN: Stage-2:MAPRED RUN: Stage-0:MOVE RUN: Stage-4:MAPRED RUN: Stage-3:STATS -PREHOOK: query: SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2 +PREHOOK: query: SELECT sum(hash(dest_j2_n0.key,dest_j2_n0.value)) FROM dest_j2_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j2 +PREHOOK: Input: default@dest_j2_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2 +POSTHOOK: query: SELECT sum(hash(dest_j2_n0.key,dest_j2_n0.value)) FROM dest_j2_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j2 +POSTHOOK: Input: default@dest_j2_n0 #### A masked pattern was here #### RUN: Stage-1:MAPRED 33815990627 -PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: 
CREATE TABLE dest_j1_n5(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n5 +POSTHOOK: query: CREATE TABLE dest_j1_n5(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n5 RUN: Stage-0:DDL PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n5 SELECT src1.key, src2.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_j1 +PREHOOK: Output: default@dest_j1_n5 Hive Runtime Error: Map local work exhausted memory FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n5 SELECT src1.key, src2.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@dest_j1_n5 +POSTHOOK: Lineage: dest_j1_n5.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n5.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] RUN: Stage-7:CONDITIONAL RUN: Stage-8:MAPREDLOCAL RUN: Stage-1:MAPRED RUN: Stage-0:MOVE RUN: Stage-3:MAPRED RUN: Stage-2:STATS -PREHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +PREHOOK: query: SELECT sum(hash(dest_j1_n5.key,dest_j1_n5.value)) FROM dest_j1_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n5 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1 +POSTHOOK: query: SELECT sum(hash(dest_j1_n5.key,dest_j1_n5.value)) FROM dest_j1_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n5 #### A masked pattern was here #### RUN: Stage-1:MAPRED 101861029915 diff --git a/ql/src/test/results/clientpositive/auto_join3.q.out b/ql/src/test/results/clientpositive/auto_join3.q.out index df80648d42..b8e5cfc76d 100644 --- a/ql/src/test/results/clientpositive/auto_join3.q.out +++ b/ql/src/test/results/clientpositive/auto_join3.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n116(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n116 +POSTHOOK: query: CREATE TABLE dest1_n116(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n116 PREHOOK: query: explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE 
dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n116 SELECT src1.key, src3.value PREHOOK: type: QUERY POSTHOOK: query: explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n116 SELECT src1.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-7 is a root stage @@ -99,7 +99,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n116 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -126,7 +126,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n116 Stage: Stage-2 Stats Work @@ -134,7 +134,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n116 Stage: Stage-3 Map Reduce @@ -160,23 +160,23 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n116 SELECT src1.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n116 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n116 SELECT src1.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: Output: default@dest1_n116 +POSTHOOK: Lineage: dest1_n116.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n116.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n116.key,dest1_n116.value)) FROM dest1_n116 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n116 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n116.key,dest1_n116.value)) FROM dest1_n116 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n116 #### A masked pattern was here #### 344360994461 diff --git a/ql/src/test/results/clientpositive/auto_join4.q.out b/ql/src/test/results/clientpositive/auto_join4.q.out index d2cd903934..c5bf120706 100644 --- a/ql/src/test/results/clientpositive/auto_join4.q.out +++ b/ql/src/test/results/clientpositive/auto_join4.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n97(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: 
CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n97 +POSTHOOK: query: CREATE TABLE dest1_n97(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n97 PREHOOK: query: explain FROM ( FROM @@ -19,7 +19,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n97 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( @@ -34,7 +34,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n97 SELECT c.c1, c.c2, c.c3, c.c4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -99,7 +99,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n97 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) outputColumnNames: c1, c2, c3, c4 @@ -126,7 +126,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n97 Stage: Stage-2 Stats Work @@ -134,7 +134,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4 Column Types: int, string, int, string - Table: default.dest1 + Table: default.dest1_n97 Stage: Stage-3 Map Reduce @@ -171,10 +171,10 @@ PREHOOK: query: FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n97 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n97 POSTHOOK: query: FROM ( FROM ( @@ -187,20 +187,20 @@ POSTHOOK: query: FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n97 SELECT c.c1, c.c2, c.c3, c.c4 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: Output: default@dest1_n97 +POSTHOOK: Lineage: dest1_n97.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n97.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n97.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: 
dest1_n97.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n97.c1,dest1_n97.c2,dest1_n97.c3,dest1_n97.c4)) FROM dest1_n97 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n97 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n97.c1,dest1_n97.c2,dest1_n97.c3,dest1_n97.c4)) FROM dest1_n97 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n97 #### A masked pattern was here #### 5079148035 diff --git a/ql/src/test/results/clientpositive/auto_join5.q.out b/ql/src/test/results/clientpositive/auto_join5.q.out index 2ba4f4d0ae..edbf4dc9df 100644 --- a/ql/src/test/results/clientpositive/auto_join5.q.out +++ b/ql/src/test/results/clientpositive/auto_join5.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n55(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n55 +POSTHOOK: query: CREATE TABLE dest1_n55(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n55 PREHOOK: query: explain FROM ( FROM @@ -19,7 +19,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n55 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( @@ -34,7 +34,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n55 SELECT c.c1, c.c2, c.c3, c.c4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -99,7 +99,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n55 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) outputColumnNames: c1, c2, c3, c4 @@ -126,7 +126,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n55 Stage: Stage-2 Stats Work @@ -134,7 +134,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4 Column Types: int, string, int, string - Table: default.dest1 + Table: default.dest1_n55 Stage: Stage-3 Map Reduce @@ -171,10 +171,10 @@ PREHOOK: query: FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n55 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n55 POSTHOOK: query: FROM ( FROM ( @@ -187,20 +187,20 @@ POSTHOOK: query: FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, 
b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n55 SELECT c.c1, c.c2, c.c3, c.c4 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: Output: default@dest1_n55 +POSTHOOK: Lineage: dest1_n55.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n55.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n55.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n55.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n55.c1,dest1_n55.c2,dest1_n55.c3,dest1_n55.c4)) FROM dest1_n55 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n55 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n55.c1,dest1_n55.c2,dest1_n55.c3,dest1_n55.c4)) FROM dest1_n55 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n55 #### A masked pattern was here #### 9766083196 diff --git a/ql/src/test/results/clientpositive/auto_join6.q.out b/ql/src/test/results/clientpositive/auto_join6.q.out index 4f3245ad71..bc52a75ec0 100644 --- a/ql/src/test/results/clientpositive/auto_join6.q.out +++ b/ql/src/test/results/clientpositive/auto_join6.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n8(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n8 +POSTHOOK: query: CREATE TABLE dest1_n8(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n8 PREHOOK: query: explain FROM ( FROM @@ -19,7 +19,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n8 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( @@ -34,7 +34,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n8 SELECT c.c1, c.c2, c.c3, c.c4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -98,7 +98,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n8 
Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) outputColumnNames: c1, c2, c3, c4 @@ -123,7 +123,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n8 Stage: Stage-2 Stats Work @@ -131,7 +131,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4 Column Types: int, string, int, string - Table: default.dest1 + Table: default.dest1_n8 Stage: Stage-3 Map Reduce @@ -168,10 +168,10 @@ PREHOOK: query: FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n8 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n8 POSTHOOK: query: FROM ( FROM ( @@ -184,20 +184,20 @@ POSTHOOK: query: FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n8 SELECT c.c1, c.c2, c.c3, c.c4 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: Output: default@dest1_n8 +POSTHOOK: Lineage: dest1_n8.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n8.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n8.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n8.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n8.c1,dest1_n8.c2,dest1_n8.c3,dest1_n8.c4)) FROM dest1_n8 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n8 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n8.c1,dest1_n8.c2,dest1_n8.c3,dest1_n8.c4)) FROM dest1_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n8 #### A masked pattern was here #### 2607643291 diff --git a/ql/src/test/results/clientpositive/auto_join7.q.out b/ql/src/test/results/clientpositive/auto_join7.q.out index 7fdd894e83..8e41bef01b 100644 --- a/ql/src/test/results/clientpositive/auto_join7.q.out +++ b/ql/src/test/results/clientpositive/auto_join7.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n123(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 
STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n123 +POSTHOOK: query: CREATE TABLE dest1_n123(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n123 PREHOOK: query: explain FROM ( FROM @@ -24,7 +24,7 @@ FROM ( ON (a.c1 = c.c5) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 +INSERT OVERWRITE TABLE dest1_n123 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( @@ -44,7 +44,7 @@ FROM ( ON (a.c1 = c.c5) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 +INSERT OVERWRITE TABLE dest1_n123 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -126,7 +126,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n123 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: int), _col5 (type: string) outputColumnNames: c1, c2, c3, c4, c5, c6 @@ -151,7 +151,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n123 Stage: Stage-2 Stats Work @@ -159,7 +159,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6 Column Types: int, string, int, string, int, string - Table: default.dest1 + Table: default.dest1_n123 Stage: Stage-3 Map Reduce @@ -201,10 +201,10 @@ PREHOOK: query: FROM ( ON (a.c1 = c.c5) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 +INSERT OVERWRITE TABLE dest1_n123 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n123 POSTHOOK: query: FROM ( FROM ( @@ -222,22 +222,22 @@ POSTHOOK: query: FROM ( ON (a.c1 = c.c5) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 +INSERT OVERWRITE TABLE dest1_n123 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src3.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c6 SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4,dest1.c5,dest1.c6)) FROM dest1 +POSTHOOK: 
Output: default@dest1_n123 +POSTHOOK: Lineage: dest1_n123.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n123.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n123.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n123.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n123.c5 EXPRESSION [(src)src3.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n123.c6 SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n123.c1,dest1_n123.c2,dest1_n123.c3,dest1_n123.c4,dest1_n123.c5,dest1_n123.c6)) FROM dest1_n123 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n123 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4,dest1.c5,dest1.c6)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n123.c1,dest1_n123.c2,dest1_n123.c3,dest1_n123.c4,dest1_n123.c5,dest1_n123.c6)) FROM dest1_n123 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n123 #### A masked pattern was here #### -2315698213 diff --git a/ql/src/test/results/clientpositive/auto_join8.q.out b/ql/src/test/results/clientpositive/auto_join8.q.out index c3b856be71..b959822ed4 100644 --- a/ql/src/test/results/clientpositive/auto_join8.q.out +++ b/ql/src/test/results/clientpositive/auto_join8.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n3(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n3 +POSTHOOK: query: CREATE TABLE dest1_n3(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n3 PREHOOK: query: explain FROM ( FROM @@ -19,7 +19,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL +INSERT OVERWRITE TABLE dest1_n3 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( @@ -34,7 +34,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL +INSERT OVERWRITE TABLE dest1_n3 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -102,7 +102,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n3 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) outputColumnNames: c1, c2, c3, c4 @@ -129,7 +129,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n3 Stage: Stage-2 Stats Work @@ -137,7 +137,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4 Column Types: int, string, int, string - Table: default.dest1 + Table: default.dest1_n3 Stage: Stage-3 Map Reduce @@ -174,10 +174,10 @@ PREHOOK: query: FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL +INSERT OVERWRITE TABLE dest1_n3 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n3 POSTHOOK: query: FROM ( FROM ( @@ -190,20 +190,20 @@ POSTHOOK: query: FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL +INSERT OVERWRITE TABLE dest1_n3 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [] -POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: Output: default@dest1_n3 +POSTHOOK: Lineage: dest1_n3.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n3.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n3.c3 EXPRESSION [] +POSTHOOK: Lineage: dest1_n3.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n3.c1,dest1_n3.c2,dest1_n3.c3,dest1_n3.c4)) FROM dest1_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n3.c1,dest1_n3.c2,dest1_n3.c3,dest1_n3.c4)) FROM dest1_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n3 #### A masked pattern was here #### -7158439905 diff --git a/ql/src/test/results/clientpositive/auto_join9.q.out b/ql/src/test/results/clientpositive/auto_join9.q.out index c2f315a6e4..242cddf685 100644 --- a/ql/src/test/results/clientpositive/auto_join9.q.out +++ b/ql/src/test/results/clientpositive/auto_join9.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n118(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n118 +POSTHOOK: query: CREATE TABLE dest1_n118(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n118 PREHOOK: query: explain FROM srcpart 
src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12' +INSERT OVERWRITE TABLE dest1_n118 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12' PREHOOK: type: QUERY POSTHOOK: query: explain FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12' +INSERT OVERWRITE TABLE dest1_n118 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -77,7 +77,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n118 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -104,7 +104,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n118 Stage: Stage-2 Stats Work @@ -112,7 +112,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n118 Stage: Stage-3 Map Reduce @@ -138,27 +138,27 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12' +INSERT OVERWRITE TABLE dest1_n118 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12' PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n118 POSTHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12' +INSERT OVERWRITE TABLE dest1_n118 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12' POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: Output: default@dest1_n118 +POSTHOOK: Lineage: dest1_n118.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n118.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n118.key,dest1_n118.value)) FROM dest1_n118 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n118 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.key,dest1.value)) FROM dest1 +POSTHOOK: query: SELECT sum(hash(dest1_n118.key,dest1_n118.value)) FROM dest1_n118 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n118 #### A masked pattern 
was here #### 101861029915 diff --git a/ql/src/test/results/clientpositive/auto_join_stats.q.out b/ql/src/test/results/clientpositive/auto_join_stats.q.out index 7e115f043f..321c977483 100644 --- a/ql/src/test/results/clientpositive/auto_join_stats.q.out +++ b/ql/src/test/results/clientpositive/auto_join_stats.q.out @@ -1,30 +1,30 @@ -PREHOOK: query: create table smalltable(key string, value string) stored as textfile +PREHOOK: query: create table smalltable_n0(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smalltable -POSTHOOK: query: create table smalltable(key string, value string) stored as textfile +PREHOOK: Output: default@smalltable_n0 +POSTHOOK: query: create table smalltable_n0(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@smalltable -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table smalltable +POSTHOOK: Output: default@smalltable_n0 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table smalltable_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@smalltable -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table smalltable +PREHOOK: Output: default@smalltable_n0 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table smalltable_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@smalltable -PREHOOK: query: analyze table smalltable compute statistics +POSTHOOK: Output: default@smalltable_n0 +PREHOOK: query: analyze table smalltable_n0 compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@smalltable -PREHOOK: Output: default@smalltable -POSTHOOK: query: analyze table smalltable compute statistics +PREHOOK: Input: default@smalltable_n0 +PREHOOK: Output: default@smalltable_n0 +POSTHOOK: query: analyze table smalltable_n0 compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@smalltable -POSTHOOK: Output: default@smalltable -PREHOOK: query: explain select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) +POSTHOOK: Input: default@smalltable_n0 +POSTHOOK: Output: default@smalltable_n0 +PREHOOK: query: explain select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) PREHOOK: type: QUERY -POSTHOOK: query: explain select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) +POSTHOOK: query: explain select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-8 is a root stage , consists of Stage-10, Stage-11, Stage-1 @@ -98,13 +98,13 @@ STAGE PLANS: Stage: Stage-9 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_2:smalltable + $hdt$_2:smalltable_n0 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_2:smalltable + $hdt$_2:smalltable_n0 TableScan - alias: smalltable + alias: smalltable_n0 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -250,45 +250,45 @@ STAGE PLANS: Processor Tree: 
ListSink -PREHOOK: query: select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) +PREHOOK: query: select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) PREHOOK: type: QUERY -PREHOOK: Input: default@smalltable +PREHOOK: Input: default@smalltable_n0 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) +POSTHOOK: query: select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) POSTHOOK: type: QUERY -POSTHOOK: Input: default@smalltable +POSTHOOK: Input: default@smalltable_n0 POSTHOOK: Input: default@src #### A masked pattern was here #### 4 4 8 4 4 8 -PREHOOK: query: create table smalltable2(key string, value string) stored as textfile +PREHOOK: query: create table smalltable2_n0(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smalltable2 -POSTHOOK: query: create table smalltable2(key string, value string) stored as textfile +PREHOOK: Output: default@smalltable2_n0 +POSTHOOK: query: create table smalltable2_n0(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@smalltable2 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table smalltable2 +POSTHOOK: Output: default@smalltable2_n0 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table smalltable2_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@smalltable2 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table smalltable2 +PREHOOK: Output: default@smalltable2_n0 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table smalltable2_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@smalltable2 -PREHOOK: query: analyze table smalltable compute statistics +POSTHOOK: Output: default@smalltable2_n0 +PREHOOK: query: analyze table smalltable_n0 compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@smalltable -PREHOOK: Output: default@smalltable -POSTHOOK: query: analyze table smalltable compute statistics +PREHOOK: Input: default@smalltable_n0 +PREHOOK: Output: default@smalltable_n0 +POSTHOOK: query: analyze table smalltable_n0 compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@smalltable -POSTHOOK: Output: default@smalltable -PREHOOK: query: explain select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) JOIN smalltable2 ON (src1.key + src2.key = smalltable2.key) +POSTHOOK: Input: default@smalltable_n0 +POSTHOOK: Output: default@smalltable_n0 +PREHOOK: query: explain select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) JOIN smalltable2_n0 ON (src1.key + src2.key = smalltable2_n0.key) PREHOOK: type: QUERY -POSTHOOK: query: explain select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = 
src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) JOIN smalltable2 ON (src1.key + src2.key = smalltable2.key) +POSTHOOK: query: explain select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) JOIN smalltable2_n0 ON (src1.key + src2.key = smalltable2_n0.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-9 is a root stage , consists of Stage-11, Stage-12, Stage-1 @@ -362,16 +362,16 @@ STAGE PLANS: Stage: Stage-10 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:smalltable + $hdt$_0:smalltable_n0 Fetch Operator limit: -1 - $hdt$_3:smalltable2 + $hdt$_3:smalltable2_n0 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:smalltable + $hdt$_0:smalltable_n0 TableScan - alias: smalltable + alias: smalltable_n0 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -385,9 +385,9 @@ STAGE PLANS: 0 (UDFToDouble(_col0) + UDFToDouble(_col1)) (type: double) 1 UDFToDouble(_col0) (type: double) 2 UDFToDouble(_col0) (type: double) - $hdt$_3:smalltable2 + $hdt$_3:smalltable2_n0 TableScan - alias: smalltable2 + alias: smalltable2_n0 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -540,16 +540,16 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) JOIN smalltable2 ON (src1.key + src2.key = smalltable2.key) +PREHOOK: query: select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) JOIN smalltable2_n0 ON (src1.key + src2.key = smalltable2_n0.key) PREHOOK: type: QUERY -PREHOOK: Input: default@smalltable -PREHOOK: Input: default@smalltable2 +PREHOOK: Input: default@smalltable2_n0 +PREHOOK: Input: default@smalltable_n0 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select src1.key, src2.key, smalltable.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable ON (src1.key + src2.key = smalltable.key) JOIN smalltable2 ON (src1.key + src2.key = smalltable2.key) +POSTHOOK: query: select src1.key, src2.key, smalltable_n0.key from src src1 JOIN src src2 ON (src1.key = src2.key) JOIN smalltable_n0 ON (src1.key + src2.key = smalltable_n0.key) JOIN smalltable2_n0 ON (src1.key + src2.key = smalltable2_n0.key) POSTHOOK: type: QUERY -POSTHOOK: Input: default@smalltable -POSTHOOK: Input: default@smalltable2 +POSTHOOK: Input: default@smalltable2_n0 +POSTHOOK: Input: default@smalltable_n0 POSTHOOK: Input: default@src #### A masked pattern was here #### 4 4 8 diff --git a/ql/src/test/results/clientpositive/avro_add_column.q.out b/ql/src/test/results/clientpositive/avro_add_column.q.out index 57c7d80283..3427da009f 100644 --- a/ql/src/test/results/clientpositive/avro_add_column.q.out +++ b/ql/src/test/results/clientpositive/avro_add_column.q.out @@ -1,57 +1,57 @@ -PREHOOK: query: CREATE TABLE doctors ( +PREHOOK: query: CREATE TABLE doctors_n0 ( number int, first_name string) STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@doctors -POSTHOOK: query: CREATE TABLE doctors ( +PREHOOK: Output: default@doctors_n0 +POSTHOOK: query: CREATE TABLE 
doctors_n0 ( number int, first_name string) STORED AS AVRO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@doctors -PREHOOK: query: DESCRIBE doctors +POSTHOOK: Output: default@doctors_n0 +PREHOOK: query: DESCRIBE doctors_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@doctors -POSTHOOK: query: DESCRIBE doctors +PREHOOK: Input: default@doctors_n0 +POSTHOOK: query: DESCRIBE doctors_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@doctors +POSTHOOK: Input: default@doctors_n0 number int first_name string -PREHOOK: query: ALTER TABLE doctors ADD COLUMNS (last_name string) +PREHOOK: query: ALTER TABLE doctors_n0 ADD COLUMNS (last_name string) PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@doctors -PREHOOK: Output: default@doctors -POSTHOOK: query: ALTER TABLE doctors ADD COLUMNS (last_name string) +PREHOOK: Input: default@doctors_n0 +PREHOOK: Output: default@doctors_n0 +POSTHOOK: query: ALTER TABLE doctors_n0 ADD COLUMNS (last_name string) POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@doctors -POSTHOOK: Output: default@doctors -PREHOOK: query: DESCRIBE doctors +POSTHOOK: Input: default@doctors_n0 +POSTHOOK: Output: default@doctors_n0 +PREHOOK: query: DESCRIBE doctors_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@doctors -POSTHOOK: query: DESCRIBE doctors +PREHOOK: Input: default@doctors_n0 +POSTHOOK: query: DESCRIBE doctors_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@doctors +POSTHOOK: Input: default@doctors_n0 number int first_name string last_name string -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@doctors -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors +PREHOOK: Output: default@doctors_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@doctors -PREHOOK: query: SELECT * FROM doctors +POSTHOOK: Output: default@doctors_n0 +PREHOOK: query: SELECT * FROM doctors_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@doctors +PREHOOK: Input: default@doctors_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM doctors +POSTHOOK: query: SELECT * FROM doctors_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@doctors +POSTHOOK: Input: default@doctors_n0 #### A masked pattern was here #### 1 William Hartnell 10 David Tennant diff --git a/ql/src/test/results/clientpositive/avro_add_column3.q.out b/ql/src/test/results/clientpositive/avro_add_column3.q.out index aaacb1a068..49beb3d866 100644 --- a/ql/src/test/results/clientpositive/avro_add_column3.q.out +++ b/ql/src/test/results/clientpositive/avro_add_column3.q.out @@ -1,67 +1,67 @@ -PREHOOK: query: CREATE TABLE doctors ( +PREHOOK: query: CREATE TABLE doctors_n3 ( number int, first_name string, last_name string) STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@doctors -POSTHOOK: query: CREATE TABLE doctors ( +PREHOOK: Output: default@doctors_n3 +POSTHOOK: query: CREATE TABLE doctors_n3 ( number int, first_name string, last_name string) STORED AS AVRO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@doctors -PREHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/doctors.avro' INTO TABLE doctors +POSTHOOK: Output: default@doctors_n3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n3 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@doctors -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors +PREHOOK: Output: default@doctors_n3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n3 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@doctors -PREHOOK: query: CREATE TABLE doctors_copy ( +POSTHOOK: Output: default@doctors_n3 +PREHOOK: query: CREATE TABLE doctors_copy_n0 ( number int, first_name string) PARTITIONED BY (part int) STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@doctors_copy -POSTHOOK: query: CREATE TABLE doctors_copy ( +PREHOOK: Output: default@doctors_copy_n0 +POSTHOOK: query: CREATE TABLE doctors_copy_n0 ( number int, first_name string) PARTITIONED BY (part int) STORED AS AVRO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@doctors_copy -PREHOOK: query: INSERT INTO TABLE doctors_copy PARTITION(part=1) SELECT number, first_name FROM doctors +POSTHOOK: Output: default@doctors_copy_n0 +PREHOOK: query: INSERT INTO TABLE doctors_copy_n0 PARTITION(part=1) SELECT number, first_name FROM doctors_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@doctors -PREHOOK: Output: default@doctors_copy@part=1 -POSTHOOK: query: INSERT INTO TABLE doctors_copy PARTITION(part=1) SELECT number, first_name FROM doctors +PREHOOK: Input: default@doctors_n3 +PREHOOK: Output: default@doctors_copy_n0@part=1 +POSTHOOK: query: INSERT INTO TABLE doctors_copy_n0 PARTITION(part=1) SELECT number, first_name FROM doctors_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@doctors -POSTHOOK: Output: default@doctors_copy@part=1 -POSTHOOK: Lineage: doctors_copy PARTITION(part=1).first_name SIMPLE [(doctors)doctors.FieldSchema(name:first_name, type:string, comment:), ] -POSTHOOK: Lineage: doctors_copy PARTITION(part=1).number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:), ] -PREHOOK: query: ALTER TABLE doctors_copy ADD COLUMNS (last_name string) +POSTHOOK: Input: default@doctors_n3 +POSTHOOK: Output: default@doctors_copy_n0@part=1 +POSTHOOK: Lineage: doctors_copy_n0 PARTITION(part=1).first_name SIMPLE [(doctors_n3)doctors_n3.FieldSchema(name:first_name, type:string, comment:), ] +POSTHOOK: Lineage: doctors_copy_n0 PARTITION(part=1).number SIMPLE [(doctors_n3)doctors_n3.FieldSchema(name:number, type:int, comment:), ] +PREHOOK: query: ALTER TABLE doctors_copy_n0 ADD COLUMNS (last_name string) PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@doctors_copy -PREHOOK: Output: default@doctors_copy -POSTHOOK: query: ALTER TABLE doctors_copy ADD COLUMNS (last_name string) +PREHOOK: Input: default@doctors_copy_n0 +PREHOOK: Output: default@doctors_copy_n0 +POSTHOOK: query: ALTER TABLE doctors_copy_n0 ADD COLUMNS (last_name string) POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@doctors_copy -POSTHOOK: Output: default@doctors_copy -PREHOOK: query: DESCRIBE doctors_copy +POSTHOOK: Input: default@doctors_copy_n0 +POSTHOOK: Output: default@doctors_copy_n0 +PREHOOK: query: DESCRIBE doctors_copy_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@doctors_copy -POSTHOOK: query: DESCRIBE doctors_copy +PREHOOK: Input: default@doctors_copy_n0 +POSTHOOK: 
query: DESCRIBE doctors_copy_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@doctors_copy +POSTHOOK: Input: default@doctors_copy_n0 number int first_name string last_name string @@ -70,15 +70,15 @@ part int # Partition Information # col_name data_type comment part int -PREHOOK: query: SELECT * FROM doctors_copy +PREHOOK: query: SELECT * FROM doctors_copy_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@doctors_copy -PREHOOK: Input: default@doctors_copy@part=1 +PREHOOK: Input: default@doctors_copy_n0 +PREHOOK: Input: default@doctors_copy_n0@part=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM doctors_copy +POSTHOOK: query: SELECT * FROM doctors_copy_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@doctors_copy -POSTHOOK: Input: default@doctors_copy@part=1 +POSTHOOK: Input: default@doctors_copy_n0 +POSTHOOK: Input: default@doctors_copy_n0@part=1 #### A masked pattern was here #### 1 William NULL 1 10 David NULL 1 diff --git a/ql/src/test/results/clientpositive/avro_alter_table_update_columns.q.out b/ql/src/test/results/clientpositive/avro_alter_table_update_columns.q.out index 8985f18b0d..7f74f6c141 100644 --- a/ql/src/test/results/clientpositive/avro_alter_table_update_columns.q.out +++ b/ql/src/test/results/clientpositive/avro_alter_table_update_columns.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: CREATE TABLE avro_extschema_literal +PREHOOK: query: CREATE TABLE avro_extschema_literal_n1 STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", @@ -11,8 +11,8 @@ PREHOOK: query: CREATE TABLE avro_extschema_literal ] }') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@avro_extschema_literal -POSTHOOK: query: CREATE TABLE avro_extschema_literal +PREHOOK: Output: default@avro_extschema_literal_n1 +POSTHOOK: query: CREATE TABLE avro_extschema_literal_n1 STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", @@ -25,17 +25,17 @@ POSTHOOK: query: CREATE TABLE avro_extschema_literal ] }') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@avro_extschema_literal -PREHOOK: query: DESCRIBE avro_extschema_literal +POSTHOOK: Output: default@avro_extschema_literal_n1 +PREHOOK: query: DESCRIBE avro_extschema_literal_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_literal -POSTHOOK: query: DESCRIBE avro_extschema_literal +PREHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: query: DESCRIBE avro_extschema_literal_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 number int first_name string last_name string -PREHOOK: query: ALTER TABLE avro_extschema_literal SET +PREHOOK: query: ALTER TABLE avro_extschema_literal_n1 SET TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", "name": "ext_schema", @@ -44,9 +44,9 @@ PREHOOK: query: ALTER TABLE avro_extschema_literal SET { "name":"newCol", "type":"int" } ] }') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@avro_extschema_literal -PREHOOK: Output: default@avro_extschema_literal -POSTHOOK: query: ALTER TABLE avro_extschema_literal SET +PREHOOK: Input: default@avro_extschema_literal_n1 +PREHOOK: Output: default@avro_extschema_literal_n1 +POSTHOOK: query: ALTER TABLE avro_extschema_literal_n1 SET TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", "name": "ext_schema", @@ -55,33 +55,33 @@ POSTHOOK: query: ALTER TABLE avro_extschema_literal SET { 
"name":"newCol", "type":"int" } ] }') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@avro_extschema_literal -POSTHOOK: Output: default@avro_extschema_literal -PREHOOK: query: DESCRIBE avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: Output: default@avro_extschema_literal_n1 +PREHOOK: query: DESCRIBE avro_extschema_literal_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_literal -POSTHOOK: query: DESCRIBE avro_extschema_literal +PREHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: query: DESCRIBE avro_extschema_literal_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 newcol int -PREHOOK: query: ALTER TABLE avro_extschema_literal UNSET TBLPROPERTIES ('avro.schema.literal') +PREHOOK: query: ALTER TABLE avro_extschema_literal_n1 UNSET TBLPROPERTIES ('avro.schema.literal') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@avro_extschema_literal -PREHOOK: Output: default@avro_extschema_literal -POSTHOOK: query: ALTER TABLE avro_extschema_literal UNSET TBLPROPERTIES ('avro.schema.literal') +PREHOOK: Input: default@avro_extschema_literal_n1 +PREHOOK: Output: default@avro_extschema_literal_n1 +POSTHOOK: query: ALTER TABLE avro_extschema_literal_n1 UNSET TBLPROPERTIES ('avro.schema.literal') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@avro_extschema_literal -POSTHOOK: Output: default@avro_extschema_literal -PREHOOK: query: DESCRIBE avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: Output: default@avro_extschema_literal_n1 +PREHOOK: query: DESCRIBE avro_extschema_literal_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_literal -POSTHOOK: query: DESCRIBE avro_extschema_literal +PREHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: query: DESCRIBE avro_extschema_literal_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 number int first_name string last_name string -PREHOOK: query: ALTER TABLE avro_extschema_literal SET +PREHOOK: query: ALTER TABLE avro_extschema_literal_n1 SET TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", "name": "ext_schema", @@ -90,9 +90,9 @@ PREHOOK: query: ALTER TABLE avro_extschema_literal SET { "name":"newCol", "type":"int" } ] }') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@avro_extschema_literal -PREHOOK: Output: default@avro_extschema_literal -POSTHOOK: query: ALTER TABLE avro_extschema_literal SET +PREHOOK: Input: default@avro_extschema_literal_n1 +PREHOOK: Output: default@avro_extschema_literal_n1 +POSTHOOK: query: ALTER TABLE avro_extschema_literal_n1 SET TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", "name": "ext_schema", @@ -101,138 +101,138 @@ POSTHOOK: query: ALTER TABLE avro_extschema_literal SET { "name":"newCol", "type":"int" } ] }') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@avro_extschema_literal -POSTHOOK: Output: default@avro_extschema_literal -PREHOOK: query: ALTER TABLE avro_extschema_literal UPDATE COLUMNS CASCADE +POSTHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: Output: default@avro_extschema_literal_n1 +PREHOOK: query: ALTER TABLE avro_extschema_literal_n1 UPDATE COLUMNS CASCADE PREHOOK: type: ALTERTABLE_UPDATECOLUMNS -POSTHOOK: query: ALTER TABLE avro_extschema_literal UPDATE COLUMNS CASCADE +POSTHOOK: query: ALTER TABLE 
avro_extschema_literal_n1 UPDATE COLUMNS CASCADE POSTHOOK: type: ALTERTABLE_UPDATECOLUMNS -POSTHOOK: Input: default@avro_extschema_literal -POSTHOOK: Output: default@avro_extschema_literal -PREHOOK: query: DESCRIBE avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: Output: default@avro_extschema_literal_n1 +PREHOOK: query: DESCRIBE avro_extschema_literal_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_literal -POSTHOOK: query: DESCRIBE avro_extschema_literal +PREHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: query: DESCRIBE avro_extschema_literal_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 newcol int -PREHOOK: query: ALTER TABLE avro_extschema_literal UNSET TBLPROPERTIES ('avro.schema.literal') +PREHOOK: query: ALTER TABLE avro_extschema_literal_n1 UNSET TBLPROPERTIES ('avro.schema.literal') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@avro_extschema_literal -PREHOOK: Output: default@avro_extschema_literal -POSTHOOK: query: ALTER TABLE avro_extschema_literal UNSET TBLPROPERTIES ('avro.schema.literal') +PREHOOK: Input: default@avro_extschema_literal_n1 +PREHOOK: Output: default@avro_extschema_literal_n1 +POSTHOOK: query: ALTER TABLE avro_extschema_literal_n1 UNSET TBLPROPERTIES ('avro.schema.literal') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@avro_extschema_literal -POSTHOOK: Output: default@avro_extschema_literal -PREHOOK: query: DESCRIBE avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: Output: default@avro_extschema_literal_n1 +PREHOOK: query: DESCRIBE avro_extschema_literal_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_literal -POSTHOOK: query: DESCRIBE avro_extschema_literal +PREHOOK: Input: default@avro_extschema_literal_n1 +POSTHOOK: query: DESCRIBE avro_extschema_literal_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_literal +POSTHOOK: Input: default@avro_extschema_literal_n1 newcol int -PREHOOK: query: CREATE TABLE avro_extschema_url +PREHOOK: query: CREATE TABLE avro_extschema_url_n1 STORED AS AVRO #### A masked pattern was here #### PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@avro_extschema_url -POSTHOOK: query: CREATE TABLE avro_extschema_url +PREHOOK: Output: default@avro_extschema_url_n1 +POSTHOOK: query: CREATE TABLE avro_extschema_url_n1 STORED AS AVRO #### A masked pattern was here #### POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@avro_extschema_url -PREHOOK: query: DESCRIBE avro_extschema_url +POSTHOOK: Output: default@avro_extschema_url_n1 +PREHOOK: query: DESCRIBE avro_extschema_url_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_url -POSTHOOK: query: DESCRIBE avro_extschema_url +PREHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: query: DESCRIBE avro_extschema_url_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 col1 string col2 string col3 double col4 string col5 string col6 int -PREHOOK: query: ALTER TABLE avro_extschema_url SET +PREHOOK: query: ALTER TABLE avro_extschema_url_n1 SET #### A masked pattern was here #### PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@avro_extschema_url -PREHOOK: Output: default@avro_extschema_url -POSTHOOK: query: ALTER TABLE avro_extschema_url SET +PREHOOK: Input: 
default@avro_extschema_url_n1 +PREHOOK: Output: default@avro_extschema_url_n1 +POSTHOOK: query: ALTER TABLE avro_extschema_url_n1 SET #### A masked pattern was here #### POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@avro_extschema_url -POSTHOOK: Output: default@avro_extschema_url -PREHOOK: query: DESCRIBE avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: Output: default@avro_extschema_url_n1 +PREHOOK: query: DESCRIBE avro_extschema_url_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_url -POSTHOOK: query: DESCRIBE avro_extschema_url +PREHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: query: DESCRIBE avro_extschema_url_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 colx string coly string colz double -PREHOOK: query: ALTER TABLE avro_extschema_url UNSET TBLPROPERTIES ('avro.schema.url') +PREHOOK: query: ALTER TABLE avro_extschema_url_n1 UNSET TBLPROPERTIES ('avro.schema.url') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@avro_extschema_url -PREHOOK: Output: default@avro_extschema_url -POSTHOOK: query: ALTER TABLE avro_extschema_url UNSET TBLPROPERTIES ('avro.schema.url') +PREHOOK: Input: default@avro_extschema_url_n1 +PREHOOK: Output: default@avro_extschema_url_n1 +POSTHOOK: query: ALTER TABLE avro_extschema_url_n1 UNSET TBLPROPERTIES ('avro.schema.url') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@avro_extschema_url -POSTHOOK: Output: default@avro_extschema_url -PREHOOK: query: DESCRIBE avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: Output: default@avro_extschema_url_n1 +PREHOOK: query: DESCRIBE avro_extschema_url_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_url -POSTHOOK: query: DESCRIBE avro_extschema_url +PREHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: query: DESCRIBE avro_extschema_url_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 col1 string col2 string col3 double col4 string col5 string col6 int -PREHOOK: query: ALTER TABLE avro_extschema_url SET +PREHOOK: query: ALTER TABLE avro_extschema_url_n1 SET #### A masked pattern was here #### PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@avro_extschema_url -PREHOOK: Output: default@avro_extschema_url -POSTHOOK: query: ALTER TABLE avro_extschema_url SET +PREHOOK: Input: default@avro_extschema_url_n1 +PREHOOK: Output: default@avro_extschema_url_n1 +POSTHOOK: query: ALTER TABLE avro_extschema_url_n1 SET #### A masked pattern was here #### POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@avro_extschema_url -POSTHOOK: Output: default@avro_extschema_url -PREHOOK: query: ALTER TABLE avro_extschema_url UPDATE COLUMNS CASCADE +POSTHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: Output: default@avro_extschema_url_n1 +PREHOOK: query: ALTER TABLE avro_extschema_url_n1 UPDATE COLUMNS CASCADE PREHOOK: type: ALTERTABLE_UPDATECOLUMNS -POSTHOOK: query: ALTER TABLE avro_extschema_url UPDATE COLUMNS CASCADE +POSTHOOK: query: ALTER TABLE avro_extschema_url_n1 UPDATE COLUMNS CASCADE POSTHOOK: type: ALTERTABLE_UPDATECOLUMNS -POSTHOOK: Input: default@avro_extschema_url -POSTHOOK: Output: default@avro_extschema_url -PREHOOK: query: DESCRIBE avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: Output: default@avro_extschema_url_n1 +PREHOOK: query: DESCRIBE avro_extschema_url_n1 PREHOOK: 
type: DESCTABLE -PREHOOK: Input: default@avro_extschema_url -POSTHOOK: query: DESCRIBE avro_extschema_url +PREHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: query: DESCRIBE avro_extschema_url_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 colx string coly string colz double -PREHOOK: query: ALTER TABLE avro_extschema_url UNSET TBLPROPERTIES ('avro.schema.url') +PREHOOK: query: ALTER TABLE avro_extschema_url_n1 UNSET TBLPROPERTIES ('avro.schema.url') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@avro_extschema_url -PREHOOK: Output: default@avro_extschema_url -POSTHOOK: query: ALTER TABLE avro_extschema_url UNSET TBLPROPERTIES ('avro.schema.url') +PREHOOK: Input: default@avro_extschema_url_n1 +PREHOOK: Output: default@avro_extschema_url_n1 +POSTHOOK: query: ALTER TABLE avro_extschema_url_n1 UNSET TBLPROPERTIES ('avro.schema.url') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@avro_extschema_url -POSTHOOK: Output: default@avro_extschema_url -PREHOOK: query: DESCRIBE avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: Output: default@avro_extschema_url_n1 +PREHOOK: query: DESCRIBE avro_extschema_url_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_extschema_url -POSTHOOK: query: DESCRIBE avro_extschema_url +PREHOOK: Input: default@avro_extschema_url_n1 +POSTHOOK: query: DESCRIBE avro_extschema_url_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_extschema_url +POSTHOOK: Input: default@avro_extschema_url_n1 colx string coly string colz double diff --git a/ql/src/test/results/clientpositive/avro_compression_enabled.q.out b/ql/src/test/results/clientpositive/avro_compression_enabled.q.out index d0122bf5fb..d30d030041 100644 --- a/ql/src/test/results/clientpositive/avro_compression_enabled.q.out +++ b/ql/src/test/results/clientpositive/avro_compression_enabled.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: CREATE TABLE doctors4 +PREHOOK: query: CREATE TABLE doctors4_n0 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -34,8 +34,8 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@doctors4 -POSTHOOK: query: CREATE TABLE doctors4 +PREHOOK: Output: default@doctors4_n0 +POSTHOOK: query: CREATE TABLE doctors4_n0 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -71,15 +71,15 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@doctors4 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4 +POSTHOOK: Output: default@doctors4_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@doctors4 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4 +PREHOOK: Output: default@doctors4_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@doctors4 +POSTHOOK: Output: default@doctors4_n0 PREHOOK: query: select count(*) from src PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/avro_compression_enabled_native.q.out 
b/ql/src/test/results/clientpositive/avro_compression_enabled_native.q.out index 43cf1907d6..7fac43b37a 100644 --- a/ql/src/test/results/clientpositive/avro_compression_enabled_native.q.out +++ b/ql/src/test/results/clientpositive/avro_compression_enabled_native.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: CREATE TABLE doctors4 ( +PREHOOK: query: CREATE TABLE doctors4_n1 ( number int, first_name string, last_name string, @@ -6,8 +6,8 @@ PREHOOK: query: CREATE TABLE doctors4 ( STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@doctors4 -POSTHOOK: query: CREATE TABLE doctors4 ( +PREHOOK: Output: default@doctors4_n1 +POSTHOOK: query: CREATE TABLE doctors4_n1 ( number int, first_name string, last_name string, @@ -15,15 +15,15 @@ POSTHOOK: query: CREATE TABLE doctors4 ( STORED AS AVRO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@doctors4 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4 +POSTHOOK: Output: default@doctors4_n1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@doctors4 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4 +PREHOOK: Output: default@doctors4_n1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@doctors4 +POSTHOOK: Output: default@doctors4_n1 PREHOOK: query: SELECT count(*) FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/avro_decimal.q.out b/ql/src/test/results/clientpositive/avro_decimal.q.out index 25a7065883..6aaafde803 100644 --- a/ql/src/test/results/clientpositive/avro_decimal.q.out +++ b/ql/src/test/results/clientpositive/avro_decimal.q.out @@ -1,39 +1,39 @@ -PREHOOK: query: DROP TABLE IF EXISTS `dec` +PREHOOK: query: DROP TABLE IF EXISTS `dec_n0` PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS `dec` +POSTHOOK: query: DROP TABLE IF EXISTS `dec_n0` POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE `dec`(name string, value decimal(8,4)) +PREHOOK: query: CREATE TABLE `dec_n0`(name string, value decimal(8,4)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dec -POSTHOOK: query: CREATE TABLE `dec`(name string, value decimal(8,4)) +PREHOOK: Output: default@dec_n0 +POSTHOOK: query: CREATE TABLE `dec_n0`(name string, value decimal(8,4)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dec -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.txt' into TABLE `dec` +POSTHOOK: Output: default@dec_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.txt' into TABLE `dec_n0` PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@dec -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.txt' into TABLE `dec` +PREHOOK: Output: default@dec_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.txt' into TABLE `dec_n0` POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@dec -PREHOOK: query: ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value +POSTHOOK: Output: default@dec_n0 +PREHOOK: query: ANALYZE TABLE `dec_n0` COMPUTE STATISTICS FOR COLUMNS value PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: 
default@dec -PREHOOK: Output: default@dec +PREHOOK: Input: default@dec_n0 +PREHOOK: Output: default@dec_n0 #### A masked pattern was here #### -POSTHOOK: query: ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value +POSTHOOK: query: ANALYZE TABLE `dec_n0` COMPUTE STATISTICS FOR COLUMNS value POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@dec -POSTHOOK: Output: default@dec +POSTHOOK: Input: default@dec_n0 +POSTHOOK: Output: default@dec_n0 #### A masked pattern was here #### -PREHOOK: query: DESC FORMATTED `dec` value +PREHOOK: query: DESC FORMATTED `dec_n0` value PREHOOK: type: DESCTABLE -PREHOOK: Input: default@dec -POSTHOOK: query: DESC FORMATTED `dec` value +PREHOOK: Input: default@dec_n0 +POSTHOOK: query: DESC FORMATTED `dec_n0` value POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@dec +POSTHOOK: Input: default@dec_n0 col_name value data_type decimal(8,4) min -12.25 @@ -47,11 +47,11 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"value\":\"true\"}} -PREHOOK: query: DROP TABLE IF EXISTS avro_dec +PREHOOK: query: DROP TABLE IF EXISTS avro_dec_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS avro_dec +POSTHOOK: query: DROP TABLE IF EXISTS avro_dec_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE `avro_dec`( +PREHOOK: query: CREATE TABLE `avro_dec_n0`( `name` string COMMENT 'from deserializer', `value` decimal(5,2) COMMENT 'from deserializer') COMMENT 'just drop the schema right into the HQL' @@ -67,8 +67,8 @@ TBLPROPERTIES ( ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@avro_dec -POSTHOOK: query: CREATE TABLE `avro_dec`( +PREHOOK: Output: default@avro_dec_n0 +POSTHOOK: query: CREATE TABLE `avro_dec_n0`( `name` string COMMENT 'from deserializer', `value` decimal(5,2) COMMENT 'from deserializer') COMMENT 'just drop the schema right into the HQL' @@ -84,32 +84,32 @@ TBLPROPERTIES ( ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@avro_dec -PREHOOK: query: DESC avro_dec +POSTHOOK: Output: default@avro_dec_n0 +PREHOOK: query: DESC avro_dec_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_dec -POSTHOOK: query: DESC avro_dec +PREHOOK: Input: default@avro_dec_n0 +POSTHOOK: query: DESC avro_dec_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_dec +POSTHOOK: Input: default@avro_dec_n0 name string value decimal(5,2) -PREHOOK: query: INSERT OVERWRITE TABLE avro_dec select name, value from `dec` +PREHOOK: query: INSERT OVERWRITE TABLE avro_dec_n0 select name, value from `dec_n0` PREHOOK: type: QUERY -PREHOOK: Input: default@dec -PREHOOK: Output: default@avro_dec -POSTHOOK: query: INSERT OVERWRITE TABLE avro_dec select name, value from `dec` +PREHOOK: Input: default@dec_n0 +PREHOOK: Output: default@avro_dec_n0 +POSTHOOK: query: INSERT OVERWRITE TABLE avro_dec_n0 select name, value from `dec_n0` POSTHOOK: type: QUERY -POSTHOOK: Input: default@dec -POSTHOOK: Output: default@avro_dec -POSTHOOK: Lineage: avro_dec.name SIMPLE [(dec)dec.FieldSchema(name:name, type:string, comment:null), ] -POSTHOOK: Lineage: avro_dec.value EXPRESSION [(dec)dec.FieldSchema(name:value, type:decimal(8,4), comment:null), ] -PREHOOK: query: SELECT * FROM avro_dec +POSTHOOK: Input: default@dec_n0 +POSTHOOK: Output: default@avro_dec_n0 +POSTHOOK: Lineage: avro_dec_n0.name SIMPLE [(dec_n0)dec_n0.FieldSchema(name:name, type:string, comment:null), ] +POSTHOOK: Lineage: avro_dec_n0.value EXPRESSION 
[(dec_n0)dec_n0.FieldSchema(name:value, type:decimal(8,4), comment:null), ] +PREHOOK: query: SELECT * FROM avro_dec_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@avro_dec +PREHOOK: Input: default@avro_dec_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM avro_dec +POSTHOOK: query: SELECT * FROM avro_dec_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@avro_dec +POSTHOOK: Input: default@avro_dec_n0 #### A masked pattern was here #### Tom 234.79 Beck 77.34 @@ -121,11 +121,11 @@ Mary 33.33 Tom 19.00 Beck 0.00 Beck 79.90 -PREHOOK: query: DROP TABLE IF EXISTS avro_dec1 +PREHOOK: query: DROP TABLE IF EXISTS avro_dec1_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1 +POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE `avro_dec1`( +PREHOOK: query: CREATE TABLE `avro_dec1_n0`( `name` string COMMENT 'from deserializer', `value` decimal(4,1) COMMENT 'from deserializer') COMMENT 'just drop the schema right into the HQL' @@ -141,8 +141,8 @@ TBLPROPERTIES ( ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@avro_dec1 -POSTHOOK: query: CREATE TABLE `avro_dec1`( +PREHOOK: Output: default@avro_dec1_n0 +POSTHOOK: query: CREATE TABLE `avro_dec1_n0`( `name` string COMMENT 'from deserializer', `value` decimal(4,1) COMMENT 'from deserializer') COMMENT 'just drop the schema right into the HQL' @@ -158,30 +158,30 @@ TBLPROPERTIES ( ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@avro_dec1 -PREHOOK: query: DESC avro_dec1 +POSTHOOK: Output: default@avro_dec1_n0 +PREHOOK: query: DESC avro_dec1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@avro_dec1 -POSTHOOK: query: DESC avro_dec1 +PREHOOK: Input: default@avro_dec1_n0 +POSTHOOK: query: DESC avro_dec1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@avro_dec1 +POSTHOOK: Input: default@avro_dec1_n0 name string value decimal(4,1) -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.avro' into TABLE avro_dec1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.avro' into TABLE avro_dec1_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@avro_dec1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.avro' into TABLE avro_dec1 +PREHOOK: Output: default@avro_dec1_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.avro' into TABLE avro_dec1_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@avro_dec1 -PREHOOK: query: select value from avro_dec1 +POSTHOOK: Output: default@avro_dec1_n0 +PREHOOK: query: select value from avro_dec1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@avro_dec1 +PREHOOK: Input: default@avro_dec1_n0 #### A masked pattern was here #### -POSTHOOK: query: select value from avro_dec1 +POSTHOOK: query: select value from avro_dec1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@avro_dec1 +POSTHOOK: Input: default@avro_dec1_n0 #### A masked pattern was here #### 234.8 77.3 @@ -193,27 +193,27 @@ POSTHOOK: Input: default@avro_dec1 19.0 3.2 79.9 -PREHOOK: query: DROP TABLE `dec` +PREHOOK: query: DROP TABLE `dec_n0` PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dec -PREHOOK: Output: default@dec -POSTHOOK: query: DROP TABLE `dec` +PREHOOK: Input: default@dec_n0 +PREHOOK: Output: default@dec_n0 +POSTHOOK: query: DROP TABLE `dec_n0` POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dec -POSTHOOK: Output: default@dec -PREHOOK: query: DROP 
TABLE avro_dec +POSTHOOK: Input: default@dec_n0 +POSTHOOK: Output: default@dec_n0 +PREHOOK: query: DROP TABLE avro_dec_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@avro_dec -PREHOOK: Output: default@avro_dec -POSTHOOK: query: DROP TABLE avro_dec +PREHOOK: Input: default@avro_dec_n0 +PREHOOK: Output: default@avro_dec_n0 +POSTHOOK: query: DROP TABLE avro_dec_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@avro_dec -POSTHOOK: Output: default@avro_dec -PREHOOK: query: DROP TABLE avro_dec1 +POSTHOOK: Input: default@avro_dec_n0 +POSTHOOK: Output: default@avro_dec_n0 +PREHOOK: query: DROP TABLE avro_dec1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@avro_dec1 -PREHOOK: Output: default@avro_dec1 -POSTHOOK: query: DROP TABLE avro_dec1 +PREHOOK: Input: default@avro_dec1_n0 +PREHOOK: Output: default@avro_dec1_n0 +POSTHOOK: query: DROP TABLE avro_dec1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@avro_dec1 -POSTHOOK: Output: default@avro_dec1 +POSTHOOK: Input: default@avro_dec1_n0 +POSTHOOK: Output: default@avro_dec1_n0 diff --git a/ql/src/test/results/clientpositive/avro_joins.q.out b/ql/src/test/results/clientpositive/avro_joins.q.out index 24a14d5204..b31d37f2d5 100644 --- a/ql/src/test/results/clientpositive/avro_joins.q.out +++ b/ql/src/test/results/clientpositive/avro_joins.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: CREATE TABLE doctors4 +PREHOOK: query: CREATE TABLE doctors4_n2 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -34,8 +34,8 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@doctors4 -POSTHOOK: query: CREATE TABLE doctors4 +PREHOOK: Output: default@doctors4_n2 +POSTHOOK: query: CREATE TABLE doctors4_n2 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -71,26 +71,26 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@doctors4 -PREHOOK: query: DESCRIBE doctors4 +POSTHOOK: Output: default@doctors4_n2 +PREHOOK: query: DESCRIBE doctors4_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@doctors4 -POSTHOOK: query: DESCRIBE doctors4 +PREHOOK: Input: default@doctors4_n2 +POSTHOOK: query: DESCRIBE doctors4_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@doctors4 +POSTHOOK: Input: default@doctors4_n2 number int Order of playing the role first_name string first name of actor playing role last_name string last name of actor playing role extra_field string an extra field not in the original file -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@doctors4 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4 +PREHOOK: Output: default@doctors4_n2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@doctors4 -PREHOOK: query: CREATE TABLE episodes +POSTHOOK: Output: default@doctors4_n2 +PREHOOK: query: CREATE TABLE episodes_n3 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -98,7 +98,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' 
TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n3", "type": "record", "fields": [ { @@ -120,8 +120,8 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@episodes -POSTHOOK: query: CREATE TABLE episodes +PREHOOK: Output: default@episodes_n3 +POSTHOOK: query: CREATE TABLE episodes_n3 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -129,7 +129,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n3", "type": "record", "fields": [ { @@ -151,35 +151,35 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@episodes -PREHOOK: query: DESCRIBE episodes +POSTHOOK: Output: default@episodes_n3 +PREHOOK: query: DESCRIBE episodes_n3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@episodes -POSTHOOK: query: DESCRIBE episodes +PREHOOK: Input: default@episodes_n3 +POSTHOOK: query: DESCRIBE episodes_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@episodes +POSTHOOK: Input: default@episodes_n3 title string episode title air_date string initial date doctor int main actor playing the Doctor in episode -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n3 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@episodes -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes +PREHOOK: Output: default@episodes_n3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n3 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@episodes +POSTHOOK: Output: default@episodes_n3 PREHOOK: query: SELECT e.title, e.air_date, d.first_name, d.last_name, d.extra_field, e.air_date -FROM doctors4 d JOIN episodes e ON (d.number=e.doctor) +FROM doctors4_n2 d JOIN episodes_n3 e ON (d.number=e.doctor) PREHOOK: type: QUERY -PREHOOK: Input: default@doctors4 -PREHOOK: Input: default@episodes +PREHOOK: Input: default@doctors4_n2 +PREHOOK: Input: default@episodes_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT e.title, e.air_date, d.first_name, d.last_name, d.extra_field, e.air_date -FROM doctors4 d JOIN episodes e ON (d.number=e.doctor) +FROM doctors4_n2 d JOIN episodes_n3 e ON (d.number=e.doctor) POSTHOOK: type: QUERY -POSTHOOK: Input: default@doctors4 -POSTHOOK: Input: default@episodes +POSTHOOK: Input: default@doctors4_n2 +POSTHOOK: Input: default@episodes_n3 #### A masked pattern was here #### An Unearthly Child 23 November 1963 William Hartnell fishfingers and custard 23 November 1963 Castrolava 4 January 1982 Peter Davison fishfingers and custard 4 January 1982 diff --git a/ql/src/test/results/clientpositive/avro_joins_native.q.out b/ql/src/test/results/clientpositive/avro_joins_native.q.out index b2ece57fb6..98ab05066b 100644 --- a/ql/src/test/results/clientpositive/avro_joins_native.q.out +++ b/ql/src/test/results/clientpositive/avro_joins_native.q.out @@ -31,50 +31,50 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TAB POSTHOOK: 
type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@doctors4 -PREHOOK: query: CREATE TABLE episodes ( +PREHOOK: query: CREATE TABLE episodes_n1 ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@episodes -POSTHOOK: query: CREATE TABLE episodes ( +PREHOOK: Output: default@episodes_n1 +POSTHOOK: query: CREATE TABLE episodes_n1 ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") STORED AS AVRO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@episodes -PREHOOK: query: DESCRIBE episodes +POSTHOOK: Output: default@episodes_n1 +PREHOOK: query: DESCRIBE episodes_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@episodes -POSTHOOK: query: DESCRIBE episodes +PREHOOK: Input: default@episodes_n1 +POSTHOOK: query: DESCRIBE episodes_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@episodes +POSTHOOK: Input: default@episodes_n1 title string episode title air_date string initial date doctor int main actor playing the Doctor in episode -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@episodes -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes +PREHOOK: Output: default@episodes_n1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@episodes +POSTHOOK: Output: default@episodes_n1 PREHOOK: query: SELECT e.title, e.air_date, d.first_name, d.last_name, e.air_date -FROM doctors4 d JOIN episodes e ON (d.number=e.doctor) +FROM doctors4 d JOIN episodes_n1 e ON (d.number=e.doctor) PREHOOK: type: QUERY PREHOOK: Input: default@doctors4 -PREHOOK: Input: default@episodes +PREHOOK: Input: default@episodes_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT e.title, e.air_date, d.first_name, d.last_name, e.air_date -FROM doctors4 d JOIN episodes e ON (d.number=e.doctor) +FROM doctors4 d JOIN episodes_n1 e ON (d.number=e.doctor) POSTHOOK: type: QUERY POSTHOOK: Input: default@doctors4 -POSTHOOK: Input: default@episodes +POSTHOOK: Input: default@episodes_n1 #### A masked pattern was here #### An Unearthly Child 23 November 1963 William Hartnell 23 November 1963 Castrolava 4 January 1982 Peter Davison 4 January 1982 diff --git a/ql/src/test/results/clientpositive/avro_native.q.out b/ql/src/test/results/clientpositive/avro_native.q.out index 17d828046d..0626cda390 100644 --- a/ql/src/test/results/clientpositive/avro_native.q.out +++ b/ql/src/test/results/clientpositive/avro_native.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: CREATE TABLE doctors ( +PREHOOK: query: CREATE TABLE doctors_n4 ( number int, first_name string, last_name string) STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@doctors -POSTHOOK: query: CREATE TABLE doctors ( +PREHOOK: Output: default@doctors_n4 +POSTHOOK: query: CREATE TABLE doctors_n4 ( number int, first_name string, last_name string) STORED AS AVRO POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default -POSTHOOK: Output: default@doctors -PREHOOK: query: DESCRIBE doctors +POSTHOOK: Output: default@doctors_n4 +PREHOOK: query: DESCRIBE doctors_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@doctors -POSTHOOK: query: DESCRIBE doctors +PREHOOK: Input: default@doctors_n4 +POSTHOOK: query: DESCRIBE doctors_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@doctors +POSTHOOK: Input: default@doctors_n4 number int first_name string last_name string -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@doctors -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors +PREHOOK: Output: default@doctors_n4 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@doctors -PREHOOK: query: SELECT * FROM doctors +POSTHOOK: Output: default@doctors_n4 +PREHOOK: query: SELECT * FROM doctors_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@doctors +PREHOOK: Input: default@doctors_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM doctors +POSTHOOK: query: SELECT * FROM doctors_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@doctors +POSTHOOK: Input: default@doctors_n4 #### A masked pattern was here #### 1 William Hartnell 10 David Tennant diff --git a/ql/src/test/results/clientpositive/avro_partitioned.q.out b/ql/src/test/results/clientpositive/avro_partitioned.q.out index b85b4edf81..da6a067ef2 100644 --- a/ql/src/test/results/clientpositive/avro_partitioned.q.out +++ b/ql/src/test/results/clientpositive/avro_partitioned.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: CREATE TABLE episodes +PREHOOK: query: CREATE TABLE episodes_n2 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -6,7 +6,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n2", "type": "record", "fields": [ { @@ -28,8 +28,8 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@episodes -POSTHOOK: query: CREATE TABLE episodes +PREHOOK: Output: default@episodes_n2 +POSTHOOK: query: CREATE TABLE episodes_n2 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -37,7 +37,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n2", "type": "record", "fields": [ { @@ -59,16 +59,16 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@episodes -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes +POSTHOOK: Output: default@episodes_n2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@episodes -POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/episodes.avro' INTO TABLE episodes +PREHOOK: Output: default@episodes_n2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@episodes -PREHOOK: query: CREATE TABLE episodes_partitioned +POSTHOOK: Output: default@episodes_n2 +PREHOOK: query: CREATE TABLE episodes_partitioned_n1 PARTITIONED BY (doctor_pt INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' @@ -77,7 +77,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n2", "type": "record", "fields": [ { @@ -99,8 +99,8 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@episodes_partitioned -POSTHOOK: query: CREATE TABLE episodes_partitioned +PREHOOK: Output: default@episodes_partitioned_n1 +POSTHOOK: query: CREATE TABLE episodes_partitioned_n1 PARTITIONED BY (doctor_pt INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' @@ -109,7 +109,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n2", "type": "record", "fields": [ { @@ -131,117 +131,117 @@ TBLPROPERTIES ('avro.schema.literal'='{ }') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@episodes_partitioned -PREHOOK: query: INSERT OVERWRITE TABLE episodes_partitioned PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes +POSTHOOK: Output: default@episodes_partitioned_n1 +PREHOOK: query: INSERT OVERWRITE TABLE episodes_partitioned_n1 PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@episodes -PREHOOK: Output: default@episodes_partitioned -POSTHOOK: query: INSERT OVERWRITE TABLE episodes_partitioned PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes +PREHOOK: Input: default@episodes_n2 +PREHOOK: Output: default@episodes_partitioned_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE episodes_partitioned_n1 PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@episodes -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=1 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=11 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=2 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=4 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=5 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=6 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=9 -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).title SIMPLE 
[(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+POSTHOOK: Input: default@episodes_n2
+POSTHOOK: Output: default@episodes_partitioned_n1@doctor_pt=1
+POSTHOOK: Output: default@episodes_partitioned_n1@doctor_pt=11
+POSTHOOK: Output: default@episodes_partitioned_n1@doctor_pt=2
+POSTHOOK: Output: default@episodes_partitioned_n1@doctor_pt=4
+POSTHOOK: Output: default@episodes_partitioned_n1@doctor_pt=5
+POSTHOOK: Output: default@episodes_partitioned_n1@doctor_pt=6
+POSTHOOK: Output: default@episodes_partitioned_n1@doctor_pt=9
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=11).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=11).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=11).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=1).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=1).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=1).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=2).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=2).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=2).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=4).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=4).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=4).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=5).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=5).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=5).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=6).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=6).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=6).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=9).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=9).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_n1 PARTITION(doctor_pt=9).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+PREHOOK: query: SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt > 6
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+PREHOOK: Input: default@episodes_partitioned_n1
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt > 6
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+POSTHOOK: Input: default@episodes_partitioned_n1
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=9
 #### A masked pattern was here ####
 Rose	26 March 2005	9	9
 The Doctor's Wife	14 May 2011	11	11
 The Eleventh Hour	3 April 2010	11	11
-PREHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5
+PREHOOK: query: SELECT * FROM episodes_partitioned_n1 ORDER BY air_date LIMIT 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=1
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=2
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=4
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=5
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+PREHOOK: Input: default@episodes_partitioned_n1
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=1
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=2
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=4
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=5
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=6
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n1 ORDER BY air_date LIMIT 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=2
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=4
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=5
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+POSTHOOK: Input: default@episodes_partitioned_n1
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=1
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=2
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=4
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=5
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=6
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=9
 #### A masked pattern was here ####
 An Unearthly Child	23 November 1963	1	1
 Horror of Fang Rock	3 September 1977	4	4
 Rose	26 March 2005	9	9
 The Doctor's Wife	14 May 2011	11	11
 The Eleventh Hour	3 April 2010	11	11
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6
+PREHOOK: query: SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt = 6
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
+PREHOOK: Input: default@episodes_partitioned_n1
+PREHOOK: Input: default@episodes_partitioned_n1@doctor_pt=6
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt = 6
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
+POSTHOOK: Input: default@episodes_partitioned_n1
+POSTHOOK: Input: default@episodes_partitioned_n1@doctor_pt=6
 #### A masked pattern was here ####
 The Mysterious Planet	6 September 1986	6	6
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
+PREHOOK: query: SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt = 7 LIMIT 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
+PREHOOK: Input: default@episodes_partitioned_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n1 WHERE doctor_pt = 7 LIMIT 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
+POSTHOOK: Input: default@episodes_partitioned_n1
 #### A masked pattern was here ####
-PREHOOK: query: ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7)
+PREHOOK: query: ALTER TABLE episodes_partitioned_n1 ADD PARTITION (doctor_pt=7)
 PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@episodes_partitioned
-POSTHOOK: query: ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7)
+PREHOOK: Output: default@episodes_partitioned_n1
+POSTHOOK: query: ALTER TABLE episodes_partitioned_n1 ADD PARTITION (doctor_pt=7)
 POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@episodes_partitioned
-POSTHOOK: Output: default@episodes_partitioned@doctor_pt=7
-PREHOOK: query: SELECT COUNT(*) FROM episodes_partitioned
+POSTHOOK: Output: default@episodes_partitioned_n1
+POSTHOOK: Output: default@episodes_partitioned_n1@doctor_pt=7
+PREHOOK: query: SELECT COUNT(*) FROM episodes_partitioned_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
+PREHOOK: Input: default@episodes_partitioned_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM episodes_partitioned
+POSTHOOK: query: SELECT COUNT(*) FROM episodes_partitioned_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
+POSTHOOK: Input: default@episodes_partitioned_n1
 #### A masked pattern was here ####
 8
 PREHOOK: query: CREATE TABLE episodes_partitioned_serdeproperties
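[editor's note] The hunk above is a pure rename (episodes_partitioned -> episodes_partitioned_n1), but the golden output it touches shows two behaviours worth naming: the doctor_pt > 6 filter lists only the doctor_pt=9 and doctor_pt=11 partitions as inputs (partition pruning), and ALTER TABLE ... ADD PARTITION (doctor_pt=7) registers an empty partition, so the subsequent COUNT(*) still returns 8. A minimal sketch of the same flow, with hypothetical names (episodes_demo, episodes_src) that are not part of this patch:

    CREATE TABLE episodes_demo (title STRING, air_date STRING, doctor INT)
    PARTITIONED BY (doctor_pt INT)
    STORED AS AVRO;
    SET hive.exec.dynamic.partition.mode=nonstrict;
    -- dynamic partition insert: the trailing SELECT column populates doctor_pt
    INSERT OVERWRITE TABLE episodes_demo PARTITION (doctor_pt)
    SELECT title, air_date, doctor, doctor AS doctor_pt FROM episodes_src;
    -- pruned scan: only partitions with doctor_pt > 6 are read
    SELECT * FROM episodes_demo WHERE doctor_pt > 6;
    -- an added-but-empty partition changes metadata only, not row counts
    ALTER TABLE episodes_demo ADD PARTITION (doctor_pt=7);
    SELECT COUNT(*) FROM episodes_demo;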
@@ -250,7 +250,7 @@ ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
 WITH SERDEPROPERTIES ('avro.schema.literal'='{
   "namespace": "testing.hive.avro.serde",
-  "name": "episodes",
+  "name": "episodes_n2",
   "type": "record",
   "fields": [
     {
@@ -282,7 +282,7 @@ ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
 WITH SERDEPROPERTIES ('avro.schema.literal'='{
   "namespace": "testing.hive.avro.serde",
-  "name": "episodes",
+  "name": "episodes_n2",
   "type": "record",
   "fields": [
     {
@@ -308,13 +308,13 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@episodes_partitioned_serdeproperties
-PREHOOK: query: INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes
+PREHOOK: query: INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes
+PREHOOK: Input: default@episodes_n2
 PREHOOK: Output: default@episodes_partitioned_serdeproperties
-POSTHOOK: query: INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes
+POSTHOOK: query: INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes
+POSTHOOK: Input: default@episodes_n2
 POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=1
 POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=11
 POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=2
@@ -322,32 +322,32 @@ POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=4
 POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=5
 POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=6
 POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=9
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
-POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).air_date SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:air_date, type:string, comment:initial date), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).doctor SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
+POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).title SIMPLE [(episodes_n2)episodes_n2.FieldSchema(name:title, type:string, comment:episode title), ]
 PREHOOK: query: ALTER TABLE episodes_partitioned_serdeproperties
 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH
 SERDEPROPERTIES ('avro.schema.literal'='{
   "namespace": "testing.hive.avro.serde",
-  "name": "episodes",
+  "name": "episodes_n2",
   "type": "record",
   "fields": [
     {
@@ -379,7 +379,7 @@ POSTHOOK: query: ALTER TABLE episodes_partitioned_serdeproperties
 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH
 SERDEPROPERTIES ('avro.schema.literal'='{
   "namespace": "testing.hive.avro.serde",
-  "name": "episodes",
+  "name": "episodes_n2",
   "type": "record",
   "fields": [
     {
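[editor's note] Both ALTER hunks above only swap the record name inside the avro.schema.literal serde property ("episodes" -> "episodes_n2"); the field list is untouched, which is why nothing else in that section changes. The construct under test, roughly, with a hypothetical table name (avro_literal_demo) not taken from the patch:

    CREATE TABLE avro_literal_demo
    ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
    STORED AS
    INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
    OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
    TBLPROPERTIES ('avro.schema.literal'='{
      "namespace": "testing.hive.avro.serde",
      "name": "demo_record",
      "type": "record",
      "fields": [ {"name": "title", "type": "string"} ]
    }');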
diff --git a/ql/src/test/results/clientpositive/avro_sanity_test.q.out b/ql/src/test/results/clientpositive/avro_sanity_test.q.out
index fa4e92182a..7aed83ccd7 100644
--- a/ql/src/test/results/clientpositive/avro_sanity_test.q.out
+++ b/ql/src/test/results/clientpositive/avro_sanity_test.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE doctors
+PREHOOK: query: CREATE TABLE doctors_n1
 ROW FORMAT
 SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
 STORED AS
@@ -6,7 +6,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
 TBLPROPERTIES ('avro.schema.literal'='{
   "namespace": "testing.hive.avro.serde",
-  "name": "doctors",
+  "name": "doctors_n1",
   "type": "record",
   "fields": [
     {
@@ -28,8 +28,8 @@ TBLPROPERTIES ('avro.schema.literal'='{
 }')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@doctors
-POSTHOOK: query: CREATE TABLE doctors
+PREHOOK: Output: default@doctors_n1
+POSTHOOK: query: CREATE TABLE doctors_n1
 ROW FORMAT
 SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
 STORED AS
@@ -37,7 +37,7 @@ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
 TBLPROPERTIES ('avro.schema.literal'='{
   "namespace": "testing.hive.avro.serde",
-  "name": "doctors",
+  "name": "doctors_n1",
   "type": "record",
   "fields": [
     {
@@ -59,31 +59,31 @@ TBLPROPERTIES ('avro.schema.literal'='{
 }')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@doctors
-PREHOOK: query: DESCRIBE doctors
+POSTHOOK: Output: default@doctors_n1
+PREHOOK: query: DESCRIBE doctors_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@doctors
-POSTHOOK: query: DESCRIBE doctors
+PREHOOK: Input: default@doctors_n1
+POSTHOOK: query: DESCRIBE doctors_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@doctors
+POSTHOOK: Input: default@doctors_n1
 number	int	Order of playing the role
 first_name	string	first name of actor playing role
 last_name	string	last name of actor playing role
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@doctors
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors
+PREHOOK: Output: default@doctors_n1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@doctors
-PREHOOK: query: SELECT * FROM doctors
+POSTHOOK: Output: default@doctors_n1
+PREHOOK: query: SELECT * FROM doctors_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@doctors
+PREHOOK: Input: default@doctors_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM doctors
+POSTHOOK: query: SELECT * FROM doctors_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@doctors
+POSTHOOK: Input: default@doctors_n1
 #### A masked pattern was here ####
 1	William	Hartnell
 10	David	Tennant
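[editor's note] avro_sanity_test.q.out above is the same mechanical rename (doctors -> doctors_n1); the flow it covers is the baseline Avro round trip: declare the reader schema up front, LOAD a pre-built .avro container file, read it back. The next file, avro_schema_evolution_native.q.out, instead uses the native STORED AS AVRO syntax, where Hive derives the Avro schema from the column definitions and no schema literal is needed. Roughly, with a hypothetical name and path:

    CREATE TABLE episodes_native_demo (
      title STRING COMMENT 'episode title',
      air_date STRING COMMENT 'initial date',
      doctor INT COMMENT 'main actor playing the Doctor in episode')
    STORED AS AVRO;
    -- the container file's writer schema must resolve against the derived schema
    LOAD DATA LOCAL INPATH '/tmp/episodes.avro' INTO TABLE episodes_native_demo;
    SELECT * FROM episodes_native_demo;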
"initial date", doctor int COMMENT "main actor playing the Doctor in episode") STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@episodes -POSTHOOK: query: CREATE TABLE episodes ( +PREHOOK: Output: default@episodes_n0 +POSTHOOK: query: CREATE TABLE episodes_n0 ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") STORED AS AVRO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@episodes -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes +POSTHOOK: Output: default@episodes_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@episodes -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes +PREHOOK: Output: default@episodes_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@episodes -PREHOOK: query: CREATE TABLE episodes_partitioned ( +POSTHOOK: Output: default@episodes_n0 +PREHOOK: query: CREATE TABLE episodes_partitioned_n0 ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") @@ -30,8 +30,8 @@ PARTITIONED BY (doctor_pt INT) STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@episodes_partitioned -POSTHOOK: query: CREATE TABLE episodes_partitioned ( +PREHOOK: Output: default@episodes_partitioned_n0 +POSTHOOK: query: CREATE TABLE episodes_partitioned_n0 ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") @@ -39,51 +39,51 @@ PARTITIONED BY (doctor_pt INT) STORED AS AVRO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@episodes_partitioned -PREHOOK: query: INSERT OVERWRITE TABLE episodes_partitioned PARTITION (doctor_pt) -SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes +POSTHOOK: Output: default@episodes_partitioned_n0 +PREHOOK: query: INSERT OVERWRITE TABLE episodes_partitioned_n0 PARTITION (doctor_pt) +SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@episodes -PREHOOK: Output: default@episodes_partitioned -POSTHOOK: query: INSERT OVERWRITE TABLE episodes_partitioned PARTITION (doctor_pt) -SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes +PREHOOK: Input: default@episodes_n0 +PREHOOK: Output: default@episodes_partitioned_n0 +POSTHOOK: query: INSERT OVERWRITE TABLE episodes_partitioned_n0 PARTITION (doctor_pt) +SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@episodes -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=1 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=11 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=2 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=4 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=5 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=6 -POSTHOOK: Output: default@episodes_partitioned@doctor_pt=9 -POSTHOOK: Lineage: episodes_partitioned 
PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ] +POSTHOOK: 
Input: default@episodes_n0 +POSTHOOK: Output: default@episodes_partitioned_n0@doctor_pt=1 +POSTHOOK: Output: default@episodes_partitioned_n0@doctor_pt=11 +POSTHOOK: Output: default@episodes_partitioned_n0@doctor_pt=2 +POSTHOOK: Output: default@episodes_partitioned_n0@doctor_pt=4 +POSTHOOK: Output: default@episodes_partitioned_n0@doctor_pt=5 +POSTHOOK: Output: default@episodes_partitioned_n0@doctor_pt=6 +POSTHOOK: Output: default@episodes_partitioned_n0@doctor_pt=9 +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=11).air_date SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:air_date, type:string, comment:initial date), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=11).doctor SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=11).title SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:title, type:string, comment:episode title), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=1).air_date SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:air_date, type:string, comment:initial date), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=1).doctor SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=1).title SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:title, type:string, comment:episode title), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=2).air_date SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:air_date, type:string, comment:initial date), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=2).doctor SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=2).title SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:title, type:string, comment:episode title), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=4).air_date SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:air_date, type:string, comment:initial date), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=4).doctor SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=4).title SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:title, type:string, comment:episode title), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=5).air_date SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:air_date, type:string, comment:initial date), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=5).doctor SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=5).title SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:title, type:string, comment:episode title), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=6).air_date SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:air_date, type:string, comment:initial date), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=6).doctor SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] +POSTHOOK: Lineage: 
episodes_partitioned_n0 PARTITION(doctor_pt=6).title SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:title, type:string, comment:episode title), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=9).air_date SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:air_date, type:string, comment:initial date), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=9).doctor SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] +POSTHOOK: Lineage: episodes_partitioned_n0 PARTITION(doctor_pt=9).title SIMPLE [(episodes_n0)episodes_n0.FieldSchema(name:title, type:string, comment:episode title), ] title air_date doctor doctor_pt -PREHOOK: query: DESCRIBE FORMATTED episodes_partitioned +PREHOOK: query: DESCRIBE FORMATTED episodes_partitioned_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@episodes_partitioned -POSTHOOK: query: DESCRIBE FORMATTED episodes_partitioned +PREHOOK: Input: default@episodes_partitioned_n0 +POSTHOOK: query: DESCRIBE FORMATTED episodes_partitioned_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@episodes_partitioned +POSTHOOK: Input: default@episodes_partitioned_n0 col_name data_type comment # col_name data_type comment title string episode title @@ -107,7 +107,7 @@ Table Parameters: numPartitions 7 numRows 8 rawDataSize 0 - totalSize 3077 + totalSize 3098 #### A masked pattern was here #### # Storage Information @@ -120,12 +120,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE episodes_partitioned +PREHOOK: query: ALTER TABLE episodes_partitioned_n0 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH SERDEPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n0", "type": "record", "fields": [ { @@ -152,14 +152,14 @@ SERDEPROPERTIES ('avro.schema.literal'='{ ] }') PREHOOK: type: ALTERTABLE_SERIALIZER -PREHOOK: Input: default@episodes_partitioned -PREHOOK: Output: default@episodes_partitioned -POSTHOOK: query: ALTER TABLE episodes_partitioned +PREHOOK: Input: default@episodes_partitioned_n0 +PREHOOK: Output: default@episodes_partitioned_n0 +POSTHOOK: query: ALTER TABLE episodes_partitioned_n0 SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH SERDEPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "episodes", + "name": "episodes_n0", "type": "record", "fields": [ { @@ -186,14 +186,14 @@ SERDEPROPERTIES ('avro.schema.literal'='{ ] }') POSTHOOK: type: ALTERTABLE_SERIALIZER -POSTHOOK: Input: default@episodes_partitioned -POSTHOOK: Output: default@episodes_partitioned -PREHOOK: query: DESCRIBE FORMATTED episodes_partitioned +POSTHOOK: Input: default@episodes_partitioned_n0 +POSTHOOK: Output: default@episodes_partitioned_n0 +PREHOOK: query: DESCRIBE FORMATTED episodes_partitioned_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@episodes_partitioned -POSTHOOK: query: DESCRIBE FORMATTED episodes_partitioned +PREHOOK: Input: default@episodes_partitioned_n0 +POSTHOOK: query: DESCRIBE FORMATTED episodes_partitioned_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@episodes_partitioned +POSTHOOK: Input: default@episodes_partitioned_n0 col_name data_type comment # col_name data_type comment title string episode title @@ -219,7 +219,7 @@ Table Parameters: numPartitions 7 numRows 8 rawDataSize 0 - totalSize 3077 + totalSize 3098 #### A masked pattern was here #### # Storage Information @@ -231,13 
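[editor's note] The ALTER TABLE ... SET SERDE hunks above sit at the heart of this test: the reader schema is replaced with one that carries a fourth field, "value", declared with "default": 0. Avro resolves the old data files against the new reader schema, so rows written before the change come back with value = 0 — visible in the query results further down, where every row shows a 0 in the fourth column. A sketch of the evolution step, using a hypothetical table name:

    ALTER TABLE episodes_demo
    SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH
    SERDEPROPERTIES ('avro.schema.literal'='{
      "namespace": "testing.hive.avro.serde",
      "name": "episodes_demo",
      "type": "record",
      "fields": [
        {"name": "title", "type": "string"},
        {"name": "air_date", "type": "string"},
        {"name": "doctor", "type": "int"},
        {"name": "value", "type": "int", "default": 0}
      ]
    }');
    -- pre-existing files lack "value"; the declared default fills it in
    SELECT title, value FROM episodes_demo;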
@@ -231,13 +231,13 @@ Num Buckets: -1
 Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
-	avro.schema.literal	{\n \"namespace\": \"testing.hive.avro.serde\",\n \"name\": \"episodes\",\n \"type\": \"record\",\n \"fields\": [\n {\n \"name\":\"title\",\n \"type\":\"string\",\n \"doc\":\"episode title\"\n },\n {\n \"name\":\"air_date\",\n \"type\":\"string\",\n \"doc\":\"initial date\"\n },\n {\n \"name\":\"doctor\",\n \"type\":\"int\",\n \"doc\":\"main actor playing the Doctor in episode\"\n },\n {\n \"name\":\"value\",\n \"type\":\"int\",\n \"default\":0,\n \"doc\":\"default value\"\n }\n ]\n}
+	avro.schema.literal	{\n \"namespace\": \"testing.hive.avro.serde\",\n \"name\": \"episodes_n0\",\n \"type\": \"record\",\n \"fields\": [\n {\n \"name\":\"title\",\n \"type\":\"string\",\n \"doc\":\"episode title\"\n },\n {\n \"name\":\"air_date\",\n \"type\":\"string\",\n \"doc\":\"initial date\"\n },\n {\n \"name\":\"doctor\",\n \"type\":\"int\",\n \"doc\":\"main actor playing the Doctor in episode\"\n },\n {\n \"name\":\"value\",\n \"type\":\"int\",\n \"default\":0,\n \"doc\":\"default value\"\n }\n ]\n}
 	serialization.format	1
 PREHOOK: query: EXPLAIN
-SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6
 POSTHOOK: type: QUERY
 Explain
 STAGE DEPENDENCIES:
@@ -249,84 +249,84 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: episodes_partitioned
-          Statistics: Num rows: 3 Data size: 8890 Basic stats: COMPLETE Column stats: NONE
+          alias: episodes_partitioned_n0
+          Statistics: Num rows: 3 Data size: 8950 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: title (type: string), air_date (type: string), doctor (type: int), value (type: int), doctor_pt (type: int)
            outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 3 Data size: 8890 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 8950 Basic stats: COMPLETE Column stats: NONE
             ListSink
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+PREHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+PREHOOK: Input: default@episodes_partitioned_n0
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+POSTHOOK: Input: default@episodes_partitioned_n0
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=9
 #### A masked pattern was here ####
-episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+episodes_partitioned_n0.title	episodes_partitioned_n0.air_date	episodes_partitioned_n0.doctor	episodes_partitioned_n0.value	episodes_partitioned_n0.doctor_pt
 Rose	26 March 2005	9	0	9
 The Doctor's Wife	14 May 2011	11	0	11
 The Eleventh Hour	3 April 2010	11	0	11
-PREHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5
+PREHOOK: query: SELECT * FROM episodes_partitioned_n0 ORDER BY air_date LIMIT 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=1
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=2
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=4
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=5
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+PREHOOK: Input: default@episodes_partitioned_n0
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=1
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=2
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=4
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=5
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=6
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n0 ORDER BY air_date LIMIT 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=2
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=4
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=5
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+POSTHOOK: Input: default@episodes_partitioned_n0
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=1
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=2
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=4
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=5
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=6
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=9
 #### A masked pattern was here ####
-episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+episodes_partitioned_n0.title	episodes_partitioned_n0.air_date	episodes_partitioned_n0.doctor	episodes_partitioned_n0.value	episodes_partitioned_n0.doctor_pt
 An Unearthly Child	23 November 1963	1	0	1
 Horror of Fang Rock	3 September 1977	4	0	4
 Rose	26 March 2005	9	0	9
 The Doctor's Wife	14 May 2011	11	0	11
 The Eleventh Hour	3 April 2010	11	0	11
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6
+PREHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 6
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
+PREHOOK: Input: default@episodes_partitioned_n0
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=6
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 6
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
+POSTHOOK: Input: default@episodes_partitioned_n0
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=6
 #### A masked pattern was here ####
-episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+episodes_partitioned_n0.title	episodes_partitioned_n0.air_date	episodes_partitioned_n0.doctor	episodes_partitioned_n0.value	episodes_partitioned_n0.doctor_pt
 The Mysterious Planet	6 September 1986	6	0	6
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
+PREHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 7 LIMIT 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
+PREHOOK: Input: default@episodes_partitioned_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 7 LIMIT 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
+POSTHOOK: Input: default@episodes_partitioned_n0
 #### A masked pattern was here ####
-episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+episodes_partitioned_n0.title	episodes_partitioned_n0.air_date	episodes_partitioned_n0.doctor	episodes_partitioned_n0.value	episodes_partitioned_n0.doctor_pt
 PREHOOK: query: EXPLAIN
-SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6
 POSTHOOK: type: QUERY
 Explain
 STAGE DEPENDENCIES:
@@ -338,15 +338,15 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-          alias: episodes_partitioned
-          Statistics: Num rows: 3 Data size: 8890 Basic stats: COMPLETE Column stats: NONE
+          alias: episodes_partitioned_n0
+          Statistics: Num rows: 3 Data size: 8950 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: title (type: string), air_date (type: string), doctor (type: int), value (type: int), doctor_pt (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 3 Data size: 8890 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 8950 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
              compressed: false
-              Statistics: Num rows: 3 Data size: 8890 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 8950 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -359,68 +359,68 @@ STAGE PLANS:
       Processor Tree:
        ListSink
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+PREHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+PREHOOK: Input: default@episodes_partitioned_n0
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt > 6
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+POSTHOOK: Input: default@episodes_partitioned_n0
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=9
 #### A masked pattern was here ####
-episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+episodes_partitioned_n0.title	episodes_partitioned_n0.air_date	episodes_partitioned_n0.doctor	episodes_partitioned_n0.value	episodes_partitioned_n0.doctor_pt
 Rose	26 March 2005	9	0	9
 The Doctor's Wife	14 May 2011	11	0	11
 The Eleventh Hour	3 April 2010	11	0	11
-PREHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5
+PREHOOK: query: SELECT * FROM episodes_partitioned_n0 ORDER BY air_date LIMIT 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=1
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=2
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=4
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=5
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
+PREHOOK: Input: default@episodes_partitioned_n0
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=1
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=11
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=2
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=4
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=5
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=6
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n0 ORDER BY air_date LIMIT 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=2
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=4
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=5
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
+POSTHOOK: Input: default@episodes_partitioned_n0
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=1
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=11
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=2
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=4
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=5
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=6
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=9
 #### A masked pattern was here ####
-episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+episodes_partitioned_n0.title	episodes_partitioned_n0.air_date	episodes_partitioned_n0.doctor	episodes_partitioned_n0.value	episodes_partitioned_n0.doctor_pt
 An Unearthly Child	23 November 1963	1	0	1
 Horror of Fang Rock	3 September 1977	4	0	4
 Rose	26 March 2005	9	0	9
 The Doctor's Wife	14 May 2011	11	0	11
 The Eleventh Hour	3 April 2010	11	0	11
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6
+PREHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 6
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
+PREHOOK: Input: default@episodes_partitioned_n0
+PREHOOK: Input: default@episodes_partitioned_n0@doctor_pt=6
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 6
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
+POSTHOOK: Input: default@episodes_partitioned_n0
+POSTHOOK: Input: default@episodes_partitioned_n0@doctor_pt=6
 #### A masked pattern was here ####
-episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+episodes_partitioned_n0.title	episodes_partitioned_n0.air_date	episodes_partitioned_n0.doctor	episodes_partitioned_n0.value	episodes_partitioned_n0.doctor_pt
 The Mysterious Planet	6 September 1986	6	0	6
-PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
+PREHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 7 LIMIT 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@episodes_partitioned
+PREHOOK: Input: default@episodes_partitioned_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5
+POSTHOOK: query: SELECT * FROM episodes_partitioned_n0 WHERE doctor_pt = 7 LIMIT 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@episodes_partitioned
+POSTHOOK: Input: default@episodes_partitioned_n0
 #### A masked pattern was here ####
-episodes_partitioned.title	episodes_partitioned.air_date	episodes_partitioned.doctor	episodes_partitioned.value	episodes_partitioned.doctor_pt
+episodes_partitioned_n0.title	episodes_partitioned_n0.air_date	episodes_partitioned_n0.doctor	episodes_partitioned_n0.value	episodes_partitioned_n0.doctor_pt
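[editor's note] The rename is not free in this golden file: the stored sizes shift too (totalSize 3077 -> 3098, Data size 8890 -> 8950 in the plans above), presumably because every Avro data file embeds its writer schema and the record name grew by three characters — which is why the statistics lines had to be regenerated along with the names. The EXPLAIN hunks themselves only confirm that the scan picks up the new alias, e.g. (hypothetical name):

    EXPLAIN SELECT * FROM episodes_demo WHERE doctor_pt > 6;
    -- plan shows: TableScan / alias: episodes_demo, with partition-pruned statistics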
diff --git a/ql/src/test/results/clientpositive/avro_tableproperty_optimize.q.out b/ql/src/test/results/clientpositive/avro_tableproperty_optimize.q.out
index 8660c44a35..6fc005abe8 100644
--- a/ql/src/test/results/clientpositive/avro_tableproperty_optimize.q.out
+++ b/ql/src/test/results/clientpositive/avro_tableproperty_optimize.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE avro_extschema_literal
+PREHOOK: query: CREATE TABLE avro_extschema_literal_n0
 STORED AS AVRO
 TBLPROPERTIES ('avro.schema.literal'='{
   "namespace": "org.apache.hive",
@@ -11,8 +11,8 @@ TBLPROPERTIES ('avro.schema.literal'='{
   ]
 }')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_extschema_literal
-POSTHOOK: query: CREATE TABLE avro_extschema_literal
+PREHOOK: Output: default@avro_extschema_literal_n0
+POSTHOOK: query: CREATE TABLE avro_extschema_literal_n0
 STORED AS AVRO
 TBLPROPERTIES ('avro.schema.literal'='{
   "namespace": "org.apache.hive",
@@ -25,79 +25,79 @@ TBLPROPERTIES ('avro.schema.literal'='{
   ]
 }')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_extschema_literal
-PREHOOK: query: INSERT INTO TABLE avro_extschema_literal VALUES('s1', 1, 's2')
+POSTHOOK: Output: default@avro_extschema_literal_n0
+PREHOOK: query: INSERT INTO TABLE avro_extschema_literal_n0 VALUES('s1', 1, 's2')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@avro_extschema_literal
-POSTHOOK: query: INSERT INTO TABLE avro_extschema_literal VALUES('s1', 1, 's2')
+PREHOOK: Output: default@avro_extschema_literal_n0
+POSTHOOK: query: INSERT INTO TABLE avro_extschema_literal_n0 VALUES('s1', 1, 's2')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@avro_extschema_literal
-POSTHOOK: Lineage: avro_extschema_literal.col1 SCRIPT []
-POSTHOOK: Lineage: avro_extschema_literal.col2 SCRIPT []
-POSTHOOK: Lineage: avro_extschema_literal.col3 SCRIPT []
-PREHOOK: query: DESCRIBE EXTENDED avro_extschema_literal
+POSTHOOK: Output: default@avro_extschema_literal_n0
+POSTHOOK: Lineage: avro_extschema_literal_n0.col1 SCRIPT []
+POSTHOOK: Lineage: avro_extschema_literal_n0.col2 SCRIPT []
+POSTHOOK: Lineage: avro_extschema_literal_n0.col3 SCRIPT []
+PREHOOK: query: DESCRIBE EXTENDED avro_extschema_literal_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@avro_extschema_literal
-POSTHOOK: query: DESCRIBE EXTENDED avro_extschema_literal
+PREHOOK: Input: default@avro_extschema_literal_n0
+POSTHOOK: query: DESCRIBE EXTENDED avro_extschema_literal_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@avro_extschema_literal
+POSTHOOK: Input: default@avro_extschema_literal_n0
 col1	string
 col2	bigint
 col3	string
 #### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM avro_extschema_literal
+PREHOOK: query: SELECT * FROM avro_extschema_literal_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_extschema_literal
+PREHOOK: Input: default@avro_extschema_literal_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_extschema_literal
+POSTHOOK: query: SELECT * FROM avro_extschema_literal_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_extschema_literal
+POSTHOOK: Input: default@avro_extschema_literal_n0
 #### A masked pattern was here ####
 s1	1	s2
-PREHOOK: query: CREATE TABLE avro_extschema_url
+PREHOOK: query: CREATE TABLE avro_extschema_url_n0
 STORED AS AVRO
 #### A masked pattern was here ####
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_extschema_url
-POSTHOOK: query: CREATE TABLE avro_extschema_url
+PREHOOK: Output: default@avro_extschema_url_n0
+POSTHOOK: query: CREATE TABLE avro_extschema_url_n0
 STORED AS AVRO
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_extschema_url
-PREHOOK: query: INSERT INTO TABLE avro_extschema_url VALUES('s1', 1, 's2')
+POSTHOOK: Output: default@avro_extschema_url_n0
+PREHOOK: query: INSERT INTO TABLE avro_extschema_url_n0 VALUES('s1', 1, 's2')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@avro_extschema_url
-POSTHOOK: query: INSERT INTO TABLE avro_extschema_url VALUES('s1', 1, 's2')
+PREHOOK: Output: default@avro_extschema_url_n0
+POSTHOOK: query: INSERT INTO TABLE avro_extschema_url_n0 VALUES('s1', 1, 's2')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@avro_extschema_url
-POSTHOOK: Lineage: avro_extschema_url.col1 SCRIPT []
-POSTHOOK: Lineage: avro_extschema_url.col2 SCRIPT []
-POSTHOOK: Lineage: avro_extschema_url.col3 SCRIPT []
-PREHOOK: query: DESCRIBE EXTENDED avro_extschema_url
+POSTHOOK: Output: default@avro_extschema_url_n0
+POSTHOOK: Lineage: avro_extschema_url_n0.col1 SCRIPT []
+POSTHOOK: Lineage: avro_extschema_url_n0.col2 SCRIPT []
+POSTHOOK: Lineage: avro_extschema_url_n0.col3 SCRIPT []
+PREHOOK: query: DESCRIBE EXTENDED avro_extschema_url_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@avro_extschema_url
-POSTHOOK: query: DESCRIBE EXTENDED avro_extschema_url
+PREHOOK: Input: default@avro_extschema_url_n0
+POSTHOOK: query: DESCRIBE EXTENDED avro_extschema_url_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@avro_extschema_url
+POSTHOOK: Input: default@avro_extschema_url_n0
 col1	string
 col2	bigint
 col3	string
 #### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM avro_extschema_url
+PREHOOK: query: SELECT * FROM avro_extschema_url_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_extschema_url
+PREHOOK: Input: default@avro_extschema_url_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_extschema_url
+POSTHOOK: query: SELECT * FROM avro_extschema_url_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_extschema_url
+POSTHOOK: Input: default@avro_extschema_url_n0
 #### A masked pattern was here ####
 s1	1	s2
 PREHOOK: query: CREATE TABLE avro_extschema_literal1
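[editor's note] The file above, avro_tableproperty_optimize.q.out, contrasts the two ways of handing the Avro schema to the serde: inline via avro.schema.literal (first table) and by reference via avro.schema.url (second table, whose URL is masked in the golden output). A sketch of the url variant, with an illustrative table name and path not taken from the patch:

    CREATE TABLE avro_url_demo
    STORED AS AVRO
    TBLPROPERTIES ('avro.schema.url'='hdfs:///schemas/demo_record.avsc');
    -- behaves like the literal form; DESCRIBE shows the columns resolved from the .avsc
    DESCRIBE EXTENDED avro_url_demo;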
] +PREHOOK: query: select count(*) from emptyavro_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@emptyavro +PREHOOK: Input: default@emptyavro_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from emptyavro +POSTHOOK: query: select count(*) from emptyavro_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@emptyavro +POSTHOOK: Input: default@emptyavro_n0 #### A masked pattern was here #### 1 -PREHOOK: query: insert into emptyavro select key from src where key = 100 limit 1 +PREHOOK: query: insert into emptyavro_n0 select key from src where key = 100 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@emptyavro -POSTHOOK: query: insert into emptyavro select key from src where key = 100 limit 1 +PREHOOK: Output: default@emptyavro_n0 +POSTHOOK: query: insert into emptyavro_n0 select key from src where key = 100 limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@emptyavro -POSTHOOK: Lineage: emptyavro.a EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from emptyavro +POSTHOOK: Output: default@emptyavro_n0 +POSTHOOK: Lineage: emptyavro_n0.a EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from emptyavro_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@emptyavro +PREHOOK: Input: default@emptyavro_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from emptyavro +POSTHOOK: query: select * from emptyavro_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@emptyavro +POSTHOOK: Input: default@emptyavro_n0 #### A masked pattern was here #### 0 100 diff --git a/ql/src/test/results/clientpositive/avrotblsjoin.q.out b/ql/src/test/results/clientpositive/avrotblsjoin.q.out index a686235e57..8468bcb87a 100644 --- a/ql/src/test/results/clientpositive/avrotblsjoin.q.out +++ b/ql/src/test/results/clientpositive/avrotblsjoin.q.out @@ -1,12 +1,12 @@ -PREHOOK: query: drop table if exists table1 +PREHOOK: query: drop table if exists table1_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists table1 +POSTHOOK: query: drop table if exists table1_n1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table if exists table1_1 PREHOOK: type: DROPTABLE POSTHOOK: query: drop table if exists table1_1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table table1 +PREHOOK: query: create table table1_n1 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS INPUTFORMAT @@ -16,8 +16,8 @@ PREHOOK: query: create table table1 #### A masked pattern was here #### PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table1 -POSTHOOK: query: create table table1 +PREHOOK: Output: default@table1_n1 +POSTHOOK: query: create table table1_n1 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS INPUTFORMAT @@ -27,7 +27,7 @@ POSTHOOK: query: create table table1 #### A masked pattern was here #### POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table1 +POSTHOOK: Output: default@table1_n1 PREHOOK: query: create table table1_1 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' @@ -50,17 +50,17 @@ POSTHOOK: query: create table table1_1 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@table1_1 -PREHOOK: query: insert into table1 values ("1", "2", "3") +PREHOOK: query: insert into table1_n1 values ("1", "2", "3") PREHOOK: type: QUERY PREHOOK: Input: 
_dummy_database@_dummy_table -PREHOOK: Output: default@table1 -POSTHOOK: query: insert into table1 values ("1", "2", "3") +PREHOOK: Output: default@table1_n1 +POSTHOOK: query: insert into table1_n1 values ("1", "2", "3") POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@table1 -POSTHOOK: Lineage: table1.col1 SCRIPT [] -POSTHOOK: Lineage: table1.col2 SCRIPT [] -POSTHOOK: Lineage: table1.col3 SCRIPT [] +POSTHOOK: Output: default@table1_n1 +POSTHOOK: Lineage: table1_n1.col1 SCRIPT [] +POSTHOOK: Lineage: table1_n1.col2 SCRIPT [] +POSTHOOK: Lineage: table1_n1.col3 SCRIPT [] PREHOOK: query: insert into table1_1 values (1, "2") PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table @@ -73,14 +73,14 @@ POSTHOOK: Lineage: table1_1.col1 SCRIPT [] POSTHOOK: Lineage: table1_1.col2 SCRIPT [] WARNING: Comparing a bigint and a string may result in a loss of precision. Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: select table1.col1, table1_1.* from table1 join table1_1 on table1.col1=table1_1.col1 where table1_1.col1="1" +PREHOOK: query: select table1_n1.col1, table1_1.* from table1_n1 join table1_1 on table1_n1.col1=table1_1.col1 where table1_1.col1="1" PREHOOK: type: QUERY -PREHOOK: Input: default@table1 PREHOOK: Input: default@table1_1 +PREHOOK: Input: default@table1_n1 #### A masked pattern was here #### -POSTHOOK: query: select table1.col1, table1_1.* from table1 join table1_1 on table1.col1=table1_1.col1 where table1_1.col1="1" +POSTHOOK: query: select table1_n1.col1, table1_1.* from table1_n1 join table1_1 on table1_n1.col1=table1_1.col1 where table1_1.col1="1" POSTHOOK: type: QUERY -POSTHOOK: Input: default@table1 POSTHOOK: Input: default@table1_1 +POSTHOOK: Input: default@table1_n1 #### A masked pattern was here #### 1 1 2 diff --git a/ql/src/test/results/clientpositive/ba_table1.q.out b/ql/src/test/results/clientpositive/ba_table1.q.out index 6643bb1483..c0ab0ec0f7 100644 --- a/ql/src/test/results/clientpositive/ba_table1.q.out +++ b/ql/src/test/results/clientpositive/ba_table1.q.out @@ -1,42 +1,42 @@ -PREHOOK: query: drop table ba_test +PREHOOK: query: drop table ba_test_n4 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table ba_test +POSTHOOK: query: drop table ba_test_n4 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) +PREHOOK: query: create table ba_test_n4 (ba_key binary, ba_val binary) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@ba_test -POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) +PREHOOK: Output: default@ba_test_n4 +POSTHOOK: query: create table ba_test_n4 (ba_key binary, ba_val binary) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@ba_test -PREHOOK: query: describe extended ba_test +POSTHOOK: Output: default@ba_test_n4 +PREHOOK: query: describe extended ba_test_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@ba_test -POSTHOOK: query: describe extended ba_test +PREHOOK: Input: default@ba_test_n4 +POSTHOOK: query: describe extended ba_test_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@ba_test +POSTHOOK: Input: default@ba_test_n4 ba_key binary ba_val binary #### A masked pattern was here #### -PREHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary) +PREHOOK: query: from src insert overwrite table ba_test_n4 select cast (src.key as 
binary), cast (src.value as binary) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@ba_test -POSTHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary) +PREHOOK: Output: default@ba_test_n4 +POSTHOOK: query: from src insert overwrite table ba_test_n4 select cast (src.key as binary), cast (src.value as binary) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@ba_test -POSTHOOK: Lineage: ba_test.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: ba_test.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from ba_test tablesample (10 rows) +POSTHOOK: Output: default@ba_test_n4 +POSTHOOK: Lineage: ba_test_n4.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: ba_test_n4.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from ba_test_n4 tablesample (10 rows) PREHOOK: type: QUERY -PREHOOK: Input: default@ba_test +PREHOOK: Input: default@ba_test_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from ba_test tablesample (10 rows) +POSTHOOK: query: select * from ba_test_n4 tablesample (10 rows) POSTHOOK: type: QUERY -POSTHOOK: Input: default@ba_test +POSTHOOK: Input: default@ba_test_n4 #### A masked pattern was here #### 165 val_165 238 val_238 @@ -48,11 +48,11 @@ POSTHOOK: Input: default@ba_test 484 val_484 86 val_86 98 val_98 -PREHOOK: query: drop table ba_test +PREHOOK: query: drop table ba_test_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@ba_test -PREHOOK: Output: default@ba_test -POSTHOOK: query: drop table ba_test +PREHOOK: Input: default@ba_test_n4 +PREHOOK: Output: default@ba_test_n4 +POSTHOOK: query: drop table ba_test_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@ba_test -POSTHOOK: Output: default@ba_test +POSTHOOK: Input: default@ba_test_n4 +POSTHOOK: Output: default@ba_test_n4 diff --git a/ql/src/test/results/clientpositive/ba_table2.q.out b/ql/src/test/results/clientpositive/ba_table2.q.out index 8694ac8a3f..8f4d12f6b7 100644 --- a/ql/src/test/results/clientpositive/ba_table2.q.out +++ b/ql/src/test/results/clientpositive/ba_table2.q.out @@ -1,50 +1,50 @@ -PREHOOK: query: drop table ba_test +PREHOOK: query: drop table ba_test_n3 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table ba_test +POSTHOOK: query: drop table ba_test_n3 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) +PREHOOK: query: create table ba_test_n3 (ba_key binary, ba_val binary) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@ba_test -POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) +PREHOOK: Output: default@ba_test_n3 +POSTHOOK: query: create table ba_test_n3 (ba_key binary, ba_val binary) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@ba_test -PREHOOK: query: alter table ba_test set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' +POSTHOOK: Output: default@ba_test_n3 +PREHOOK: query: alter table ba_test_n3 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' PREHOOK: type: ALTERTABLE_SERIALIZER -PREHOOK: Input: default@ba_test -PREHOOK: Output: default@ba_test -POSTHOOK: query: alter table ba_test set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' +PREHOOK: 
Input: default@ba_test_n3 +PREHOOK: Output: default@ba_test_n3 +POSTHOOK: query: alter table ba_test_n3 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER -POSTHOOK: Input: default@ba_test -POSTHOOK: Output: default@ba_test -PREHOOK: query: describe extended ba_test +POSTHOOK: Input: default@ba_test_n3 +POSTHOOK: Output: default@ba_test_n3 +PREHOOK: query: describe extended ba_test_n3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@ba_test -POSTHOOK: query: describe extended ba_test +PREHOOK: Input: default@ba_test_n3 +POSTHOOK: query: describe extended ba_test_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@ba_test +POSTHOOK: Input: default@ba_test_n3 ba_key binary ba_val binary #### A masked pattern was here #### -PREHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary) +PREHOOK: query: from src insert overwrite table ba_test_n3 select cast (src.key as binary), cast (src.value as binary) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@ba_test -POSTHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary) +PREHOOK: Output: default@ba_test_n3 +POSTHOOK: query: from src insert overwrite table ba_test_n3 select cast (src.key as binary), cast (src.value as binary) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@ba_test -POSTHOOK: Lineage: ba_test.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: ba_test.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from ba_test tablesample (10 rows) +POSTHOOK: Output: default@ba_test_n3 +POSTHOOK: Lineage: ba_test_n3.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: ba_test_n3.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from ba_test_n3 tablesample (10 rows) PREHOOK: type: QUERY -PREHOOK: Input: default@ba_test +PREHOOK: Input: default@ba_test_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from ba_test tablesample (10 rows) +POSTHOOK: query: select * from ba_test_n3 tablesample (10 rows) POSTHOOK: type: QUERY -POSTHOOK: Input: default@ba_test +POSTHOOK: Input: default@ba_test_n3 #### A masked pattern was here #### 165 val_165 238 val_238 @@ -56,11 +56,11 @@ POSTHOOK: Input: default@ba_test 484 val_484 86 val_86 98 val_98 -PREHOOK: query: drop table ba_test +PREHOOK: query: drop table ba_test_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@ba_test -PREHOOK: Output: default@ba_test -POSTHOOK: query: drop table ba_test +PREHOOK: Input: default@ba_test_n3 +PREHOOK: Output: default@ba_test_n3 +POSTHOOK: query: drop table ba_test_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@ba_test -POSTHOOK: Output: default@ba_test +POSTHOOK: Input: default@ba_test_n3 +POSTHOOK: Output: default@ba_test_n3 diff --git a/ql/src/test/results/clientpositive/ba_table3.q.out b/ql/src/test/results/clientpositive/ba_table3.q.out index 2ddc78b817..a1350a9b15 100644 --- a/ql/src/test/results/clientpositive/ba_table3.q.out +++ b/ql/src/test/results/clientpositive/ba_table3.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: drop table ba_test +PREHOOK: query: drop table ba_test_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table ba_test +POSTHOOK: query: drop table ba_test_n2 POSTHOOK: type: DROPTABLE 
-PREHOOK: query: create table ba_test (ba_key binary, ba_val binary)
+PREHOOK: query: create table ba_test_n2 (ba_key binary, ba_val binary)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary)
+PREHOOK: Output: default@ba_test_n2
+POSTHOOK: query: create table ba_test_n2 (ba_key binary, ba_val binary)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ba_test
-PREHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary)
+POSTHOOK: Output: default@ba_test_n2
+PREHOOK: query: from src insert overwrite table ba_test_n2 select cast (src.key as binary), cast (src.value as binary)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary)
+PREHOOK: Output: default@ba_test_n2
+POSTHOOK: query: from src insert overwrite table ba_test_n2 select cast (src.key as binary), cast (src.value as binary)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@ba_test
-POSTHOOK: Lineage: ba_test.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ba_test.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select ba_test.ba_key, count(ba_test.ba_val) from ba_test group by ba_test.ba_key order by ba_key limit 5
+POSTHOOK: Output: default@ba_test_n2
+POSTHOOK: Lineage: ba_test_n2.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ba_test_n2.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select ba_test_n2.ba_key, count(ba_test_n2.ba_val) from ba_test_n2 group by ba_test_n2.ba_key order by ba_key limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@ba_test
+PREHOOK: Input: default@ba_test_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select ba_test.ba_key, count(ba_test.ba_val) from ba_test group by ba_test.ba_key order by ba_key limit 5
+POSTHOOK: query: select ba_test_n2.ba_key, count(ba_test_n2.ba_val) from ba_test_n2 group by ba_test_n2.ba_key order by ba_key limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@ba_test
+POSTHOOK: Input: default@ba_test_n2
 #### A masked pattern was here ####
 0	3
 10	1
 100	2
 103	2
 104	2
-PREHOOK: query: drop table ba_test
+PREHOOK: query: drop table ba_test_n2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ba_test
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: drop table ba_test
+PREHOOK: Input: default@ba_test_n2
+PREHOOK: Output: default@ba_test_n2
+POSTHOOK: query: drop table ba_test_n2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ba_test
-POSTHOOK: Output: default@ba_test
+POSTHOOK: Input: default@ba_test_n2
+POSTHOOK: Output: default@ba_test_n2
diff --git a/ql/src/test/results/clientpositive/ba_table_udfs.q.out b/ql/src/test/results/clientpositive/ba_table_udfs.q.out
index 0a48b09470..30f6158c60 100644
--- a/ql/src/test/results/clientpositive/ba_table_udfs.q.out
+++ b/ql/src/test/results/clientpositive/ba_table_udfs.q.out
@@ -4,19 +4,19 @@ PREHOOK: Input: database:default
 POSTHOOK: query: USE default
 POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:default
-PREHOOK: query: CREATE TABLE dest1(bytes1 BINARY,
+PREHOOK: query: CREATE TABLE dest1_n122(bytes1 BINARY,
 bytes2 BINARY,
 string STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(bytes1 BINARY,
+PREHOOK: Output: default@dest1_n122
+POSTHOOK: query: CREATE TABLE dest1_n122(bytes1 BINARY,
 bytes2 BINARY,
 string STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1
+POSTHOOK: Output: default@dest1_n122
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n122
 SELECT
 CAST(key AS BINARY),
 CAST(value AS BINARY),
@@ -25,8 +25,8 @@ ORDER BY value
 LIMIT 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1
+PREHOOK: Output: default@dest1_n122
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n122
 SELECT
 CAST(key AS BINARY),
 CAST(value AS BINARY),
@@ -35,21 +35,21 @@ ORDER BY value
 LIMIT 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.bytes1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.bytes2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.string SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT INTO TABLE dest1 SELECT NULL, NULL, NULL FROM dest1 LIMIT 1
+POSTHOOK: Output: default@dest1_n122
+POSTHOOK: Lineage: dest1_n122.bytes1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n122.bytes2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n122.string SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT INTO TABLE dest1_n122 SELECT NULL, NULL, NULL FROM dest1_n122 LIMIT 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT INTO TABLE dest1 SELECT NULL, NULL, NULL FROM dest1 LIMIT 1
+PREHOOK: Input: default@dest1_n122
+PREHOOK: Output: default@dest1_n122
+POSTHOOK: query: INSERT INTO TABLE dest1_n122 SELECT NULL, NULL, NULL FROM dest1_n122 LIMIT 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.bytes1 EXPRESSION []
-POSTHOOK: Lineage: dest1.bytes2 EXPRESSION []
-POSTHOOK: Lineage: dest1.string EXPRESSION []
+POSTHOOK: Input: default@dest1_n122
+POSTHOOK: Output: default@dest1_n122
+POSTHOOK: Lineage: dest1_n122.bytes1 EXPRESSION []
+POSTHOOK: Lineage: dest1_n122.bytes2 EXPRESSION []
+POSTHOOK: Lineage: dest1_n122.string EXPRESSION []
 PREHOOK: query: SELECT
 bytes1,
 bytes2,
@@ -65,9 +65,9 @@ PREHOOK: query: SELECT
 UNBASE64(BASE64(bytes1)),
 HEX(ENCODE(string, 'US-ASCII')),
 DECODE(ENCODE(string, 'US-ASCII'), 'US-ASCII')
-FROM dest1
+FROM dest1_n122
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n122
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT
 bytes1,
@@ -84,9 +84,9 @@ POSTHOOK: query: SELECT
 UNBASE64(BASE64(bytes1)),
 HEX(ENCODE(string, 'US-ASCII')),
 DECODE(ENCODE(string, 'US-ASCII'), 'US-ASCII')
-FROM dest1
+FROM dest1_n122
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n122
 #### A masked pattern was here ####
 0	val_0	val_0	1	0val_0	val_	l_0	al_	30	0	MA==	0	76616C5F30	val_0
 0	val_0	val_0	1	0val_0	val_	l_0	al_	30	0	MA==	0	76616C5F30	val_0
diff --git a/ql/src/test/results/clientpositive/binary_output_format.q.out b/ql/src/test/results/clientpositive/binary_output_format.q.out
index 5c5867bb95..d235b57687 100644
--- a/ql/src/test/results/clientpositive/binary_output_format.q.out
+++ b/ql/src/test/results/clientpositive/binary_output_format.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE dest1(mydata STRING)
+PREHOOK: query: CREATE TABLE dest1_n91(mydata STRING)
 ROW FORMAT SERDE
 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 WITH SERDEPROPERTIES (
@@ -9,8 +9,8 @@ STORED AS
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(mydata STRING)
+PREHOOK: Output: default@dest1_n91
+POSTHOOK: query: CREATE TABLE dest1_n91(mydata STRING)
 ROW FORMAT SERDE
 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 WITH SERDEPROPERTIES (
@@ -21,9 +21,9 @@ STORED AS
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n91
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n91
 SELECT TRANSFORM(*)
 USING 'cat'
 AS mydata STRING
@@ -36,7 +36,7 @@ SELECT TRANSFORM(*)
 FROM src
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n91
 SELECT TRANSFORM(*)
 USING 'cat'
 AS mydata STRING
@@ -103,18 +103,18 @@ STAGE PLANS:
 columns.comments
 columns.types string
 #### A masked pattern was here ####
- name default.dest1
+ name default.dest1_n91
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest1 { string mydata}
+ serialization.ddl struct dest1_n91 { string mydata}
 serialization.format 1
 serialization.last.column.takes.rest true
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n91
 TotalFiles: 1
 GatherStats: true
 MultiFileSpray: false
@@ -243,18 +243,18 @@ STAGE PLANS:
 columns.comments
 columns.types string
 #### A masked pattern was here ####
- name default.dest1
+ name default.dest1_n91
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest1 { string mydata}
+ serialization.ddl struct dest1_n91 { string mydata}
 serialization.format 1
 serialization.last.column.takes.rest true
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n91
 Stage: Stage-2
 Stats Work
@@ -263,7 +263,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: mydata
 Column Types: string
- Table: default.dest1
+ Table: default.dest1_n91
 Is Table Level Stats: true
 Stage: Stage-3
@@ -288,18 +288,18 @@ STAGE PLANS:
 columns.comments
 columns.types string
 #### A masked pattern was here ####
- name default.dest1
+ name default.dest1_n91
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest1 { string mydata}
+ serialization.ddl struct dest1_n91 { string mydata}
 serialization.format 1
 serialization.last.column.takes.rest true
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n91
 TotalFiles: 1
 GatherStats: false
 MultiFileSpray: false
@@ -320,11 +320,11 @@ STAGE PLANS:
 columns.comments
 columns.types string
 #### A masked pattern was here ####
- name default.dest1
+ name default.dest1_n91
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest1 { string mydata}
+ serialization.ddl struct dest1_n91 { string mydata}
 serialization.format 1
 serialization.last.column.takes.rest true
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -343,19 +343,19 @@ STAGE PLANS:
 columns.comments
 columns.types string
 #### A masked pattern was here ####
- name default.dest1
+ name default.dest1_n91
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest1 { string mydata}
+ serialization.ddl struct dest1_n91 { string mydata}
 serialization.format 1
 serialization.last.column.takes.rest true
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
- name: default.dest1
+ name: default.dest1_n91
+ name: default.dest1_n91
 Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -381,18 +381,18 @@ STAGE PLANS:
 columns.comments
 columns.types string
 #### A masked pattern was here ####
- name default.dest1
+ name default.dest1_n91
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest1 { string mydata}
+ serialization.ddl struct dest1_n91 { string mydata}
 serialization.format 1
 serialization.last.column.takes.rest true
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n91
 TotalFiles: 1
 GatherStats: false
 MultiFileSpray: false
@@ -413,11 +413,11 @@ STAGE PLANS:
 columns.comments
 columns.types string
 #### A masked pattern was here ####
- name default.dest1
+ name default.dest1_n91
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest1 { string mydata}
+ serialization.ddl struct dest1_n91 { string mydata}
 serialization.format 1
 serialization.last.column.takes.rest true
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -436,19 +436,19 @@ STAGE PLANS:
 columns.comments
 columns.types string
 #### A masked pattern was here ####
- name default.dest1
+ name default.dest1_n91
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest1 { string mydata}
+ serialization.ddl struct dest1_n91 { string mydata}
 serialization.format 1
 serialization.last.column.takes.rest true
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
- name: default.dest1
+ name: default.dest1_n91
+ name: default.dest1_n91
 Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -458,7 +458,7 @@ STAGE PLANS:
 hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: query: INSERT OVERWRITE TABLE dest1_n91
 SELECT TRANSFORM(*)
 USING 'cat'
 AS mydata STRING
@@ -471,8 +471,8 @@ SELECT TRANSFORM(*)
 FROM src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: Output: default@dest1_n91
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n91
 SELECT TRANSFORM(*)
 USING 'cat'
 AS mydata STRING
@@ -485,15 +485,15 @@ SELECT TRANSFORM(*)
 FROM src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.mydata SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM dest1
+POSTHOOK: Output: default@dest1_n91
+POSTHOOK: Lineage: dest1_n91.mydata SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM dest1_n91
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n91
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM dest1
+POSTHOOK: query: SELECT * FROM dest1_n91
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n91
 #### A masked pattern was here ####
 238	val_238
 86	val_86
diff --git a/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out b/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out
index ba692ad4f7..8dc5e9affc 100644
--- a/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out
+++ b/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out
@@ -1,50 +1,50 @@
-PREHOOK: query: drop table ba_test
+PREHOOK: query: drop table ba_test_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ba_test
+POSTHOOK: query: drop table ba_test_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile
+PREHOOK: query: create table ba_test_n1 (ba_key binary, ba_val binary) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile
+PREHOOK: Output: default@ba_test_n1
+POSTHOOK: query: create table ba_test_n1 (ba_key binary, ba_val binary) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ba_test
-PREHOOK: query: alter table ba_test set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+POSTHOOK: Output: default@ba_test_n1
+PREHOOK: query: alter table ba_test_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
 PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@ba_test
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: alter table ba_test set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: Input: default@ba_test_n1
+PREHOOK: Output: default@ba_test_n1
+POSTHOOK: query: alter table ba_test_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
 POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@ba_test
-POSTHOOK: Output: default@ba_test
-PREHOOK: query: describe extended ba_test
+POSTHOOK: Input: default@ba_test_n1
+POSTHOOK: Output: default@ba_test_n1
+PREHOOK: query: describe extended ba_test_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@ba_test
-POSTHOOK: query: describe extended ba_test
+PREHOOK: Input: default@ba_test_n1
+POSTHOOK: query: describe extended ba_test_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@ba_test
+POSTHOOK: Input: default@ba_test_n1
 ba_key	binary
 ba_val	binary
 #### A masked pattern was here ####
-PREHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary)
+PREHOOK: query: from src insert overwrite table ba_test_n1 select cast (src.key as binary), cast (src.value as binary)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary)
+PREHOOK: Output: default@ba_test_n1
+POSTHOOK: query: from src insert overwrite table ba_test_n1 select cast (src.key as binary), cast (src.value as binary)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@ba_test
-POSTHOOK: Lineage: ba_test.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ba_test.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select ba_key, ba_val from ba_test order by ba_key limit 10
+POSTHOOK: Output: default@ba_test_n1
+POSTHOOK: Lineage: ba_test_n1.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ba_test_n1.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select ba_key, ba_val from ba_test_n1 order by ba_key limit 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@ba_test
+PREHOOK: Input: default@ba_test_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select ba_key, ba_val from ba_test order by ba_key limit 10
+POSTHOOK: query: select ba_key, ba_val from ba_test_n1 order by ba_key limit 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@ba_test
+POSTHOOK: Input: default@ba_test_n1
 #### A masked pattern was here ####
 0	val_0
 0	val_0
@@ -56,11 +56,11 @@ POSTHOOK: Input: default@ba_test
 103	val_103
 104	val_104
 104	val_104
-PREHOOK: query: drop table ba_test
+PREHOOK: query: drop table ba_test_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ba_test
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: drop table ba_test
+PREHOOK: Input: default@ba_test_n1
+PREHOOK: Output: default@ba_test_n1
+POSTHOOK: query: drop table ba_test_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ba_test
-POSTHOOK: Output: default@ba_test
+POSTHOOK: Input: default@ba_test_n1
+POSTHOOK: Output: default@ba_test_n1
diff --git a/ql/src/test/results/clientpositive/binary_table_colserde.q.out b/ql/src/test/results/clientpositive/binary_table_colserde.q.out
index 5fdddc9bcb..075a8c4922 100644
--- a/ql/src/test/results/clientpositive/binary_table_colserde.q.out
+++ b/ql/src/test/results/clientpositive/binary_table_colserde.q.out
@@ -1,50 +1,50 @@
-PREHOOK: query: drop table ba_test
+PREHOOK: query: drop table ba_test_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table ba_test
+POSTHOOK: query: drop table ba_test_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile
+PREHOOK: query: create table ba_test_n0 (ba_key binary, ba_val binary) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile
+PREHOOK: Output: default@ba_test_n0
+POSTHOOK: query: create table ba_test_n0 (ba_key binary, ba_val binary) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ba_test
-PREHOOK: query: alter table ba_test set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+POSTHOOK: Output: default@ba_test_n0
+PREHOOK: query: alter table ba_test_n0 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
 PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@ba_test
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: alter table ba_test set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+PREHOOK: Input: default@ba_test_n0
+PREHOOK: Output: default@ba_test_n0
+POSTHOOK: query: alter table ba_test_n0 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
 POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@ba_test
-POSTHOOK: Output: default@ba_test
-PREHOOK: query: describe extended ba_test
+POSTHOOK: Input: default@ba_test_n0
+POSTHOOK: Output: default@ba_test_n0
+PREHOOK: query: describe extended ba_test_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@ba_test
-POSTHOOK: query: describe extended ba_test
+PREHOOK: Input: default@ba_test_n0
+POSTHOOK: query: describe extended ba_test_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@ba_test
+POSTHOOK: Input: default@ba_test_n0
 ba_key	binary
 ba_val	binary
 #### A masked pattern was here ####
-PREHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary)
+PREHOOK: query: from src insert overwrite table ba_test_n0 select cast (src.key as binary), cast (src.value as binary)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary)
+PREHOOK: Output: default@ba_test_n0
+POSTHOOK: query: from src insert overwrite table ba_test_n0 select cast (src.key as binary), cast (src.value as binary)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@ba_test
-POSTHOOK: Lineage: ba_test.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ba_test.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select ba_key, ba_val from ba_test order by ba_key limit 10
+POSTHOOK: Output: default@ba_test_n0
+POSTHOOK: Lineage: ba_test_n0.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ba_test_n0.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select ba_key, ba_val from ba_test_n0 order by ba_key limit 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@ba_test
+PREHOOK: Input: default@ba_test_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select ba_key, ba_val from ba_test order by ba_key limit 10
+POSTHOOK: query: select ba_key, ba_val from ba_test_n0 order by ba_key limit 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@ba_test
+POSTHOOK: Input: default@ba_test_n0
 #### A masked pattern was here ####
 0	val_0
 0	val_0
@@ -56,11 +56,11 @@ POSTHOOK: Input: default@ba_test
 103	val_103
 104	val_104
 104	val_104
-PREHOOK: query: drop table ba_test
+PREHOOK: query: drop table ba_test_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ba_test
-PREHOOK: Output: default@ba_test
-POSTHOOK: query: drop table ba_test
+PREHOOK: Input: default@ba_test_n0
+PREHOOK: Output: default@ba_test_n0
+POSTHOOK: query: drop table ba_test_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ba_test
-POSTHOOK: Output: default@ba_test
+POSTHOOK: Input: default@ba_test_n0
+POSTHOOK: Output: default@ba_test_n0
diff --git a/ql/src/test/results/clientpositive/binarysortable_1.q.out b/ql/src/test/results/clientpositive/binarysortable_1.q.out
index 5575fa13aa..f073949021 100644
--- a/ql/src/test/results/clientpositive/binarysortable_1.q.out
+++ b/ql/src/test/results/clientpositive/binarysortable_1.q.out
@@ -1,30 +1,30 @@
-PREHOOK: query: CREATE TABLE mytable(key STRING, value STRING)
+PREHOOK: query: CREATE TABLE mytable_n0(key STRING, value STRING)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '9'
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@mytable
-POSTHOOK: query: CREATE TABLE mytable(key STRING, value STRING)
+PREHOOK: Output: default@mytable_n0
+POSTHOOK: query: CREATE TABLE mytable_n0(key STRING, value STRING)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '9'
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@mytable
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable
+POSTHOOK: Output: default@mytable_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@mytable
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable
+PREHOOK: Output: default@mytable_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@mytable
+POSTHOOK: Output: default@mytable_n0
 PREHOOK: query: EXPLAIN
 SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value
 FROM (
 SELECT key, sum(value) as value
- FROM mytable
+ FROM mytable_n0
 GROUP BY key
 ) a
 PREHOOK: type: QUERY
@@ -32,7 +32,7 @@ POSTHOOK: query: EXPLAIN
 SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value
 FROM (
 SELECT key, sum(value) as value
- FROM mytable
+ FROM mytable_n0
 GROUP BY key
 ) a
 POSTHOOK: type: QUERY
@@ -45,7 +45,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: mytable
+ alias: mytable_n0
 Statistics: Num rows: 1 Data size: 930 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -91,20 +91,20 @@ STAGE PLANS:
 PREHOOK: query: SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value
 FROM (
 SELECT key, sum(value) as value
- FROM mytable
+ FROM mytable_n0
 GROUP BY key
 ) a
 PREHOOK: type: QUERY
-PREHOOK: Input: default@mytable
+PREHOOK: Input: default@mytable_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value
 FROM (
 SELECT key, sum(value) as value
- FROM mytable
+ FROM mytable_n0
 GROUP BY key
 ) a
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@mytable
+POSTHOOK: Input: default@mytable_n0
 #### A masked pattern was here ####
 ^@^@^@	7.0
 ^@^A^@	9.0
diff --git a/ql/src/test/results/clientpositive/bucket_if_with_path_filter.q.out b/ql/src/test/results/clientpositive/bucket_if_with_path_filter.q.out
index b4ca9e6841..9e77f708ee 100644
--- a/ql/src/test/results/clientpositive/bucket_if_with_path_filter.q.out
+++ b/ql/src/test/results/clientpositive/bucket_if_with_path_filter.q.out
@@ -2,27 +2,27 @@ PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
+PREHOOK: Output: default@t1_n35
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: Create table t2 (dt string) stored as orc
+POSTHOOK: Output: default@t1_n35
+PREHOOK: query: Create table t2_n22 (dt string) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: Create table t2 (dt string) stored as orc
+PREHOOK: Output: default@t2_n22
+POSTHOOK: query: Create table t2_n22 (dt string) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-PREHOOK: query: SELECT /*+ MAPJOIN(b) */ a.dt FROM t1 a JOIN t2 b ON (a.dt = b.dt)
+POSTHOOK: Output: default@t2_n22
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ a.dt FROM t1_n35 a JOIN t2_n22 b ON (a.dt = b.dt)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n35
+PREHOOK: Input: default@t2_n22
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ a.dt FROM t1 a JOIN t2 b ON (a.dt = b.dt)
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ a.dt FROM t1_n35 a JOIN t2_n22 b ON (a.dt = b.dt)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n35
+POSTHOOK: Input: default@t2_n22
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_1.q.out b/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
index 43112a0fc5..ed986a282a 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
@@ -1,52 +1,52 @@
-PREHOOK: query: drop table table1
+PREHOOK: query: drop table table1_n8
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table table1
+POSTHOOK: query: drop table table1_n8
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table table2
+PREHOOK: query: drop table table2_n5
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table table2
+POSTHOOK: query: drop table table2_n5
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table table1(key string, value string) clustered by (key, value)
+PREHOOK: query: create table table1_n8(key string, value string) clustered by (key, value)
 sorted by (key, value) into 1 BUCKETS stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: create table table1(key string, value string) clustered by (key, value)
+PREHOOK: Output: default@table1_n8
+POSTHOOK: query: create table table1_n8(key string, value string) clustered by (key, value)
 sorted by (key, value) into 1 BUCKETS stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: create table table2(key string, value string) clustered by (value, key)
+POSTHOOK: Output: default@table1_n8
+PREHOOK: query: create table table2_n5(key string, value string) clustered by (value, key)
 sorted by (value, key) into 1 BUCKETS stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table2
-POSTHOOK: query: create table table2(key string, value string) clustered by (value, key)
+PREHOOK: Output: default@table2_n5
+POSTHOOK: query: create table table2_n5(key string, value string) clustered by (value, key)
 sorted by (value, key) into 1 BUCKETS stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table2
-PREHOOK: query: load data local inpath '../../data/files/SortCol1Col2/000000_0' overwrite into table table1
+POSTHOOK: Output: default@table2_n5
+PREHOOK: query: load data local inpath '../../data/files/SortCol1Col2/000000_0' overwrite into table table1_n8
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@table1
-POSTHOOK: query: load data local inpath '../../data/files/SortCol1Col2/000000_0' overwrite into table table1
+PREHOOK: Output: default@table1_n8
+POSTHOOK: query: load data local inpath '../../data/files/SortCol1Col2/000000_0' overwrite into table table1_n8
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@table1
-PREHOOK: query: load data local inpath '../../data/files/SortCol2Col1/000000_0' overwrite into table table2
+POSTHOOK: Output: default@table1_n8
+PREHOOK: query: load data local inpath '../../data/files/SortCol2Col1/000000_0' overwrite into table table2_n5
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@table2
-POSTHOOK: query: load data local inpath '../../data/files/SortCol2Col1/000000_0' overwrite into table table2
+PREHOOK: Output: default@table2_n5
+POSTHOOK: query: load data local inpath '../../data/files/SortCol2Col1/000000_0' overwrite into table table2_n5
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@table2
+POSTHOOK: Output: default@table2_n5
 PREHOOK: query: explain extended
-select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
+select /*+ mapjoin(b) */ count(*) from table1_n8 a join table2_n5 b on a.key=b.key and a.value=b.value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
+select /*+ mapjoin(b) */ count(*) from table1_n8 a join table2_n5 b on a.key=b.key and a.value=b.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-3 is a root stage
@@ -115,7 +115,7 @@ STAGE PLANS:
 Path -> Partition:
 #### A masked pattern was here ####
 Partition
- base file name: table1
+ base file name: table1_n8
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
@@ -128,11 +128,11 @@ STAGE PLANS:
 columns.comments
 columns.types string:string
 #### A masked pattern was here ####
- name default.table1
+ name default.table1_n8
 numFiles 1
 numRows 0
 rawDataSize 0
- serialization.ddl struct table1 { string key, string value}
+ serialization.ddl struct table1_n8 { string key, string value}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 20
@@ -151,20 +151,20 @@ STAGE PLANS:
 columns.comments
 columns.types string:string
 #### A masked pattern was here ####
- name default.table1
+ name default.table1_n8
 numFiles 1
 numRows 0
 rawDataSize 0
- serialization.ddl struct table1 { string key, string value}
+ serialization.ddl struct table1_n8 { string key, string value}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 20
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.table1
- name: default.table1
+ name: default.table1_n8
+ name: default.table1_n8
 Truncated Path -> Alias:
- /table1 [a]
+ /table1_n8 [a]
 Needs Tagging: false
 Reduce Operator Tree:
 Group By Operator
@@ -201,14 +201,14 @@ STAGE PLANS:
 Processor Tree:
 ListSink
-PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
+PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table1_n8 a join table2_n5 b on a.key=b.key and a.value=b.value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-PREHOOK: Input: default@table2
+PREHOOK: Input: default@table1_n8
+PREHOOK: Input: default@table2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
+POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table1_n8 a join table2_n5 b on a.key=b.key and a.value=b.value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-POSTHOOK: Input: default@table2
+POSTHOOK: Input: default@table1_n8
+POSTHOOK: Input: default@table2_n5
 #### A masked pattern was here ####
 4
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
index 3e711d5bfe..e4c7c641b7 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
@@ -1,119 +1,119 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n19 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@srcbucket_mapjoin_part_n19
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n19 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n19
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n19
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n19
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n19@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n19@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n19@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n19@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n19@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n19@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n19 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n19@ds=2008-04-08
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n16 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n16
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n16 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n16
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n16
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n16
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n16 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08
+PREHOOK: query: create table bucketmapjoin_hash_result_1_n7 (key bigint , value1 bigint, value2 bigint)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@bucketmapjoin_hash_result_1
-POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: Output: default@bucketmapjoin_hash_result_1_n7
+POSTHOOK: query: create table bucketmapjoin_hash_result_1_n7 (key bigint , value1 bigint, value2 bigint)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucketmapjoin_hash_result_1
-PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n7
+PREHOOK: query: create table bucketmapjoin_hash_result_2_n7 (key bigint , value1 bigint, value2 bigint)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@bucketmapjoin_hash_result_2
-POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: Output: default@bucketmapjoin_hash_result_2_n7
+POSTHOOK: query: create table bucketmapjoin_hash_result_2_n7 (key bigint , value1 bigint, value2 bigint)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucketmapjoin_hash_result_2
-PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2_n7
+PREHOOK: query: create table bucketmapjoin_tmp_result_n9 (key string , value1 string, value2 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+PREHOOK: Output: default@bucketmapjoin_tmp_result_n9
+POSTHOOK: query: create table bucketmapjoin_tmp_result_n9 (key string , value1 string, value2 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_tmp_result_n9
 PREHOOK: query: explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n9
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b
 on a.key=b.key and b.ds="2008-04-08"
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n9
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -145,13 +145,13 @@ STAGE PLANS:
 columns.comments
 columns.types int:string
 #### A masked pattern was here ####
- name default.srcbucket_mapjoin_part
+ name default.srcbucket_mapjoin_part_n19
 numFiles 4
 numRows 0
 partition_columns ds
 partition_columns.types string
 rawDataSize 0
- serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_n19 { i32 key, string value}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 5812
@@ -169,16 +169,16 @@ STAGE PLANS:
 columns.comments
 columns.types int:string
 #### A masked pattern was here ####
- name default.srcbucket_mapjoin_part
+ name default.srcbucket_mapjoin_part_n19
 partition_columns ds
 partition_columns.types string
- serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_n19 { i32 key, string value}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcbucket_mapjoin_part
- name: default.srcbucket_mapjoin_part
+ name: default.srcbucket_mapjoin_part_n19
+ name: default.srcbucket_mapjoin_part_n19
 Alias -> Map Local Operator Tree:
 $hdt$_0:a
 TableScan
@@ -246,17 +246,17 @@ STAGE PLANS:
 columns.comments
 columns.types string:string:string
 #### A masked pattern was here ####
- name default.bucketmapjoin_tmp_result
+ name default.bucketmapjoin_tmp_result_n9
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+ serialization.ddl struct bucketmapjoin_tmp_result_n9 { string key, string value1, string value2}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.bucketmapjoin_tmp_result
+ name: default.bucketmapjoin_tmp_result_n9
 TotalFiles: 1
 GatherStats: true
 MultiFileSpray: false
@@ -307,13 +307,13 @@ STAGE PLANS:
 columns.comments
 columns.types int:string
 #### A masked pattern was here ####
- name default.srcbucket_mapjoin_part
+ name default.srcbucket_mapjoin_part_2_n16
 numFiles 4
 numRows 0
 partition_columns ds
 partition_columns.types string
 rawDataSize 0
- serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_2_n16 { i32 key, string value}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 5812
@@ -331,16 +331,16 @@ STAGE PLANS:
 columns.comments
 columns.types int:string
 #### A masked pattern was here ####
- name default.srcbucket_mapjoin_part
+ name default.srcbucket_mapjoin_part_2_n16
 partition_columns ds
 partition_columns.types string
- serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_2_n16 { i32 key, string value}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcbucket_mapjoin_part
- name: default.srcbucket_mapjoin_part
+ name: default.srcbucket_mapjoin_part_2_n16
+ name: default.srcbucket_mapjoin_part_2_n16
 #### A masked pattern was here ####
 Partition
 base file name: ds=2008-04-08
@@ -356,13 +356,13 @@ STAGE PLANS:
 columns.comments
 columns.types int:string
 #### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_2
+ name default.srcbucket_mapjoin_part_n19
 numFiles 4
 numRows 0
 partition_columns ds
 partition_columns.types string
 rawDataSize 0
- serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_n19 { i32 key, string value}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 5812
@@ -380,18 +380,18 @@ STAGE PLANS:
 columns.comments
 columns.types int:string
 #### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_2
+ name default.srcbucket_mapjoin_part_n19
 partition_columns ds
 partition_columns.types string
- serialization.ddl struct srcbucket_mapjoin_part_2 {
i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n19 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_n19 + name: default.srcbucket_mapjoin_part_n19 Truncated Path -> Alias: - /srcbucket_mapjoin_part_2/ds=2008-04-08 [$hdt$_1:b] + /srcbucket_mapjoin_part_2_n16/ds=2008-04-08 [$hdt$_1:b] Stage: Stage-0 Move Operator @@ -410,17 +410,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n9 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n9 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n9 Stage: Stage-2 Stats Work @@ -429,7 +429,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value1, value2 Column Types: string, string, string - Table: default.bucketmapjoin_tmp_result + Table: default.bucketmapjoin_tmp_result_n9 Is Table Level Stats: true Stage: Stage-3 @@ -501,61 +501,61 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result +PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n9 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b on a.key=b.key and b.ds="2008-04-08" PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part -PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -PREHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n16 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08 +PREHOOK: Input: default@srcbucket_mapjoin_part_n19 +PREHOOK: Input: default@srcbucket_mapjoin_part_n19@ds=2008-04-08 +PREHOOK: Output: default@bucketmapjoin_tmp_result_n9 +POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n9 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b on a.key=b.key and b.ds="2008-04-08" POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part -POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -POSTHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ] -POSTHOOK: Lineage: 
bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select count(1) from bucketmapjoin_tmp_result +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n16 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n19 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n19@ds=2008-04-08 +POSTHOOK: Output: default@bucketmapjoin_tmp_result_n9 +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n9.key EXPRESSION [(srcbucket_mapjoin_part_n19)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n9.value1 SIMPLE [(srcbucket_mapjoin_part_n19)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n9.value2 SIMPLE [(srcbucket_mapjoin_part_2_n16)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select count(1) from bucketmapjoin_tmp_result_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@bucketmapjoin_tmp_result +PREHOOK: Input: default@bucketmapjoin_tmp_result_n9 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result +POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucketmapjoin_tmp_result +POSTHOOK: Input: default@bucketmapjoin_tmp_result_n9 #### A masked pattern was here #### 1028 -PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result +PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n7 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@bucketmapjoin_tmp_result -PREHOOK: Output: default@bucketmapjoin_hash_result_1 -POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result +PREHOOK: Input: default@bucketmapjoin_tmp_result_n9 +PREHOOK: Output: default@bucketmapjoin_hash_result_1_n7 +POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n7 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucketmapjoin_tmp_result -POSTHOOK: Output: default@bucketmapjoin_hash_result_1 -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Input: default@bucketmapjoin_tmp_result_n9 +POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n7 +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n7.key EXPRESSION [(bucketmapjoin_tmp_result_n9)bucketmapjoin_tmp_result_n9.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n7.value1 EXPRESSION [(bucketmapjoin_tmp_result_n9)bucketmapjoin_tmp_result_n9.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n7.value2 EXPRESSION 
[(bucketmapjoin_tmp_result_n9)bucketmapjoin_tmp_result_n9.FieldSchema(name:value2, type:string, comment:null), ] PREHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n9 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b on a.key=b.key and b.ds="2008-04-08" PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n9 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b on a.key=b.key and b.ds="2008-04-08" POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -587,13 +587,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part + name default.srcbucket_mapjoin_part_n19 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n19 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -611,16 +611,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part + name default.srcbucket_mapjoin_part_n19 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n19 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part - name: default.srcbucket_mapjoin_part + name: default.srcbucket_mapjoin_part_n19 + name: default.srcbucket_mapjoin_part_n19 Alias -> Map Local Operator Tree: $hdt$_0:a TableScan @@ -688,17 +688,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n9 numFiles 1 numRows 1028 rawDataSize 19022 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n9 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20050 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n9 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -749,13 +749,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part + name default.srcbucket_mapjoin_part_2_n16 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n16 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -773,16 +773,16 @@ STAGE 
PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part + name default.srcbucket_mapjoin_part_2_n16 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n16 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part - name: default.srcbucket_mapjoin_part + name: default.srcbucket_mapjoin_part_2_n16 + name: default.srcbucket_mapjoin_part_2_n16 #### A masked pattern was here #### Partition base file name: ds=2008-04-08 @@ -798,13 +798,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_n19 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n19 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -822,18 +822,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_n19 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n19 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_n19 + name: default.srcbucket_mapjoin_part_n19 Truncated Path -> Alias: - /srcbucket_mapjoin_part_2/ds=2008-04-08 [$hdt$_1:b] + /srcbucket_mapjoin_part_2_n16/ds=2008-04-08 [$hdt$_1:b] Stage: Stage-0 Move Operator @@ -852,17 +852,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n9 numFiles 1 numRows 1028 rawDataSize 19022 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n9 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20050 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n9 Stage: Stage-2 Stats Work @@ -871,7 +871,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value1, value2 Column Types: string, string, string - Table: default.bucketmapjoin_tmp_result + Table: default.bucketmapjoin_tmp_result_n9 Is Table Level Stats: true Stage: Stage-3 @@ -943,48 +943,48 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result +PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n9 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join 
srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b on a.key=b.key and b.ds="2008-04-08" PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part -PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -PREHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n16 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08 +PREHOOK: Input: default@srcbucket_mapjoin_part_n19 +PREHOOK: Input: default@srcbucket_mapjoin_part_n19@ds=2008-04-08 +PREHOOK: Output: default@bucketmapjoin_tmp_result_n9 +POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n9 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n19 a join srcbucket_mapjoin_part_2_n16 b on a.key=b.key and b.ds="2008-04-08" POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part -POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -POSTHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select count(1) from bucketmapjoin_tmp_result +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n16 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n16@ds=2008-04-08 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n19 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n19@ds=2008-04-08 +POSTHOOK: Output: default@bucketmapjoin_tmp_result_n9 +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n9.key EXPRESSION [(srcbucket_mapjoin_part_n19)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n9.value1 SIMPLE [(srcbucket_mapjoin_part_n19)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n9.value2 SIMPLE [(srcbucket_mapjoin_part_2_n16)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select count(1) from bucketmapjoin_tmp_result_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@bucketmapjoin_tmp_result +PREHOOK: Input: default@bucketmapjoin_tmp_result_n9 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result +POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucketmapjoin_tmp_result +POSTHOOK: Input: default@bucketmapjoin_tmp_result_n9 #### A masked pattern was here #### 1028 -PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result +PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n7 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@bucketmapjoin_tmp_result -PREHOOK: Output: 
default@bucketmapjoin_hash_result_1 -POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result +PREHOOK: Input: default@bucketmapjoin_tmp_result_n9 +PREHOOK: Output: default@bucketmapjoin_hash_result_1_n7 +POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n7 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucketmapjoin_tmp_result -POSTHOOK: Output: default@bucketmapjoin_hash_result_1 -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Input: default@bucketmapjoin_tmp_result_n9 +POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n7 +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n7.key EXPRESSION [(bucketmapjoin_tmp_result_n9)bucketmapjoin_tmp_result_n9.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n7.value1 EXPRESSION [(bucketmapjoin_tmp_result_n9)bucketmapjoin_tmp_result_n9.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n7.value2 EXPRESSION [(bucketmapjoin_tmp_result_n9)bucketmapjoin_tmp_result_n9.FieldSchema(name:value2, type:string, comment:null), ] diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out index 2084c90f6a..eed33650c8 100644 --- a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out +++ b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out @@ -1,103 +1,103 @@ -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n12 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@srcbucket_mapjoin_part_n12 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n12 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n12 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part 
partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n12 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n12 +POSTHOOK: Output: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n12 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n10 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 
2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n10 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n10 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n10 +PREHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n10 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n10 +POSTHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n10 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n10 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n10@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n10 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n10@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n10 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint) +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n10@ds=2008-04-08 +PREHOOK: query: create table bucketmapjoin_hash_result_1_n3 (key bigint , value1 bigint, value2 bigint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucketmapjoin_hash_result_1 -POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint) +PREHOOK: Output: default@bucketmapjoin_hash_result_1_n3 +POSTHOOK: query: create table bucketmapjoin_hash_result_1_n3 (key bigint , value1 bigint, value2 bigint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucketmapjoin_hash_result_1 -PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint) +POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n3 +PREHOOK: query: create table 
bucketmapjoin_hash_result_2_n3 (key bigint , value1 bigint, value2 bigint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucketmapjoin_hash_result_2 -POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint) +PREHOOK: Output: default@bucketmapjoin_hash_result_2_n3 +POSTHOOK: query: create table bucketmapjoin_hash_result_2_n3 (key bigint , value1 bigint, value2 bigint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucketmapjoin_hash_result_2 -PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string) +POSTHOOK: Output: default@bucketmapjoin_hash_result_2_n3 +PREHOOK: query: create table bucketmapjoin_tmp_result_n5 (key string , value1 string, value2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string) +PREHOOK: Output: default@bucketmapjoin_tmp_result_n5 +POSTHOOK: query: create table bucketmapjoin_tmp_result_n5 (key string , value1 string, value2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucketmapjoin_tmp_result +POSTHOOK: Output: default@bucketmapjoin_tmp_result_n5 PREHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n5 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b on a.key=b.key and b.ds="2008-04-08" PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n5 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b on a.key=b.key and b.ds="2008-04-08" POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -129,13 +129,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n10 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 3062 @@ -153,16 +153,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n10 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n10 + name: default.srcbucket_mapjoin_part_2_n10 Alias -> Map Local Operator Tree: $hdt$_1:b TableScan @@ -230,17 +230,17 @@ STAGE PLANS: columns.comments columns.types 
string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n5 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n5 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n5 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -284,30 +284,30 @@ STAGE PLANS: partition values: ds 2008-04-08 properties: - bucket_count 4 + bucket_count 2 bucket_field_name key column.name.delimiter , columns key,value columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part - numFiles 4 + name default.srcbucket_mapjoin_part_2_n10 + numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 + totalSize 3062 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - bucket_count 4 + bucket_count 2 bucket_field_name key bucketing_version 2 column.name.delimiter , @@ -315,16 +315,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part + name default.srcbucket_mapjoin_part_2_n10 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part - name: default.srcbucket_mapjoin_part + name: default.srcbucket_mapjoin_part_2_n10 + name: default.srcbucket_mapjoin_part_2_n10 #### A masked pattern was here #### Partition base file name: ds=2008-04-08 @@ -333,30 +333,30 @@ STAGE PLANS: partition values: ds 2008-04-08 properties: - bucket_count 2 + bucket_count 4 bucket_field_name key column.name.delimiter , columns key,value columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 - numFiles 2 + name default.srcbucket_mapjoin_part_n12 + numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n12 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 3062 + totalSize 5812 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - 
bucket_count 2 + bucket_count 4 bucket_field_name key bucketing_version 2 column.name.delimiter , @@ -364,18 +364,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_n12 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n12 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_n12 + name: default.srcbucket_mapjoin_part_n12 Truncated Path -> Alias: - /srcbucket_mapjoin_part/ds=2008-04-08 [$hdt$_0:a] + /srcbucket_mapjoin_part_n12/ds=2008-04-08 [$hdt$_0:a] Stage: Stage-0 Move Operator @@ -394,17 +394,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n5 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n5 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n5 Stage: Stage-2 Stats Work @@ -413,7 +413,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value1, value2 Column Types: string, string, string - Table: default.bucketmapjoin_tmp_result + Table: default.bucketmapjoin_tmp_result_n5 Is Table Level Stats: true Stage: Stage-3 @@ -485,61 +485,61 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result +PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n5 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b on a.key=b.key and b.ds="2008-04-08" PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part -PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -PREHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n10 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n10@ds=2008-04-08 +PREHOOK: Input: default@srcbucket_mapjoin_part_n12 +PREHOOK: Input: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +PREHOOK: Output: default@bucketmapjoin_tmp_result_n5 +POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n5 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b on a.key=b.key and b.ds="2008-04-08" POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part -POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: 
default@srcbucket_mapjoin_part_2@ds=2008-04-08 -POSTHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select count(1) from bucketmapjoin_tmp_result +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n10 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n10@ds=2008-04-08 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n12 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n12@ds=2008-04-08 +POSTHOOK: Output: default@bucketmapjoin_tmp_result_n5 +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n5.key EXPRESSION [(srcbucket_mapjoin_part_n12)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n5.value1 SIMPLE [(srcbucket_mapjoin_part_n12)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_tmp_result_n5.value2 SIMPLE [(srcbucket_mapjoin_part_2_n10)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select count(1) from bucketmapjoin_tmp_result_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@bucketmapjoin_tmp_result +PREHOOK: Input: default@bucketmapjoin_tmp_result_n5 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result +POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucketmapjoin_tmp_result +POSTHOOK: Input: default@bucketmapjoin_tmp_result_n5 #### A masked pattern was here #### 564 -PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result +PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n3 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@bucketmapjoin_tmp_result -PREHOOK: Output: default@bucketmapjoin_hash_result_1 -POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1 -select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result +PREHOOK: Input: default@bucketmapjoin_tmp_result_n5 +PREHOOK: Output: default@bucketmapjoin_hash_result_1_n3 +POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n3 +select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucketmapjoin_tmp_result -POSTHOOK: Output: default@bucketmapjoin_hash_result_1 -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ] -POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Input: default@bucketmapjoin_tmp_result_n5 +POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n3 +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n3.key EXPRESSION 
[(bucketmapjoin_tmp_result_n5)bucketmapjoin_tmp_result_n5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n3.value1 EXPRESSION [(bucketmapjoin_tmp_result_n5)bucketmapjoin_tmp_result_n5.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n3.value2 EXPRESSION [(bucketmapjoin_tmp_result_n5)bucketmapjoin_tmp_result_n5.FieldSchema(name:value2, type:string, comment:null), ] PREHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n5 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b on a.key=b.key and b.ds="2008-04-08" PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n5 select a.key, a.value, b.value -from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b on a.key=b.key and b.ds="2008-04-08" POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -571,13 +571,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n10 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 3062 @@ -595,16 +595,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n10 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n10 + name: default.srcbucket_mapjoin_part_2_n10 Alias -> Map Local Operator Tree: $hdt$_1:b TableScan @@ -672,17 +672,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n5 numFiles 1 numRows 564 rawDataSize 10503 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n5 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 11067 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n5 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -726,30 +726,30 @@ STAGE PLANS: partition values: ds 2008-04-08 properties: - bucket_count 4 + bucket_count 2 bucket_field_name key column.name.delimiter , columns key,value columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.srcbucket_mapjoin_part
-              numFiles 4
+              name default.srcbucket_mapjoin_part_2_n10
+              numFiles 2
               numRows 0
               partition_columns ds
               partition_columns.types string
               rawDataSize 0
-              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+              serialization.ddl struct srcbucket_mapjoin_part_2_n10 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
+              totalSize 3062
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                bucket_count 4
+                bucket_count 2
                 bucket_field_name key
                 bucketing_version 2
                 column.name.delimiter ,
@@ -757,16 +757,16 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part_2_n10
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_2_n10 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket_mapjoin_part
-            name: default.srcbucket_mapjoin_part
+              name: default.srcbucket_mapjoin_part_2_n10
+            name: default.srcbucket_mapjoin_part_2_n10
#### A masked pattern was here ####
           Partition
             base file name: ds=2008-04-08
@@ -775,30 +775,30 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
-              bucket_count 2
+              bucket_count 4
               bucket_field_name key
               column.name.delimiter ,
               columns key,value
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.srcbucket_mapjoin_part_2
-              numFiles 2
+              name default.srcbucket_mapjoin_part_n12
+              numFiles 4
               numRows 0
               partition_columns ds
               partition_columns.types string
               rawDataSize 0
-              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+              serialization.ddl struct srcbucket_mapjoin_part_n12 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 3062
+              totalSize 5812
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                bucket_count 2
+                bucket_count 4
                 bucket_field_name key
                 bucketing_version 2
                 column.name.delimiter ,
@@ -806,18 +806,18 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part_2
+                name default.srcbucket_mapjoin_part_n12
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_n12 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket_mapjoin_part_2
-            name: default.srcbucket_mapjoin_part_2
+              name: default.srcbucket_mapjoin_part_n12
+            name: default.srcbucket_mapjoin_part_n12
         Truncated Path -> Alias:
-          /srcbucket_mapjoin_part/ds=2008-04-08 [$hdt$_0:a]
+          /srcbucket_mapjoin_part_n12/ds=2008-04-08 [$hdt$_0:a]
   Stage: Stage-0
     Move Operator
@@ -836,17 +836,17 @@ STAGE PLANS:
                 columns.comments 
                 columns.types string:string:string
#### A masked pattern was here ####
-                name default.bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result_n5
                 numFiles 1
                 numRows 564
                 rawDataSize 10503
-                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.ddl struct bucketmapjoin_tmp_result_n5 { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 11067
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.bucketmapjoin_tmp_result
+              name: default.bucketmapjoin_tmp_result_n5
   Stage: Stage-2
     Stats Work
@@ -855,7 +855,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, value1, value2
           Column Types: string, string, string
-          Table: default.bucketmapjoin_tmp_result
+          Table: default.bucketmapjoin_tmp_result_n5
           Is Table Level Stats: true
   Stage: Stage-3
@@ -927,48 +927,48 @@ STAGE PLANS:
             GatherStats: false
             MultiFileSpray: false
-PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n5
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b
 on a.key=b.key and b.ds="2008-04-08"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part
-PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n10
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n10@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_n12
+PREHOOK: Input: default@srcbucket_mapjoin_part_n12@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result_n5
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n5
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n12 a join srcbucket_mapjoin_part_2_n10 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part
-POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n10
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n10@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n12
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n12@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result_n5
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n5.key EXPRESSION [(srcbucket_mapjoin_part_n12)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n5.value1 SIMPLE [(srcbucket_mapjoin_part_n12)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n5.value2 SIMPLE [(srcbucket_mapjoin_part_2_n10)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Input: default@bucketmapjoin_tmp_result_n5
#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Input: default@bucketmapjoin_tmp_result_n5
#### A masked pattern was here ####
 564
-PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n3
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: default@bucketmapjoin_hash_result_1
-POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: Input: default@bucketmapjoin_tmp_result_n5
+PREHOOK: Output: default@bucketmapjoin_hash_result_1_n3
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n3
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: default@bucketmapjoin_hash_result_1
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+POSTHOOK: Input: default@bucketmapjoin_tmp_result_n5
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n3
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n3.key EXPRESSION [(bucketmapjoin_tmp_result_n5)bucketmapjoin_tmp_result_n5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n3.value1 EXPRESSION [(bucketmapjoin_tmp_result_n5)bucketmapjoin_tmp_result_n5.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n3.value2 EXPRESSION [(bucketmapjoin_tmp_result_n5)bucketmapjoin_tmp_result_n5.FieldSchema(name:value2, type:string, comment:null), ]
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
index 9f601c9e67..4841f6de4a 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
@@ -1,103 +1,103 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n4 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@srcbucket_mapjoin_part_n4
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n4 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-PREHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n4
+PREHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_n4 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n4
+POSTHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_n4 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n4
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n4@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_n4 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n4@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_n4 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n4@ds=2008-04-08
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n3 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n3
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n3 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n3
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n3
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n3
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n3 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+PREHOOK: query: create table bucketmapjoin_hash_result_1_n1 (key bigint , value1 bigint, value2 bigint)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@bucketmapjoin_hash_result_1
-POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: Output: default@bucketmapjoin_hash_result_1_n1
+POSTHOOK: query: create table bucketmapjoin_hash_result_1_n1 (key bigint , value1 bigint, value2 bigint)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucketmapjoin_hash_result_1
-PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n1
+PREHOOK: query: create table bucketmapjoin_hash_result_2_n1 (key bigint , value1 bigint, value2 bigint)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@bucketmapjoin_hash_result_2
-POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: Output: default@bucketmapjoin_hash_result_2_n1
+POSTHOOK: query: create table bucketmapjoin_hash_result_2_n1 (key bigint , value1 bigint, value2 bigint)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucketmapjoin_hash_result_2
-PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2_n1
+PREHOOK: query: create table bucketmapjoin_tmp_result_n1 (key string , value1 string, value2 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+PREHOOK: Output: default@bucketmapjoin_tmp_result_n1
+POSTHOOK: query: create table bucketmapjoin_tmp_result_n1 (key string , value1 string, value2 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_tmp_result_n1
 PREHOOK: query: explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n1
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n1
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -129,13 +129,13 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part_n4
                 numFiles 2
                 numRows 0
                 partition_columns ds
                 partition_columns.types string
                 rawDataSize 0
-                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_n4 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 3062
@@ -153,16 +153,16 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part_n4
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_n4 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket_mapjoin_part
-            name: default.srcbucket_mapjoin_part
+              name: default.srcbucket_mapjoin_part_n4
+            name: default.srcbucket_mapjoin_part_n4
         Alias -> Map Local Operator Tree:
           $hdt$_0:a 
            TableScan
@@ -230,17 +230,17 @@ STAGE PLANS:
                 columns.comments 
                 columns.types string:string:string
#### A masked pattern was here ####
-                name default.bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result_n1
                 numFiles 0
                 numRows 0
                 rawDataSize 0
-                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.ddl struct bucketmapjoin_tmp_result_n1 { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 0
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.bucketmapjoin_tmp_result
+              name: default.bucketmapjoin_tmp_result_n1
             TotalFiles: 1
             GatherStats: true
             MultiFileSpray: false
@@ -284,30 +284,30 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
-              bucket_count 2
+              bucket_count 4
               bucket_field_name key
               column.name.delimiter ,
               columns key,value
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.srcbucket_mapjoin_part
-              numFiles 2
+              name default.srcbucket_mapjoin_part_2_n3
+              numFiles 4
               numRows 0
               partition_columns ds
               partition_columns.types string
               rawDataSize 0
-              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+              serialization.ddl struct srcbucket_mapjoin_part_2_n3 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 3062
+              totalSize 5812
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                bucket_count 2
+                bucket_count 4
                 bucket_field_name key
                 bucketing_version 2
                 column.name.delimiter ,
@@ -315,16 +315,16 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part_2_n3
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_2_n3 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket_mapjoin_part
-            name: default.srcbucket_mapjoin_part
+              name: default.srcbucket_mapjoin_part_2_n3
+            name: default.srcbucket_mapjoin_part_2_n3
#### A masked pattern was here ####
           Partition
             base file name: ds=2008-04-08
@@ -333,30 +333,30 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
-              bucket_count 4
+              bucket_count 2
               bucket_field_name key
               column.name.delimiter ,
               columns key,value
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.srcbucket_mapjoin_part_2
-              numFiles 4
+              name default.srcbucket_mapjoin_part_n4
+              numFiles 2
               numRows 0
               partition_columns ds
               partition_columns.types string
               rawDataSize 0
-              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+              serialization.ddl struct srcbucket_mapjoin_part_n4 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
+              totalSize 3062
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                bucket_count 4
+                bucket_count 2
                 bucket_field_name key
                 bucketing_version 2
                 column.name.delimiter ,
@@ -364,18 +364,18 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part_2
+                name default.srcbucket_mapjoin_part_n4
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_n4 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket_mapjoin_part_2
-            name: default.srcbucket_mapjoin_part_2
+              name: default.srcbucket_mapjoin_part_n4
+            name: default.srcbucket_mapjoin_part_n4
         Truncated Path -> Alias:
-          /srcbucket_mapjoin_part_2/ds=2008-04-08 [$hdt$_1:b]
+          /srcbucket_mapjoin_part_2_n3/ds=2008-04-08 [$hdt$_1:b]
   Stage: Stage-0
     Move Operator
@@ -394,17 +394,17 @@ STAGE PLANS:
                 columns.comments 
                 columns.types string:string:string
#### A masked pattern was here ####
-                name default.bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result_n1
                 numFiles 0
                 numRows 0
                 rawDataSize 0
-                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.ddl struct bucketmapjoin_tmp_result_n1 { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 0
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.bucketmapjoin_tmp_result
+              name: default.bucketmapjoin_tmp_result_n1
   Stage: Stage-2
     Stats Work
@@ -413,7 +413,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, value1, value2
          Column Types: string, string, string
-          Table: default.bucketmapjoin_tmp_result
+          Table: default.bucketmapjoin_tmp_result_n1
          Is Table Level Stats: true
   Stage: Stage-3
@@ -485,61 +485,61 @@ STAGE PLANS:
             GatherStats: false
             MultiFileSpray: false
-PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n1
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part
-PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n3
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_n4
+PREHOOK: Input: default@srcbucket_mapjoin_part_n4@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result_n1
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n1
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part
-POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n3
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n4
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n4@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result_n1
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n1.key EXPRESSION [(srcbucket_mapjoin_part_n4)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n1.value1 SIMPLE [(srcbucket_mapjoin_part_n4)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n1.value2 SIMPLE [(srcbucket_mapjoin_part_2_n3)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Input: default@bucketmapjoin_tmp_result_n1
#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Input: default@bucketmapjoin_tmp_result_n1
#### A masked pattern was here ####
 564
-PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: default@bucketmapjoin_hash_result_1
-POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: Input: default@bucketmapjoin_tmp_result_n1
+PREHOOK: Output: default@bucketmapjoin_hash_result_1_n1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: default@bucketmapjoin_hash_result_1
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+POSTHOOK: Input: default@bucketmapjoin_tmp_result_n1
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n1.key EXPRESSION [(bucketmapjoin_tmp_result_n1)bucketmapjoin_tmp_result_n1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n1.value1 EXPRESSION [(bucketmapjoin_tmp_result_n1)bucketmapjoin_tmp_result_n1.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n1.value2 EXPRESSION [(bucketmapjoin_tmp_result_n1)bucketmapjoin_tmp_result_n1.FieldSchema(name:value2, type:string, comment:null), ]
 PREHOOK: query: explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n1
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n1
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -571,13 +571,13 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part_n4
                 numFiles 2
                 numRows 0
                 partition_columns ds
                 partition_columns.types string
                 rawDataSize 0
-                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_n4 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 3062
@@ -595,16 +595,16 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part_n4
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_n4 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket_mapjoin_part
-            name: default.srcbucket_mapjoin_part
+              name: default.srcbucket_mapjoin_part_n4
+            name: default.srcbucket_mapjoin_part_n4
         Alias -> Map Local Operator Tree:
           $hdt$_0:a 
            TableScan
@@ -672,17 +672,17 @@ STAGE PLANS:
                 columns.comments 
                 columns.types string:string:string
#### A masked pattern was here ####
-                name default.bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result_n1
                 numFiles 1
                 numRows 564
                 rawDataSize 10503
-                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.ddl struct bucketmapjoin_tmp_result_n1 { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 11067
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.bucketmapjoin_tmp_result
+              name: default.bucketmapjoin_tmp_result_n1
             TotalFiles: 1
             GatherStats: true
             MultiFileSpray: false
@@ -726,30 +726,30 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
-              bucket_count 2
+              bucket_count 4
               bucket_field_name key
               column.name.delimiter ,
               columns key,value
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.srcbucket_mapjoin_part
-              numFiles 2
+              name default.srcbucket_mapjoin_part_2_n3
+              numFiles 4
               numRows 0
               partition_columns ds
               partition_columns.types string
               rawDataSize 0
-              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+              serialization.ddl struct srcbucket_mapjoin_part_2_n3 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 3062
+              totalSize 5812
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                bucket_count 2
+                bucket_count 4
                 bucket_field_name key
                 bucketing_version 2
                 column.name.delimiter ,
@@ -757,16 +757,16 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part
+                name default.srcbucket_mapjoin_part_2_n3
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_2_n3 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket_mapjoin_part
-            name: default.srcbucket_mapjoin_part
+              name: default.srcbucket_mapjoin_part_2_n3
+            name: default.srcbucket_mapjoin_part_2_n3
#### A masked pattern was here ####
           Partition
             base file name: ds=2008-04-08
@@ -775,30 +775,30 @@ STAGE PLANS:
             partition values:
               ds 2008-04-08
             properties:
-              bucket_count 4
+              bucket_count 2
               bucket_field_name key
               column.name.delimiter ,
               columns key,value
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.srcbucket_mapjoin_part_2
-              numFiles 4
+              name default.srcbucket_mapjoin_part_n4
+              numFiles 2
               numRows 0
               partition_columns ds
               partition_columns.types string
               rawDataSize 0
-              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+              serialization.ddl struct srcbucket_mapjoin_part_n4 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
+              totalSize 3062
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                bucket_count 4
+                bucket_count 2
                 bucket_field_name key
                 bucketing_version 2
                 column.name.delimiter ,
@@ -806,18 +806,18 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.srcbucket_mapjoin_part_2
+                name default.srcbucket_mapjoin_part_n4
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                serialization.ddl struct srcbucket_mapjoin_part_n4 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket_mapjoin_part_2
-            name: default.srcbucket_mapjoin_part_2
+              name: default.srcbucket_mapjoin_part_n4
+            name: default.srcbucket_mapjoin_part_n4
         Truncated Path -> Alias:
-          /srcbucket_mapjoin_part_2/ds=2008-04-08 [$hdt$_1:b]
+          /srcbucket_mapjoin_part_2_n3/ds=2008-04-08 [$hdt$_1:b]
   Stage: Stage-0
     Move Operator
@@ -836,17 +836,17 @@ STAGE PLANS:
                 columns.comments 
                 columns.types string:string:string
#### A masked pattern was here ####
-                name default.bucketmapjoin_tmp_result
+                name default.bucketmapjoin_tmp_result_n1
                 numFiles 1
                 numRows 564
                 rawDataSize 10503
-                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.ddl struct bucketmapjoin_tmp_result_n1 { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 11067
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.bucketmapjoin_tmp_result
+              name: default.bucketmapjoin_tmp_result_n1
   Stage: Stage-2
     Stats Work
@@ -855,7 +855,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, value1, value2
          Column Types: string, string, string
-          Table: default.bucketmapjoin_tmp_result
+          Table: default.bucketmapjoin_tmp_result_n1
          Is Table Level Stats: true
   Stage: Stage-3
@@ -927,48 +927,48 @@ STAGE PLANS:
             GatherStats: false
             MultiFileSpray: false
-PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n1
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part
-PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-PREHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n3
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_n4
+PREHOOK: Input: default@srcbucket_mapjoin_part_n4@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result_n1
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result_n1
 select a.key, a.value, b.value
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_part_n4 a join srcbucket_mapjoin_part_2_n3 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part
-POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
-POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n3
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n3@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n4
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n4@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result_n1
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n1.key EXPRESSION [(srcbucket_mapjoin_part_n4)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n1.value1 SIMPLE [(srcbucket_mapjoin_part_n4)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result_n1.value2 SIMPLE [(srcbucket_mapjoin_part_2_n3)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Input: default@bucketmapjoin_tmp_result_n1
#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Input: default@bucketmapjoin_tmp_result_n1
#### A masked pattern was here ####
 564
-PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: default@bucketmapjoin_hash_result_1
-POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: Input: default@bucketmapjoin_tmp_result_n1
+PREHOOK: Output: default@bucketmapjoin_hash_result_1_n1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1_n1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: default@bucketmapjoin_hash_result_1
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+POSTHOOK: Input: default@bucketmapjoin_tmp_result_n1
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1_n1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n1.key EXPRESSION [(bucketmapjoin_tmp_result_n1)bucketmapjoin_tmp_result_n1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n1.value1 EXPRESSION [(bucketmapjoin_tmp_result_n1)bucketmapjoin_tmp_result_n1.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1_n1.value2 EXPRESSION [(bucketmapjoin_tmp_result_n1)bucketmapjoin_tmp_result_n1.FieldSchema(name:value2, type:string, comment:null), ]
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out
index 01722649ee..96a0593bf2 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out
@@ -1,19 +1,19 @@
-PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: query: CREATE TABLE tbl1_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: Output: default@tbl1_n0
+POSTHOOK: query: CREATE TABLE tbl1_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl1
-PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: Output: default@tbl1_n0
+PREHOOK: query: CREATE TABLE tbl2_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: Output: default@tbl2_n0
+POSTHOOK: query: CREATE TABLE tbl2_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl2
+POSTHOOK: Output: default@tbl2_n0
 PREHOOK: query: CREATE TABLE tbl3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -22,30 +22,30 @@ POSTHOOK: query: CREATE TABLE tbl3(key int, value string) CLUSTERED BY (key) SOR
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tbl3
-PREHOOK: query: insert overwrite table tbl1
+PREHOOK: query: insert overwrite table tbl1_n0
 select * from src where key < 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: insert overwrite table tbl1
+PREHOOK: Output: default@tbl1_n0
+POSTHOOK: query: insert overwrite table tbl1_n0
 select * from src where key < 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl1
-POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tbl2
+POSTHOOK: Output: default@tbl1_n0
+POSTHOOK: Lineage: tbl1_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2_n0
 select * from src where key < 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: insert overwrite table tbl2
+PREHOOK: Output: default@tbl2_n0
+POSTHOOK: query: insert overwrite table tbl2_n0
 select * from src where key < 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl2
-POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@tbl2_n0
+POSTHOOK: Lineage: tbl2_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: insert overwrite table tbl3
 select * from src where key < 10
 PREHOOK: type: QUERY
@@ -60,11 +60,11 @@ POSTHOOK: Lineage: tbl3.key EXPRESSION [(src)src.FieldSchema(name:key, type:stri
 POSTHOOK: Lineage: tbl3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain extended
 select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-7 is a root stage
@@ -185,7 +185,7 @@ STAGE PLANS:
         Path -> Partition:
#### A masked pattern was here ####
           Partition
-            base file name: tbl1
+            base file name: tbl1_n0
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -199,11 +199,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.tbl1
+              name default.tbl1_n0
               numFiles 2
               numRows 10
               rawDataSize 70
-              serialization.ddl struct tbl1 { i32 key, string value}
+              serialization.ddl struct tbl1_n0 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 80
@@ -223,21 +223,21 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.tbl1
+                name default.tbl1_n0
                 numFiles 2
                 numRows 10
                 rawDataSize 70
-                serialization.ddl struct tbl1 { i32 key, string value}
+                serialization.ddl struct tbl1_n0 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 80
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.tbl1
-            name: default.tbl1
+              name: default.tbl1_n0
+            name: default.tbl1_n0
#### A masked pattern was here ####
           Partition
-            base file name: tbl2
+            base file name: tbl2_n0
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -251,11 +251,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.tbl2
+              name default.tbl2_n0
               numFiles 2
               numRows 10
               rawDataSize 70
-              serialization.ddl struct tbl2 { i32 key, string value}
+              serialization.ddl struct tbl2_n0 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 80
@@ -275,18 +275,18 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.tbl2
+                name default.tbl2_n0
                 numFiles 2
                 numRows 10
                 rawDataSize 70
-                serialization.ddl struct tbl2 { i32 key, string value}
+                serialization.ddl struct tbl2_n0 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 80
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.tbl2
-            name: default.tbl2
+              name: default.tbl2_n0
+            name: default.tbl2_n0
#### A masked pattern was here ####
           Partition
             base file name: tbl3
@@ -340,7 +340,7 @@ STAGE PLANS:
               name: default.tbl3
             name: default.tbl3
         Truncated Path -> Alias:
-          /tbl2 [$hdt$_1:b]
+          /tbl2_n0 [$hdt$_1:b]
   Stage: Stage-0
     Fetch Operator
@@ -349,17 +349,17 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl1
-PREHOOK: Input: default@tbl2
+PREHOOK: Input: default@tbl1_n0
+PREHOOK: Input: default@tbl2_n0
 PREHOOK: Input: default@tbl3
#### A masked pattern was here ####
 POSTHOOK: query: select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl1
-POSTHOOK: Input: default@tbl2
+POSTHOOK: Input: default@tbl1_n0
+POSTHOOK: Input: default@tbl2_n0
 POSTHOOK: Input: default@tbl3
#### A masked pattern was here ####
 0	val_0	val_0	val_0
@@ -422,11 +422,11 @@ POSTHOOK: Input: default@tbl3
 9	val_9	val_9	val_9
 PREHOOK: query: explain extended
 select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-7 is a root stage
@@ -547,7 +547,7 @@ STAGE PLANS:
         Path -> Partition:
#### A masked pattern was here ####
           Partition
-            base file name: tbl1
+            base file name: tbl1_n0
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -561,11 +561,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.tbl1
+              name default.tbl1_n0
               numFiles 2
               numRows 10
               rawDataSize 70
-              serialization.ddl struct tbl1 { i32 key, string value}
+              serialization.ddl struct tbl1_n0 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 80
@@ -585,21 +585,21 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.tbl1
+                name default.tbl1_n0
                 numFiles 2
                 numRows 10
                 rawDataSize 70
-                serialization.ddl struct tbl1 { i32 key, string value}
+                serialization.ddl struct tbl1_n0 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 80
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.tbl1
-            name: default.tbl1
+              name: default.tbl1_n0
+            name: default.tbl1_n0
#### A masked pattern was here ####
           Partition
-            base file name: tbl2
+            base file name: tbl2_n0
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -613,11 +613,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.tbl2
+              name default.tbl2_n0
               numFiles 2
               numRows 10
               rawDataSize 70
-              serialization.ddl struct tbl2 { i32 key, string value}
+              serialization.ddl struct tbl2_n0 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 80
@@ -637,18 +637,18 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
#### A masked pattern was here ####
-                name default.tbl2
+                name default.tbl2_n0
                 numFiles 2
                 numRows 10
                 rawDataSize 70
-                serialization.ddl struct tbl2 { i32 key, string value}
+                serialization.ddl struct tbl2_n0 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 80
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.tbl2
-            name: default.tbl2
+              name: default.tbl2_n0
+            name: default.tbl2_n0
#### A masked pattern was here ####
           Partition
             base file name: tbl3
@@ -702,7 +702,7 @@ STAGE PLANS:
               name: default.tbl3
             name: default.tbl3
         Truncated Path -> Alias:
-          /tbl2 [$hdt$_1:b]
+          /tbl2_n0 [$hdt$_1:b]
   Stage: Stage-0
     Fetch Operator
@@ -711,17 +711,17 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl1
-PREHOOK: Input: default@tbl2
+PREHOOK: Input: default@tbl1_n0
+PREHOOK: Input: default@tbl2_n0
 PREHOOK: Input: default@tbl3
#### A masked pattern was here ####
 POSTHOOK: query: select a.key as key, a.value as val1, b.value as val2, c.value as val3
-from tbl1 a join tbl2 b on a.key = b.key join tbl3 c on a.value = c.value
+from tbl1_n0 a join tbl2_n0 b on a.key = b.key join tbl3 c on a.value = c.value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl1
-POSTHOOK: Input: default@tbl2
+POSTHOOK: Input: default@tbl1_n0
+POSTHOOK: Input: default@tbl2_n0
 POSTHOOK: Input: default@tbl3
#### A masked pattern was here ####
 0	val_0	val_0	val_0
diff --git a/ql/src/test/results/clientpositive/bucketcontext_1.q.out b/ql/src/test/results/clientpositive/bucketcontext_1.q.out
index adc2d765c9..6060fe7bff 100644
--- a/ql/src/test/results/clientpositive/bucketcontext_1.q.out
+++ b/ql/src/test/results/clientpositive/bucketcontext_1.q.out
@@ -1,105 +1,105 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_small_n14 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@bucket_small_n14
+POSTHOOK: query: CREATE TABLE bucket_small_n14 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_small_n14
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n14 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_small_n14
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n14 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_small_n14
+POSTHOOK: Output: default@bucket_small_n14@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n14 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_small_n14@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n14 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: Output: default@bucket_small_n14@ds=2008-04-08
+PREHOOK: query: CREATE TABLE bucket_big_n14 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@bucket_big_n14
+POSTHOOK: query: CREATE TABLE bucket_big_n14 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket_big
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n14
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n14
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n14
+POSTHOOK: Output: default@bucket_big_n14@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n14@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n14@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n14@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n14@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n14@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n14@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n14
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n14
+POSTHOOK: Output: default@bucket_big_n14@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n14@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n14@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n14@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n14@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09')
 PREHOOK: type: LOAD
#### A masked pattern was
here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-09 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_big_n14@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n14 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: Output: default@bucket_big_n14@ds=2008-04-09 +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -127,13 +127,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n14 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -152,16 +152,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n14 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_small - name: default.bucket_small + name: default.bucket_small_n14 + name: default.bucket_small_n14 Alias -> Map Local Operator Tree: a TableScan @@ -238,13 +238,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n14 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -263,16 +263,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n14 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n14 + name: 
default.bucket_big_n14 #### A masked pattern was here #### Partition base file name: ds=2008-04-09 @@ -288,13 +288,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n14 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -313,19 +313,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n14 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n14 + name: default.bucket_big_n14 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] - /bucket_big/ds=2008-04-09 [b] + /bucket_big_n14/ds=2008-04-08 [b] + /bucket_big_n14/ds=2008-04-09 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -362,26 +362,26 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_big@ds=2008-04-09 -PREHOOK: Input: default@bucket_small -PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n14 +PREHOOK: Input: default@bucket_big_n14@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n14@ds=2008-04-09 +PREHOOK: Input: default@bucket_small_n14 +PREHOOK: Input: default@bucket_small_n14@ds=2008-04-08 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_big@ds=2008-04-09 -POSTHOOK: Input: default@bucket_small -POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n14 +POSTHOOK: Input: default@bucket_big_n14@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n14@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small_n14 +POSTHOOK: Input: default@bucket_small_n14@ds=2008-04-08 #### A masked pattern was here #### 928 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a 
JOIN bucket_big_n14 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -435,13 +435,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n14 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -460,16 +460,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n14 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n14 + name: default.bucket_big_n14 #### A masked pattern was here #### Partition base file name: ds=2008-04-09 @@ -485,13 +485,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n14 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -510,19 +510,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n14 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n14 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n14 + name: default.bucket_big_n14 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] - /bucket_big/ds=2008-04-09 [b] + /bucket_big_n14/ds=2008-04-08 [b] + /bucket_big_n14/ds=2008-04-09 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -557,20 +557,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_big@ds=2008-04-09 -PREHOOK: Input: default@bucket_small -PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n14 +PREHOOK: Input: default@bucket_big_n14@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n14@ds=2008-04-09 +PREHOOK: Input: default@bucket_small_n14 +PREHOOK: Input: default@bucket_small_n14@ds=2008-04-08 #### A masked pattern was here #### -POSTHOOK: 
query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n14 a JOIN bucket_big_n14 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_big@ds=2008-04-09 -POSTHOOK: Input: default@bucket_small -POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n14 +POSTHOOK: Input: default@bucket_big_n14@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n14@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small_n14 +POSTHOOK: Input: default@bucket_small_n14@ds=2008-04-08 #### A masked pattern was here #### 928 diff --git a/ql/src/test/results/clientpositive/bucketcontext_2.q.out b/ql/src/test/results/clientpositive/bucketcontext_2.q.out index 03ce495825..f99865005e 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_2.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_2.q.out @@ -1,89 +1,89 @@ -PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small_n8 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_small -POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_small_n8 +POSTHOOK: query: CREATE TABLE bucket_small_n8 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n8 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n8 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n8 +POSTHOOK: Output: default@bucket_small_n8@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: 
default@bucket_small_n8@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n8@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n8@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n8@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n8@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n8 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@bucket_small_n8@ds=2008-04-08 +PREHOOK: query: CREATE TABLE bucket_big_n8 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_big -POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_big_n8 +POSTHOOK: query: CREATE TABLE bucket_big_n8 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_big -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n8 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath 
'../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n8 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n8 +POSTHOOK: Output: default@bucket_big_n8@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n8@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_big_n8@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_big_n8 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_big_n8 +POSTHOOK: Output: default@bucket_big_n8@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-09 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_big_n8@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n8 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: Output: default@bucket_big_n8@ds=2008-04-09 +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: 
query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -111,13 +111,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n8 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -136,16 +136,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n8 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_small - name: default.bucket_small + name: default.bucket_small_n8 + name: default.bucket_small_n8 Alias -> Map Local Operator Tree: a TableScan @@ -222,13 +222,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n8 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -247,16 +247,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n8 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n8 + name: default.bucket_big_n8 #### A masked pattern was here #### Partition base file name: ds=2008-04-09 @@ -272,13 +272,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n8 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -297,19 +297,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n8 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string 
value} + serialization.ddl struct bucket_big_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n8 + name: default.bucket_big_n8 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] - /bucket_big/ds=2008-04-09 [b] + /bucket_big_n8/ds=2008-04-08 [b] + /bucket_big_n8/ds=2008-04-09 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -346,26 +346,26 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_big@ds=2008-04-09 -PREHOOK: Input: default@bucket_small -PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n8 +PREHOOK: Input: default@bucket_big_n8@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n8@ds=2008-04-09 +PREHOOK: Input: default@bucket_small_n8 +PREHOOK: Input: default@bucket_small_n8@ds=2008-04-08 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_big@ds=2008-04-09 -POSTHOOK: Input: default@bucket_small -POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n8 +POSTHOOK: Input: default@bucket_big_n8@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n8@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small_n8 +POSTHOOK: Input: default@bucket_small_n8@ds=2008-04-08 #### A masked pattern was here #### 928 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -419,13 +419,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n8 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -444,16 +444,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n8 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + 
serialization.ddl struct bucket_big_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n8 + name: default.bucket_big_n8 #### A masked pattern was here #### Partition base file name: ds=2008-04-09 @@ -469,13 +469,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n8 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -494,19 +494,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n8 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n8 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n8 + name: default.bucket_big_n8 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] - /bucket_big/ds=2008-04-09 [b] + /bucket_big_n8/ds=2008-04-08 [b] + /bucket_big_n8/ds=2008-04-09 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -541,20 +541,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_big@ds=2008-04-09 -PREHOOK: Input: default@bucket_small -PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n8 +PREHOOK: Input: default@bucket_big_n8@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n8@ds=2008-04-09 +PREHOOK: Input: default@bucket_small_n8 +PREHOOK: Input: default@bucket_small_n8@ds=2008-04-08 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n8 a JOIN bucket_big_n8 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_big@ds=2008-04-09 -POSTHOOK: Input: default@bucket_small -POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n8 +POSTHOOK: Input: default@bucket_big_n8@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n8@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small_n8 +POSTHOOK: Input: default@bucket_small_n8@ds=2008-04-08 #### A masked pattern was here #### 928 diff --git a/ql/src/test/results/clientpositive/bucketcontext_3.q.out b/ql/src/test/results/clientpositive/bucketcontext_3.q.out index 57e1701443..15b3d142f7 100644 --- 
a/ql/src/test/results/clientpositive/bucketcontext_3.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_3.q.out @@ -1,89 +1,89 @@ -PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small_n4 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_small -POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_small_n4 +POSTHOOK: query: CREATE TABLE bucket_small_n4 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n4 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n4 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n4 +POSTHOOK: Output: default@bucket_small_n4@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n4@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_small_n4@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_small_n4 
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -POSTHOOK: Output: default@bucket_small@ds=2008-04-09 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_small_n4 +POSTHOOK: Output: default@bucket_small_n4@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-09 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_small_n4@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n4 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-09 -PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@bucket_small_n4@ds=2008-04-09 +PREHOOK: query: CREATE TABLE bucket_big_n4 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_big -POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_big_n4 +POSTHOOK: query: CREATE TABLE bucket_big_n4 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_big -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n4 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n4 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n4 +POSTHOOK: Output: default@bucket_big_n4@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: 
Output: default@bucket_big@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n4@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n4@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n4@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n4@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n4@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n4 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: Output: default@bucket_big_n4@ds=2008-04-08 +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -111,13 +111,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n4 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -136,16 +136,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here 
#### - name default.bucket_small + name default.bucket_small_n4 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_small - name: default.bucket_small + name: default.bucket_small_n4 + name: default.bucket_small_n4 Partition input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -159,13 +159,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n4 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -184,16 +184,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n4 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_small - name: default.bucket_small + name: default.bucket_small_n4 + name: default.bucket_small_n4 Alias -> Map Local Operator Tree: a TableScan @@ -270,13 +270,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n4 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -295,18 +295,18 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n4 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n4 + name: default.bucket_big_n4 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] + /bucket_big_n4/ds=2008-04-08 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -343,26 +343,26 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: 
Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_small -PREHOOK: Input: default@bucket_small@ds=2008-04-08 -PREHOOK: Input: default@bucket_small@ds=2008-04-09 +PREHOOK: Input: default@bucket_big_n4 +PREHOOK: Input: default@bucket_big_n4@ds=2008-04-08 +PREHOOK: Input: default@bucket_small_n4 +PREHOOK: Input: default@bucket_small_n4@ds=2008-04-08 +PREHOOK: Input: default@bucket_small_n4@ds=2008-04-09 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_small -POSTHOOK: Input: default@bucket_small@ds=2008-04-08 -POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +POSTHOOK: Input: default@bucket_big_n4 +POSTHOOK: Input: default@bucket_big_n4@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small_n4 +POSTHOOK: Input: default@bucket_small_n4@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small_n4@ds=2008-04-09 #### A masked pattern was here #### 928 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -416,13 +416,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n4 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -441,18 +441,18 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n4 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n4 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n4 + name: default.bucket_big_n4 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] + /bucket_big_n4/ds=2008-04-08 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -487,20 +487,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 
-PREHOOK: Input: default@bucket_small -PREHOOK: Input: default@bucket_small@ds=2008-04-08 -PREHOOK: Input: default@bucket_small@ds=2008-04-09 +PREHOOK: Input: default@bucket_big_n4 +PREHOOK: Input: default@bucket_big_n4@ds=2008-04-08 +PREHOOK: Input: default@bucket_small_n4 +PREHOOK: Input: default@bucket_small_n4@ds=2008-04-08 +PREHOOK: Input: default@bucket_small_n4@ds=2008-04-09 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n4 a JOIN bucket_big_n4 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_small -POSTHOOK: Input: default@bucket_small@ds=2008-04-08 -POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +POSTHOOK: Input: default@bucket_big_n4 +POSTHOOK: Input: default@bucket_big_n4@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small_n4 +POSTHOOK: Input: default@bucket_small_n4@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small_n4@ds=2008-04-09 #### A masked pattern was here #### 928 diff --git a/ql/src/test/results/clientpositive/bucketcontext_5.q.out b/ql/src/test/results/clientpositive/bucketcontext_5.q.out index 1cce4e05d6..629c364e4f 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_5.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_5.q.out @@ -1,70 +1,70 @@ -PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small_n13 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_small -POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_small_n13 +POSTHOOK: query: CREATE TABLE bucket_small_n13 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small +POSTHOOK: Output: default@bucket_small_n13 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small +PREHOOK: Output: default@bucket_small_n13 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small +POSTHOOK: Output: default@bucket_small_n13 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small 
+PREHOOK: Output: default@bucket_small_n13 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small +POSTHOOK: Output: default@bucket_small_n13 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small +PREHOOK: Output: default@bucket_small_n13 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small +POSTHOOK: Output: default@bucket_small_n13 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small +PREHOOK: Output: default@bucket_small_n13 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@bucket_small_n13 +PREHOOK: query: CREATE TABLE bucket_big_n13 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_big -POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_big_n13 +POSTHOOK: query: CREATE TABLE bucket_big_n13 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_big -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big +POSTHOOK: Output: default@bucket_big_n13 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big +PREHOOK: Output: default@bucket_big_n13 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big +POSTHOOK: Output: default@bucket_big_n13 +PREHOOK: query: load data local inpath 
'../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big +PREHOOK: Output: default@bucket_big_n13 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: Output: default@bucket_big_n13 +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -141,7 +141,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: bucket_big + base file name: bucket_big_n13 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -154,11 +154,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n13 numFiles 2 numRows 0 rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n13 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -177,20 +177,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n13 numFiles 2 numRows 0 rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n13 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n13 + name: default.bucket_big_n13 Truncated Path -> Alias: - /bucket_big [b] + /bucket_big_n13 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -227,20 +227,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_big_n13 +PREHOOK: Input: default@bucket_small_n13 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: 
default@bucket_big_n13 +POSTHOOK: Input: default@bucket_small_n13 #### A masked pattern was here #### 464 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -281,7 +281,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: bucket_big + base file name: bucket_big_n13 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -294,11 +294,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n13 numFiles 2 numRows 0 rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n13 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -317,20 +317,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n13 numFiles 2 numRows 0 rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n13 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n13 + name: default.bucket_big_n13 Truncated Path -> Alias: - /bucket_big [b] + /bucket_big_n13 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -365,14 +365,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_big_n13 +PREHOOK: Input: default@bucket_small_n13 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n13 a JOIN bucket_big_n13 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_big_n13 +POSTHOOK: Input: default@bucket_small_n13 #### A masked pattern was here #### 464 diff --git a/ql/src/test/results/clientpositive/bucketcontext_6.q.out b/ql/src/test/results/clientpositive/bucketcontext_6.q.out index 729e9b602d..c6e940cc90 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_6.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_6.q.out @@ -1,88 +1,88 @@ -PREHOOK: query: CREATE TABLE bucket_small (key string, value 
string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small_n7 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_small -POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_small_n7 +POSTHOOK: query: CREATE TABLE bucket_small_n7 (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small +POSTHOOK: Output: default@bucket_small_n7 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n7 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small +PREHOOK: Output: default@bucket_small_n7 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n7 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small +POSTHOOK: Output: default@bucket_small_n7 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n7 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small +PREHOOK: Output: default@bucket_small_n7 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n7 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small +POSTHOOK: Output: default@bucket_small_n7 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n7 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small +PREHOOK: Output: default@bucket_small_n7 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n7 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small +POSTHOOK: Output: default@bucket_small_n7 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n7 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small +PREHOOK: Output: default@bucket_small_n7 +POSTHOOK: query: load data 
local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n7 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@bucket_small_n7 +PREHOOK: query: CREATE TABLE bucket_big_n7 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_big -POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_big_n7 +POSTHOOK: query: CREATE TABLE bucket_big_n7 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_big -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n7 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n7 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n7 +POSTHOOK: Output: default@bucket_big_n7@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n7@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_big_n7@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: 
Output: default@bucket_big_n7 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_big_n7 +POSTHOOK: Output: default@bucket_big_n7@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-09 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_big_n7@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n7 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: Output: default@bucket_big_n7@ds=2008-04-09 +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -172,13 +172,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n7 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -197,16 +197,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n7 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n7 + name: default.bucket_big_n7 #### A masked pattern was here #### Partition base file name: ds=2008-04-09 @@ -222,13 +222,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n7 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n7 { string key, string value} 
serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -247,19 +247,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n7 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n7 + name: default.bucket_big_n7 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] - /bucket_big/ds=2008-04-09 [b] + /bucket_big_n7/ds=2008-04-08 [b] + /bucket_big_n7/ds=2008-04-09 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -296,24 +296,24 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_big@ds=2008-04-09 -PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_big_n7 +PREHOOK: Input: default@bucket_big_n7@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n7@ds=2008-04-09 +PREHOOK: Input: default@bucket_small_n7 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_big@ds=2008-04-09 -POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_big_n7 +POSTHOOK: Input: default@bucket_big_n7@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n7@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small_n7 #### A masked pattern was here #### 928 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -367,13 +367,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n7 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -392,16 +392,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name 
default.bucket_big + name default.bucket_big_n7 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n7 + name: default.bucket_big_n7 #### A masked pattern was here #### Partition base file name: ds=2008-04-09 @@ -417,13 +417,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n7 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -442,19 +442,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n7 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n7 + name: default.bucket_big_n7 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] - /bucket_big/ds=2008-04-09 [b] + /bucket_big_n7/ds=2008-04-08 [b] + /bucket_big_n7/ds=2008-04-09 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -489,18 +489,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_big@ds=2008-04-09 -PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_big_n7 +PREHOOK: Input: default@bucket_big_n7@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n7@ds=2008-04-09 +PREHOOK: Input: default@bucket_small_n7 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n7 a JOIN bucket_big_n7 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_big@ds=2008-04-09 -POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_big_n7 +POSTHOOK: Input: default@bucket_big_n7@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n7@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small_n7 #### A masked pattern was here #### 928 diff --git a/ql/src/test/results/clientpositive/bucketcontext_7.q.out b/ql/src/test/results/clientpositive/bucketcontext_7.q.out index fdc8f1df5c..ec8806fbfa 100644 --- 
a/ql/src/test/results/clientpositive/bucketcontext_7.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_7.q.out @@ -1,122 +1,122 @@ -PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small_n2 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_small -POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_small_n2 +POSTHOOK: query: CREATE TABLE bucket_small_n2 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n2 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n2 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n2 +POSTHOOK: Output: default@bucket_small_n2@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n2@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n2@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: 
default@bucket_small_n2@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n2@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n2@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_small_n2@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_small_n2 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -POSTHOOK: Output: default@bucket_small@ds=2008-04-09 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_small_n2 +POSTHOOK: Output: default@bucket_small_n2@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-09 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_small_n2@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-09 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_small_n2@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-09 
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_small_n2@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-09 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_small_n2@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-09 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_small_n2@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_small_n2 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small@ds=2008-04-09 -PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@bucket_small_n2@ds=2008-04-09 +PREHOOK: query: CREATE TABLE bucket_big_n2 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_big -POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_big_n2 +POSTHOOK: query: CREATE TABLE bucket_big_n2 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_big -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n2 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n2 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_big_n2 +POSTHOOK: Output: default@bucket_big_n2@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' 
INTO TABLE bucket_big_n2 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_big_n2@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_big_n2@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_big_n2 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big -POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: Output: default@bucket_big_n2 +POSTHOOK: Output: default@bucket_big_n2@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_big@ds=2008-04-09 -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: Output: default@bucket_big_n2@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n2 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: Output: default@bucket_big_n2@ds=2008-04-09 +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -144,13 +144,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n2 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n2 { string key, string value} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -169,16 +169,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n2 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_small - name: default.bucket_small + name: default.bucket_small_n2 + name: default.bucket_small_n2 Partition input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -192,13 +192,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n2 numFiles 4 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -217,16 +217,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_small + name default.bucket_small_n2 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_small { string key, string value} + serialization.ddl struct bucket_small_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_small - name: default.bucket_small + name: default.bucket_small_n2 + name: default.bucket_small_n2 Alias -> Map Local Operator Tree: a TableScan @@ -303,13 +303,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n2 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -328,16 +328,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n2 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n2 + name: default.bucket_big_n2 #### A masked pattern was here #### Partition base file name: ds=2008-04-09 @@ -353,13 +353,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n2 numFiles 2 numRows 0 partition_columns ds 
partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -378,19 +378,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n2 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n2 + name: default.bucket_big_n2 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] - /bucket_big/ds=2008-04-09 [b] + /bucket_big_n2/ds=2008-04-08 [b] + /bucket_big_n2/ds=2008-04-09 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -427,28 +427,28 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_big@ds=2008-04-09 -PREHOOK: Input: default@bucket_small -PREHOOK: Input: default@bucket_small@ds=2008-04-08 -PREHOOK: Input: default@bucket_small@ds=2008-04-09 -#### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: Input: default@bucket_big_n2 +PREHOOK: Input: default@bucket_big_n2@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n2@ds=2008-04-09 +PREHOOK: Input: default@bucket_small_n2 +PREHOOK: Input: default@bucket_small_n2@ds=2008-04-08 +PREHOOK: Input: default@bucket_small_n2@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_big@ds=2008-04-09 -POSTHOOK: Input: default@bucket_small -POSTHOOK: Input: default@bucket_small@ds=2008-04-08 -POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +POSTHOOK: Input: default@bucket_big_n2 +POSTHOOK: Input: default@bucket_big_n2@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n2@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small_n2 +POSTHOOK: Input: default@bucket_small_n2@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small_n2@ds=2008-04-09 #### A masked pattern was here #### 1856 -PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: 
Stage-1 is a root stage @@ -502,13 +502,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n2 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -527,16 +527,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n2 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n2 + name: default.bucket_big_n2 #### A masked pattern was here #### Partition base file name: ds=2008-04-09 @@ -552,13 +552,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n2 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -577,19 +577,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.bucket_big + name default.bucket_big_n2 partition_columns ds partition_columns.types string - serialization.ddl struct bucket_big { string key, string value} + serialization.ddl struct bucket_big_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket_big - name: default.bucket_big + name: default.bucket_big_n2 + name: default.bucket_big_n2 Truncated Path -> Alias: - /bucket_big/ds=2008-04-08 [b] - /bucket_big/ds=2008-04-09 [b] + /bucket_big_n2/ds=2008-04-08 [b] + /bucket_big_n2/ds=2008-04-09 [b] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -624,22 +624,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket_big -PREHOOK: Input: default@bucket_big@ds=2008-04-08 -PREHOOK: Input: default@bucket_big@ds=2008-04-09 -PREHOOK: Input: default@bucket_small -PREHOOK: Input: default@bucket_small@ds=2008-04-08 -PREHOOK: Input: default@bucket_small@ds=2008-04-09 -#### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: Input: default@bucket_big_n2 +PREHOOK: Input: default@bucket_big_n2@ds=2008-04-08 +PREHOOK: Input: default@bucket_big_n2@ds=2008-04-09 +PREHOOK: Input: default@bucket_small_n2 +PREHOOK: 
Input: default@bucket_small_n2@ds=2008-04-08 +PREHOOK: Input: default@bucket_small_n2@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n2 a JOIN bucket_big_n2 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket_big -POSTHOOK: Input: default@bucket_big@ds=2008-04-08 -POSTHOOK: Input: default@bucket_big@ds=2008-04-09 -POSTHOOK: Input: default@bucket_small -POSTHOOK: Input: default@bucket_small@ds=2008-04-08 -POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +POSTHOOK: Input: default@bucket_big_n2 +POSTHOOK: Input: default@bucket_big_n2@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big_n2@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small_n2 +POSTHOOK: Input: default@bucket_small_n2@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small_n2@ds=2008-04-09 #### A masked pattern was here #### 1856 diff --git a/ql/src/test/results/clientpositive/bucketcontext_8.q.out b/ql/src/test/results/clientpositive/bucketcontext_8.q.out index 42260aa4ff..2eda9399b5 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_8.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_8.q.out @@ -1,122 +1,122 @@ -PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small_n10 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket_small -POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_small_n10 +POSTHOOK: query: CREATE TABLE bucket_small_n10 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket_small -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n10 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: Output: default@bucket_small_n10 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket_small -POSTHOOK: Output: default@bucket_small@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: Output: default@bucket_small_n10 +POSTHOOK: Output: default@bucket_small_n10@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket_small@ds=2008-04-08 
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_small_n10@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_small_n10@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_small_n10
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_small_n10
+POSTHOOK: Output: default@bucket_small_n10@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_small_n10@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n10 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-09
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: Output: default@bucket_small_n10@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_big_n10 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@bucket_big_n10
+POSTHOOK: query: CREATE TABLE bucket_big_n10 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket_big
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n10
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n10
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n10
+POSTHOOK: Output: default@bucket_big_n10@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n10@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n10@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n10@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n10@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n10@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n10@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n10
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n10
+POSTHOOK: Output: default@bucket_big_n10@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n10@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n10@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n10@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n10@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n10@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n10 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: Output: default@bucket_big_n10@ds=2008-04-09
+PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key
PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-3 is a root stage
@@ -144,13 +144,13 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_small
+ name default.bucket_small_n10
numFiles 2
numRows 0
partition_columns ds
partition_columns.types string
rawDataSize 0
- serialization.ddl struct bucket_small { string key, string value}
+ serialization.ddl struct bucket_small_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 2750
@@ -169,16 +169,16 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_small
+ name default.bucket_small_n10
partition_columns ds
partition_columns.types string
- serialization.ddl struct bucket_small { string key, string value}
+ serialization.ddl struct bucket_small_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.bucket_small
- name: default.bucket_small
+ name: default.bucket_small_n10
+ name: default.bucket_small_n10
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -192,13 +192,13 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_small
+ name default.bucket_small_n10
numFiles 2
numRows 0
partition_columns ds
partition_columns.types string
rawDataSize 0
- serialization.ddl struct bucket_small { string key, string value}
+ serialization.ddl struct bucket_small_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 2750
@@ -217,16 +217,16 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_small
+ name default.bucket_small_n10
partition_columns ds
partition_columns.types string
- serialization.ddl struct bucket_small { string key, string value}
+ serialization.ddl struct bucket_small_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.bucket_small
- name: default.bucket_small
+ name: default.bucket_small_n10
+ name: default.bucket_small_n10
Alias -> Map Local Operator Tree:
a
TableScan
@@ -303,13 +303,13 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_big
+ name default.bucket_big_n10
numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
rawDataSize 0
- serialization.ddl struct bucket_big { string key, string value}
+ serialization.ddl struct bucket_big_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 5812
@@ -328,16 +328,16 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_big
+ name default.bucket_big_n10
partition_columns ds
partition_columns.types string
- serialization.ddl struct bucket_big { string key, string value}
+ serialization.ddl struct bucket_big_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.bucket_big
- name: default.bucket_big
+ name: default.bucket_big_n10
+ name: default.bucket_big_n10
#### A masked pattern was here ####
Partition
base file name: ds=2008-04-09
@@ -353,13 +353,13 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_big
+ name default.bucket_big_n10
numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
rawDataSize 0
- serialization.ddl struct bucket_big { string key, string value}
+ serialization.ddl struct bucket_big_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 5812
@@ -378,19 +378,19 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_big
+ name default.bucket_big_n10
partition_columns ds
partition_columns.types string
- serialization.ddl struct bucket_big { string key, string value}
+ serialization.ddl struct bucket_big_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.bucket_big
- name: default.bucket_big
+ name: default.bucket_big_n10
+ name: default.bucket_big_n10
Truncated Path -> Alias:
- /bucket_big/ds=2008-04-08 [b]
- /bucket_big/ds=2008-04-09 [b]
+ /bucket_big_n10/ds=2008-04-08 [b]
+ /bucket_big_n10/ds=2008-04-09 [b]
Needs Tagging: false
Reduce Operator Tree:
Group By Operator
@@ -427,28 +427,28 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@bucket_big
-PREHOOK: Input: default@bucket_big@ds=2008-04-08
-PREHOOK: Input: default@bucket_big@ds=2008-04-09
-PREHOOK: Input: default@bucket_small
-PREHOOK: Input: default@bucket_small@ds=2008-04-08
-PREHOOK: Input: default@bucket_small@ds=2008-04-09
-#### A masked pattern was here ####
-POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: Input: default@bucket_big_n10
+PREHOOK: Input: default@bucket_big_n10@ds=2008-04-08
+PREHOOK: Input: default@bucket_big_n10@ds=2008-04-09
+PREHOOK: Input: default@bucket_small_n10
+PREHOOK: Input: default@bucket_small_n10@ds=2008-04-08
+PREHOOK: Input: default@bucket_small_n10@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucket_big
-POSTHOOK: Input: default@bucket_big@ds=2008-04-08
-POSTHOOK: Input: default@bucket_big@ds=2008-04-09
-POSTHOOK: Input: default@bucket_small
-POSTHOOK: Input: default@bucket_small@ds=2008-04-08
-POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+POSTHOOK: Input: default@bucket_big_n10
+POSTHOOK: Input: default@bucket_big_n10@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big_n10@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small_n10
+POSTHOOK: Input: default@bucket_small_n10@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small_n10@ds=2008-04-09
#### A masked pattern was here ####
1856
-PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key
PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: query: explain extended select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -502,13 +502,13 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_big
+ name default.bucket_big_n10
numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
rawDataSize 0
- serialization.ddl struct bucket_big { string key, string value}
+ serialization.ddl struct bucket_big_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 5812
@@ -527,16 +527,16 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_big
+ name default.bucket_big_n10
partition_columns ds
partition_columns.types string
- serialization.ddl struct bucket_big { string key, string value}
+ serialization.ddl struct bucket_big_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.bucket_big
- name: default.bucket_big
+ name: default.bucket_big_n10
+ name: default.bucket_big_n10
#### A masked pattern was here ####
Partition
base file name: ds=2008-04-09
@@ -552,13 +552,13 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_big
+ name default.bucket_big_n10
numFiles 4
numRows 0
partition_columns ds
partition_columns.types string
rawDataSize 0
- serialization.ddl struct bucket_big { string key, string value}
+ serialization.ddl struct bucket_big_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 5812
@@ -577,19 +577,19 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.bucket_big
+ name default.bucket_big_n10
partition_columns ds
partition_columns.types string
- serialization.ddl struct bucket_big { string key, string value}
+ serialization.ddl struct bucket_big_n10 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.bucket_big
- name: default.bucket_big
+ name: default.bucket_big_n10
+ name: default.bucket_big_n10
Truncated Path -> Alias:
- /bucket_big/ds=2008-04-08 [b]
- /bucket_big/ds=2008-04-09 [b]
+ /bucket_big_n10/ds=2008-04-08 [b]
+ /bucket_big_n10/ds=2008-04-09 [b]
Needs Tagging: false
Reduce Operator Tree:
Group By Operator
@@ -624,22 +624,22 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@bucket_big
-PREHOOK: Input: default@bucket_big@ds=2008-04-08
-PREHOOK: Input: default@bucket_big@ds=2008-04-09
-PREHOOK: Input: default@bucket_small
-PREHOOK: Input: default@bucket_small@ds=2008-04-08
-PREHOOK: Input: default@bucket_small@ds=2008-04-09
-#### A masked pattern was here ####
-POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: Input: default@bucket_big_n10
+PREHOOK: Input: default@bucket_big_n10@ds=2008-04-08
+PREHOOK: Input: default@bucket_big_n10@ds=2008-04-09
+PREHOOK: Input: default@bucket_small_n10
+PREHOOK: Input: default@bucket_small_n10@ds=2008-04-08
+PREHOOK: Input: default@bucket_small_n10@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ MAPJOIN(a) */ count(*) FROM bucket_small_n10 a JOIN bucket_big_n10 b ON a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucket_big
-POSTHOOK: Input: default@bucket_big@ds=2008-04-08
-POSTHOOK: Input: default@bucket_big@ds=2008-04-09
-POSTHOOK: Input: default@bucket_small
-POSTHOOK: Input: default@bucket_small@ds=2008-04-08
-POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+POSTHOOK: Input: default@bucket_big_n10
+POSTHOOK: Input: default@bucket_big_n10@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big_n10@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small_n10
+POSTHOOK: Input: default@bucket_small_n10@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small_n10@ds=2008-04-09
#### A masked pattern was here ####
1856
diff --git a/ql/src/test/results/clientpositive/bucketizedhiveinputformat_auto.q.out b/ql/src/test/results/clientpositive/bucketizedhiveinputformat_auto.q.out
index 51560eb523..13aada2ade 100644
--- a/ql/src/test/results/clientpositive/bucketizedhiveinputformat_auto.q.out
+++ b/ql/src/test/results/clientpositive/bucketizedhiveinputformat_auto.q.out
@@ -1,150 +1,150 @@
-PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE bucket_small_n16 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@bucket_small_n16
+POSTHOOK: query: CREATE TABLE bucket_small_n16 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket_small
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_small_n16
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n16 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_small_n16
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_small_n16 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_small_n16
+POSTHOOK: Output: default@bucket_small_n16@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n16 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_small@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_small_n16@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_small_n16 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_small@ds=2008-04-08
-PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: Output: default@bucket_small_n16@ds=2008-04-08
+PREHOOK: query: CREATE TABLE bucket_big_n16 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@bucket_big_n16
+POSTHOOK: query: CREATE TABLE bucket_big_n16 (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@bucket_big
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n16
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n16
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n16
+POSTHOOK: Output: default@bucket_big_n16@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n16@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n16@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n16@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: Output: default@bucket_big_n16@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: Output: default@bucket_big_n16@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-08')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n16@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n16
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n16
+POSTHOOK: Output: default@bucket_big_n16@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n16@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n16@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n16@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: Output: default@bucket_big_n16@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@bucket_big@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: Output: default@bucket_big_n16@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_big_n16 partition(ds='2008-04-09')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@bucket_big@ds=2008-04-09
-PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: Output: default@bucket_big_n16@ds=2008-04-09
+PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@bucket_big
-PREHOOK: Input: default@bucket_big@ds=2008-04-08
-PREHOOK: Input: default@bucket_big@ds=2008-04-09
-PREHOOK: Input: default@bucket_small
-PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_big_n16
+PREHOOK: Input: default@bucket_big_n16@ds=2008-04-08
+PREHOOK: Input: default@bucket_big_n16@ds=2008-04-09
+PREHOOK: Input: default@bucket_small_n16
+PREHOOK: Input: default@bucket_small_n16@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucket_big
-POSTHOOK: Input: default@bucket_big@ds=2008-04-08
-POSTHOOK: Input: default@bucket_big@ds=2008-04-09
-POSTHOOK: Input: default@bucket_small
-POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big_n16
+POSTHOOK: Input: default@bucket_big_n16@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big_n16@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small_n16
+POSTHOOK: Input: default@bucket_small_n16@ds=2008-04-08
#### A masked pattern was here ####
928
-PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@bucket_big
-PREHOOK: Input: default@bucket_big@ds=2008-04-08
-PREHOOK: Input: default@bucket_big@ds=2008-04-09
-PREHOOK: Input: default@bucket_small
-PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_big_n16
+PREHOOK: Input: default@bucket_big_n16@ds=2008-04-08
+PREHOOK: Input: default@bucket_big_n16@ds=2008-04-09
+PREHOOK: Input: default@bucket_small_n16
+PREHOOK: Input: default@bucket_small_n16@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucket_big
-POSTHOOK: Input: default@bucket_big@ds=2008-04-08
-POSTHOOK: Input: default@bucket_big@ds=2008-04-09
-POSTHOOK: Input: default@bucket_small
-POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big_n16
+POSTHOOK: Input: default@bucket_big_n16@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big_n16@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small_n16
+POSTHOOK: Input: default@bucket_small_n16@ds=2008-04-08
#### A masked pattern was here ####
928
-PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@bucket_big
-PREHOOK: Input: default@bucket_big@ds=2008-04-08
-PREHOOK: Input: default@bucket_big@ds=2008-04-09
-PREHOOK: Input: default@bucket_small
-PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_big_n16
+PREHOOK: Input: default@bucket_big_n16@ds=2008-04-08
+PREHOOK: Input: default@bucket_big_n16@ds=2008-04-09
+PREHOOK: Input: default@bucket_small_n16
+PREHOOK: Input: default@bucket_small_n16@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM bucket_small_n16 a JOIN bucket_big_n16 b ON a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucket_big
-POSTHOOK: Input: default@bucket_big@ds=2008-04-08
-POSTHOOK: Input: default@bucket_big@ds=2008-04-09
-POSTHOOK: Input: default@bucket_small
-POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big_n16
+POSTHOOK: Input: default@bucket_big_n16@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big_n16@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small_n16
+POSTHOOK: Input: default@bucket_small_n16@ds=2008-04-08
#### A masked pattern was here ####
928
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin10.q.out b/ql/src/test/results/clientpositive/bucketmapjoin10.q.out
index 7fee7c297e..a8951d102a 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin10.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin10.q.out
@@ -1,139 +1,139 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n6 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n6
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n6 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n6
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n6@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 3 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6@part=1
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n6 CLUSTERED BY (key) INTO 3 BUCKETS
PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_1
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 3 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n6
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n6
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n6 CLUSTERED BY (key) INTO 3 BUCKETS
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n6
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n6
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6@part=2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n6@part=2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6@part=2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n6@part=2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1_n6 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n6@part=2
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n13 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n13 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n13@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n13@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13@part=1
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n13 CLUSTERED BY (key) INTO 2 BUCKETS
PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n13
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n13 CLUSTERED BY (key) INTO 2 BUCKETS
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2')
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13@part=2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n13@part=2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n13 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=2
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13@part=2
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n13 CLUSTERED BY (key) INTO 3 BUCKETS
PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n13
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n13 CLUSTERED BY (key) INTO 3 BUCKETS
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n13
PREHOOK: query: EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n6 a JOIN srcbucket_mapjoin_part_2_n13 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n6 a JOIN srcbucket_mapjoin_part_2_n13 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -162,13 +162,13 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_2
+ name default.srcbucket_mapjoin_part_2_n13
numFiles 3
numRows 0
partition_columns part
partition_columns.types string
rawDataSize 0
- serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_2_n13 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 4200
@@ -186,16 +186,16 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_2
+ name default.srcbucket_mapjoin_part_2_n13
partition_columns part
partition_columns.types string
- serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_2_n13 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcbucket_mapjoin_part_2
- name: default.srcbucket_mapjoin_part_2
+ name: default.srcbucket_mapjoin_part_2_n13
+ name: default.srcbucket_mapjoin_part_2_n13
Partition
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -209,13 +209,13 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_2
+ name default.srcbucket_mapjoin_part_2_n13
numFiles 2
numRows 0
partition_columns part
partition_columns.types string
rawDataSize 0
- serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_2_n13 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 2750
@@ -233,16 +233,16 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_2
+ name default.srcbucket_mapjoin_part_2_n13
partition_columns part
partition_columns.types string
- serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_2_n13 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcbucket_mapjoin_part_2
- name: default.srcbucket_mapjoin_part_2
+ name: default.srcbucket_mapjoin_part_2_n13
+ name: default.srcbucket_mapjoin_part_2_n13
Alias -> Map Local Operator Tree:
b
TableScan
@@ -310,13 +310,13 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_1
+ name default.srcbucket_mapjoin_part_1_n6
numFiles 2
numRows 0
partition_columns part
partition_columns.types string
rawDataSize 0
- serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_1_n6 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 2750
@@ -334,16 +334,16 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_1
+ name default.srcbucket_mapjoin_part_1_n6
partition_columns part
partition_columns.types string
- serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_1_n6 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcbucket_mapjoin_part_1
- name: default.srcbucket_mapjoin_part_1
+ name: default.srcbucket_mapjoin_part_1_n6
+ name: default.srcbucket_mapjoin_part_1_n6
#### A masked pattern was here ####
Partition
base file name: part=2
@@ -359,13 +359,13 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_1
+ name default.srcbucket_mapjoin_part_1_n6
numFiles 3
numRows 0
partition_columns part
partition_columns.types string
rawDataSize 0
- serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_1_n6 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 4200
@@ -383,19 +383,19 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_1
+ name default.srcbucket_mapjoin_part_1_n6
partition_columns part
partition_columns.types string
- serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_1_n6 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcbucket_mapjoin_part_1
- name: default.srcbucket_mapjoin_part_1
+ name: default.srcbucket_mapjoin_part_1_n6
+ name: default.srcbucket_mapjoin_part_1_n6
Truncated Path -> Alias:
- /srcbucket_mapjoin_part_1/part=1 [a]
- /srcbucket_mapjoin_part_1/part=2 [a]
+ /srcbucket_mapjoin_part_1_n6/part=1 [a]
+ /srcbucket_mapjoin_part_1_n6/part=2 [a]
Needs Tagging: false
Reduce Operator Tree:
Group By Operator
@@ -433,25 +433,25 @@ STAGE PLANS:
ListSink
PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n6 a JOIN srcbucket_mapjoin_part_2_n13 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part_1
-PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=2
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n6
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n6@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n6@part=2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n13
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n13@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n13@part=2
#### A masked pattern was here ####
POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n6 a JOIN srcbucket_mapjoin_part_2_n13 b
ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n6
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n6@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n6@part=2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n13
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n13@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n13@part=2
#### A masked pattern was here ####
2116
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin11.q.out b/ql/src/test/results/clientpositive/bucketmapjoin11.q.out
index 206de5923c..e54a8c3efd 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin11.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin11.q.out
@@ -1,147 +1,147 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n2 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n2
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n2 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 4 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=1
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n2 CLUSTERED BY (key) INTO 4 BUCKETS
PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_1
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 4 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n2
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n2
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n2 CLUSTERED BY (key) INTO 4 BUCKETS
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_1_n2 PARTITION (part='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n2@part=2
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n6 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n6
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n6 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n6
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='1')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=1
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n6 CLUSTERED BY (key) INTO 2 BUCKETS
PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n6
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n6
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n6 CLUSTERED BY (key) INTO 2 BUCKETS
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE
srcbucket_mapjoin_part_2 PARTITION (part='2') +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n6 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='2') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n6 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='2') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='2') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n6 PARTITION (part='2') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=2 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n6@part=2 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -170,13 +170,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n6 numFiles 4 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -194,16 +194,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n6 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### 
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n6 + name: default.srcbucket_mapjoin_part_2_n6 Partition input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -217,13 +217,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n6 numFiles 2 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -241,16 +241,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n6 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n6 + name: default.srcbucket_mapjoin_part_2_n6 Alias -> Map Local Operator Tree: b TableScan @@ -326,13 +326,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n2 numFiles 2 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -350,16 +350,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n2 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n2 + name: default.srcbucket_mapjoin_part_1_n2 #### A masked pattern was here #### Partition base file name: part=2 @@ -375,13 +375,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n2 numFiles 4 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n2 { i32 key, string value} 
serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -399,19 +399,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n2 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n2 + name: default.srcbucket_mapjoin_part_1_n2 Truncated Path -> Alias: - /srcbucket_mapjoin_part_1/part=1 [a] - /srcbucket_mapjoin_part_1/part=2 [a] + /srcbucket_mapjoin_part_1_n2/part=1 [a] + /srcbucket_mapjoin_part_1_n2/part=2 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -449,36 +449,36 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n2 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n2@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n6 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n6@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n6@part=2 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n2@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n6 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n6@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n6@part=2 #### A masked pattern was here #### 2420 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b ON a.key = b.key AND a.part = b.part AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN 
srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b ON a.key = b.key AND a.part = b.part AND a.part IS NOT NULL AND b.part IS NOT NULL POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -507,13 +507,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n6 numFiles 4 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -531,16 +531,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n6 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n6 + name: default.srcbucket_mapjoin_part_2_n6 Partition input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -554,13 +554,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n6 numFiles 2 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -578,16 +578,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n6 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n6 + name: default.srcbucket_mapjoin_part_2_n6 Alias -> Map Local Operator Tree: b TableScan @@ -663,13 +663,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n2 numFiles 2 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n2 { i32 key, string value} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -687,16 +687,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n2 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n2 + name: default.srcbucket_mapjoin_part_1_n2 #### A masked pattern was here #### Partition base file name: part=2 @@ -712,13 +712,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n2 numFiles 4 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -736,19 +736,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n2 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n2 + name: default.srcbucket_mapjoin_part_1_n2 Truncated Path -> Alias: - /srcbucket_mapjoin_part_1/part=1 [a] - /srcbucket_mapjoin_part_1/part=2 [a] + /srcbucket_mapjoin_part_1_n2/part=1 [a] + /srcbucket_mapjoin_part_1_n2/part=2 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -786,25 +786,25 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b ON a.key = b.key AND a.part = b.part AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n2 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n2@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n6 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n6@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n6@part=2 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n2 a JOIN srcbucket_mapjoin_part_2_n6 b ON a.key = b.key AND a.part = b.part AND a.part IS NOT NULL AND b.part IS NOT NULL POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n2@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n6 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n6@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n6@part=2 #### A masked pattern was here #### 928 diff --git a/ql/src/test/results/clientpositive/bucketmapjoin12.q.out b/ql/src/test/results/clientpositive/bucketmapjoin12.q.out index 64eb6a2e79..f892b8e5d7 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin12.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin12.q.out @@ -25,41 +25,41 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TAB POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n0 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n0 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n0 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n0 PARTITION (part='1') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n0 PARTITION (part='1') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n0 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n0@part=1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE 
srcbucket_mapjoin_part_2_n0 PARTITION (part='1') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n0@part=1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n0 PARTITION (part='1') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 NOT CLUSTERED +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n0@part=1 +PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n0 NOT CLUSTERED PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 NOT CLUSTERED +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n0 +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n0 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n0 NOT CLUSTERED POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n0 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n0 PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_3 (key INT, value STRING) PARTITIONED BY (part STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE @@ -97,12 +97,12 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part_3 POSTHOOK: Output: default@srcbucket_mapjoin_part_3 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2_n0 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2_n0 b ON a.key = b.key AND a.part = '1' and b.part = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -131,13 +131,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n0 numFiles 2 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -154,16 +154,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n0 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n0 + 
name: default.srcbucket_mapjoin_part_2_n0 Alias -> Map Local Operator Tree: b TableScan @@ -312,22 +312,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2_n0 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket_mapjoin_part_1 PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n0 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n0@part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2_n0 b ON a.key = b.key AND a.part = '1' and b.part = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket_mapjoin_part_1 POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n0 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n0@part=1 #### A masked pattern was here #### 464 PREHOOK: query: EXPLAIN EXTENDED diff --git a/ql/src/test/results/clientpositive/bucketmapjoin13.q.out b/ql/src/test/results/clientpositive/bucketmapjoin13.q.out index 416e31675d..2f9b3ca1a8 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin13.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin13.q.out @@ -1,75 +1,75 @@ -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n8 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (value) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part_1 -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n8 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n8 +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n8 PARTITION (part='1') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n8@part=1 +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n8 PARTITION (part='1') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: ALTER TABLE 
srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n8@part=1 +POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n8 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n8 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n8 CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Output: default@srcbucket_mapjoin_part_1 -POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n8 CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n8 +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n8 PARTITION (part='2') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n8@part=2 +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n8 PARTITION (part='2') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n8@part=2 +POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n8 PARTITION(part=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n8 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n18 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n18 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n18 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n18 +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n18 PARTITION (part='1') SELECT * FROM src PREHOOK: type: QUERY 
PREHOOK: Input: default@src -PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n18@part=1 +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n18 PARTITION (part='1') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n18@part=1 +POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n18 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n18 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -99,13 +99,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n18 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n18 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -123,16 +123,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n18 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n18 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n18 + name: default.srcbucket_mapjoin_part_2_n18 Alias -> Map Local Operator Tree: b TableScan @@ -201,13 +201,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -225,16 +225,16 @@ STAGE 
PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n8 + name: default.srcbucket_mapjoin_part_1_n8 #### A masked pattern was here #### Partition base file name: part=2 @@ -251,13 +251,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -275,19 +275,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n8 + name: default.srcbucket_mapjoin_part_1_n8 Truncated Path -> Alias: - /srcbucket_mapjoin_part_1/part=1 [a] - /srcbucket_mapjoin_part_1/part=2 [a] + /srcbucket_mapjoin_part_1_n8/part=1 [a] + /srcbucket_mapjoin_part_1_n8/part=2 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -325,34 +325,34 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n18 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n18@part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Input: 
default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n18 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n18@part=1 #### A masked pattern was here #### 2056 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key and a.part = '2' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key and a.part = '2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -382,13 +382,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n18 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n18 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -406,16 +406,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n18 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n18 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n18 + name: default.srcbucket_mapjoin_part_2_n18 Alias -> Map Local Operator Tree: b TableScan @@ -492,13 +492,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -516,18 +516,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n8 + name: default.srcbucket_mapjoin_part_1_n8 Truncated Path -> Alias: - /srcbucket_mapjoin_part_1/part=2 [a] + /srcbucket_mapjoin_part_1_n8/part=2 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -565,40 +565,40 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key and a.part = '2' PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n18 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n18@part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key and a.part = '2' POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n18 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n18@part=1 #### A masked pattern was here #### 1028 -PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 drop partition (part = '1') +PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n8 drop partition (part = '1') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 drop partition (part = '1') +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n8@part=1 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n8 drop partition (part = '1') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n8@part=1 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -628,13 +628,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n18 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - 
serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n18 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -652,16 +652,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n18 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n18 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n18 + name: default.srcbucket_mapjoin_part_2_n18 Alias -> Map Local Operator Tree: b TableScan @@ -738,13 +738,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -762,18 +762,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n8 + name: default.srcbucket_mapjoin_part_1_n8 Truncated Path -> Alias: - /srcbucket_mapjoin_part_1/part=2 [a] + /srcbucket_mapjoin_part_1_n8/part=2 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -811,40 +811,40 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n18 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n18@part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@srcbucket_mapjoin_part_1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n18 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n18@part=1 #### A masked pattern was here #### 1028 -PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (value) INTO 2 BUCKETS +PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n8 CLUSTERED BY (value) INTO 2 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Output: default@srcbucket_mapjoin_part_1 -POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (value) INTO 2 BUCKETS +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n8 CLUSTERED BY (value) INTO 2 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Output: default@srcbucket_mapjoin_part_1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8 +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n8 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -874,13 +874,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n18 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n18 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -898,16 +898,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n18 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n18 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n18 + name: default.srcbucket_mapjoin_part_2_n18 Alias -> Map Local Operator Tree: b TableScan @@ -984,13 +984,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n8 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct 
+  serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 5812
@@ -1008,18 +1008,18 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_1
+  name default.srcbucket_mapjoin_part_1_n8
   partition_columns part
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_1_n8 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part_1
-  name: default.srcbucket_mapjoin_part_1
+  name: default.srcbucket_mapjoin_part_1_n8
+  name: default.srcbucket_mapjoin_part_1_n8
   Truncated Path -> Alias:
-  /srcbucket_mapjoin_part_1/part=2 [a]
+  /srcbucket_mapjoin_part_1_n8/part=2 [a]
   Needs Tagging: false
   Reduce Operator Tree:
   Group By Operator
@@ -1057,21 +1057,21 @@ STAGE PLANS:
   ListSink
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
 ON a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part_1
-PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n18
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n18@part=1
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n8 a JOIN srcbucket_mapjoin_part_2_n18 b
 ON a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n8@part=2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n18
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n18@part=1
 #### A masked pattern was here ####
 1028
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin5.q.out b/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
index 5ed02e7a1a..33fac4528a 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
@@ -1,101 +1,101 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_n0(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@srcbucket_mapjoin_n0
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_n0(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin_n0
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin
+PREHOOK: Output: default@srcbucket_mapjoin_n0
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin_n0
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin
+PREHOOK: Output: default@srcbucket_mapjoin_n0
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: Output: default@srcbucket_mapjoin_n0
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n0 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n0 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-09
-PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-09
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n0 partition(ds='2008-04-09')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-09
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
 PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -165,13 +165,13 @@ POSTHOOK: Output: default@bucketmapjoin_tmp_result
 PREHOOK: query: explain extended
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -309,13 +309,13 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part
+  name default.srcbucket_mapjoin_part_n0
   numFiles 4
   numRows 0
   partition_columns ds
   partition_columns.types string
   rawDataSize 0
-  serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_n0 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 5812
@@ -333,16 +333,16 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part
+  name default.srcbucket_mapjoin_part_n0
   partition_columns ds
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_n0 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part
-  name: default.srcbucket_mapjoin_part
+  name: default.srcbucket_mapjoin_part_n0
+  name: default.srcbucket_mapjoin_part_n0
 #### A masked pattern was here ####
   Partition
   base file name: ds=2008-04-09
@@ -358,13 +358,13 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part
+  name default.srcbucket_mapjoin_part_n0
   numFiles 4
   numRows 0
   partition_columns ds
   partition_columns.types string
   rawDataSize 0
-  serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_n0 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 5812
@@ -382,19 +382,19 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part
+  name default.srcbucket_mapjoin_part_n0
   partition_columns ds
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_n0 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part
-  name: default.srcbucket_mapjoin_part
+  name: default.srcbucket_mapjoin_part_n0
+  name: default.srcbucket_mapjoin_part_n0
   Truncated Path -> Alias:
-  /srcbucket_mapjoin_part/ds=2008-04-08 [b]
-  /srcbucket_mapjoin_part/ds=2008-04-09 [b]
+  /srcbucket_mapjoin_part_n0/ds=2008-04-08 [b]
+  /srcbucket_mapjoin_part_n0/ds=2008-04-09 [b]
   Needs Tagging: false
   Reduce Operator Tree:
   Group By Operator
@@ -661,27 +661,27 @@ STAGE PLANS:
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin
-PREHOOK: Input: default@srcbucket_mapjoin_part
-PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-09
+PREHOOK: Input: default@srcbucket_mapjoin_n0
+PREHOOK: Input: default@srcbucket_mapjoin_part_n0
+PREHOOK: Input: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
 PREHOOK: Output: default@bucketmapjoin_tmp_result
 POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin
-POSTHOOK: Input: default@srcbucket_mapjoin_part
-POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-09
+POSTHOOK: Input: default@srcbucket_mapjoin_n0
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n0
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_n0)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_n0)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_n0)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
@@ -706,27 +706,27 @@ POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin
-PREHOOK: Input: default@srcbucket_mapjoin_part
-PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-09
+PREHOOK: Input: default@srcbucket_mapjoin_n0
+PREHOOK: Input: default@srcbucket_mapjoin_part_n0
+PREHOOK: Input: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
 PREHOOK: Output: default@bucketmapjoin_tmp_result
 POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_n0 b
 on a.key=b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin
-POSTHOOK: Input: default@srcbucket_mapjoin_part
-POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-09
+POSTHOOK: Input: default@srcbucket_mapjoin_n0
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n0
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n0@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n0@ds=2008-04-09
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_n0)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_n0)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_n0)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
@@ -767,13 +767,13 @@ POSTHOOK: Input: default@bucketmapjoin_hash_result_2
 PREHOOK: query: explain extended
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -1263,26 +1263,26 @@ STAGE PLANS:
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_n0
 PREHOOK: Input: default@srcbucket_mapjoin_part_2
 PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
 PREHOOK: Output: default@bucketmapjoin_tmp_result
 POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_n0
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_n0)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_n0)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
@@ -1308,26 +1308,26 @@ POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_n0
 PREHOOK: Input: default@srcbucket_mapjoin_part_2
 PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
 PREHOOK: Output: default@bucketmapjoin_tmp_result
 POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
 select /*+mapjoin(a)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b
+from srcbucket_mapjoin_n0 a join srcbucket_mapjoin_part_2 b
 on a.key=b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_n0
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_n0)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_n0)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin8.q.out b/ql/src/test/results/clientpositive/bucketmapjoin8.q.out
index d0b1dde5dd..86495dfe87 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin8.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin8.q.out
@@ -1,73 +1,73 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n1 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n1
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n1 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n1 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n1 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n1
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n1@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n1 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n1@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n1 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n1@part=1
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n4 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n4 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n4
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n4 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n4 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n4@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n4 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n4@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n4 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n4@part=1
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n4 CLUSTERED BY (key) INTO 3 BUCKETS
 PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n4
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n4 CLUSTERED BY (key) INTO 3 BUCKETS
 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n4
 PREHOOK: query: EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -96,13 +96,13 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_2
+  name default.srcbucket_mapjoin_part_2_n4
   numFiles 2
   numRows 0
   partition_columns part
   partition_columns.types string
   rawDataSize 0
-  serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_2_n4 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 2750
@@ -120,16 +120,16 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_2
+  name default.srcbucket_mapjoin_part_2_n4
   partition_columns part
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_2_n4 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part_2
-  name: default.srcbucket_mapjoin_part_2
+  name: default.srcbucket_mapjoin_part_2_n4
+  name: default.srcbucket_mapjoin_part_2_n4
   Alias -> Map Local Operator Tree:
   b
   TableScan
@@ -205,13 +205,13 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_1
+  name default.srcbucket_mapjoin_part_1_n1
   numFiles 2
   numRows 0
   partition_columns part
   partition_columns.types string
   rawDataSize 0
-  serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_1_n1 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 2750
@@ -229,18 +229,18 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_1
+  name default.srcbucket_mapjoin_part_1_n1
   partition_columns part
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_1_n1 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part_1
-  name: default.srcbucket_mapjoin_part_1
+  name: default.srcbucket_mapjoin_part_1_n1
+  name: default.srcbucket_mapjoin_part_1_n1
   Truncated Path -> Alias:
-  /srcbucket_mapjoin_part_1/part=1 [a]
+  /srcbucket_mapjoin_part_1_n1/part=1 [a]
   Needs Tagging: false
   Reduce Operator Tree:
   Group By Operator
@@ -278,40 +278,40 @@ STAGE PLANS:
   ListSink
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part_1
-PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n1
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n1@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n4
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n4@part=1
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n1@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n4@part=1
 #### A masked pattern was here ####
 464
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n4 CLUSTERED BY (value) INTO 2 BUCKETS
 PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n4
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n4 CLUSTERED BY (value) INTO 2 BUCKETS
 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n4
 PREHOOK: query: EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -340,13 +340,13 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_2
+  name default.srcbucket_mapjoin_part_2_n4
   numFiles 2
   numRows 0
   partition_columns part
   partition_columns.types string
   rawDataSize 0
-  serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_2_n4 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 2750
@@ -364,16 +364,16 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_2
+  name default.srcbucket_mapjoin_part_2_n4
   partition_columns part
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_2_n4 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part_2
-  name: default.srcbucket_mapjoin_part_2
+  name: default.srcbucket_mapjoin_part_2_n4
+  name: default.srcbucket_mapjoin_part_2_n4
   Alias -> Map Local Operator Tree:
   b
   TableScan
@@ -449,13 +449,13 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_1
+  name default.srcbucket_mapjoin_part_1_n1
   numFiles 2
   numRows 0
   partition_columns part
   partition_columns.types string
   rawDataSize 0
-  serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_1_n1 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 2750
@@ -473,18 +473,18 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_1
+  name default.srcbucket_mapjoin_part_1_n1
   partition_columns part
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_1_n1 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part_1
-  name: default.srcbucket_mapjoin_part_1
+  name: default.srcbucket_mapjoin_part_1_n1
+  name: default.srcbucket_mapjoin_part_1_n1
   Truncated Path -> Alias:
-  /srcbucket_mapjoin_part_1/part=1 [a]
+  /srcbucket_mapjoin_part_1_n1/part=1 [a]
   Needs Tagging: false
   Reduce Operator Tree:
   Group By Operator
@@ -522,21 +522,21 @@ STAGE PLANS:
   ListSink
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part_1
-PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n1
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n1@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n4
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n4@part=1
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n1 a JOIN srcbucket_mapjoin_part_2_n4 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n1@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n4
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n4@part=1
 #### A masked pattern was here ####
 464
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin9.q.out b/ql/src/test/results/clientpositive/bucketmapjoin9.q.out
index 24a24e99d7..b6ad96aa32 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin9.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin9.q.out
@@ -1,81 +1,81 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n5 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n5
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n5 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n5
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n5 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n5
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_1_n5 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n5
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n5@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n5 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n5@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_1_n5 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n5@part=1
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n12 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n12 (key INT, value STRING) PARTITIONED BY (part STRING)
 CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (key) INTO 2 BUCKETS
 PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n12
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12
 PREHOOK: query: EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
 SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -104,13 +104,13 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_2
+  name default.srcbucket_mapjoin_part_2_n12
   numFiles 3
   numRows 0
   partition_columns part
   partition_columns.types string
   rawDataSize 0
-  serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_2_n12 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 4200
@@ -128,16 +128,16 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_2
+  name default.srcbucket_mapjoin_part_2_n12
   partition_columns part
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_2_n12 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part_2
-  name: default.srcbucket_mapjoin_part_2
+  name: default.srcbucket_mapjoin_part_2_n12
+  name: default.srcbucket_mapjoin_part_2_n12
   Alias -> Map Local Operator Tree:
   b
   TableScan
@@ -205,13 +205,13 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_1
+  name default.srcbucket_mapjoin_part_1_n5
   numFiles 2
   numRows 0
   partition_columns part
   partition_columns.types string
   rawDataSize 0
-  serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_1_n5 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
   totalSize 2750
@@ -229,18 +229,18 @@ STAGE PLANS:
   columns.comments
   columns.types int:string
 #### A masked pattern was here ####
-  name default.srcbucket_mapjoin_part_1
+  name default.srcbucket_mapjoin_part_1_n5
   partition_columns part
   partition_columns.types string
-  serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+  serialization.ddl struct srcbucket_mapjoin_part_1_n5 { i32 key, string value}
   serialization.format 1
   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.srcbucket_mapjoin_part_1
-  name: default.srcbucket_mapjoin_part_1
+  name: default.srcbucket_mapjoin_part_1_n5
+  name: default.srcbucket_mapjoin_part_1_n5
   Truncated Path -> Alias:
-  /srcbucket_mapjoin_part_1/part=1 [a]
+  /srcbucket_mapjoin_part_1_n5/part=1 [a]
   Needs Tagging: false
   Reduce Operator Tree:
   Group By Operator
@@ -278,73 +278,73 @@ STAGE PLANS:
   ListSink
 PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part_1
-PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n5
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n5@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n12
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n12@part=1
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b
 ON a.key = b.key AND a.part = '1' and b.part = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n5
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n5@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n12@part=1
 #### A masked pattern was here ####
 464
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 DROP PARTITION (part='1')
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n12 DROP PARTITION (part='1')
 PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 DROP PARTITION (part='1')
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n12
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n12 DROP PARTITION (part='1')
 POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (value) INTO 2 BUCKETS
 PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n12
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (value) INTO 2 BUCKETS
 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1')
(part='1') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n12 PARTITION (part='1') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12@part=1 +PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n12 +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n12 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n12 CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n12 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n12 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b ON a.key = b.key AND a.part = '1' AND b.part = '1' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b ON a.key = b.key AND a.part = '1' AND b.part = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -373,13 +373,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n12 numFiles 2 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n12 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -397,16 +397,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n12 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n12 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n12 + name: default.srcbucket_mapjoin_part_2_n12 Alias -> Map Local Operator Tree: b TableScan @@ -474,13 +474,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name 
default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n5 numFiles 2 numRows 0 partition_columns part partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n5 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -498,18 +498,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n5 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n5 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n5 + name: default.srcbucket_mapjoin_part_1_n5 Truncated Path -> Alias: - /srcbucket_mapjoin_part_1/part=1 [a] + /srcbucket_mapjoin_part_1_n5/part=1 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -547,21 +547,21 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b ON a.key = b.key AND a.part = '1' AND b.part = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n5 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n5@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n12 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n12@part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n5 a JOIN srcbucket_mapjoin_part_2_n12 b ON a.key = b.key AND a.part = '1' AND b.part = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n5 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n5@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n12 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n12@part=1 #### A masked pattern was here #### 464 diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out index 7c560d2df8..8695e256ab 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out @@ -1,78 +1,78 @@ -PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_n10(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: 
Output: default@srcbucket_mapjoin -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@srcbucket_mapjoin_n10 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_n10(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin_n10 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n10 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin +PREHOOK: Output: default@srcbucket_mapjoin_n10 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n10 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin_n10 +PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n10 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin +PREHOOK: Output: default@srcbucket_mapjoin_n10 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n10 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@srcbucket_mapjoin_n10 +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n10 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@srcbucket_mapjoin_part_n10 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n10 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n10 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n10 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n10 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE 
srcbucket_mapjoin_part_n10 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n10 +POSTHOOK: Output: default@srcbucket_mapjoin_part_n10@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n10 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n10@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n10 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n10@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n10 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n10@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n10 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string) +POSTHOOK: Output: default@srcbucket_mapjoin_part_n10@ds=2008-04-08 +PREHOOK: query: create table bucketmapjoin_tmp_result_n4 (key string , value1 string, value2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string) +PREHOOK: Output: default@bucketmapjoin_tmp_result_n4 +POSTHOOK: query: create table bucketmapjoin_tmp_result_n4 (key string , value1 string, value2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucketmapjoin_tmp_result +POSTHOOK: Output: default@bucketmapjoin_tmp_result_n4 PREHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n4 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n10 a join srcbucket_mapjoin_part_n10 b on a.key=b.key where b.ds="2008-04-08" PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n4 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part b +from srcbucket_mapjoin_n10 a join 
srcbucket_mapjoin_part_n10 b on a.key=b.key where b.ds="2008-04-08" POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -107,13 +107,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part + name default.srcbucket_mapjoin_part_n10 numFiles 3 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 4200 @@ -131,16 +131,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part + name default.srcbucket_mapjoin_part_n10 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part - name: default.srcbucket_mapjoin_part + name: default.srcbucket_mapjoin_part_n10 + name: default.srcbucket_mapjoin_part_n10 Alias -> Map Local Operator Tree: b TableScan @@ -200,17 +200,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n4 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n4 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -237,7 +237,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: srcbucket_mapjoin + base file name: srcbucket_mapjoin_n10 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -249,11 +249,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin + name default.srcbucket_mapjoin_n10 numFiles 2 numRows 0 rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -271,20 +271,20 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin + name default.srcbucket_mapjoin_n10 numFiles 2 numRows 0 rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_n10 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 #### A masked pattern was here #### serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin - name: default.srcbucket_mapjoin + name: default.srcbucket_mapjoin_n10 + name: default.srcbucket_mapjoin_n10 Truncated Path -> Alias: - /srcbucket_mapjoin [a] + /srcbucket_mapjoin_n10 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -341,17 +341,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n4 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n4 Stage: Stage-2 Stats Work @@ -360,7 +360,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value1, value2 Column Types: string, string, string - Table: default.bucketmapjoin_tmp_result + Table: default.bucketmapjoin_tmp_result_n4 Is Table Level Stats: true Stage: Stage-3 @@ -385,17 +385,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n4 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n4 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -416,11 +416,11 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n4 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -438,18 +438,18 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n4 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n4 + name: default.bucketmapjoin_tmp_result_n4 Truncated Path -> Alias: #### A masked pattern was here #### @@ -475,17 +475,17 @@ STAGE PLANS: 
columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n4 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n4 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -506,11 +506,11 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n4 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -528,18 +528,18 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n4 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n4 + name: default.bucketmapjoin_tmp_result_n4 Truncated Path -> Alias: #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out index 1260a58a3b..43983794ad 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out @@ -1,87 +1,87 @@ -PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_n5(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@srcbucket_mapjoin_n5 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_n5(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin_n5 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n5 PREHOOK: type: LOAD #### A masked pattern was here 
#### -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin +PREHOOK: Output: default@srcbucket_mapjoin_n5 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n5 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin_n5 +PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n5 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin +PREHOOK: Output: default@srcbucket_mapjoin_n5 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_n5 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@srcbucket_mapjoin_n5 +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n7 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n7 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n7 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n7 +PREHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n7 +POSTHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n7 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n7@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08 
-POSTHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n7@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n7@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n7 +POSTHOOK: query: load data local inpath '../../data/files/bmj2/000000_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-09 -PREHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n7 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n7@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-09 -POSTHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n7@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/bmj2/000001_0' INTO TABLE srcbucket_mapjoin_part_2_n7 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-09 -PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string) +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n7@ds=2008-04-09 +PREHOOK: query: create table bucketmapjoin_tmp_result_n3 (key string , value1 string, value2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucketmapjoin_tmp_result -POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string) +PREHOOK: Output: default@bucketmapjoin_tmp_result_n3 +POSTHOOK: query: create table bucketmapjoin_tmp_result_n3 (key string , value1 string, value2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucketmapjoin_tmp_result +POSTHOOK: Output: default@bucketmapjoin_tmp_result_n3 PREHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n3 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_n5 a join srcbucket_mapjoin_part_2_n7 b on a.key=b.key PREHOOK: type: 
QUERY POSTHOOK: query: explain extended -insert overwrite table bucketmapjoin_tmp_result +insert overwrite table bucketmapjoin_tmp_result_n3 select /*+mapjoin(b)*/ a.key, a.value, b.value -from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b +from srcbucket_mapjoin_n5 a join srcbucket_mapjoin_part_2_n7 b on a.key=b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -116,13 +116,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n7 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n7 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 3062 @@ -140,16 +140,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n7 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n7 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n7 + name: default.srcbucket_mapjoin_part_2_n7 Partition input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -163,13 +163,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n7 numFiles 2 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n7 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 3062 @@ -187,16 +187,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n7 partition_columns ds partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n7 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n7 + name: default.srcbucket_mapjoin_part_2_n7 Alias -> Map Local Operator Tree: b TableScan @@ -264,17 +264,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n3 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct 
bucketmapjoin_tmp_result_n3 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n3 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -301,7 +301,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: srcbucket_mapjoin + base file name: srcbucket_mapjoin_n5 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -313,11 +313,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin + name default.srcbucket_mapjoin_n5 numFiles 2 numRows 0 rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_n5 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 @@ -335,20 +335,20 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin + name default.srcbucket_mapjoin_n5 numFiles 2 numRows 0 rawDataSize 0 - serialization.ddl struct srcbucket_mapjoin { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_n5 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2750 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin - name: default.srcbucket_mapjoin + name: default.srcbucket_mapjoin_n5 + name: default.srcbucket_mapjoin_n5 Truncated Path -> Alias: - /srcbucket_mapjoin [a] + /srcbucket_mapjoin_n5 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -405,17 +405,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n3 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n3 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucketmapjoin_tmp_result + name: default.bucketmapjoin_tmp_result_n3 Stage: Stage-2 Stats Work @@ -424,7 +424,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value1, value2 Column Types: string, string, string - Table: default.bucketmapjoin_tmp_result + Table: default.bucketmapjoin_tmp_result_n3 Is Table Level Stats: true Stage: Stage-3 @@ -449,17 +449,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.bucketmapjoin_tmp_result + name default.bucketmapjoin_tmp_result_n3 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2} + serialization.ddl struct bucketmapjoin_tmp_result_n3 { string key, string value1, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe 
                    totalSize 0
#### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                 name: default.bucketmapjoin_tmp_result
+                 name: default.bucketmapjoin_tmp_result_n3
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -480,11 +480,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string:string
#### A masked pattern was here ####
-             name default.bucketmapjoin_tmp_result
+             name default.bucketmapjoin_tmp_result_n3
              numFiles 0
              numRows 0
              rawDataSize 0
-             serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+             serialization.ddl struct bucketmapjoin_tmp_result_n3 { string key, string value1, string value2}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -502,18 +502,18 @@ STAGE PLANS:
                columns.comments
                columns.types string:string:string
#### A masked pattern was here ####
-               name default.bucketmapjoin_tmp_result
+               name default.bucketmapjoin_tmp_result_n3
                numFiles 0
                numRows 0
                rawDataSize 0
-               serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+               serialization.ddl struct bucketmapjoin_tmp_result_n3 { string key, string value1, string value2}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 0
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.bucketmapjoin_tmp_result
-           name: default.bucketmapjoin_tmp_result
+             name: default.bucketmapjoin_tmp_result_n3
+           name: default.bucketmapjoin_tmp_result_n3
      Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -539,17 +539,17 @@ STAGE PLANS:
                    columns.comments
                    columns.types string:string:string
#### A masked pattern was here ####
-                   name default.bucketmapjoin_tmp_result
+                   name default.bucketmapjoin_tmp_result_n3
                    numFiles 0
                    numRows 0
                    rawDataSize 0
-                   serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                   serialization.ddl struct bucketmapjoin_tmp_result_n3 { string key, string value1, string value2}
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    totalSize 0
#### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                 name: default.bucketmapjoin_tmp_result
+                 name: default.bucketmapjoin_tmp_result_n3
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -570,11 +570,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string:string
#### A masked pattern was here ####
-             name default.bucketmapjoin_tmp_result
+             name default.bucketmapjoin_tmp_result_n3
              numFiles 0
              numRows 0
              rawDataSize 0
-             serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+             serialization.ddl struct bucketmapjoin_tmp_result_n3 { string key, string value1, string value2}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -592,18 +592,18 @@ STAGE PLANS:
                columns.comments
                columns.types string:string:string
#### A masked pattern was here ####
-               name default.bucketmapjoin_tmp_result
+               name default.bucketmapjoin_tmp_result_n3
                numFiles 0
                numRows 0
                rawDataSize 0
-               serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+               serialization.ddl struct bucketmapjoin_tmp_result_n3 { string key, string value1, string value2}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 0
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.bucketmapjoin_tmp_result
-           name: default.bucketmapjoin_tmp_result
+             name: default.bucketmapjoin_tmp_result_n3
+           name: default.bucketmapjoin_tmp_result_n3
      Truncated Path -> Alias:
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
index d0ad82b601..be94a8bcb1 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
@@ -1,10 +1,10 @@
-PREHOOK: query: drop table test1
+PREHOOK: query: drop table test1_n10
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table test1
+POSTHOOK: query: drop table test1_n10
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table test2
+PREHOOK: query: drop table test2_n7
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table test2
+POSTHOOK: query: drop table test2_n7
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: drop table test3
 PREHOOK: type: DROPTABLE
@@ -14,22 +14,22 @@ PREHOOK: query: drop table test4
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table test4
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets
+PREHOOK: query: create table test1_n10 (key string, value string) clustered by (key) sorted by (key) into 3 buckets
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test1
-POSTHOOK: query: create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets
+PREHOOK: Output: default@test1_n10
+POSTHOOK: query: create table test1_n10 (key string, value string) clustered by (key) sorted by (key) into 3 buckets
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test1
-PREHOOK: query: create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets
+POSTHOOK: Output: default@test1_n10
+PREHOOK: query: create table test2_n7 (key string, value string) clustered by (value) sorted by (value) into 3 buckets
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test2
-POSTHOOK: query: create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets
+PREHOOK: Output: default@test2_n7
+POSTHOOK: query: create table test2_n7 (key string, value string) clustered by (value) sorted by (value) into 3 buckets
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test2
+POSTHOOK: Output: default@test2_n7
 PREHOOK: query: create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -46,54 +46,54 @@ POSTHOOK: query: create table test4 (key string, value string) clustered by (val
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test4
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test1
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test1_n10
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@test1
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test1
+PREHOOK: Output: default@test1_n10
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test1_n10
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@test1
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test1
+POSTHOOK: Output: default@test1_n10
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test1_n10
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@test1
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test1
+PREHOOK: Output: default@test1_n10
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test1_n10
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@test1
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test1
+POSTHOOK: Output: default@test1_n10
+PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test1_n10
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@test1
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test1
+PREHOOK: Output: default@test1_n10
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test1_n10
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@test1
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test2
+POSTHOOK: Output: default@test1_n10
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test2_n7
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@test2
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test2
+PREHOOK: Output: default@test2_n7
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test2_n7
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@test2
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test2
+POSTHOOK: Output: default@test2_n7
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test2_n7
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@test2
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test2
+PREHOOK: Output: default@test2_n7
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE test2_n7
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@test2
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test2
+POSTHOOK: Output: default@test2_n7
+PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test2_n7
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@test2
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test2
+PREHOOK: Output: default@test2_n7
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE test2_n7
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@test2
+POSTHOOK: Output: default@test2_n7
 PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE test3
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
@@ -142,9 +142,9 @@ POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TAB
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@test4
-PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value
+PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test1_n10 R on L.key=R.key AND L.value=R.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value
+POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test1_n10 R on L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
@@ -236,7 +236,7 @@ STAGE PLANS:
      Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: test1
+           base file name: test1_n10
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -249,11 +249,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
@@ -272,20 +272,20 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.test1
-         name: default.test1
+           name: default.test1_n10
+         name: default.test1_n10
      Truncated Path -> Alias:
-        /test1 [l]
+        /test1_n10 [l]

  Stage: Stage-0
    Fetch Operator
@@ -293,9 +293,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value
+PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test2_n7 R on L.key=R.key AND L.value=R.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value
+POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test2_n7 R on L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
@@ -387,7 +387,7 @@ STAGE PLANS:
      Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: test2
+           base file name: test2_n7
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -400,11 +400,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test2
+             name default.test2_n7
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test2 { string key, string value}
+             serialization.ddl struct test2_n7 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
@@ -423,20 +423,20 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test2
+             name default.test2_n7
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test2 { string key, string value}
+             serialization.ddl struct test2_n7 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.test2
-         name: default.test2
+           name: default.test2_n7
+         name: default.test2_n7
      Truncated Path -> Alias:
-        /test2 [l]
+        /test2_n7 [l]

  Stage: Stage-0
    Fetch Operator
@@ -444,9 +444,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key
+PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test1_n10 R on L.key+L.key=R.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key
+POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test1_n10 R on L.key+L.key=R.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
@@ -530,7 +530,7 @@ STAGE PLANS:
      Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: test1
+           base file name: test1_n10
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -543,11 +543,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
@@ -566,20 +566,20 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.test1
-         name: default.test1
+           name: default.test1_n10
+         name: default.test1_n10
      Truncated Path -> Alias:
-        /test1 [l]
+        /test1_n10 [l]

  Stage: Stage-0
    Fetch Operator
@@ -587,9 +587,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value
+PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test2_n7 R on L.key=R.key AND L.value=R.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value
+POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test2_n7 R on L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
@@ -673,7 +673,7 @@ STAGE PLANS:
      Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: test1
+           base file name: test1_n10
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -686,11 +686,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
@@ -709,20 +709,20 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.test1
-         name: default.test1
+           name: default.test1_n10
+         name: default.test1_n10
      Truncated Path -> Alias:
-        /test1 [l]
+        /test1_n10 [l]

  Stage: Stage-0
    Fetch Operator
@@ -730,9 +730,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value
+PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test3 R on L.key=R.key AND L.value=R.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value
+POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test3 R on L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
@@ -816,7 +816,7 @@ STAGE PLANS:
      Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: test1
+           base file name: test1_n10
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -829,11 +829,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
@@ -852,20 +852,20 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.test1
-         name: default.test1
+           name: default.test1_n10
+         name: default.test1_n10
      Truncated Path -> Alias:
-        /test1 [l]
+        /test1_n10 [l]

  Stage: Stage-0
    Fetch Operator
@@ -873,9 +873,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value
+PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test4 R on L.key=R.key AND L.value=R.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value
+POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test1_n10 L join test4 R on L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
@@ -959,7 +959,7 @@ STAGE PLANS:
      Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: test1
+           base file name: test1_n10
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -972,11 +972,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
@@ -995,20 +995,20 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test1
+             name default.test1_n10
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test1 { string key, string value}
+             serialization.ddl struct test1_n10 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.test1
-         name: default.test1
+           name: default.test1_n10
+         name: default.test1_n10
      Truncated Path -> Alias:
-        /test1 [l]
+        /test1_n10 [l]

  Stage: Stage-0
    Fetch Operator
@@ -1016,9 +1016,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value
+PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test3 R on L.key=R.key AND L.value=R.value
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value
+POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test3 R on L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
@@ -1102,7 +1102,7 @@ STAGE PLANS:
      Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: test2
+           base file name: test2_n7
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -1115,11 +1115,11 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test2
+             name default.test2_n7
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test2 { string key, string value}
+             serialization.ddl struct test2_n7 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
@@ -1138,20 +1138,20 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
#### A masked pattern was here ####
-             name default.test2
+             name default.test2_n7
              numFiles 3
              numRows 0
              rawDataSize 0
-             serialization.ddl struct test2 { string key, string value}
+             serialization.ddl struct test2_n7 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 4200
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.test2
-         name:
default.test2 + name: default.test2_n7 + name: default.test2_n7 Truncated Path -> Alias: - /test2 [l] + /test2_n7 [l] Stage: Stage-0 Fetch Operator @@ -1159,9 +1159,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value +PREHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test4 R on L.key=R.key AND L.value=R.value PREHOOK: type: QUERY -POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value +POSTHOOK: query: explain extended select /*+ MAPJOIN(R) */ * from test2_n7 L join test4 R on L.key=R.key AND L.value=R.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -1245,7 +1245,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: test2 + base file name: test2_n7 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -1258,11 +1258,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.test2 + name default.test2_n7 numFiles 3 numRows 0 rawDataSize 0 - serialization.ddl struct test2 { string key, string value} + serialization.ddl struct test2_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 4200 @@ -1281,20 +1281,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.test2 + name default.test2_n7 numFiles 3 numRows 0 rawDataSize 0 - serialization.ddl struct test2 { string key, string value} + serialization.ddl struct test2_n7 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 4200 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 + name: default.test2_n7 + name: default.test2_n7 Truncated Path -> Alias: - /test2 [l] + /test2_n7 [l] Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out index 49598d1d2a..fe2c7c27cf 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out @@ -1,47 +1,47 @@ -PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1_n5 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table1 -POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@test_table1_n5 +POSTHOOK: query: CREATE TABLE test_table1_n5 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table1 -PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: Output: default@test_table1_n5 +PREHOOK: query: CREATE TABLE test_table2_n5 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY 
(key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table2 -POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@test_table2_n5 +POSTHOOK: query: CREATE TABLE test_table2_n5 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table2 +POSTHOOK: Output: default@test_table2_n5 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table1_n5 PARTITION (ds = '1') SELECT * PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_table1@ds=1 +PREHOOK: Output: default@test_table1_n5@ds=1 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table1_n5 PARTITION (ds = '1') SELECT * POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_table1@ds=1 -POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@test_table1_n5@ds=1 +POSTHOOK: Lineage: test_table1_n5 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1_n5 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT x.key, x.value from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT x.key, x.value from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -67,7 +67,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table2 + name: default.test_table2_n5 Stage: Stage-0 Move Operator @@ -79,7 +79,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table2 + name: default.test_table2_n5 Stage: Stage-2 Stats Work @@ -87,71 +87,71 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.test_table2 + Table: default.test_table2_n5 -PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1') SELECT x.key, x.value from ( -SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' +SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1' )x PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Output: default@test_table2@ds=1 -POSTHOOK: query: INSERT 
+PREHOOK: Input: default@test_table1_n5
+PREHOOK: Input: default@test_table1_n5@ds=1
+PREHOOK: Output: default@test_table2_n5@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT x.key, x.value from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table1@ds=1
-POSTHOOK: Output: default@test_table2@ds=1
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: Input: default@test_table1_n5
+POSTHOOK: Input: default@test_table1_n5@ds=1
+POSTHOOK: Output: default@test_table2_n5@ds=1
+POSTHOOK: Lineage: test_table2_n5 PARTITION(ds=1).key SIMPLE [(test_table1_n5)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2_n5 PARTITION(ds=1).value SIMPLE [(test_table1_n5)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table2_n5 where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n5 where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2_n5
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: query: select count(*) from test_table2_n5 tablesample (bucket 1 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2_n5
+PREHOOK: Input: default@test_table2_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n5 tablesample (bucket 1 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2_n5
+POSTHOOK: Input: default@test_table2_n5@ds=1
 #### A masked pattern was here ####
 243
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: query: select count(*) from test_table2_n5 tablesample (bucket 2 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2_n5
+PREHOOK: Input: default@test_table2_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n5 tablesample (bucket 2 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2_n5
+POSTHOOK: Input: default@test_table2_n5@ds=1
 #### A masked pattern was here ####
 257
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT * from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT * from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -177,7 +177,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n5
 
   Stage: Stage-0
     Move Operator
@@ -189,7 +189,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n5
 
   Stage: Stage-2
     Stats Work
@@ -197,71 +197,71 @@ STAGE PLANS:
       Column Stats Desc:
         Columns: key, value
        Column Types: int, string
-        Table: default.test_table2
+        Table: default.test_table2_n5
 
-PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT * from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Input: default@test_table1@ds=1
-PREHOOK: Output: default@test_table2@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+PREHOOK: Input: default@test_table1_n5
+PREHOOK: Input: default@test_table1_n5@ds=1
+PREHOOK: Output: default@test_table2_n5@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT * from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table1@ds=1
-POSTHOOK: Output: default@test_table2@ds=1
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: Input: default@test_table1_n5
+POSTHOOK: Input: default@test_table1_n5@ds=1
+POSTHOOK: Output: default@test_table2_n5@ds=1
+POSTHOOK: Lineage: test_table2_n5 PARTITION(ds=1).key SIMPLE [(test_table1_n5)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2_n5 PARTITION(ds=1).value SIMPLE [(test_table1_n5)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table2_n5 where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n5 where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2_n5
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: query: select count(*) from test_table2_n5 tablesample (bucket 1 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2_n5
+PREHOOK: Input: default@test_table2_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n5 tablesample (bucket 1 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2_n5
+POSTHOOK: Input: default@test_table2_n5@ds=1
 #### A masked pattern was here ####
 243
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: query: select count(*) from test_table2_n5 tablesample (bucket 2 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2_n5
+PREHOOK: Input: default@test_table2_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n5 tablesample (bucket 2 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2_n5
+POSTHOOK: Input: default@test_table2_n5@ds=1
 #### A masked pattern was here ####
 257
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT x.key, concat(x.value, x.value) from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT x.key, concat(x.value, x.value) from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -287,7 +287,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n5
 
   Stage: Stage-0
     Move Operator
@@ -299,7 +299,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n5
 
   Stage: Stage-2
     Stats Work
@@ -307,20 +307,20 @@ STAGE PLANS:
       Column Stats Desc:
         Columns: key, value
        Column Types: int, string
-        Table: default.test_table2
+        Table: default.test_table2_n5
 
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT x.key+x.key, x.value from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT x.key+x.key, x.value from (
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key, a.value FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -358,7 +358,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n5
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string), '1' (type: string)
             outputColumnNames: key, value, ds
@@ -386,7 +386,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n5
 
   Stage: Stage-2
     Stats Work
@@ -394,7 +394,7 @@ STAGE PLANS:
       Column Stats Desc:
        Columns: key, value
        Column Types: int, string
-        Table: default.test_table2
+        Table: default.test_table2_n5
 
   Stage: Stage-3
     Map Reduce
@@ -426,17 +426,17 @@ STAGE PLANS:
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT x.k1, concat(x.v1, x.v1) from (
-SELECT a.key as k1, a.value as v1 FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key as k1, a.value as v1 FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n5 PARTITION (ds = '1')
 SELECT x.k1, concat(x.v1, x.v1) from (
-SELECT a.key as k1, a.value as v1 FROM test_table1 a WHERE a.ds = '1'
+SELECT a.key as k1, a.value as v1 FROM test_table1_n5 a WHERE a.ds = '1'
 )x
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -462,7 +462,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n5
 
   Stage: Stage-0
     Move Operator
@@ -474,7 +474,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n5
 
   Stage: Stage-2
     Stats Work
@@ -482,5 +482,5 @@ STAGE PLANS:
       Column Stats Desc:
        Columns: key, value
        Column Types: int, string
-        Table: default.test_table2
+        Table: default.test_table2_n5
 
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
index d1c11f4b58..085f8f4e14 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE test_table1_n19 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table1_n19
+POSTHOOK: query: CREATE TABLE test_table1_n19 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (value STRING, key INT) PARTITIONED BY (ds STRING)
+POSTHOOK: Output: default@test_table1_n19
+PREHOOK: query: CREATE TABLE test_table2_n18 (value STRING, key INT) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (value STRING, key INT) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n18
+POSTHOOK: query: CREATE TABLE test_table2_n18 (value STRING, key INT) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n18
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table1_n19 PARTITION (ds = '1') SELECT *
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1@ds=1
+PREHOOK: Output: default@test_table1_n19@ds=1
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table1_n19 PARTITION (ds = '1') SELECT *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1@ds=1
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table1_n19@ds=1
+POSTHOOK: Lineage: test_table1_n19 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1_n19 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
 SELECT x.value, x.key from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
 SELECT x.value, x.key from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -63,7 +63,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n18
 
   Stage: Stage-0
     Move Operator
@@ -75,7 +75,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n18
 
   Stage: Stage-2
    Stats Work
@@ -83,74 +83,74 @@ STAGE PLANS:
      Column Stats Desc:
        Columns: value, key
        Column Types: string, int
-        Table: default.test_table2
+        Table: default.test_table2_n18
 
-PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
 SELECT x.value, x.key from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Input: default@test_table1@ds=1
-PREHOOK: Output: default@test_table2@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+PREHOOK: Input: default@test_table1_n19
+PREHOOK: Input: default@test_table1_n19@ds=1
+PREHOOK: Output: default@test_table2_n18@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
 SELECT x.value, x.key from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table1@ds=1
-POSTHOOK: Output: default@test_table2@ds=1
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: Input: default@test_table1_n19
+POSTHOOK: Input: default@test_table1_n19@ds=1
+POSTHOOK: Output: default@test_table2_n18@ds=1
+POSTHOOK: Lineage: test_table2_n18 PARTITION(ds=1).key SIMPLE [(test_table1_n19)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2_n18 PARTITION(ds=1).value SIMPLE [(test_table1_n19)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table2_n18 where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n18 where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2_n18
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: query: select count(*) from test_table2_n18 tablesample (bucket 1 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2_n18
+PREHOOK: Input: default@test_table2_n18@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n18 tablesample (bucket 1 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2_n18
+POSTHOOK: Input: default@test_table2_n18@ds=1
 #### A masked pattern was here ####
 243
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: query: select count(*) from test_table2_n18 tablesample (bucket 2 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2_n18
+PREHOOK: Input: default@test_table2_n18@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n18 tablesample (bucket 2 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2_n18
+POSTHOOK: Input: default@test_table2_n18@ds=1
 #### A masked pattern was here ####
 257
-PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE test_table3_n10 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table3_n10
+POSTHOOK: query: CREATE TABLE test_table3_n10 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table3
+POSTHOOK: Output: default@test_table3_n10
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
 SELECT x.key, x.value from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
 SELECT x.key, x.value from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -187,7 +187,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n18
           Select Operator
             expressions: _col0 (type: string), _col1 (type: int), '1' (type: string)
             outputColumnNames: value, key, ds
@@ -215,7 +215,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table2
+          name: default.test_table2_n18
 
   Stage: Stage-2
     Stats Work
@@ -223,7 +223,7 @@ STAGE PLANS:
      Column Stats Desc:
        Columns: value, key
        Column Types: string, int
-        Table: default.test_table2
+        Table: default.test_table2_n18
 
   Stage: Stage-3
     Map Reduce
@@ -254,50 +254,50 @@ STAGE PLANS:
           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
 SELECT x.key, x.value from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Input: default@test_table1@ds=1
-PREHOOK: Output: default@test_table2@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+PREHOOK: Input: default@test_table1_n19
+PREHOOK: Input: default@test_table1_n19@ds=1
+PREHOOK: Output: default@test_table2_n18@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2_n18 PARTITION (ds = '1')
 SELECT x.key, x.value from
-(SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x
+(SELECT a.key, a.value FROM test_table1_n19 a WHERE a.ds = '1')x
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table1@ds=1
-POSTHOOK: Output: default@test_table2@ds=1
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: Input: default@test_table1_n19
+POSTHOOK: Input: default@test_table1_n19@ds=1
+POSTHOOK: Output: default@test_table2_n18@ds=1
+POSTHOOK: Lineage: test_table2_n18 PARTITION(ds=1).key EXPRESSION [(test_table1_n19)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: test_table2_n18 PARTITION(ds=1).value EXPRESSION [(test_table1_n19)a.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select count(*) from test_table2_n18 where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n18 where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2_n18
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: query: select count(*) from test_table2_n18 tablesample (bucket 1 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2_n18
+PREHOOK: Input: default@test_table2_n18@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n18 tablesample (bucket 1 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2_n18
+POSTHOOK: Input: default@test_table2_n18@ds=1
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: query: select count(*) from test_table2_n18 tablesample (bucket 2 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2_n18
+PREHOOK: Input: default@test_table2_n18@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: query: select count(*) from test_table2_n18 tablesample (bucket 2 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2_n18
+POSTHOOK: Input: default@test_table2_n18@ds=1
 #### A masked pattern was here ####
 0
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
index 72e918fa0e..cbcac68cd1 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
@@ -1,67 +1,67 @@
-PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE test_table1_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table1_n16
+POSTHOOK: query: CREATE TABLE test_table1_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Output: default@test_table1_n16
+PREHOOK: query: CREATE TABLE test_table2_n15 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n15
+POSTHOOK: query: CREATE TABLE test_table2_n15 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Output: default@test_table2_n15
+PREHOOK: query: CREATE TABLE test_table3_n8 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key2) SORTED BY (key2) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table3_n8
+POSTHOOK: query: CREATE TABLE test_table3_n8 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key2) SORTED BY (key2) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table3
+POSTHOOK: Output: default@test_table3_n8
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+INSERT OVERWRITE TABLE test_table1_n16 PARTITION (ds = '1') SELECT * where key < 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1@ds=1
+PREHOOK: Output: default@test_table1_n16@ds=1
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+INSERT OVERWRITE TABLE test_table1_n16 PARTITION (ds = '1') SELECT * where key < 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1@ds=1
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table1_n16@ds=1
+POSTHOOK: Lineage: test_table1_n16 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1_n16 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+INSERT OVERWRITE TABLE test_table2_n15 PARTITION (ds = '1') SELECT * where key < 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table2@ds=1
+PREHOOK: Output: default@test_table2_n15@ds=1
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+INSERT OVERWRITE TABLE test_table2_n15 PARTITION (ds = '1') SELECT * where key < 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table2@ds=1
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table2_n15@ds=1
+POSTHOOK: Lineage: test_table2_n15 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2_n15 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
 SELECT a.key, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
 SELECT a.key, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -105,7 +105,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table3
+          name: default.test_table3_n8
           Select Operator
             expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
             outputColumnNames: key, key2, value
@@ -131,7 +131,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table3
+          name: default.test_table3_n8
 
   Stage: Stage-2
     Stats Work
@@ -139,7 +139,7 @@ STAGE PLANS:
      Column Stats Desc:
        Columns: key, key2, value
        Column Types: int, int, string
-        Table: default.test_table3
+        Table: default.test_table3_n8
 
   Stage: Stage-3
     Map Reduce
@@ -166,49 +166,49 @@ STAGE PLANS:
           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
 SELECT a.key, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Input: default@test_table1@ds=1
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
-PREHOOK: Output: default@test_table3@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+PREHOOK: Input: default@test_table1_n16
+PREHOOK: Input: default@test_table1_n16@ds=1
+PREHOOK: Input: default@test_table2_n15
+PREHOOK: Input: default@test_table2_n15@ds=1
+PREHOOK: Output: default@test_table3_n8@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
 SELECT a.key, a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table1@ds=1
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
-POSTHOOK: Output: default@test_table3@ds=1
-POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: Input: default@test_table1_n16
+POSTHOOK: Input: default@test_table1_n16@ds=1
+POSTHOOK: Input: default@test_table2_n15
+POSTHOOK: Input: default@test_table2_n15@ds=1
+POSTHOOK: Output: default@test_table3_n8@ds=1
+POSTHOOK: Lineage: test_table3_n8 PARTITION(ds=1).key SIMPLE [(test_table1_n16)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3_n8 PARTITION(ds=1).key2 SIMPLE [(test_table1_n16)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3_n8 PARTITION(ds=1).value EXPRESSION [(test_table1_n16)a.FieldSchema(name:value, type:string, comment:null), (test_table2_n15)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3_n8 tablesample (bucket 1 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table3
-PREHOOK: Input: default@test_table3@ds=1
+PREHOOK: Input: default@test_table3_n8
+PREHOOK: Input: default@test_table3_n8@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: query: select * from test_table3_n8 tablesample (bucket 1 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table3
-POSTHOOK: Input: default@test_table3@ds=1
+POSTHOOK: Input: default@test_table3_n8
+POSTHOOK: Input: default@test_table3_n8@ds=1
 #### A masked pattern was here ####
 2	2	val_2val_2	1
-PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: query: select * from test_table3_n8 tablesample (bucket 2 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table3
-PREHOOK: Input: default@test_table3@ds=1
+PREHOOK: Input: default@test_table3_n8
+PREHOOK: Input: default@test_table3_n8@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: query: select * from test_table3_n8 tablesample (bucket 2 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table3
-POSTHOOK: Input: default@test_table3@ds=1
+POSTHOOK: Input: default@test_table3_n8
+POSTHOOK: Input: default@test_table3_n8@ds=1
 #### A masked pattern was here ####
 0	0	val_0val_0	1
 0	0	val_0val_0	1
@@ -231,34 +231,34 @@ POSTHOOK: Input: default@test_table3@ds=1
 5	5	val_5val_5	1
 8	8	val_8val_8	1
 9	9	val_9val_9	1
-PREHOOK: query: DROP TABLE test_table3
+PREHOOK: query: DROP TABLE test_table3_n8
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table3
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: DROP TABLE test_table3
+PREHOOK: Input: default@test_table3_n8
+PREHOOK: Output: default@test_table3_n8
+POSTHOOK: query: DROP TABLE test_table3_n8
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table3
-POSTHOOK: Output: default@test_table3
-PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Input: default@test_table3_n8
+POSTHOOK: Output: default@test_table3_n8
+PREHOOK: query: CREATE TABLE test_table3_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table3_n8
+POSTHOOK: query: CREATE TABLE test_table3_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table3
+POSTHOOK: Output: default@test_table3_n8
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
 SELECT a.key, a.value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
 SELECT a.key, a.value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -303,7 +303,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table3
+          name: default.test_table3_n8
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string), '1' (type: string)
             outputColumnNames: key, value, ds
@@ -329,7 +329,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table3
+          name: default.test_table3_n8
 
   Stage: Stage-2
     Stats Work
@@ -337,7 +337,7 @@ STAGE PLANS:
      Column Stats Desc:
        Columns: key, value
        Column Types: int, string
-        Table: default.test_table3
+        Table: default.test_table3_n8
 
   Stage: Stage-3
     Map Reduce
@@ -364,37 +364,37 @@ STAGE PLANS:
           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
 SELECT a.key, a.value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Input: default@test_table1@ds=1
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
-PREHOOK: Output: default@test_table3@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+PREHOOK: Input: default@test_table1_n16
+PREHOOK: Input: default@test_table1_n16@ds=1
+PREHOOK: Input: default@test_table2_n15
+PREHOOK: Input: default@test_table2_n15@ds=1
+PREHOOK: Output: default@test_table3_n8@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n8 PARTITION (ds = '1')
 SELECT a.key, a.value
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n16 a JOIN test_table2_n15 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table1@ds=1
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
-POSTHOOK: Output: default@test_table3@ds=1
-POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: Input: default@test_table1_n16
+POSTHOOK: Input: default@test_table1_n16@ds=1
+POSTHOOK: Input: default@test_table2_n15
+POSTHOOK: Input: default@test_table2_n15@ds=1
+POSTHOOK: Output: default@test_table3_n8@ds=1
+POSTHOOK: Lineage: test_table3_n8 PARTITION(ds=1).key SIMPLE [(test_table1_n16)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3_n8 PARTITION(ds=1).value SIMPLE [(test_table1_n16)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3_n8 tablesample (bucket 1 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table3
-PREHOOK: Input: default@test_table3@ds=1
+PREHOOK: Input: default@test_table3_n8
+PREHOOK: Input: default@test_table3_n8@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: query: select * from test_table3_n8 tablesample (bucket 1 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table3
-POSTHOOK: Input: default@test_table3@ds=1
+POSTHOOK: Input: default@test_table3_n8
+POSTHOOK: Input: default@test_table3_n8@ds=1
 #### A masked pattern was here ####
 0	val_0	1
 0	val_0	1
@@ -409,15 +409,15 @@ POSTHOOK: Input: default@test_table3@ds=1
 4	val_4	1
 8	val_8	1
 9	val_9	1
-PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: query: select * from test_table3_n8 tablesample (bucket 2 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table3
-PREHOOK: Input: default@test_table3@ds=1
+PREHOOK: Input: default@test_table3_n8
+PREHOOK: Input: default@test_table3_n8@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: query: select * from test_table3_n8 tablesample (bucket 2 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table3
-POSTHOOK: Input: default@test_table3@ds=1
+POSTHOOK: Input: default@test_table3_n8
+POSTHOOK: Input: default@test_table3_n8@ds=1
 #### A masked pattern was here ####
 5	val_5	1
 5	val_5	1
@@ -428,11 +428,11 @@ POSTHOOK: Input: default@test_table3@ds=1
 5	val_5	1
 5	val_5	1
 5	val_5	1
-PREHOOK: query: DROP TABLE test_table3
+PREHOOK: query: DROP TABLE test_table3_n8
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table3
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: DROP TABLE test_table3
+PREHOOK: Input: default@test_table3_n8
+PREHOOK: Output: default@test_table3_n8
+POSTHOOK: query: DROP TABLE test_table3_n8
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table3
-POSTHOOK: Output: default@test_table3
+POSTHOOK: Input: default@test_table3_n8
+POSTHOOK: Output: default@test_table3_n8
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
index 775cb91a7b..97c4d96bf1 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
@@ -1,67 +1,67 @@
-PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE test_table1_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table1_n8
+POSTHOOK: query: CREATE TABLE test_table1_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Output: default@test_table1_n8
+PREHOOK: query: CREATE TABLE test_table2_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n8
+POSTHOOK: query: CREATE TABLE test_table2_n8 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Output: default@test_table2_n8
+PREHOOK: query: CREATE TABLE test_table3_n5 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key desc) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table3_n5
+POSTHOOK: query: CREATE TABLE test_table3_n5 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key desc) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table3
+POSTHOOK: Output: default@test_table3_n5
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+INSERT OVERWRITE TABLE test_table1_n8 PARTITION (ds = '1') SELECT * where key < 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1@ds=1
+PREHOOK: Output: default@test_table1_n8@ds=1
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+INSERT OVERWRITE TABLE test_table1_n8 PARTITION (ds = '1') SELECT * where key < 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1@ds=1
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table1_n8@ds=1
+POSTHOOK: Lineage: test_table1_n8 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1_n8 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+INSERT OVERWRITE TABLE test_table2_n8 PARTITION (ds = '1') SELECT * where key < 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table2@ds=1
+PREHOOK: Output: default@test_table2_n8@ds=1
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+INSERT OVERWRITE TABLE test_table2_n8 PARTITION (ds = '1') SELECT * where key < 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table2@ds=1
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table2_n8@ds=1
+POSTHOOK: Lineage: test_table2_n8 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2_n8 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n8 a JOIN test_table2_n8 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n8 a JOIN test_table2_n8 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -109,7 +109,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table3
+          name: default.test_table3_n5
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string), '1' (type: string)
             outputColumnNames: key, value, ds
@@ -135,7 +135,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table3
+          name: default.test_table3_n5
 
   Stage: Stage-2
     Stats Work
@@ -143,7 +143,7 @@ STAGE PLANS:
      Column Stats Desc:
        Columns: key, value
        Column Types: int, string
-        Table: default.test_table3
+        Table: default.test_table3_n5
 
   Stage: Stage-3
     Map Reduce
@@ -170,48 +170,48 @@ STAGE PLANS:
           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n8 a JOIN test_table2_n8 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Input: default@test_table1@ds=1
-PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
-PREHOOK: Output: default@test_table3@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+PREHOOK: Input: default@test_table1_n8
+PREHOOK: Input: default@test_table1_n8@ds=1
+PREHOOK: Input: default@test_table2_n8
+PREHOOK: Input: default@test_table2_n8@ds=1
+PREHOOK: Output: default@test_table3_n5@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value)
-FROM test_table1 a JOIN test_table2 b
+FROM test_table1_n8 a JOIN test_table2_n8 b
 ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table1@ds=1
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
-POSTHOOK: Output: default@test_table3@ds=1
-POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: Input: default@test_table1_n8
+POSTHOOK: Input: default@test_table1_n8@ds=1
+POSTHOOK: Input: default@test_table2_n8
+POSTHOOK: Input: default@test_table2_n8@ds=1
+POSTHOOK: Output: default@test_table3_n5@ds=1
+POSTHOOK: Lineage: test_table3_n5 PARTITION(ds=1).key SIMPLE [(test_table1_n8)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3_n5 PARTITION(ds=1).value EXPRESSION [(test_table1_n8)a.FieldSchema(name:value, type:string, comment:null), (test_table2_n8)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3_n5 tablesample (bucket 1 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table3
-PREHOOK: Input: default@test_table3@ds=1
+PREHOOK: Input: default@test_table3_n5
+PREHOOK: Input: default@test_table3_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: query: select * from test_table3_n5 tablesample (bucket 1 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table3
-POSTHOOK: Input: default@test_table3@ds=1
+POSTHOOK: Input: default@test_table3_n5
+POSTHOOK: Input: default@test_table3_n5@ds=1
 #### A masked pattern was here ####
 2	val_2val_2	1
-PREHOOK: query: select * from 
test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: query: select * from test_table3_n5 tablesample (bucket 2 out of 2) s where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n5 +PREHOOK: Input: default@test_table3_n5@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: query: select * from test_table3_n5 tablesample (bucket 2 out of 2) s where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n5 +POSTHOOK: Input: default@test_table3_n5@ds=1 #### A masked pattern was here #### 9 val_9val_9 1 8 val_8val_8 1 @@ -235,21 +235,21 @@ POSTHOOK: Input: default@test_table3@ds=1 0 val_0val_0 1 0 val_0val_0 1 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM -(select key, value from test_table1 where ds = '1') a +(select key, value from test_table1_n8 where ds = '1') a JOIN -(select key, value from test_table2 where ds = '1') b +(select key, value from test_table2_n8 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM -(select key, value from test_table1 where ds = '1') a +(select key, value from test_table1_n8 where ds = '1') a JOIN -(select key, value from test_table2 where ds = '1') b +(select key, value from test_table2_n8 where ds = '1') b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -263,7 +263,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_table1 + alias: test_table1_n8 Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -297,7 +297,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n5 Select Operator expressions: _col0 (type: int), _col1 (type: string), '1' (type: string) outputColumnNames: key, value, ds @@ -323,7 +323,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n5 Stage: Stage-2 Stats Work @@ -331,7 +331,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.test_table3 + Table: default.test_table3_n5 Stage: Stage-3 Map Reduce @@ -358,54 +358,54 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM -(select key, value from test_table1 where ds = '1') a +(select key, value from test_table1_n8 where ds = '1') a JOIN -(select key, value from test_table2 where ds = '1') b +(select key, value from 
test_table2_n8 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Input: default@test_table2 -PREHOOK: Input: default@test_table2@ds=1 -PREHOOK: Output: default@test_table3@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +PREHOOK: Input: default@test_table1_n8 +PREHOOK: Input: default@test_table1_n8@ds=1 +PREHOOK: Input: default@test_table2_n8 +PREHOOK: Input: default@test_table2_n8@ds=1 +PREHOOK: Output: default@test_table3_n5@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n5 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM -(select key, value from test_table1 where ds = '1') a +(select key, value from test_table1_n8 where ds = '1') a JOIN -(select key, value from test_table2 where ds = '1') b +(select key, value from test_table2_n8 where ds = '1') b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 -POSTHOOK: Input: default@test_table2 -POSTHOOK: Input: default@test_table2@ds=1 -POSTHOOK: Output: default@test_table3@ds=1 -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: Input: default@test_table1_n8 +POSTHOOK: Input: default@test_table1_n8@ds=1 +POSTHOOK: Input: default@test_table2_n8 +POSTHOOK: Input: default@test_table2_n8@ds=1 +POSTHOOK: Output: default@test_table3_n5@ds=1 +POSTHOOK: Lineage: test_table3_n5 PARTITION(ds=1).key SIMPLE [(test_table1_n8)test_table1_n8.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3_n5 PARTITION(ds=1).value EXPRESSION [(test_table1_n8)test_table1_n8.FieldSchema(name:value, type:string, comment:null), (test_table2_n8)test_table2_n8.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3_n5 tablesample (bucket 1 out of 2) s where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n5 +PREHOOK: Input: default@test_table3_n5@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: query: select * from test_table3_n5 tablesample (bucket 1 out of 2) s where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n5 +POSTHOOK: Input: default@test_table3_n5@ds=1 #### A masked pattern was here #### 2 val_2val_2 1 -PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: query: select * from test_table3_n5 tablesample (bucket 2 out of 2) s where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n5 +PREHOOK: Input: default@test_table3_n5@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: query: select * from test_table3_n5 tablesample (bucket 2 out of 2) s where ds = '1' 
POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n5 +POSTHOOK: Input: default@test_table3_n5@ds=1 #### A masked pattern was here #### 9 val_9val_9 1 8 val_8val_8 1 diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out index 639af48f31..0b9485a7fa 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out @@ -1,67 +1,67 @@ -PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1_n2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table1 -POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@test_table1_n2 +POSTHOOK: query: CREATE TABLE test_table1_n2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table1 -PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: Output: default@test_table1_n2 +PREHOOK: query: CREATE TABLE test_table2_n2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table2 -POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@test_table2_n2 +POSTHOOK: query: CREATE TABLE test_table2_n2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table2 -PREHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: Output: default@test_table2_n2 +PREHOOK: query: CREATE TABLE test_table3_n2 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table3 -POSTHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@test_table3_n2 +POSTHOOK: query: CREATE TABLE test_table3_n2 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table3 +POSTHOOK: Output: default@test_table3_n2 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +INSERT OVERWRITE TABLE test_table1_n2 PARTITION (ds = '1') SELECT * where key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_table1@ds=1 +PREHOOK: Output: default@test_table1_n2@ds=1 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10 +INSERT OVERWRITE TABLE test_table1_n2 PARTITION (ds = '1') SELECT * where key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: 
default@test_table1@ds=1 -POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@test_table1_n2@ds=1 +POSTHOOK: Lineage: test_table1_n2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1_n2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +INSERT OVERWRITE TABLE test_table2_n2 PARTITION (ds = '1') SELECT * where key < 100 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_table2@ds=1 +PREHOOK: Output: default@test_table2_n2@ds=1 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100 +INSERT OVERWRITE TABLE test_table2_n2 PARTITION (ds = '1') SELECT * where key < 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_table2@ds=1 -POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@test_table2_n2@ds=1 +POSTHOOK: Lineage: test_table2_n2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1') SELECT a.key, b.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n2 a JOIN test_table2_n2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1') SELECT a.key, b.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n2 a JOIN test_table2_n2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -109,7 +109,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n2 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), '1' (type: string) outputColumnNames: key, key2, value, ds @@ -135,7 +135,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n2 Stage: Stage-2 Stats Work @@ -143,7 +143,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, key2, value Column Types: int, int, string - Table: default.test_table3 + Table: default.test_table3_n2 Stage: Stage-3 Map Reduce @@ -170,49 +170,49 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1') SELECT a.key, b.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n2 a JOIN test_table2_n2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Input: default@test_table2 -PREHOOK: Input: default@test_table2@ds=1 -PREHOOK: Output: default@test_table3@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +PREHOOK: Input: default@test_table1_n2 +PREHOOK: Input: default@test_table1_n2@ds=1 +PREHOOK: Input: default@test_table2_n2 +PREHOOK: Input: default@test_table2_n2@ds=1 +PREHOOK: Output: default@test_table3_n2@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1') SELECT a.key, b.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n2 a JOIN test_table2_n2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 -POSTHOOK: Input: default@test_table2 -POSTHOOK: Input: default@test_table2@ds=1 -POSTHOOK: Output: default@test_table3@ds=1 -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table2)b.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: Input: default@test_table1_n2 +POSTHOOK: Input: default@test_table1_n2@ds=1 +POSTHOOK: Input: default@test_table2_n2 +POSTHOOK: Input: default@test_table2_n2@ds=1 +POSTHOOK: Output: default@test_table3_n2@ds=1 +POSTHOOK: Lineage: test_table3_n2 PARTITION(ds=1).key SIMPLE [(test_table1_n2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3_n2 PARTITION(ds=1).key2 SIMPLE [(test_table2_n2)b.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3_n2 PARTITION(ds=1).value EXPRESSION [(test_table1_n2)a.FieldSchema(name:value, type:string, comment:null), (test_table2_n2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3_n2 tablesample (bucket 1 out of 2) s where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n2 +PREHOOK: Input: default@test_table3_n2@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: query: select * from test_table3_n2 tablesample (bucket 1 out of 2) s where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n2 +POSTHOOK: Input: default@test_table3_n2@ds=1 #### A masked pattern was here #### 2 2 val_2val_2 1 -PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: query: select * from test_table3_n2 tablesample (bucket 2 out of 2) s where ds = '1' PREHOOK: 
type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n2 +PREHOOK: Input: default@test_table3_n2@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: query: select * from test_table3_n2 tablesample (bucket 2 out of 2) s where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n2 +POSTHOOK: Input: default@test_table3_n2@ds=1 #### A masked pattern was here #### 0 0 val_0val_0 1 0 0 val_0val_0 1 @@ -236,15 +236,15 @@ POSTHOOK: Input: default@test_table3@ds=1 8 8 val_8val_8 1 9 9 val_9val_9 1 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1') SELECT b.key, a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n2 a JOIN test_table2_n2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1') SELECT b.key, a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n2 a JOIN test_table2_n2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -292,7 +292,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n2 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), '1' (type: string) outputColumnNames: key, key2, value, ds @@ -318,7 +318,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n2 Stage: Stage-2 Stats Work @@ -326,7 +326,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, key2, value Column Types: int, int, string - Table: default.test_table3 + Table: default.test_table3_n2 Stage: Stage-3 Map Reduce @@ -353,49 +353,49 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1') SELECT b.key, a.key, concat(a.value, b.value) -FROM test_table1 a JOIN test_table2 b +FROM test_table1_n2 a JOIN test_table2_n2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Input: default@test_table2 -PREHOOK: Input: default@test_table2@ds=1 -PREHOOK: Output: default@test_table3@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') +PREHOOK: Input: default@test_table1_n2 +PREHOOK: Input: default@test_table1_n2@ds=1 +PREHOOK: Input: default@test_table2_n2 +PREHOOK: Input: default@test_table2_n2@ds=1 +PREHOOK: Output: default@test_table3_n2@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n2 PARTITION (ds = '1') SELECT b.key, a.key, concat(a.value, b.value) -FROM test_table1 a JOIN 
test_table2 b +FROM test_table1_n2 a JOIN test_table2_n2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 -POSTHOOK: Input: default@test_table2 -POSTHOOK: Input: default@test_table2@ds=1 -POSTHOOK: Output: default@test_table3@ds=1 -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table2)b.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: Input: default@test_table1_n2 +POSTHOOK: Input: default@test_table1_n2@ds=1 +POSTHOOK: Input: default@test_table2_n2 +POSTHOOK: Input: default@test_table2_n2@ds=1 +POSTHOOK: Output: default@test_table3_n2@ds=1 +POSTHOOK: Lineage: test_table3_n2 PARTITION(ds=1).key SIMPLE [(test_table2_n2)b.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3_n2 PARTITION(ds=1).key2 SIMPLE [(test_table1_n2)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3_n2 PARTITION(ds=1).value EXPRESSION [(test_table1_n2)a.FieldSchema(name:value, type:string, comment:null), (test_table2_n2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from test_table3_n2 tablesample (bucket 1 out of 2) s where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n2 +PREHOOK: Input: default@test_table3_n2@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1' +POSTHOOK: query: select * from test_table3_n2 tablesample (bucket 1 out of 2) s where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n2 +POSTHOOK: Input: default@test_table3_n2@ds=1 #### A masked pattern was here #### 2 2 val_2val_2 1 -PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +PREHOOK: query: select * from test_table3_n2 tablesample (bucket 2 out of 2) s where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n2 +PREHOOK: Input: default@test_table3_n2@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1' +POSTHOOK: query: select * from test_table3_n2 tablesample (bucket 2 out of 2) s where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n2 +POSTHOOK: Input: default@test_table3_n2@ds=1 #### A masked pattern was here #### 0 0 val_0val_0 1 0 0 val_0val_0 1 diff --git a/ql/src/test/results/clientpositive/case_sensitivity.q.out b/ql/src/test/results/clientpositive/case_sensitivity.q.out index 439d7b8d0a..a8d5d50fc0 100644 --- a/ql/src/test/results/clientpositive/case_sensitivity.q.out +++ b/ql/src/test/results/clientpositive/case_sensitivity.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE DEST1(Key INT, VALUE STRING) 
STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n129(Key INT, VALUE STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(Key INT, VALUE STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n129 +POSTHOOK: query: CREATE TABLE DEST1_n129(Key INT, VALUE STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 +POSTHOOK: Output: default@DEST1_n129 PREHOOK: query: EXPLAIN FROM SRC_THRIFT -INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0 +INSERT OVERWRITE TABLE dest1_n129 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC_THRIFT -INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0 +INSERT OVERWRITE TABLE dest1_n129 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -45,7 +45,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n129 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -90,7 +90,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n129 Stage: Stage-2 Stats Work @@ -98,7 +98,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n129 Stage: Stage-3 Map Reduce @@ -110,7 +110,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n129 Stage: Stage-5 Map Reduce @@ -122,7 +122,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n129 Stage: Stage-6 Move Operator @@ -131,24 +131,24 @@ STAGE PLANS: #### A masked pattern was here #### PREHOOK: query: FROM SRC_THRIFT -INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0 +INSERT OVERWRITE TABLE dest1_n129 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0 PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n129 POSTHOOK: query: FROM SRC_THRIFT -INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0 +INSERT OVERWRITE TABLE dest1_n129 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>,
comment:from deserializer), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ] -PREHOOK: query: SELECT DEST1.* FROM Dest1 +POSTHOOK: Output: default@dest1_n129 +POSTHOOK: Lineage: dest1_n129.key EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n129.value EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ] +PREHOOK: query: SELECT DEST1_n129.* FROM Dest1_n129 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n129 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM Dest1 +POSTHOOK: query: SELECT DEST1_n129.* FROM Dest1_n129 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n129 #### A masked pattern was here #### 2 1 4 8 diff --git a/ql/src/test/results/clientpositive/cast1.q.out b/ql/src/test/results/clientpositive/cast1.q.out index e00e9edd85..fe7d2c05b9 100644 --- a/ql/src/test/results/clientpositive/cast1.q.out +++ b/ql/src/test/results/clientpositive/cast1.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 INT, c6 STRING, c7 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n126(c1 INT, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 INT, c6 STRING, c7 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 INT, c6 STRING, c7 INT) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n126 +POSTHOOK: query: CREATE TABLE dest1_n126(c1 INT, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 INT, c6 STRING, c7 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n126 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86 +FROM src INSERT OVERWRITE TABLE dest1_n126 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86 +FROM src INSERT OVERWRITE TABLE dest1_n126 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -43,7 +43,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n126 Select Operator expressions: _col0 (type: int), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: int), _col5 (type: string), _col6 (type: int) outputColumnNames: c1, c2, c3, c4, c5, c6, c7 @@ -88,7 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n126 Stage: Stage-2 Stats Work @@ -96,7 +96,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7 Column Types: int, double, double, double, int, string, int - Table: default.dest1 + Table: default.dest1_n126 Stage: Stage-3 Map Reduce @@ -108,7 +108,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n126 Stage: Stage-5 Map Reduce @@ -120,7 +120,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n126 Stage: Stage-6 Move Operator @@ -128,27 +128,27 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86 +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n126 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86 +PREHOOK: Output: default@dest1_n126 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n126 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 SIMPLE [] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [] -POSTHOOK: Lineage: dest1.c5 SIMPLE [] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [] -POSTHOOK: Lineage: dest1.c7 SIMPLE [] -PREHOOK: query: select dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n126 +POSTHOOK: Lineage: dest1_n126.c1 SIMPLE [] +POSTHOOK: Lineage: dest1_n126.c2 EXPRESSION [] +POSTHOOK: Lineage: dest1_n126.c3 EXPRESSION [] +POSTHOOK: Lineage: dest1_n126.c4 EXPRESSION [] +POSTHOOK: Lineage: dest1_n126.c5 SIMPLE [] +POSTHOOK: Lineage: dest1_n126.c6 EXPRESSION [] +POSTHOOK: Lineage: dest1_n126.c7 SIMPLE [] +PREHOOK: query: select dest1_n126.* FROM dest1_n126 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n126 #### A masked pattern was here #### -POSTHOOK: query: select dest1.* FROM dest1 +POSTHOOK: query: select dest1_n126.* FROM dest1_n126 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n126 #### A masked pattern was here #### 5 5.0 5.0 5.0 5 TRUE 1 diff --git a/ql/src/test/results/clientpositive/cast_on_constant.q.out b/ql/src/test/results/clientpositive/cast_on_constant.q.out index c357a48fa0..efc9cb244d 100644 --- a/ql/src/test/results/clientpositive/cast_on_constant.q.out +++ b/ql/src/test/results/clientpositive/cast_on_constant.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: 
create table t1(ts_field timestamp, date_field date) +PREHOOK: query: create table t1_n48(ts_field timestamp, date_field date) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1(ts_field timestamp, date_field date) +PREHOOK: Output: default@t1_n48 +POSTHOOK: query: create table t1_n48(ts_field timestamp, date_field date) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: explain select * from t1 where ts_field = "2016-01-23 00:00:00" +POSTHOOK: Output: default@t1_n48 +PREHOOK: query: explain select * from t1_n48 where ts_field = "2016-01-23 00:00:00" PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where ts_field = "2016-01-23 00:00:00" +POSTHOOK: query: explain select * from t1_n48 where ts_field = "2016-01-23 00:00:00" POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -19,7 +19,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (ts_field = TIMESTAMP'2016-01-23 00:00:00.0') (type: boolean) @@ -43,9 +43,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select * from t1 where date_field = "2016-01-23" +PREHOOK: query: explain select * from t1_n48 where date_field = "2016-01-23" PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where date_field = "2016-01-23" +POSTHOOK: query: explain select * from t1_n48 where date_field = "2016-01-23" POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -56,7 +56,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (date_field = DATE'2016-01-23') (type: boolean) @@ -80,9 +80,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select * from t1 where ts_field = timestamp '2016-01-23 00:00:00' +PREHOOK: query: explain select * from t1_n48 where ts_field = timestamp '2016-01-23 00:00:00' PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where ts_field = timestamp '2016-01-23 00:00:00' +POSTHOOK: query: explain select * from t1_n48 where ts_field = timestamp '2016-01-23 00:00:00' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -93,7 +93,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (ts_field = TIMESTAMP'2016-01-23 00:00:00.0') (type: boolean) @@ -117,9 +117,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select * from t1 where date_field = date '2016-01-23' +PREHOOK: query: explain select * from t1_n48 where date_field = date '2016-01-23' PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where date_field = date '2016-01-23' +POSTHOOK: query: explain select * from t1_n48 where date_field = date '2016-01-23' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -130,7 +130,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (date_field = DATE'2016-01-23') (type: boolean) @@ -154,9 +154,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select * from t1 where date_field = ts_field +PREHOOK: 
query: explain select * from t1_n48 where date_field = ts_field PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where date_field = ts_field +POSTHOOK: query: explain select * from t1_n48 where date_field = ts_field POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -167,7 +167,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (CAST( date_field AS TIMESTAMP) = ts_field) (type: boolean) @@ -191,11 +191,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n48 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n48 +PREHOOK: Output: default@t1_n48 +POSTHOOK: query: drop table t1_n48 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n48 +POSTHOOK: Output: default@t1_n48 diff --git a/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out b/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out index c29df65cfe..4f1f57335e 100644 --- a/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out +++ b/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: drop table t +PREHOOK: query: drop table t_n24 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table t +POSTHOOK: query: drop table t_n24 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE t(c tinyint) +PREHOOK: query: CREATE TABLE t_n24(c tinyint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: CREATE TABLE t(c tinyint) +PREHOOK: Output: default@t_n24 +POSTHOOK: query: CREATE TABLE t_n24(c tinyint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert overwrite table t select 10 from src limit 1 +POSTHOOK: Output: default@t_n24 +PREHOOK: query: insert overwrite table t_n24 select 10 from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@t -POSTHOOK: query: insert overwrite table t select 10 from src limit 1 +PREHOOK: Output: default@t_n24 +POSTHOOK: query: insert overwrite table t_n24 select 10 from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.c EXPRESSION [] -PREHOOK: query: select * from t where c = 10.0 +POSTHOOK: Output: default@t_n24 +POSTHOOK: Lineage: t_n24.c EXPRESSION [] +PREHOOK: query: select * from t_n24 where c = 10.0 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n24 #### A masked pattern was here #### -POSTHOOK: query: select * from t where c = 10.0 +POSTHOOK: query: select * from t_n24 where c = 10.0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n24 #### A masked pattern was here #### 10 -PREHOOK: query: select * from t where c = -10.0 +PREHOOK: query: select * from t_n24 where c = -10.0 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n24 #### A masked pattern was here #### -POSTHOOK: query: select * from t where c = -10.0 +POSTHOOK: query: select * from t_n24 where c = -10.0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n24 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out 
b/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out index 2f9f758eea..6900f62326 100644 --- a/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out +++ b/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out @@ -1,25 +1,25 @@ -PREHOOK: query: create table s as select * from src limit 10 +PREHOOK: query: create table s_n3 as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@s -POSTHOOK: query: create table s as select * from src limit 10 +PREHOOK: Output: default@s_n3 +POSTHOOK: query: create table s_n3 as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@s -POSTHOOK: Lineage: s.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@s_n3 +POSTHOOK: Lineage: s_n3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: s_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b order by key PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b order by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -76,15 +76,15 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 0 PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -98,15 +98,15 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -163,16 +163,16 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b order by key limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b order by key limit 5 POSTHOOK: type: QUERY @@ -460,15 +460,15 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b order by key PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b order by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -525,15 +525,15 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 0 PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -547,15 +547,15 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a 
+select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -670,16 +670,16 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b order by key limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b order by key limit 5 POSTHOOK: type: QUERY @@ -1077,15 +1077,15 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1142,15 +1142,15 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain -select key from s a +select key from s_n3 a union all -select key from s b +select key from s_n3 b limit 5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/cbo_const.q.out b/ql/src/test/results/clientpositive/cbo_const.q.out index 3225c90979..c60e0b34f4 100644 --- a/ql/src/test/results/clientpositive/cbo_const.q.out +++ b/ql/src/test/results/clientpositive/cbo_const.q.out @@ -33,9 +33,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 1000 -PREHOOK: query: drop view t1 +PREHOOK: query: drop view t1_n37 PREHOOK: type: DROPVIEW -POSTHOOK: query: drop view t1 +POSTHOOK: query: drop view t1_n37 POSTHOOK: type: DROPVIEW PREHOOK: query: create table t1_new (key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE @@ -69,7 +69,7 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@t1_new@ds=2011-10-16 POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-16).key SIMPLE [] POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-16).value SIMPLE [] -PREHOOK: query: create view t1 partitioned on (ds) as +PREHOOK: query: create view t1_n37 partitioned on (ds) as select * from ( select key, value, ds from t1_new @@ -79,8 +79,8 @@ select key, value, ds from t1_new PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t1_new PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create view t1 partitioned on (ds) as +PREHOOK: Output: default@t1_n37 +POSTHOOK: query: create view t1_n37 partitioned on (ds) as select * from ( select key, value, ds from t1_new @@ -90,18 +90,18 @@ select key, value, ds from t1_new POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t1_new POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key EXPRESSION [(t1_new)t1_new.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.value EXPRESSION [(t1_new)t1_new.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select * from t1 where ds = '2011-10-15' +POSTHOOK: Output: default@t1_n37 +POSTHOOK: Lineage: t1_n37.key EXPRESSION [(t1_new)t1_new.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n37.value EXPRESSION 
[(t1_new)t1_new.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from t1_n37 where ds = '2011-10-15' PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n37 PREHOOK: Input: default@t1_new PREHOOK: Input: default@t1_new@ds=2011-10-15 #### A masked pattern was here #### -POSTHOOK: query: select * from t1 where ds = '2011-10-15' +POSTHOOK: query: select * from t1_n37 where ds = '2011-10-15' POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n37 POSTHOOK: Input: default@t1_new POSTHOOK: Input: default@t1_new@ds=2011-10-15 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out index cc41758350..3af4b85864 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table if not exists loc_staging ( +PREHOOK: query: create table if not exists loc_staging_n1 ( state string, locid int, zip bigint, @@ -6,8 +6,8 @@ PREHOOK: query: create table if not exists loc_staging ( ) row format delimited fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@loc_staging -POSTHOOK: query: create table if not exists loc_staging ( +PREHOOK: Output: default@loc_staging_n1 +POSTHOOK: query: create table if not exists loc_staging_n1 ( state string, locid int, zip bigint, @@ -15,46 +15,46 @@ POSTHOOK: query: create table if not exists loc_staging ( ) row format delimited fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@loc_staging -PREHOOK: query: create table loc_orc like loc_staging +POSTHOOK: Output: default@loc_staging_n1 +PREHOOK: query: create table loc_orc_n1 like loc_staging_n1 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@loc_orc -POSTHOOK: query: create table loc_orc like loc_staging +PREHOOK: Output: default@loc_orc_n1 +POSTHOOK: query: create table loc_orc_n1 like loc_staging_n1 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@loc_orc -PREHOOK: query: alter table loc_orc set fileformat orc +POSTHOOK: Output: default@loc_orc_n1 +PREHOOK: query: alter table loc_orc_n1 set fileformat orc PREHOOK: type: ALTERTABLE_FILEFORMAT -PREHOOK: Input: default@loc_orc -PREHOOK: Output: default@loc_orc -POSTHOOK: query: alter table loc_orc set fileformat orc +PREHOOK: Input: default@loc_orc_n1 +PREHOOK: Output: default@loc_orc_n1 +POSTHOOK: query: alter table loc_orc_n1 set fileformat orc POSTHOOK: type: ALTERTABLE_FILEFORMAT -POSTHOOK: Input: default@loc_orc -POSTHOOK: Output: default@loc_orc -PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging +POSTHOOK: Input: default@loc_orc_n1 +POSTHOOK: Output: default@loc_orc_n1 +PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@loc_staging -POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging +PREHOOK: Output: default@loc_staging_n1 +POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n1 POSTHOOK: type: LOAD #### A 
masked pattern was here #### -POSTHOOK: Output: default@loc_staging -PREHOOK: query: insert overwrite table loc_orc select * from loc_staging +POSTHOOK: Output: default@loc_staging_n1 +PREHOOK: query: insert overwrite table loc_orc_n1 select * from loc_staging_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@loc_staging -PREHOOK: Output: default@loc_orc -POSTHOOK: query: insert overwrite table loc_orc select * from loc_staging +PREHOOK: Input: default@loc_staging_n1 +PREHOOK: Output: default@loc_orc_n1 +POSTHOOK: query: insert overwrite table loc_orc_n1 select * from loc_staging_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@loc_staging -POSTHOOK: Output: default@loc_orc -POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ] -PREHOOK: query: explain select * from loc_orc +POSTHOOK: Input: default@loc_staging_n1 +POSTHOOK: Output: default@loc_orc_n1 +POSTHOOK: Lineage: loc_orc_n1.locid SIMPLE [(loc_staging_n1)loc_staging_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_n1.state SIMPLE [(loc_staging_n1)loc_staging_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_n1.year SIMPLE [(loc_staging_n1)loc_staging_n1.FieldSchema(name:year, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_n1.zip SIMPLE [(loc_staging_n1)loc_staging_n1.FieldSchema(name:zip, type:bigint, comment:null), ] +PREHOOK: query: explain select * from loc_orc_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from loc_orc +POSTHOOK: query: explain select * from loc_orc_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -65,7 +65,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int) @@ -73,26 +73,26 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: analyze table loc_orc compute statistics for columns state +PREHOOK: query: analyze table loc_orc_n1 compute statistics for columns state PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc -PREHOOK: Output: default@loc_orc +PREHOOK: Input: default@loc_orc_n1 +PREHOOK: Output: default@loc_orc_n1 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc compute statistics for columns state +POSTHOOK: query: analyze table loc_orc_n1 compute statistics for columns state POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc -POSTHOOK: Output: default@loc_orc +POSTHOOK: Input: default@loc_orc_n1 +POSTHOOK: Output: default@loc_orc_n1 #### A masked pattern was here #### PREHOOK: query: explain select a, c, min(b) from ( select state as a, locid as b, count(*) as c - from loc_orc + from loc_orc_n1 group by state,locid ) sq1 group by a,c PREHOOK: type: QUERY POSTHOOK: query: explain select a, c, min(b) from ( select state as a, locid as b, count(*) as c - from loc_orc + from loc_orc_n1 group by state,locid ) sq1 group by a,c @@ -107,7 +107,7 @@ 
STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: sq1:loc_orc + alias: sq1:loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -178,19 +178,19 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year +PREHOOK: query: analyze table loc_orc_n1 compute statistics for columns state,locid,year PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc -PREHOOK: Output: default@loc_orc +PREHOOK: Input: default@loc_orc_n1 +PREHOOK: Output: default@loc_orc_n1 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year +POSTHOOK: query: analyze table loc_orc_n1 compute statistics for columns state,locid,year POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc -POSTHOOK: Output: default@loc_orc +POSTHOOK: Input: default@loc_orc_n1 +POSTHOOK: Output: default@loc_orc_n1 #### A masked pattern was here #### -PREHOOK: query: explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc_n1 group by year PREHOOK: type: QUERY -POSTHOOK: query: explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc_n1 group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -201,7 +201,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: year (type: int) @@ -238,9 +238,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -251,7 +251,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -288,9 +288,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -301,7 +301,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -338,9 +338,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with rollup PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup +POSTHOOK: query: explain select 
state,locid from loc_orc_n1 group by state,locid with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -351,7 +351,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -388,9 +388,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by rollup (state,locid) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by rollup (state,locid) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by rollup (state,locid) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by rollup (state,locid) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -401,7 +401,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -438,9 +438,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state)) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -451,7 +451,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -488,9 +488,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid)) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -501,7 +501,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -538,9 +538,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -551,7 +551,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num 
rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -588,9 +588,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -601,7 +601,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -638,9 +638,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc_n1 group by year PREHOOK: type: QUERY -POSTHOOK: query: explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc_n1 group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -651,7 +651,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: year (type: int) @@ -688,9 +688,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -701,7 +701,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) @@ -738,9 +738,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,zip from loc_orc group by state,zip +PREHOOK: query: explain select state,zip from loc_orc_n1 group by state,zip PREHOOK: type: QUERY -POSTHOOK: query: explain select state,zip from loc_orc group by state,zip +POSTHOOK: query: explain select state,zip from loc_orc_n1 group by state,zip POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -751,7 +751,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 752 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), zip (type: bigint) @@ -788,9 +788,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: 
explain select state,locid from loc_orc_n1 group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -801,7 +801,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) @@ -838,9 +838,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with rollup PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -851,7 +851,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) @@ -888,9 +888,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by rollup (state,locid) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by rollup (state,locid) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by rollup (state,locid) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by rollup (state,locid) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -901,7 +901,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) @@ -938,9 +938,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state)) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -951,7 +951,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) @@ -988,9 +988,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid)) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1001,7 +1001,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator 
expressions: state (type: string), locid (type: int) @@ -1038,9 +1038,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1051,7 +1051,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) @@ -1088,9 +1088,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1101,7 +1101,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) @@ -1138,9 +1138,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc_n1 group by year PREHOOK: type: QUERY -POSTHOOK: query: explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc_n1 group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1151,7 +1151,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: year (type: int) @@ -1188,9 +1188,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc_n1 group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1201,7 +1201,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: loc_orc + alias: loc_orc_n1 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out index 26ae2aa68b..3016c268b5 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out 
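The plans recorded in cbo_rp_auto_join1.q.out come from two tables that are bucketed and sorted on the join key into the same number of buckets, which is what lets Hive run the joins in this file as sort-merge bucket joins. A minimal sketch of the setup the test relies on: the CREATE statements appear verbatim in this diff, while the set commands are the usual SMB-join knobs and are an assumption here, since the .q side of this test is not part of this hunk.

    -- assumed settings; the authoritative ones live in cbo_rp_auto_join1.q
    set hive.auto.convert.join=true;
    set hive.optimize.bucketmapjoin=true;
    set hive.optimize.bucketmapjoin.sortedmerge=true;
    set hive.auto.convert.sortmerge.join=true;
    -- the cbo_rp_* tests also run with the CBO return path enabled
    -- (hive.cbo.returnpath.hiveop=true)

    CREATE TABLE tbl1_n13(key int, value string)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    CREATE TABLE tbl2_n12(key int, value string)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    -- with matching bucketing and sort order on key, a join like
    --   select ... from tbl1_n13 a join tbl2_n12 b on a.key = b.key
    -- can be executed bucket by bucket as a merge join, with no shuffle.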
@@ -1,87 +1,87 @@ -PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1_n13(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tbl1 -POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: Output: default@tbl1_n13 +POSTHOOK: query: CREATE TABLE tbl1_n13(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tbl1 -PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: Output: default@tbl1_n13 +PREHOOK: query: CREATE TABLE tbl2_n12(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tbl2 -POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: Output: default@tbl2_n12 +POSTHOOK: query: CREATE TABLE tbl2_n12(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tbl2 -PREHOOK: query: insert overwrite table tbl1 +POSTHOOK: Output: default@tbl2_n12 +PREHOOK: query: insert overwrite table tbl1_n13 select * from src where key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tbl1 -POSTHOOK: query: insert overwrite table tbl1 +PREHOOK: Output: default@tbl1_n13 +POSTHOOK: query: insert overwrite table tbl1_n13 select * from src where key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tbl1 -POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table tbl2 +POSTHOOK: Output: default@tbl1_n13 +POSTHOOK: Lineage: tbl1_n13.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1_n13.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl2_n12 select * from src where key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tbl2 -POSTHOOK: query: insert overwrite table tbl2 +PREHOOK: Output: default@tbl2_n12 +POSTHOOK: query: insert overwrite table tbl2_n12 select * from src where key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tbl2 -POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: analyze table tbl1 compute statistics +POSTHOOK: Output: default@tbl2_n12 +POSTHOOK: Lineage: tbl2_n12.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2_n12.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: analyze table tbl1_n13 compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Output: default@tbl1 -POSTHOOK: query: analyze table tbl1 compute statistics +PREHOOK: Input: default@tbl1_n13 +PREHOOK: 
Output: default@tbl1_n13 +POSTHOOK: query: analyze table tbl1_n13 compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Output: default@tbl1 -PREHOOK: query: analyze table tbl1 compute statistics for columns +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Output: default@tbl1_n13 +PREHOOK: query: analyze table tbl1_n13 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@tbl1 -PREHOOK: Output: default@tbl1 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Output: default@tbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: analyze table tbl1 compute statistics for columns +POSTHOOK: query: analyze table tbl1_n13 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@tbl1 -POSTHOOK: Output: default@tbl1 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Output: default@tbl1_n13 #### A masked pattern was here #### -PREHOOK: query: analyze table tbl2 compute statistics +PREHOOK: query: analyze table tbl2_n12 compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@tbl2 -PREHOOK: Output: default@tbl2 -POSTHOOK: query: analyze table tbl2 compute statistics +PREHOOK: Input: default@tbl2_n12 +PREHOOK: Output: default@tbl2_n12 +POSTHOOK: query: analyze table tbl2_n12 compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl2 -POSTHOOK: Output: default@tbl2 -PREHOOK: query: analyze table tbl2 compute statistics for columns +POSTHOOK: Input: default@tbl2_n12 +POSTHOOK: Output: default@tbl2_n12 +PREHOOK: query: analyze table tbl2_n12 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@tbl2 -PREHOOK: Output: default@tbl2 +PREHOOK: Input: default@tbl2_n12 +PREHOOK: Output: default@tbl2_n12 #### A masked pattern was here #### -POSTHOOK: query: analyze table tbl2 compute statistics for columns +POSTHOOK: query: analyze table tbl2_n12 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@tbl2 -POSTHOOK: Output: default@tbl2 +POSTHOOK: Input: default@tbl2_n12 +POSTHOOK: Output: default@tbl2_n12 #### A masked pattern was here #### PREHOOK: query: explain select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -200,18 +200,18 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 
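The counts recorded in this file follow from the fixture data: both tables hold the src rows with key < 10, where keys 0 and 5 each occur three times and keys 2, 4, 8 and 9 once each (the per-key counts 0 9 9 ... 9 1 1 printed further down confirm this). The first result below, 22, is the sum of the per-key pair counts of the self-equi-join:

    -- pairs per key = (rows in tbl1_n13) * (rows in tbl2_n12)
    key 0: 3 * 3 = 9
    key 2: 1 * 1 = 1
    key 4: 1 * 1 = 1
    key 5: 3 * 3 = 9
    key 8: 1 * 1 = 1
    key 9: 1 * 1 = 1
            total  22

The same arithmetic explains the later results in this file: grouping the join by key yields 6 rows (one per distinct key), restricting both sides to key < 6 leaves 9 + 1 + 1 + 9 = 20 pairs, and the three-way join over key < 6 gives 27 + 1 + 1 + 27 = 56.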
+POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 22 PREHOOK: query: explain @@ -219,7 +219,7 @@ select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 group by key ) subq2 @@ -229,7 +229,7 @@ select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 group by key ) subq2 @@ -335,25 +335,25 @@ PREHOOK: query: select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 group by key ) subq2 PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from ( select key, count(*) from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 group by key ) subq2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 6 PREHOOK: query: explain @@ -361,14 +361,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -378,14 +378,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -562,39 +562,39 @@ PREHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 
b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 0 9 9 2 1 1 @@ -604,16 +604,16 @@ POSTHOOK: Input: default@tbl2 9 1 1 PREHOOK: query: explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -667,22 +667,22 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 20 PREHOOK: query: explain @@ -690,11 +690,11 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n12 b on subq2.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain @@ -702,11 +702,11 @@ select count(*) from ( select * from ( - select a.key as 
key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n12 b on subq2.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -763,29 +763,29 @@ PREHOOK: query: select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n12 b on subq2.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 - join tbl2 b + join tbl2_n12 b on subq2.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 20 PREHOOK: query: explain @@ -793,7 +793,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -801,7 +801,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -812,7 +812,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -820,7 +820,7 @@ select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq3 where key < 6 ) subq4 @@ -880,7 +880,7 @@ PREHOOK: query: select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -888,19 +888,19 @@ PREHOOK: query: select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq3 where key < 6 ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl1_n13 #### A masked pattern was here #### POSTHOOK: query: select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 @@ -908,27 +908,27 @@ POSTHOOK: query: select count(*) from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq3 where key < 6 ) subq4 on subq2.key = subq4.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl1_n13 #### A masked pattern was here #### 20 PREHOOK: query: explain select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as 
value from tbl1_n13 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -982,36 +982,36 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from - (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + (select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1 join - (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + (select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 20 PREHOOK: query: explain select count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1174,33 +1174,33 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern 
was here #### POSTHOOK: query: select count(*) from - (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1 join - (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 22 PREHOOK: query: explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 + join tbl2_n12 a on subq1.key = a.key PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 + join tbl2_n12 a on subq1.key = a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1253,38 +1253,38 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 + join tbl2_n12 a on subq1.key = a.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 - join tbl2 a on subq1.key = a.key + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 + join tbl2_n12 a on subq1.key = a.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 20 PREHOOK: query: explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2 on (subq1.key = subq2.key) join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2 on (subq1.key = subq2.key) join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3 on (subq1.key = subq3.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1340,28 +1340,28 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as 
key, a.value as value from tbl1_n13 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2 on subq1.key = subq2.key join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2 on subq1.key = subq2.key join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + (select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3 on (subq1.key = subq3.key) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 56 PREHOOK: query: explain @@ -1370,11 +1370,11 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n12 b on subq2.key = b.key) a PREHOOK: type: QUERY POSTHOOK: query: explain @@ -1383,11 +1383,11 @@ select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n12 b on subq2.key = b.key) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1445,35 +1445,35 @@ PREHOOK: query: select count(*) from ( ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n12 b on subq2.key = b.key) a PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( select * from ( - select a.key as key, a.value as value from tbl1 a where key < 8 + select a.key as key, a.value as value from tbl1_n13 a where key < 8 ) subq1 where key < 6 ) subq2 -join tbl2 b +join tbl2_n12 b on subq2.key = b.key) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 20 -PREHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +PREHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +POSTHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a 
join tbl2_n12 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1516,15 +1516,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +PREHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### -POSTHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +POSTHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 0 val_0 val_0 0 val_0 val_0 @@ -1548,9 +1548,9 @@ POSTHOOK: Input: default@tbl2 5 val_5 val_5 8 val_8 val_8 9 val_9 val_9 -PREHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +PREHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +POSTHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1593,15 +1593,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +PREHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n13 +PREHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### -POSTHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +POSTHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n13 +POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### 0 val_0 val_0 0 val_0 val_0 diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out index 6a0e228db4..e5f5d17848 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n94(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n94 +POSTHOOK: query: CREATE TABLE dest1_n94(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE 
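cbo_rp_auto_join17.q.out exercises Hive's FROM-first insert form, where the FROM clause leads and the INSERT follows, and then verifies the written rows with an order-independent checksum: summing a hash over every column collapses the whole table to one number, so the golden file can assert the join result without listing it. Both statements below appear verbatim in this hunk; they are only gathered here as a sketch of the idiom.

    FROM src src1 JOIN src src2 ON (src1.key = src2.key)
    INSERT OVERWRITE TABLE dest1_n94 SELECT src1.*, src2.*;

    -- sum(hash(...)) does not depend on row order, so the checksum is stable;
    -- the value recorded below is -793937029770
    SELECT sum(hash(dest1_n94.key1, dest1_n94.value1, dest1_n94.key2, dest1_n94.value2))
    FROM dest1_n94;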
POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n94 PREHOOK: query: explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n94 SELECT src1.*, src2.* PREHOOK: type: QUERY POSTHOOK: query: explain FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n94 SELECT src1.*, src2.* POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -77,7 +77,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n94 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) outputColumnNames: key1, value1, key2, value2 @@ -104,7 +104,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n94 Stage: Stage-2 Stats Work @@ -112,7 +112,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, value1, key2, value2 Column Types: int, string, int, string - Table: default.dest1 + Table: default.dest1_n94 Stage: Stage-3 Map Reduce @@ -142,25 +142,25 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n94 SELECT src1.*, src2.* PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n94 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n94 SELECT src1.*, src2.* POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1 +POSTHOOK: Output: default@dest1_n94 +POSTHOOK: Lineage: dest1_n94.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n94.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n94.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n94.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(dest1_n94.key1,dest1_n94.value1,dest1_n94.key2,dest1_n94.value2)) FROM dest1_n94 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n94 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1 +POSTHOOK: query: SELECT 
sum(hash(dest1_n94.key1,dest1_n94.value1,dest1_n94.key2,dest1_n94.value2)) FROM dest1_n94 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n94 #### A masked pattern was here #### -793937029770 diff --git a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out index cd8ecf8b9d..ed445c02be 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out @@ -1,37 +1,37 @@ -PREHOOK: query: create table A as +PREHOOK: query: create table A_n2 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@A -POSTHOOK: query: create table A as +PREHOOK: Output: default@A_n2 +POSTHOOK: query: create table A_n2 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@A -POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table B as +POSTHOOK: Output: default@A_n2 +POSTHOOK: Lineage: a_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table B_n1 as select * from src order by key limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@B -POSTHOOK: query: create table B as +PREHOOK: Output: default@B_n1 +POSTHOOK: query: create table B_n1 as select * from src order by key limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@B -POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@B_n1 +POSTHOOK: Lineage: b_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Warning: Map Join MAPJOIN[8][bigTable=?] in task 'Stage-3:MAPRED' is a cross product -PREHOOK: query: explain select * from A join B +PREHOOK: query: explain select * from A_n2 join B_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from A join B +POSTHOOK: query: explain select * from A_n2 join B_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -42,13 +42,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - b + b_n1 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - b + b_n1 TableScan - alias: b + alias: b_n1 Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -63,7 +63,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -95,9 +95,9 @@ STAGE PLANS: ListSink Warning: Map Join MAPJOIN[19][bigTable=?] 
in task 'Stage-5:MAPRED' is a cross product -PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A +PREHOOK: query: explain select * from B_n1 d1 join B_n1 d2 on d1.key = d2.key join A_n2 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A +POSTHOOK: query: explain select * from B_n1 d1 join B_n1 d2 on d1.key = d2.key join A_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-7 is a root stage @@ -108,16 +108,16 @@ STAGE PLANS: Stage: Stage-7 Map Reduce Local Work Alias -> Map Local Tables: - a + a_n2 Fetch Operator limit: -1 d1 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - a + a_n2 TableScan - alias: a + alias: a_n2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -190,14 +190,14 @@ STAGE PLANS: ListSink Warning: Map Join MAPJOIN[25][bigTable=?] in task 'Stage-5:MAPRED' is a cross product -PREHOOK: query: explain select * from A join +PREHOOK: query: explain select * from A_n2 join (select d1.key - from B d1 join B d2 on d1.key = d2.key + from B_n1 d1 join B_n1 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from A join +POSTHOOK: query: explain select * from A_n2 join (select d1.key - from B d1 join B d2 on d1.key = d2.key + from B_n1 d1 join B_n1 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -285,13 +285,13 @@ STAGE PLANS: Stage: Stage-7 Map Reduce Local Work Alias -> Map Local Tables: - a + a_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - a + a_n2 TableScan - alias: a + alias: a_n2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -333,9 +333,9 @@ STAGE PLANS: Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Stage-5:MAPRED' is a cross product Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Stage-3:MAPRED' is a cross product -PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 +PREHOOK: query: explain select * from A_n2 join (select d1.key from B_n1 d1 join B_n1 d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 +POSTHOOK: query: explain select * from A_n2 join (select d1.key from B_n1 d1 join B_n1 d2 where 1 = 1 group by d1.key) od1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-8 is a root stage @@ -414,13 +414,13 @@ STAGE PLANS: Stage: Stage-7 Map Reduce Local Work Alias -> Map Local Tables: - a + a_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - a + a_n2 TableScan - alias: a + alias: a_n2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -464,12 +464,12 @@ Warning: Map Join MAPJOIN[31][bigTable=?] in task 'Stage-7:MAPRED' is a cross pr Warning: Map Join MAPJOIN[30][bigTable=?] 
in task 'Stage-6:MAPRED' is a cross product Warning: Shuffle Join JOIN[20][tables = [, ]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: explain select * from -(select A.key from A group by key) ss join -(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 +(select A_n2.key from A_n2 group by key) ss join +(select d1.key from B_n1 d1 join B_n1 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from -(select A.key from A group by key) ss join -(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 +(select A_n2.key from A_n2 group by key) ss join +(select d1.key from B_n1 d1 join B_n1 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -488,7 +488,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: ss:a + alias: ss:a_n2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out index 8179871772..4c1fde41d1 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out @@ -1,20 +1,20 @@ -PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n140(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n140 +POSTHOOK: query: CREATE TABLE dest1_n140(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n140 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n140 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n140 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY @@ -65,7 +65,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n140 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int) outputColumnNames: key, c1, c2, c3, c4 @@ -90,7 +90,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n140 Stage: Stage-2 Stats Work @@ -98,7 +98,7 @@ STAGE PLANS: Column Stats 
Desc: Columns: key, c1, c2, c3, c4 Column Types: string, int, string, int, int - Table: default.dest1 + Table: default.dest1_n140 Stage: Stage-3 Map Reduce @@ -128,31 +128,31 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n140 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n140 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n140 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n140 +POSTHOOK: Lineage: dest1_n140.c1 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n140.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n140.c3 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n140.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n140.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n140.* FROM dest1_n140 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n140 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n140.* FROM dest1_n140 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n140 #### A masked pattern was here #### 0 1 00.0 0 3 1 71 116414.0 10044 115 @@ -166,13 +166,13 @@ POSTHOOK: Input: default@dest1 9 7 91047.0 577 12 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n140 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n140 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY @@ -223,7 +223,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: 
default.dest1_n140 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int) outputColumnNames: key, c1, c2, c3, c4 @@ -248,7 +248,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n140 Stage: Stage-2 Stats Work @@ -256,7 +256,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2, c3, c4 Column Types: string, int, string, int, int - Table: default.dest1 + Table: default.dest1_n140 Stage: Stage-3 Map Reduce @@ -286,31 +286,31 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n140 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n140 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n140 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n140 +POSTHOOK: Lineage: dest1_n140.c1 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n140.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n140.c3 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n140.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n140.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n140.* FROM dest1_n140 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n140 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n140.* FROM dest1_n140 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n140 #### A masked pattern was here #### 0 1 00.0 0 3 1 1 116414.0 10044 115 diff --git a/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out index fb0bb807b1..6639ac62c3 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 
DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n103(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n103 +POSTHOOK: query: CREATE TABLE dest1_n103(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n103 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n103 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -23,7 +23,7 @@ INSERT OVERWRITE TABLE dest1 SELECT PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n103 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -75,7 +75,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n103 Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double) outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 @@ -105,7 +105,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n103 Stage: Stage-2 Stats Work @@ -113,10 +113,10 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 Column Types: double, double, double, double, double, double, double, double, double, double, double - Table: default.dest1 + Table: default.dest1_n103 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n103 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -130,9 +130,9 @@ INSERT OVERWRITE TABLE dest1 SELECT count(DISTINCT substr(src.value, 5)) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n103 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n103 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -146,24 +146,24 @@ INSERT OVERWRITE TABLE dest1 SELECT count(DISTINCT substr(src.value, 5)) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c10 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), 
] -POSTHOOK: Lineage: dest1.c11 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n103 +POSTHOOK: Lineage: dest1_n103.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n103.c10 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n103.c11 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n103.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n103.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] +POSTHOOK: Lineage: dest1_n103.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n103.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n103.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n103.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n103.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n103.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n103.* FROM dest1_n103 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n103 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n103.* FROM dest1_n103 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n103 #### A masked pattern was here #### 130091.0 260.182 256.10355987055016 98.0 0.0 142.9268095075238 143.06995106518906 20428.072876000002 20469.010897795593 79136.0 309.0 diff --git a/ql/src/test/results/clientpositive/cbo_rp_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_join1.q.out index e2a55839ac..c5ec00dcc9 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_join1.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_join1.q.out @@ -1,23 +1,23 @@ -PREHOOK: query: CREATE TABLE myinput1(key int, value int) +PREHOOK: query: CREATE TABLE myinput1_n0(key int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@myinput1 -POSTHOOK: query: CREATE TABLE myinput1(key int, value int) +PREHOOK: Output: default@myinput1_n0 +POSTHOOK: query: CREATE TABLE myinput1_n0(key int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@myinput1 -PREHOOK: query: LOAD DATA 
LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1 +POSTHOOK: Output: default@myinput1_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@myinput1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1 +PREHOOK: Output: default@myinput1_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@myinput1 +POSTHOOK: Output: default@myinput1_n0 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND b.key = 40 +PREHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND b.key = 40 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND b.key = 40 +POSTHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND b.key = 40 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -108,19 +108,19 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND b.key = 40 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND b.key = 40 PREHOOK: type: QUERY -PREHOOK: Input: default@myinput1 +PREHOOK: Input: default@myinput1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND b.key = 40 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND b.key = 40 POSTHOOK: type: QUERY -POSTHOOK: Input: default@myinput1 +POSTHOOK: Input: default@myinput1_n0 #### A masked pattern was here #### 4939870 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.value = 40 AND a.key = a.value AND b.key = 40 +PREHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.value = 40 AND a.key = a.value AND b.key = 40 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.value = 40 AND a.key = a.value AND b.key = 40 +POSTHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.value = 40 AND a.key = a.value AND b.key = 40 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -211,19 +211,19 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b 
on a.key = 40 AND a.key = a.value AND b.key = 40 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = a.value AND b.key = 40 PREHOOK: type: QUERY -PREHOOK: Input: default@myinput1 +PREHOOK: Input: default@myinput1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = a.value AND b.key = 40 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = a.value AND b.key = 40 POSTHOOK: type: QUERY -POSTHOOK: Input: default@myinput1 +POSTHOOK: Input: default@myinput1_n0 #### A masked pattern was here #### 4939870 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = b.key AND b.key = 40 +PREHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = b.key AND b.key = 40 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = b.key AND b.key = 40 +POSTHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = b.key AND b.key = 40 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -314,19 +314,19 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = b.key AND b.key = 40 +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = b.key AND b.key = 40 PREHOOK: type: QUERY -PREHOOK: Input: default@myinput1 +PREHOOK: Input: default@myinput1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key = 40 AND a.key = b.key AND b.key = 40 +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key = 40 AND a.key = b.key AND b.key = 40 POSTHOOK: type: QUERY -POSTHOOK: Input: default@myinput1 +POSTHOOK: Input: default@myinput1_n0 #### A masked pattern was here #### 4939870 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value +PREHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value +POSTHOOK: query: EXPLAIN SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM 
myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -417,12 +417,12 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value +PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value PREHOOK: type: QUERY -PREHOOK: Input: default@myinput1 +PREHOOK: Input: default@myinput1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value +POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1_n0 a FULL OUTER JOIN myinput1_n0 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value POSTHOOK: type: QUERY -POSTHOOK: Input: default@myinput1 +POSTHOOK: Input: default@myinput1_n0 #### A masked pattern was here #### 4939870 diff --git a/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out b/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out index d91e3db20f..1208ea82e7 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out @@ -1,504 +1,504 @@ -PREHOOK: query: CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_n1 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket -POSTHOOK: query: CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_n1 +POSTHOOK: query: CREATE TABLE bucket_n1 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket +POSTHOOK: Output: default@bucket_n1 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket +PREHOOK: Output: default@bucket_n1 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket +POSTHOOK: Output: default@bucket_n1 +PREHOOK: query: load data local inpath 
'../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket +PREHOOK: Output: default@bucket_n1 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket +POSTHOOK: Output: default@bucket_n1 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket +PREHOOK: Output: default@bucket_n1 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket +POSTHOOK: Output: default@bucket_n1 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket +PREHOOK: Output: default@bucket_n1 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: create table t1 (result double) +POSTHOOK: Output: default@bucket_n1 +PREHOOK: query: create table t1_n46 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (result double) +PREHOOK: Output: default@t1_n46 +POSTHOOK: query: create table t1_n46 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2 (result double) +POSTHOOK: Output: default@t1_n46 +PREHOOK: query: create table t2_n25 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2 (result double) +PREHOOK: Output: default@t2_n25 +POSTHOOK: query: create table t2_n25 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: create table t3 (result double) +POSTHOOK: Output: default@t2_n25 +PREHOOK: query: create table t3_n7 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t3 -POSTHOOK: query: create table t3 (result double) +PREHOOK: Output: default@t3_n7 +POSTHOOK: query: create table t3_n7 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t3 -PREHOOK: query: create table t4 (result double) +POSTHOOK: Output: default@t3_n7 +PREHOOK: query: create table t4_n8 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t4 -POSTHOOK: query: create table t4 (result 
double) +PREHOOK: Output: default@t4_n8 +POSTHOOK: query: create table t4_n8 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t4 -PREHOOK: query: create table t5 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t5 -POSTHOOK: query: create table t5 (result double) +PREHOOK: Output: default@t5_n4 +POSTHOOK: query: create table t5_n4 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t5 -PREHOOK: query: create table t6 (result double) +POSTHOOK: Output: default@t5_n4 +PREHOOK: query: create table t6_n4 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t6 -POSTHOOK: query: create table t6 (result double) +PREHOOK: Output: default@t6_n4 +POSTHOOK: query: create table t6_n4 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t6 -PREHOOK: query: create table t7 (result array<double>) +POSTHOOK: Output: default@t6_n4 +PREHOOK: query: create table t7_n4 (result array<double>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t7 -POSTHOOK: query: create table t7 (result array<double>) +PREHOOK: Output: default@t7_n4 +POSTHOOK: query: create table t7_n4 (result array<double>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t7 -PREHOOK: query: create table t8 (result array<double>) +POSTHOOK: Output: default@t7_n4 +PREHOOK: query: create table t8_n3 (result array<double>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t8 -POSTHOOK: query: create table t8 (result array<double>) +PREHOOK: Output: default@t8_n3 +POSTHOOK: query: create table t8_n3 (result array<double>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t8 -PREHOOK: query: create table t9 (result array<double>) +POSTHOOK: Output: default@t8_n3 +PREHOOK: query: create table t9_n2 (result array<double>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t9 -POSTHOOK: query: create table t9 (result array<double>) +PREHOOK: Output: default@t9_n2 +POSTHOOK: query: create table t9_n2 (result array<double>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t9 -PREHOOK: query: create table t10 (result array<double>) +POSTHOOK: Output: default@t9_n2 +PREHOOK: query: create table t10_n1 (result array<double>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t10 -POSTHOOK: query: create table t10 (result array<double>) +PREHOOK: Output: default@t10_n1 +POSTHOOK: query: create table t10_n1 (result array<double>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t10 -PREHOOK: query: create table t11 (result array<double>) +POSTHOOK: Output: default@t10_n1 +PREHOOK: query: create table t11_n3 (result array<double>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t11 -POSTHOOK: query: create table t11 (result array<double>) +PREHOOK: Output: default@t11_n3 +POSTHOOK: query: create table t11_n3 (result array<double>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t11 -PREHOOK: query: create table t12 (result array<double>) +POSTHOOK: Output: default@t11_n3 +PREHOOK: query: create table t12_n1 (result array<double>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK:
Output: default@t12 -POSTHOOK: query: create table t12 (result array<double>) +PREHOOK: Output: default@t12_n1 +POSTHOOK: query: create table t12_n1 (result array<double>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t12 -PREHOOK: query: FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +POSTHOOK: Output: default@t12_n1 +PREHOOK: query: FROM bucket_n1 +insert overwrite table t1_n46 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n25 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n7 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) +insert overwrite table t4_n8 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n4 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n4 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n4 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t10_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n3 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) PREHOOK: type: QUERY -PREHOOK: Input: default@bucket -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t10 -PREHOOK: Output: default@t11 -PREHOOK: Output: default@t12 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -PREHOOK: Output: default@t4 -PREHOOK: Output: default@t5 -PREHOOK: Output: default@t6 -PREHOOK: Output: default@t7 -PREHOOK: Output: default@t8 -PREHOOK: Output: default@t9 -POSTHOOK: query: FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +PREHOOK: Input: default@bucket_n1 +PREHOOK: Output: default@t10_n1 +PREHOOK: Output: default@t11_n3 +PREHOOK: Output: default@t12_n1 +PREHOOK: Output: default@t1_n46 +PREHOOK: Output: default@t2_n25 +PREHOOK: Output: default@t3_n7 +PREHOOK: Output: default@t4_n8 +PREHOOK: Output: default@t5_n4
+PREHOOK: Output: default@t6_n4 +PREHOOK: Output: default@t7_n4 +PREHOOK: Output: default@t8_n3 +PREHOOK: Output: default@t9_n2 +POSTHOOK: query: FROM bucket_n1 +insert overwrite table t1_n46 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n25 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n7 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) +insert overwrite table t4_n8 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n4 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n4 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n4 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t10_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n3 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t10 -POSTHOOK: Output: default@t11 -POSTHOOK: Output: default@t12 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t3 -POSTHOOK: Output: default@t4 -POSTHOOK: Output: default@t5 -POSTHOOK: Output: default@t6 -POSTHOOK: Output: default@t7 -POSTHOOK: Output: default@t8 -POSTHOOK: Output: default@t9 -POSTHOOK: Lineage: t1.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t10.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t11.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t12.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t2.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t3.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t4.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t5.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t6.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] 
-POSTHOOK: Lineage: t7.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t8.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t9.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -PREHOOK: query: select * from t1 +POSTHOOK: Input: default@bucket_n1 +POSTHOOK: Output: default@t10_n1 +POSTHOOK: Output: default@t11_n3 +POSTHOOK: Output: default@t12_n1 +POSTHOOK: Output: default@t1_n46 +POSTHOOK: Output: default@t2_n25 +POSTHOOK: Output: default@t3_n7 +POSTHOOK: Output: default@t4_n8 +POSTHOOK: Output: default@t5_n4 +POSTHOOK: Output: default@t6_n4 +POSTHOOK: Output: default@t7_n4 +POSTHOOK: Output: default@t8_n3 +POSTHOOK: Output: default@t9_n2 +POSTHOOK: Lineage: t10_n1.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t11_n3.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t12_n1.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t1_n46.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t2_n25.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t3_n7.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t4_n8.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t5_n4.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t6_n4.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t7_n4.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t8_n3.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t9_n2.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ] +PREHOOK: query: select * from t1_n46 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n46 #### A masked pattern was here #### -POSTHOOK: query: select * from t1 +POSTHOOK: query: select * from t1_n46 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n46 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t2 +PREHOOK: query: select * from t2_n25 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2_n25 #### A masked pattern was here #### -POSTHOOK: query: select * from t2 +POSTHOOK: query: select * from t2_n25 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2_n25 #### A masked pattern was here #### 254.08333333333334 -PREHOOK: query: select * from t3 +PREHOOK: query: select * from t3_n7 PREHOOK: type: QUERY -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t3_n7 #### A masked pattern was here #### -POSTHOOK: query: select * from t3 +POSTHOOK: query: select * from t3_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t3_n7 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t4 +PREHOOK: query: select * from t4_n8 PREHOOK: type: QUERY -PREHOOK: Input: default@t4 +PREHOOK: Input: default@t4_n8 #### A masked pattern was here #### 
-POSTHOOK: query: select * from t4 +POSTHOOK: query: select * from t4_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4_n8 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t5 +PREHOOK: query: select * from t5_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@t5 +PREHOOK: Input: default@t5_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from t5 +POSTHOOK: query: select * from t5_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t5 +POSTHOOK: Input: default@t5_n4 #### A masked pattern was here #### 254.08333333333334 -PREHOOK: query: select * from t6 +PREHOOK: query: select * from t6_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@t6 +PREHOOK: Input: default@t6_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from t6 +POSTHOOK: query: select * from t6_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t6 +POSTHOOK: Input: default@t6_n4 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t7 +PREHOOK: query: select * from t7_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@t7 +PREHOOK: Input: default@t7_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from t7 +POSTHOOK: query: select * from t7_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t7 +POSTHOOK: Input: default@t7_n4 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t8 +PREHOOK: query: select * from t8_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@t8 +PREHOOK: Input: default@t8_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t8 +POSTHOOK: query: select * from t8_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t8 +POSTHOOK: Input: default@t8_n3 #### A masked pattern was here #### [23.355555555555558,254.08333333333334,477.0625,488.38271604938274] -PREHOOK: query: select * from t9 +PREHOOK: query: select * from t9_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@t9 +PREHOOK: Input: default@t9_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from t9 +POSTHOOK: query: select * from t9_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t9 +POSTHOOK: Input: default@t9_n2 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t10 +PREHOOK: query: select * from t10_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@t10 +PREHOOK: Input: default@t10_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from t10 +POSTHOOK: query: select * from t10_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t10 +POSTHOOK: Input: default@t10_n1 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t11 +PREHOOK: query: select * from t11_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@t11 +PREHOOK: Input: default@t11_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t11 +POSTHOOK: query: select * from t11_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t11 +POSTHOOK: Input: default@t11_n3 #### A masked pattern was here #### [23.355555555555558,254.08333333333334,477.0625,488.38271604938274] -PREHOOK: query: select * from t12 +PREHOOK: query: select * from t12_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@t12 +PREHOOK: Input: default@t12_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from t12 +POSTHOOK: query: select * from t12_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t12 +POSTHOOK: Input: default@t12_n1 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: FROM bucket -insert 
overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +PREHOOK: query: FROM bucket_n1 +insert overwrite table t1_n46 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n25 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n7 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) +insert overwrite table t4_n8 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n4 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n4 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n4 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t10_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n3 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) PREHOOK: type: QUERY -PREHOOK: Input: default@bucket -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t10 -PREHOOK: Output: default@t11 -PREHOOK: Output: default@t12 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -PREHOOK: Output: default@t4 -PREHOOK: Output: default@t5 -PREHOOK: Output: default@t6 -PREHOOK: Output: default@t7 -PREHOOK: Output: default@t8 -PREHOOK: Output: default@t9 -POSTHOOK: query: FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +PREHOOK: Input: default@bucket_n1 +PREHOOK: Output: default@t10_n1 +PREHOOK: Output: default@t11_n3 +PREHOOK: Output: default@t12_n1 +PREHOOK: Output: default@t1_n46 +PREHOOK: Output: default@t2_n25 +PREHOOK: Output: default@t3_n7 +PREHOOK: Output: default@t4_n8 +PREHOOK: Output: default@t5_n4 +PREHOOK: Output: default@t6_n4 +PREHOOK: Output: default@t7_n4 +PREHOOK: Output: default@t8_n3 +PREHOOK: Output: default@t9_n2 +POSTHOOK: query: FROM bucket_n1 +insert overwrite table t1_n46 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n25 SELECT percentile_approx(cast(key AS double), 
0.5, 100)
+insert overwrite table t3_n7 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
-insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5)
-insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
-insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
+insert overwrite table t4_n8 SELECT percentile_approx(cast(key AS int), 0.5)
+insert overwrite table t5_n4 SELECT percentile_approx(cast(key AS int), 0.5, 100)
+insert overwrite table t6_n4 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
-insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
-insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
+insert overwrite table t7_n4 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
+insert overwrite table t8_n3 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
+insert overwrite table t9_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
-insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
-insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000)
+insert overwrite table t10_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
+insert overwrite table t11_n3 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
+insert overwrite table t12_n1 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucket
-POSTHOOK: Output: default@t1
-POSTHOOK: Output: default@t10
-POSTHOOK: Output: default@t11
-POSTHOOK: Output: default@t12
-POSTHOOK: Output: default@t2
-POSTHOOK: Output: default@t3
-POSTHOOK: Output: default@t4
-POSTHOOK: Output: default@t5
-POSTHOOK: Output: default@t6
-POSTHOOK: Output: default@t7
-POSTHOOK: Output: default@t8
-POSTHOOK: Output: default@t9
-POSTHOOK: Lineage: t1.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t10.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t11.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t12.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t2.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t3.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t4.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t5.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t6.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t7.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t8.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-POSTHOOK: Lineage: t9.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ]
-PREHOOK: query: select * from t1
+POSTHOOK: Input: default@bucket_n1
+POSTHOOK: Output: default@t10_n1
+POSTHOOK: Output: default@t11_n3
+POSTHOOK: Output: default@t12_n1
+POSTHOOK: Output: default@t1_n46
+POSTHOOK: Output: default@t2_n25
+POSTHOOK: Output: default@t3_n7
+POSTHOOK: Output: default@t4_n8
+POSTHOOK: Output: default@t5_n4
+POSTHOOK: Output: default@t6_n4
+POSTHOOK: Output: default@t7_n4
+POSTHOOK: Output: default@t8_n3
+POSTHOOK: Output: default@t9_n2
+POSTHOOK: Lineage: t10_n1.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t11_n3.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t12_n1.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t1_n46.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t2_n25.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t3_n7.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t4_n8.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t5_n4.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t6_n4.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t7_n4.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t8_n3.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+POSTHOOK: Lineage: t9_n2.result EXPRESSION [(bucket_n1)bucket_n1.FieldSchema(name:key, type:double, comment:null), ]
+PREHOOK: query: select * from t1_n46
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n46
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t1
+POSTHOOK: query: select * from t1_n46
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n46
 #### A masked pattern was here ####
 255.5
-PREHOOK: query: select * from t2
+PREHOOK: query: select * from t2_n25
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2_n25
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t2
+POSTHOOK: query: select * from t2_n25
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2_n25
 #### A masked pattern was here ####
 254.08333333333334
-PREHOOK: query: select * from t3
+PREHOOK: query: select * from t3_n7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t3_n7
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t3
+POSTHOOK: query: select * from t3_n7
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t3_n7
 #### A masked pattern was here ####
 255.5
-PREHOOK: query: select * from t4
+PREHOOK: query: select * from t4_n8
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t4
+PREHOOK: Input: default@t4_n8
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t4
+POSTHOOK: query: select * from t4_n8
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4_n8
 #### A masked pattern was here ####
 255.5
-PREHOOK: query: select * from t5
+PREHOOK: query: select * from t5_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t5
+PREHOOK: Input: default@t5_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t5
+POSTHOOK: query: select * from t5_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t5
+POSTHOOK: Input: default@t5_n4
 #### A masked pattern was here ####
 254.08333333333334
-PREHOOK: query: select * from t6
+PREHOOK: query: select * from t6_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t6
+PREHOOK: Input: default@t6_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t6
+POSTHOOK: query: select * from t6_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t6
+POSTHOOK: Input: default@t6_n4
 #### A masked pattern was here ####
 255.5
-PREHOOK: query: select * from t7
+PREHOOK: query: select * from t7_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t7
+PREHOOK: Input: default@t7_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t7
+POSTHOOK: query: select * from t7_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t7
+POSTHOOK: Input: default@t7_n4
 #### A masked pattern was here ####
 [26.0,255.5,479.0,491.0]
-PREHOOK: query: select * from t8
+PREHOOK: query: select * from t8_n3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t8
+PREHOOK: Input: default@t8_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t8
+POSTHOOK: query: select * from t8_n3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t8
+POSTHOOK: Input: default@t8_n3
 #### A masked pattern was here ####
 [23.355555555555558,254.08333333333334,477.0625,488.38271604938274]
-PREHOOK: query: select * from t9
+PREHOOK: query: select * from t9_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t9
+PREHOOK: Input: default@t9_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t9
+POSTHOOK: query: select * from t9_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t9
+POSTHOOK: Input: default@t9_n2
 #### A masked pattern was here ####
 [26.0,255.5,479.0,491.0]
-PREHOOK: query: select * from t10
+PREHOOK: query: select * from t10_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t10
+PREHOOK: Input: default@t10_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t10
+POSTHOOK: query: select * from t10_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t10
+POSTHOOK: Input: default@t10_n1
 #### A masked pattern was here ####
 [26.0,255.5,479.0,491.0]
-PREHOOK: query: select * from t11
+PREHOOK: query: select * from t11_n3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t11
+PREHOOK: Input: default@t11_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t11
+POSTHOOK: query: select * from t11_n3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t11
+POSTHOOK: Input: default@t11_n3
 #### A masked pattern was here ####
 [23.355555555555558,254.08333333333334,477.0625,488.38271604938274]
-PREHOOK: query: select * from t12
+PREHOOK: query: select * from t12_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t12
+PREHOOK: Input: default@t12_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t12
+POSTHOOK: query: select * from t12_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t12
+POSTHOOK: Input: default@t12_n1
 #### A masked pattern was here ####
 [26.0,255.5,479.0,491.0]
 PREHOOK: query: explain
-select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket
+select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket
+select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket_n1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -509,7 +509,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: bucket
+            alias: bucket_n1
            Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: CASE WHEN ((key < 100.0D)) THEN (NaND) ELSE (key) END (type: double)
@@ -544,20 +544,20 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket
+PREHOOK: query: select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@bucket
+PREHOOK: Input: default@bucket_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket
+POSTHOOK: query: select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucket
+POSTHOOK: Input: default@bucket_n1
 #### A masked pattern was here ####
 true
 PREHOOK: query: explain
-select percentile_approx(key, 0.5) from bucket
+select percentile_approx(key, 0.5) from bucket_n1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select percentile_approx(key, 0.5) from bucket
+select percentile_approx(key, 0.5) from bucket_n1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -568,7 +568,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: bucket
+            alias: bucket_n1
            Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: double)
@@ -603,12 +603,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket
+PREHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@bucket
+PREHOOK: Input: default@bucket_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket
+POSTHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@bucket
+POSTHOOK: Input: default@bucket_n1
 #### A masked pattern was here ####
 true
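The udaf_percentile_approx renames above cover the UDAF at several accuracy settings. As a hedged HiveQL sketch of what is being exercised, using a hypothetical table sales(amount double) rather than the suite's bucket_n1: percentile_approx takes a numeric expression, one percentile or an array of percentiles, and an optional histogram-bin count, where more bins cost memory but tighten the estimate (Hive documents the default as 10000).

-- minimal sketch, assuming a table sales(amount double) exists
SELECT percentile_approx(amount, 0.5) FROM sales;                     -- median, default bin count
SELECT percentile_approx(amount, 0.5, 100) FROM sales;                -- coarser estimate, 100 bins
SELECT percentile_approx(amount, array(0.05, 0.5, 0.95)) FROM sales;  -- one pass, returns array<double>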
diff --git a/ql/src/test/results/clientpositive/cbo_subq_exists.q.out b/ql/src/test/results/clientpositive/cbo_subq_exists.q.out
index 06dbd30339..f60be8e7e0 100644
--- a/ql/src/test/results/clientpositive/cbo_subq_exists.q.out
+++ b/ql/src/test/results/clientpositive/cbo_subq_exists.q.out
@@ -173,7 +173,7 @@ POSTHOOK: Input: default@src_cbo
 118	val_118
 119	val_119
 12	val_12
-PREHOOK: query: create view cv1 as
+PREHOOK: query: create view cv1_n2 as
 select *
 from src_cbo b
 where exists
@@ -183,8 +183,8 @@ where exists
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_cbo
 PREHOOK: Output: database:default
-PREHOOK: Output: default@cv1
-POSTHOOK: query: create view cv1 as
+PREHOOK: Output: default@cv1_n2
+POSTHOOK: query: create view cv1_n2 as
 select *
 from src_cbo b
 where exists
@@ -194,17 +194,17 @@ where exists
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src_cbo
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cv1
-POSTHOOK: Lineage: cv1.key SIMPLE [(src_cbo)b.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: cv1.value SIMPLE [(src_cbo)b.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select * from cv1
+POSTHOOK: Output: default@cv1_n2
+POSTHOOK: Lineage: cv1_n2.key SIMPLE [(src_cbo)b.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: cv1_n2.value SIMPLE [(src_cbo)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from cv1_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@cv1
+PREHOOK: Input: default@cv1_n2
 PREHOOK: Input: default@src_cbo
 #### A masked pattern was here ####
-POSTHOOK: query: select * from cv1
+POSTHOOK: query: select * from cv1_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cv1
+POSTHOOK: Input: default@cv1_n2
 POSTHOOK: Input: default@src_cbo
 #### A masked pattern was here ####
 90	val_90
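cbo_subq_exists.q.out pins down CBO behavior for a correlated EXISTS subquery hidden behind a view: selecting from the view must expand the view body, including the subquery, at plan time. A minimal sketch of the shape under test, with illustrative names rather than the suite's:

-- assuming a table src_demo(key string, value string)
CREATE VIEW v_exists AS
SELECT *
FROM src_demo b
WHERE EXISTS
  (SELECT a.key
   FROM src_demo a
   WHERE a.key = b.key AND a.value > 'val_9');
SELECT * FROM v_exists;  -- both the view and the base table show up as query inputs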
diff --git a/ql/src/test/results/clientpositive/char_2.q.out b/ql/src/test/results/clientpositive/char_2.q.out
index 9b994e69e4..53ac07dc7d 100644
--- a/ql/src/test/results/clientpositive/char_2.q.out
+++ b/ql/src/test/results/clientpositive/char_2.q.out
@@ -1,31 +1,31 @@
-PREHOOK: query: drop table char_2
+PREHOOK: query: drop table char_2_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table char_2
+POSTHOOK: query: drop table char_2_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table char_2 (
+PREHOOK: query: create table char_2_n1 (
   key char(10),
   value char(20)
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@char_2
-POSTHOOK: query: create table char_2 (
+PREHOOK: Output: default@char_2_n1
+POSTHOOK: query: create table char_2_n1 (
   key char(10),
   value char(20)
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_2
-PREHOOK: query: insert overwrite table char_2 select * from src
+POSTHOOK: Output: default@char_2_n1
+PREHOOK: query: insert overwrite table char_2_n1 select * from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@char_2
-POSTHOOK: query: insert overwrite table char_2 select * from src
+PREHOOK: Output: default@char_2_n1
+POSTHOOK: query: insert overwrite table char_2_n1 select * from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@char_2
-POSTHOOK: Lineage: char_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: char_2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@char_2_n1
+POSTHOOK: Lineage: char_2_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: char_2_n1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select value, sum(cast(key as int)), count(*) numrows
 from src
 group by value
@@ -48,20 +48,20 @@ val_100	200	2
 val_103	206	2
 val_104	208	2
 PREHOOK: query: select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n1
 group by value
 order by value asc
 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@char_2
+PREHOOK: Input: default@char_2_n1
 #### A masked pattern was here ####
 POSTHOOK: query: select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n1
 group by value
 order by value asc
 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_2
+POSTHOOK: Input: default@char_2_n1
 #### A masked pattern was here ####
 val_0	0	3
 val_10	10	1
@@ -90,31 +90,31 @@ val_96	96	1
 val_95	190	2
 val_92	92	1
 PREHOOK: query: select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n1
 group by value
 order by value desc
 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@char_2
+PREHOOK: Input: default@char_2_n1
 #### A masked pattern was here ####
 POSTHOOK: query: select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n1
 group by value
 order by value desc
 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_2
+POSTHOOK: Input: default@char_2_n1
 #### A masked pattern was here ####
 val_98	196	2
 val_97	194	2
 val_96	96	1
 val_95	190	2
 val_92	92	1
-PREHOOK: query: drop table char_2
+PREHOOK: query: drop table char_2_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@char_2
-PREHOOK: Output: default@char_2
-POSTHOOK: query: drop table char_2
+PREHOOK: Input: default@char_2_n1
+PREHOOK: Output: default@char_2_n1
+POSTHOOK: query: drop table char_2_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@char_2
-POSTHOOK: Output: default@char_2
+POSTHOOK: Input: default@char_2_n1
+POSTHOOK: Output: default@char_2_n1
diff --git a/ql/src/test/results/clientpositive/char_join1.q.out b/ql/src/test/results/clientpositive/char_join1.q.out
index 0e350194a3..3176574773 100644
--- a/ql/src/test/results/clientpositive/char_join1.q.out
+++ b/ql/src/test/results/clientpositive/char_join1.q.out
@@ -6,9 +6,9 @@ PREHOOK: query: drop table char_join1_ch2
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table char_join1_ch2
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table char_join1_str
+PREHOOK: query: drop table char_join1_str_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table char_join1_str
+POSTHOOK: query: drop table char_join1_str_n0
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: create table char_join1_ch1 (
   c1 int,
@@ -38,20 +38,20 @@ POSTHOOK: query: create table char_join1_ch2 (
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_join1_ch2
-PREHOOK: query: create table char_join1_str (
+PREHOOK: query: create table char_join1_str_n0 (
   c1 int,
  c2 string
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@char_join1_str
-POSTHOOK: query: create table char_join1_str (
+PREHOOK: Output: default@char_join1_str_n0
+POSTHOOK: query: create table char_join1_str_n0 (
  c1 int,
  c2 string
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_join1_str
+POSTHOOK: Output: default@char_join1_str_n0
 PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table char_join1_ch1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
@@ -68,14 +68,14 @@ POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table ch
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@char_join1_ch2
-PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table char_join1_str
+PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table char_join1_str_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@char_join1_str
-POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table char_join1_str
+PREHOOK: Output: default@char_join1_str_n0
+POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table char_join1_str_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@char_join1_str
+POSTHOOK: Output: default@char_join1_str_n0
 PREHOOK: query: select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@char_join1_ch1
@@ -104,15 +104,15 @@ POSTHOOK: Input: default@char_join1_ch2
 2	abc	1	abc
 2	abc	2	abc
 3	abc	3	abc
-PREHOOK: query: select * from char_join1_ch1 a join char_join1_str b on (a.c2 = b.c2)
+PREHOOK: query: select * from char_join1_ch1 a join char_join1_str_n0 b on (a.c2 = b.c2)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@char_join1_ch1
-PREHOOK: Input: default@char_join1_str
+PREHOOK: Input: default@char_join1_str_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from char_join1_ch1 a join char_join1_str b on (a.c2 = b.c2)
+POSTHOOK: query: select * from char_join1_ch1 a join char_join1_str_n0 b on (a.c2 = b.c2)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@char_join1_ch1
-POSTHOOK: Input: default@char_join1_str
+POSTHOOK: Input: default@char_join1_str_n0
 #### A masked pattern was here ####
 1	abc	1	abc
 2	abc	1	abc
@@ -133,11 +133,11 @@ POSTHOOK: query: drop table char_join1_ch2
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@char_join1_ch2
 POSTHOOK: Output: default@char_join1_ch2
-PREHOOK: query: drop table char_join1_str
+PREHOOK: query: drop table char_join1_str_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@char_join1_str
-PREHOOK: Output: default@char_join1_str
-POSTHOOK: query: drop table char_join1_str
+PREHOOK: Input: default@char_join1_str_n0
+PREHOOK: Output: default@char_join1_str_n0
+POSTHOOK: query: drop table char_join1_str_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@char_join1_str
-POSTHOOK: Output: default@char_join1_str
+POSTHOOK: Input: default@char_join1_str_n0
+POSTHOOK: Output: default@char_join1_str_n0
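char_join1 deliberately keeps separate char/char and char/string join cases, since comparison semantics can differ once the blank padding of char(N) values enters the picture. A hedged sketch of the pattern, with hypothetical table names (t_char, t_str), not the suite's:

-- char(10) values are stored blank-padded to the declared length
CREATE TABLE t_char (c1 int, c2 char(10));
CREATE TABLE t_str  (c1 int, c2 string);
-- char-to-char join: both sides share the padding convention, so 'abc' matches 'abc'
SELECT * FROM t_char a JOIN t_char b ON (a.c2 = b.c2);
-- char-to-string join: this is the case the suite pins down, where the padded
-- char side must still match the raw string value
SELECT * FROM t_char a JOIN t_str b ON (a.c2 = b.c2);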
diff --git a/ql/src/test/results/clientpositive/char_pad_convert.q.out b/ql/src/test/results/clientpositive/char_pad_convert.q.out
index 62d1a3755b..b63dc25c99 100644
--- a/ql/src/test/results/clientpositive/char_pad_convert.q.out
+++ b/ql/src/test/results/clientpositive/char_pad_convert.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table over1k(
+PREHOOK: query: create table over1k_n6(
            t tinyint,
            si smallint,
            i int,
@@ -14,8 +14,8 @@ PREHOOK: query: create table over1k(
            fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: create table over1k(
+PREHOOK: Output: default@over1k_n6
+POSTHOOK: query: create table over1k_n6(
            t tinyint,
            si smallint,
            i int,
@@ -31,28 +31,28 @@ POSTHOOK: query: create table over1k(
            fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: load data local inpath '../../data/files/over1k' into table over1k
+POSTHOOK: Output: default@over1k_n6
+PREHOOK: query: load data local inpath '../../data/files/over1k' into table over1k_n6
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: load data local inpath '../../data/files/over1k' into table over1k
+PREHOOK: Output: default@over1k_n6
+POSTHOOK: query: load data local inpath '../../data/files/over1k' into table over1k_n6
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
+POSTHOOK: Output: default@over1k_n6
 PREHOOK: query: select lpad(t, 4, ' '),
            lpad(si, 2, ' '),
            lpad(i, 9, 'z'),
-           lpad(b, 2, 'a') from over1k limit 5
+           lpad(b, 2, 'a') from over1k_n6 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 POSTHOOK: query: select lpad(t, 4, ' '),
            lpad(si, 2, ' '),
            lpad(i, 9, 'z'),
-           lpad(b, 2, 'a') from over1k limit 5
+           lpad(b, 2, 'a') from over1k_n6 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 124	33	zzzz65664	42
 19	44	zzzz65553	42
@@ -62,16 +62,16 @@ POSTHOOK: Input: default@over1k
 PREHOOK: query: select lpad("oh", 10, t),
            lpad("my", 6, si),
            lpad("other", 14, i),
-           lpad("one", 12, b) from over1k limit 5
+           lpad("one", 12, b) from over1k_n6 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 POSTHOOK: query: select lpad("oh", 10, t),
            lpad("my", 6, si),
            lpad("other", 14, i),
-           lpad("one", 12, b) from over1k limit 5
+           lpad("one", 12, b) from over1k_n6 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 12412412oh	3363my	656646566other	429496743one
 19191919oh	4424my	655536555other	429496738one
@@ -81,16 +81,16 @@ POSTHOOK: Input: default@over1k
 PREHOOK: query: select rpad(t, 4, ' '),
            rpad(si, 2, ' '),
            rpad(i, 9, 'z'),
-           rpad(b, 2, 'a') from over1k limit 5
+           rpad(b, 2, 'a') from over1k_n6 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 POSTHOOK: query: select rpad(t, 4, ' '),
            rpad(si, 2, ' '),
            rpad(i, 9, 'z'),
-           rpad(b, 2, 'a') from over1k limit 5
+           rpad(b, 2, 'a') from over1k_n6 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 124	33	65664zzzz	42
 19	44	65553zzzz	42
@@ -100,16 +100,16 @@ POSTHOOK: Input: default@over1k
 PREHOOK: query: select rpad("oh", 10, t),
            rpad("my", 6, si),
            rpad("other", 14, i),
-           rpad("one", 12, b) from over1k limit 5
+           rpad("one", 12, b) from over1k_n6 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 POSTHOOK: query: select rpad("oh", 10, t),
            rpad("my", 6, si),
            rpad("other", 14, i),
-           rpad("one", 12, b) from over1k limit 5
+           rpad("one", 12, b) from over1k_n6 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 oh12412412	my3363	other656646566	one429496743
 oh19191919	my4424	other655536555	one429496738
@@ -121,18 +121,18 @@ PREHOOK: query: select lpad(f, 4, ' '),
            lpad(bo, 9, 'z'),
            lpad(ts, 2, 'a'),
            lpad(`dec`, 7, 'd'),
-           lpad(bin, 8, 'b') from over1k limit 5
+           lpad(bin, 8, 'b') from over1k_n6 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 POSTHOOK: query: select lpad(f, 4, ' '),
            lpad(d, 2, ' '),
            lpad(bo, 9, 'z'),
            lpad(ts, 2, 'a'),
            lpad(`dec`, 7, 'd'),
-           lpad(bin, 8, 'b') from over1k limit 5
+           lpad(bin, 8, 'b') from over1k_n6 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 74.7	42	zzzzzTRUE	20	ddd45.4	yard du
 26.4	37	zzzzzTRUE	20	dd29.62	history
@@ -144,18 +144,18 @@ PREHOOK: query: select lpad("oh", 10, f),
            lpad("other", 14, bo),
            lpad("one", 12, ts),
            lpad("two", 7, `dec`),
-           lpad("three", 8, bin) from over1k limit 5
+           lpad("three", 8, bin) from over1k_n6 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 POSTHOOK: query: select lpad("oh", 10, f),
            lpad("my", 6, d),
            lpad("other", 14, bo),
            lpad("one", 12, ts),
            lpad("two", 7, `dec`),
-           lpad("three", 8, bin) from over1k limit 5
+           lpad("three", 8, bin) from over1k_n6 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 74.7274.oh	42.4my	TRUETRUETother	2013-03-0one	45.4two	yathree
 26.4326.oh	37.7my	TRUETRUETother	2013-03-0one	29.6two	hithree
@@ -167,18 +167,18 @@ PREHOOK: query: select rpad(f, 4, ' '),
            rpad(bo, 9, 'z'),
            rpad(ts, 2, 'a'),
            rpad(`dec`, 7, 'd'),
-           rpad(bin, 8, 'b') from over1k limit 5
+           rpad(bin, 8, 'b') from over1k_n6 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 POSTHOOK: query: select rpad(f, 4, ' '),
            rpad(d, 2, ' '),
            rpad(bo, 9, 'z'),
            rpad(ts, 2, 'a'),
            rpad(`dec`, 7, 'd'),
-           rpad(bin, 8, 'b') from over1k limit 5
+           rpad(bin, 8, 'b') from over1k_n6 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 74.7	42	TRUEzzzzz	20	45.4ddd	yard du
 26.4	37	TRUEzzzzz	20	29.62dd	history
@@ -190,18 +190,18 @@ PREHOOK: query: select rpad("oh", 10, f),
            rpad("other", 14, bo),
            rpad("one", 12, ts),
            rpad("two", 7, `dec`),
-           rpad("three", 8, bin) from over1k limit 5
+           rpad("three", 8, bin) from over1k_n6 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 POSTHOOK: query: select rpad("oh", 10, f),
            rpad("my", 6, d),
            rpad("other", 14, bo),
            rpad("one", 12, ts),
            rpad("two", 7, `dec`),
-           rpad("three", 8, bin) from over1k limit 5
+           rpad("three", 8, bin) from over1k_n6 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n6
 #### A masked pattern was here ####
 oh74.7274.	my42.4	otherTRUETRUET	one2013-03-0	two45.4	threeya
 oh26.4326.	my37.7	otherTRUETRUET	one2013-03-0	two29.6	threehi
diff --git a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
index e9525126e1..0e8a3324da 100644
--- a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
+++ b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
@@ -1,27 +1,27 @@
-PREHOOK: query: CREATE TABLE src_null(a bigint) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE src_null_n2(a bigint) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@src_null
-POSTHOOK: query: CREATE TABLE src_null(a bigint) STORED AS TEXTFILE
+PREHOOK: Output: default@src_null_n2
+POSTHOOK: query: CREATE TABLE src_null_n2(a bigint) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_null
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null
+POSTHOOK: Output: default@src_null_n2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null_n2
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@src_null
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null
+PREHOOK: Output: default@src_null_n2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null_n2
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@src_null
-PREHOOK: query: create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) as c FROM src_null where a is null limit 5
+POSTHOOK: Output: default@src_null_n2
+PREHOOK: query: create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) as c FROM src_null_n2 where a is null limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src_null
+PREHOOK: Input: default@src_null_n2
 PREHOOK: Output: database:default
 PREHOOK: Output: default@all_nulls
-POSTHOOK: query: create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) as c FROM src_null where a is null limit 5
+POSTHOOK: query: create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) as c FROM src_null_n2 where a is null limit 5
 POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src_null
+POSTHOOK: Input: default@src_null_n2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@all_nulls
 POSTHOOK: Lineage: all_nulls.a SIMPLE []
@@ -83,11 +83,11 @@ POSTHOOK: query: drop table all_nulls
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@all_nulls
 POSTHOOK: Output: default@all_nulls
-PREHOOK: query: drop table src_null
+PREHOOK: query: drop table src_null_n2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@src_null
-PREHOOK: Output: default@src_null
-POSTHOOK: query: drop table src_null
+PREHOOK: Input: default@src_null_n2
+PREHOOK: Output: default@src_null_n2
+POSTHOOK: query: drop table src_null_n2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@src_null
-POSTHOOK: Output: default@src_null
+POSTHOOK: Input: default@src_null_n2
+POSTHOOK: Output: default@src_null_n2
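colstats_all_nulls guards the column-statistics path for the degenerate case where every value in a column is NULL, where a naive min/max/NDV computation could misbehave. A minimal sketch of the scenario, using hypothetical names (all_nulls_demo, some_src) and mirroring the CTAS-plus-stats shape above:

-- build a table whose numeric columns are entirely NULL, then gather column stats
CREATE TABLE all_nulls_demo AS
  SELECT cast(null as bigint) AS a, cast(null as double) AS b
  FROM some_src LIMIT 5;                         -- some_src is any populated table
ANALYZE TABLE all_nulls_demo COMPUTE STATISTICS FOR COLUMNS;
DESC FORMATTED all_nulls_demo a;                 -- expect numNulls = row count, empty min/max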
diff --git a/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out b/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
index 17271b65e4..4b0aaab305 100644
--- a/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
+++ b/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE DEST1_n52(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1
-POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST1_n52
+POSTHOOK: query: CREATE TABLE DEST1_n52(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1
-PREHOOK: query: create table s as select * from src where key='10'
+POSTHOOK: Output: default@DEST1_n52
+PREHOOK: query: create table s_n129 as select * from src where key='10'
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@s
-POSTHOOK: query: create table s as select * from src where key='10'
+PREHOOK: Output: default@s_n129
+POSTHOOK: query: create table s_n129 as select * from src where key='10'
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@s
-POSTHOOK: Lineage: s.key SIMPLE []
-POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain FROM S
-INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+POSTHOOK: Output: default@s_n129
+POSTHOOK: Lineage: s_n129.key SIMPLE []
+POSTHOOK: Lineage: s_n129.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain FROM S_n129
+INSERT OVERWRITE TABLE DEST1_n52 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain FROM S
-INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+POSTHOOK: query: explain FROM S_n129
+INSERT OVERWRITE TABLE DEST1_n52 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -35,7 +35,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: s
+            alias: s_n129
            Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string), substr(value, 5) (type: string)
@@ -66,7 +66,7 @@ STAGE PLANS:
                input format: org.apache.hadoop.mapred.TextInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n52
            Select Operator
              expressions: _col0 (type: int), _col1 (type: string)
              outputColumnNames: key, value
@@ -86,7 +86,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n52
  Stage: Stage-2
    Stats Work
@@ -94,7 +94,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n52
  Stage: Stage-3
    Map Reduce
@@ -119,24 +119,24 @@ STAGE PLANS:
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-PREHOOK: query: FROM S
-INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+PREHOOK: query: FROM S_n129
+INSERT OVERWRITE TABLE DEST1_n52 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@s
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM S
-INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+PREHOOK: Input: default@s_n129
+PREHOOK: Output: default@dest1_n52
+POSTHOOK: query: FROM S_n129
+INSERT OVERWRITE TABLE DEST1_n52 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@s
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(s)s.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(s)s.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: desc formatted DEST1
+POSTHOOK: Input: default@s_n129
+POSTHOOK: Output: default@dest1_n52
+POSTHOOK: Lineage: dest1_n52.key EXPRESSION [(s_n129)s_n129.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n52.value EXPRESSION [(s_n129)s_n129.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: desc formatted DEST1_n52
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dest1
-POSTHOOK: query: desc formatted DEST1
+PREHOOK: Input: default@dest1_n52
+POSTHOOK: query: desc formatted DEST1_n52
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n52
 # col_name	data_type	comment
 key	int
 value	string
@@ -166,12 +166,12 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: desc formatted DEST1 key
+PREHOOK: query: desc formatted DEST1_n52 key
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dest1
-POSTHOOK: query: desc formatted DEST1 key
+PREHOOK: Input: default@dest1_n52
+POSTHOOK: query: desc formatted DEST1_n52 key
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n52
 col_name	key
 data_type	int
 min	10
@@ -185,12 +185,12 @@ num_falses
 bitVector	HL
 comment	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
-PREHOOK: query: desc formatted DEST1 value
+PREHOOK: query: desc formatted DEST1_n52 value
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@dest1
-POSTHOOK: query: desc formatted DEST1 value
+PREHOOK: Input: default@dest1_n52
+POSTHOOK: query: desc formatted DEST1_n52 value
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n52
 col_name	value
 data_type	string
 min
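column_pruner_multiple_children appears to check that column pruning stays correct when one TableScan feeds more than one child operator, here the FileSink for the insert plus the Select branch that feeds automatic column-stats gathering (the Stats Work stage above). The query shape, sketched with hypothetical names:

-- FROM-first insert syntax; the pruner must keep key and value alive for
-- both the aggregation branch and the stats branch hanging off the same scan
FROM src_demo
INSERT OVERWRITE TABLE dest_demo
  SELECT key, sum(substr(value, 5))
  GROUP BY key;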
diff --git a/ql/src/test/results/clientpositive/columnstats_infinity.q.out b/ql/src/test/results/clientpositive/columnstats_infinity.q.out
index 81b4d4f25e..494bb0559a 100644
--- a/ql/src/test/results/clientpositive/columnstats_infinity.q.out
+++ b/ql/src/test/results/clientpositive/columnstats_infinity.q.out
@@ -1,76 +1,76 @@
-PREHOOK: query: CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+PREHOOK: query: CREATE TABLE schema_evolution_data_n45(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@schema_evolution_data
-POSTHOOK: query: CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+PREHOOK: Output: default@schema_evolution_data_n45
+POSTHOOK: query: CREATE TABLE schema_evolution_data_n45(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@schema_evolution_data
-PREHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data
+POSTHOOK: Output: default@schema_evolution_data_n45
+PREHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n45
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@schema_evolution_data
-POSTHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data
+PREHOOK: Output: default@schema_evolution_data_n45
+POSTHOOK: query: load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n45
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@schema_evolution_data
-PREHOOK: query: CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+POSTHOOK: Output: default@schema_evolution_data_n45
+PREHOOK: query: CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n13(insert_num int,
              c1 decimal(38,18), c2 float, c3 double,
             c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double,
             c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double,
             b STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: query: CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: query: CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n13(insert_num int,
             c1 decimal(38,18), c2 float, c3 double,
            c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double,
            c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double,
            b STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-PREHOOK: query: insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+PREHOOK: query: insert into table table_change_numeric_group_string_group_floating_string_group_n13 SELECT insert_num,
            decimal1, float1, double1,
            decimal1, float1, double1,
            decimal1, float1, double1,
           decimal1, float1, double1,
           decimal1, float1, double1,
-           'original' FROM schema_evolution_data
+           'original' FROM schema_evolution_data_n45
 PREHOOK: type: QUERY
-PREHOOK: Input: default@schema_evolution_data
-PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: query: insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+PREHOOK: Input: default@schema_evolution_data_n45
+PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: query: insert into table table_change_numeric_group_string_group_floating_string_group_n13 SELECT insert_num,
           decimal1, float1, double1,
           decimal1, float1, double1,
           decimal1, float1, double1,
           decimal1, float1, double1,
          decimal1, float1, double1,
-           'original' FROM schema_evolution_data
+           'original' FROM schema_evolution_data_n45
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@schema_evolution_data
-POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.b SIMPLE []
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c11 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c12 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c13 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c14 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c15 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
-PREHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group
+POSTHOOK: Input: default@schema_evolution_data_n45
+POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.b SIMPLE []
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c1 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c10 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c11 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c12 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c13 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c14 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c15 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c2 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c3 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c4 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c5 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c6 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c7 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c8 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c9 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ]
+POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.insert_num SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:insert_num, type:int, comment:null), ]
+PREHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group
+PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
+POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
 # col_name	data_type	comment
 insert_num	int
 c1	decimal(38,18)
@@ -115,22 +115,22 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: analyze table table_change_numeric_group_string_group_floating_string_group compute statistics for columns
+PREHOOK: query: analyze table table_change_numeric_group_string_group_floating_string_group_n13 compute statistics for columns
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
-PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
+PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
+PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table table_change_numeric_group_string_group_floating_string_group compute statistics for columns
+POSTHOOK: query: analyze table table_change_numeric_group_string_group_floating_string_group_n13 compute statistics for columns
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
+POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
 #### A masked pattern was here ####
-PREHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group
+PREHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group
+PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
+POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
 # col_name	data_type	comment
 insert_num	int
 c1	decimal(38,18)
@@ -175,82 +175,82 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
+PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n13
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
+PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
 #### A masked pattern was here ####
-POSTHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
+POSTHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group_n13
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
+POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
 #### A masked pattern was here ####
 101	99999999999999999999.999999999999999999	Infinity	1.7976931348623157E308	99999999999999999999.999999999999999999	Infinity	1.7976931348623157E308	99999999999999999999.999999999999999999	Infinity	1.7976931348623157E308	99999999999999999999.999999999999999999	Infinity	1.7976931348623157E308	99999999999999999999.999999999999999999	Infinity	1.7976931348623157E308	original
 102	-99999999999999999999.999999999999999999	-Infinity	-1.7976931348623157E308	-99999999999999999999.999999999999999999	-Infinity	-1.7976931348623157E308	-99999999999999999999.999999999999999999	-Infinity	-1.7976931348623157E308	-99999999999999999999.999999999999999999	-Infinity	-1.7976931348623157E308	-99999999999999999999.999999999999999999	-Infinity	-1.7976931348623157E308	original
 103	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL	original
 104	66475.561431000000000000	-100.35978	30.774	66475.561431000000000000	-100.35978	30.774	66475.561431000000000000	-100.35978	30.774	66475.561431000000000000	-100.35978	30.774	66475.561431000000000000	-100.35978	30.774	original
 105	9250340.750000000000000000	NULL	46114.28	9250340.750000000000000000	NULL	46114.28	9250340.750000000000000000	NULL	46114.28	9250340.750000000000000000	NULL	46114.28	9250340.750000000000000000	NULL	46114.28	original
-PREHOOK: query: drop table table_change_numeric_group_string_group_floating_string_group
+PREHOOK: query: drop table table_change_numeric_group_string_group_floating_string_group_n13
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
-PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: query: drop table table_change_numeric_group_string_group_floating_string_group
+PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
+PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: query: drop table table_change_numeric_group_string_group_floating_string_group_n13
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-PREHOOK: query: CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+PREHOOK: query: CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n13(insert_num int,
             c1 decimal(38,18), c2 float, c3 double,
            c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double,
            c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double,
            b STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: query: CREATE TABLE table_change_numeric_group_string_group_floating_string_group(insert_num int,
+PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: query: CREATE TABLE table_change_numeric_group_string_group_floating_string_group_n13(insert_num int,
            c1 decimal(38,18), c2 float, c3 double,
           c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double,
           c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double,
           b STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-PREHOOK: query: insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+PREHOOK: query: insert into table table_change_numeric_group_string_group_floating_string_group_n13 SELECT insert_num,
           decimal1, float1, double1,
          decimal1, float1, double1,
          decimal1, float1, double1,
          decimal1, float1, double1,
          decimal1, float1, double1,
-           'original' FROM schema_evolution_data
+           'original' FROM schema_evolution_data_n45
 PREHOOK: type: QUERY
-PREHOOK: Input: default@schema_evolution_data
-PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: query: insert into table table_change_numeric_group_string_group_floating_string_group SELECT insert_num,
+PREHOOK: Input: default@schema_evolution_data_n45
+PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13
+POSTHOOK: query: insert into table table_change_numeric_group_string_group_floating_string_group_n13 SELECT insert_num,
          decimal1, float1, double1,
          decimal1, float1, double1,
          decimal1, float1, double1,
         decimal1, float1, double1,
        decimal1, float1, double1,
-           'original' FROM schema_evolution_data
+           'original' FROM schema_evolution_data_n45
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@schema_evolution_data
-POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.b SIMPLE []
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c1 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c10 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c11 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c12 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c13 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c14 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c15 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c2 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c4 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c5 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c6 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c7 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c8 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
-PREHOOK:
query: desc formatted table_change_numeric_group_string_group_floating_string_group +POSTHOOK: Input: default@schema_evolution_data_n45 +POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group_n13 +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.b SIMPLE [] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c1 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c10 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c11 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c12 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c13 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c14 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c15 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c2 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c3 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c4 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c5 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c6 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c7 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c8 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:float1, type:float, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.c9 SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:double1, type:double, comment:null), ] +POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group_n13.insert_num 
SIMPLE [(schema_evolution_data_n45)schema_evolution_data_n45.FieldSchema(name:insert_num, type:int, comment:null), ] +PREHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group_n13 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group -POSTHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group +PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13 +POSTHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group_n13 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group +POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group_n13 # col_name data_type comment insert_num int c1 decimal(38,18) diff --git a/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out b/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out index b45249ce52..93d55f3cc6 100644 --- a/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out +++ b/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out @@ -1,76 +1,76 @@ -PREHOOK: query: DROP TABLE Employee_Part +PREHOOK: query: DROP TABLE Employee_Part_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE Employee_Part +POSTHOOK: query: DROP TABLE Employee_Part_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) +PREHOOK: query: CREATE TABLE Employee_Part_n0(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) row format delimited fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@Employee_Part -POSTHOOK: query: CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) +PREHOOK: Output: default@Employee_Part_n0 +POSTHOOK: query: CREATE TABLE Employee_Part_n0(employeeID int, employeeName String) partitioned by (employeeSalary double, country string) row format delimited fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@Employee_Part -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA') +POSTHOOK: Output: default@Employee_Part_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='2000.0', country='USA') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@employee_part -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA') +PREHOOK: Output: default@employee_part_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='2000.0', country='USA') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK') +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: 
default@employee_part_n0@employeesalary=2000.0/country=USA +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='2000.0', country='UK') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@employee_part -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK') +PREHOOK: Output: default@employee_part_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='2000.0', country='UK') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA') +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=UK +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3000.0', country='USA') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@employee_part -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA') +PREHOOK: Output: default@employee_part_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3000.0', country='USA') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA') +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=USA +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='4000.0', country='USA') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@employee_part -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA') +PREHOOK: Output: default@employee_part_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='4000.0', country='USA') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK') +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=4000.0/country=USA +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3500.0', country='UK') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@employee_part -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK') 
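[Editor's note] The hunks above and below record the mechanical rename of Employee_Part to Employee_Part_n0 in the golden output of columnstats_partlvl_dp.q. For orientation, here is a minimal sketch of the statement shape that produces these PREHOOK/POSTHOOK LOAD blocks, reconstructed from the output itself rather than introduced as a new change to the test:

-- Partitioned source table; each LOAD targets one (employeeSalary, country) partition.
CREATE TABLE Employee_Part_n0(employeeID int, employeeName String)
PARTITIONED BY (employeeSalary double, country string)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE;

LOAD DATA LOCAL INPATH '../../data/files/employee.dat'
INTO TABLE Employee_Part_n0 PARTITION (employeeSalary='2000.0', country='USA');

Each such LOAD registers the table and the specific partition as outputs, which is exactly what the `default@employee_part_n0@employeesalary=.../country=...` lines in the hooks reflect.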
+PREHOOK: Output: default@employee_part_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3500.0', country='UK') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK') +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=3500.0/country=UK +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3000.0', country='UK') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@employee_part -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK') +PREHOOK: Output: default@employee_part_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part_n0 partition(employeeSalary='3000.0', country='UK') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=UK PREHOOK: query: explain -analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID +analyze table Employee_Part_n0 partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID PREHOOK: type: ANALYZE_TABLE POSTHOOK: query: explain -analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID +analyze table Employee_Part_n0 partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID POSTHOOK: type: ANALYZE_TABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -81,7 +81,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: employee_part + alias: employee_part_n0 Statistics: Num rows: 1 Data size: 640 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: employeeid (type: int), employeename (type: string), country (type: string) @@ -124,28 +124,28 @@ STAGE PLANS: Column Stats Desc: Columns: employeeName, employeeID Column Types: string, int - Table: default.employee_part + Table: default.employee_part_n0 -PREHOOK: query: analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID +PREHOOK: query: analyze table Employee_Part_n0 partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@employee_part -PREHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA -PREHOOK: Output: default@employee_part -PREHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA +PREHOOK: Input: default@employee_part_n0 +PREHOOK: Input: default@employee_part_n0@employeesalary=4000.0/country=USA +PREHOOK: Output: default@employee_part_n0 +PREHOOK: Output: default@employee_part_n0@employeesalary=4000.0/country=USA #### A masked pattern was here #### -POSTHOOK: query: analyze table Employee_Part partition 
(employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID +POSTHOOK: query: analyze table Employee_Part_n0 partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@employee_part -POSTHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA +POSTHOOK: Input: default@employee_part_n0 +POSTHOOK: Input: default@employee_part_n0@employeesalary=4000.0/country=USA +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=4000.0/country=USA #### A masked pattern was here #### -PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA') +PREHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='4000.0', country='USA') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@employee_part -POSTHOOK: query: describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA') +PREHOOK: Input: default@employee_part_n0 +POSTHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='4000.0', country='USA') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@employee_part +POSTHOOK: Input: default@employee_part_n0 # col_name data_type comment employeeid int employeename string @@ -158,7 +158,7 @@ country string # Detailed Partition Information Partition Value: [4000.0, USA] Database: default -Table: employee_part +Table: employee_part_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"employeeid\":\"true\",\"employeename\":\"true\"}} @@ -179,12 +179,12 @@ Sort Columns: [] Storage Desc Params: field.delim | serialization.format | -PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA') employeeName +PREHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='4000.0', country='USA') employeeName PREHOOK: type: DESCTABLE -PREHOOK: Input: default@employee_part -POSTHOOK: query: describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA') employeeName +PREHOOK: Input: default@employee_part_n0 +POSTHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='4000.0', country='USA') employeeName POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@employee_part +POSTHOOK: Input: default@employee_part_n0 col_name employeeName data_type string min @@ -198,10 +198,10 @@ num_falses bitVector HL comment from deserializer PREHOOK: query: explain -analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID +analyze table Employee_Part_n0 partition (employeeSalary='2000.0') compute statistics for columns employeeID PREHOOK: type: ANALYZE_TABLE POSTHOOK: query: explain -analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID +analyze table Employee_Part_n0 partition (employeeSalary='2000.0') compute statistics for columns employeeID POSTHOOK: type: ANALYZE_TABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -212,7 +212,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: employee_part + alias: employee_part_n0 Statistics: Num rows: 3 Data size: 1690 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: employeeid (type: int), country 
(type: string) @@ -255,32 +255,32 @@ STAGE PLANS: Column Stats Desc: Columns: employeeID Column Types: int - Table: default.employee_part + Table: default.employee_part_n0 -PREHOOK: query: analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID +PREHOOK: query: analyze table Employee_Part_n0 partition (employeeSalary='2000.0') compute statistics for columns employeeID PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@employee_part -PREHOOK: Input: default@employee_part@employeesalary=2000.0/country=UK -PREHOOK: Input: default@employee_part@employeesalary=2000.0/country=USA -PREHOOK: Output: default@employee_part -PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK -PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA -#### A masked pattern was here #### -POSTHOOK: query: analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID +PREHOOK: Input: default@employee_part_n0 +PREHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=UK +PREHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=USA +PREHOOK: Output: default@employee_part_n0 +PREHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=UK +PREHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=USA +#### A masked pattern was here #### +POSTHOOK: query: analyze table Employee_Part_n0 partition (employeeSalary='2000.0') compute statistics for columns employeeID POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@employee_part -POSTHOOK: Input: default@employee_part@employeesalary=2000.0/country=UK -POSTHOOK: Input: default@employee_part@employeesalary=2000.0/country=USA -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK -POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA -#### A masked pattern was here #### -PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='2000.0', country='USA') employeeID +POSTHOOK: Input: default@employee_part_n0 +POSTHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=UK +POSTHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=USA +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=UK +POSTHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=USA +#### A masked pattern was here #### +PREHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='2000.0', country='USA') employeeID PREHOOK: type: DESCTABLE -PREHOOK: Input: default@employee_part -POSTHOOK: query: describe formatted Employee_Part partition (employeeSalary='2000.0', country='USA') employeeID +PREHOOK: Input: default@employee_part_n0 +POSTHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='2000.0', country='USA') employeeID POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@employee_part +POSTHOOK: Input: default@employee_part_n0 col_name employeeID data_type int min 16 @@ -293,12 +293,12 @@ num_trues num_falses bitVector HL comment from deserializer -PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='2000.0', country='UK') employeeID +PREHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='2000.0', country='UK') employeeID PREHOOK: type: DESCTABLE -PREHOOK: Input: default@employee_part -POSTHOOK: query: describe formatted Employee_Part partition 
(employeeSalary='2000.0', country='UK') employeeID +PREHOOK: Input: default@employee_part_n0 +POSTHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='2000.0', country='UK') employeeID POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@employee_part +POSTHOOK: Input: default@employee_part_n0 col_name employeeID data_type int min 16 @@ -312,10 +312,10 @@ num_falses bitVector HL comment from deserializer PREHOOK: query: explain -analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID +analyze table Employee_Part_n0 partition (employeeSalary) compute statistics for columns employeeID PREHOOK: type: ANALYZE_TABLE POSTHOOK: query: explain -analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID +analyze table Employee_Part_n0 partition (employeeSalary) compute statistics for columns employeeID POSTHOOK: type: ANALYZE_TABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -326,7 +326,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: employee_part + alias: employee_part_n0 Statistics: Num rows: 27 Data size: 206 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: employeeid (type: int), employeesalary (type: double), country (type: string) @@ -369,48 +369,48 @@ STAGE PLANS: Column Stats Desc: Columns: employeeID Column Types: int - Table: default.employee_part + Table: default.employee_part_n0 -PREHOOK: query: analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID +PREHOOK: query: analyze table Employee_Part_n0 partition (employeeSalary) compute statistics for columns employeeID PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@employee_part -PREHOOK: Input: default@employee_part@employeesalary=2000.0/country=UK -PREHOOK: Input: default@employee_part@employeesalary=2000.0/country=USA -PREHOOK: Input: default@employee_part@employeesalary=3000.0/country=UK -PREHOOK: Input: default@employee_part@employeesalary=3000.0/country=USA -PREHOOK: Input: default@employee_part@employeesalary=3500.0/country=UK -PREHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA -PREHOOK: Output: default@employee_part -PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK -PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA -PREHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK -PREHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA -PREHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK -PREHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA -#### A masked pattern was here #### -POSTHOOK: query: analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID +PREHOOK: Input: default@employee_part_n0 +PREHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=UK +PREHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=USA +PREHOOK: Input: default@employee_part_n0@employeesalary=3000.0/country=UK +PREHOOK: Input: default@employee_part_n0@employeesalary=3000.0/country=USA +PREHOOK: Input: default@employee_part_n0@employeesalary=3500.0/country=UK +PREHOOK: Input: default@employee_part_n0@employeesalary=4000.0/country=USA +PREHOOK: Output: default@employee_part_n0 +PREHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=UK +PREHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=USA +PREHOOK: Output: 
default@employee_part_n0@employeesalary=3000.0/country=UK +PREHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=USA +PREHOOK: Output: default@employee_part_n0@employeesalary=3500.0/country=UK +PREHOOK: Output: default@employee_part_n0@employeesalary=4000.0/country=USA +#### A masked pattern was here #### +POSTHOOK: query: analyze table Employee_Part_n0 partition (employeeSalary) compute statistics for columns employeeID POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@employee_part -POSTHOOK: Input: default@employee_part@employeesalary=2000.0/country=UK -POSTHOOK: Input: default@employee_part@employeesalary=2000.0/country=USA -POSTHOOK: Input: default@employee_part@employeesalary=3000.0/country=UK -POSTHOOK: Input: default@employee_part@employeesalary=3000.0/country=USA -POSTHOOK: Input: default@employee_part@employeesalary=3500.0/country=UK -POSTHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK -POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA -POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK -POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA -POSTHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK -POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA -#### A masked pattern was here #### -PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='3000.0', country='UK') employeeID +POSTHOOK: Input: default@employee_part_n0 +POSTHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=UK +POSTHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=USA +POSTHOOK: Input: default@employee_part_n0@employeesalary=3000.0/country=UK +POSTHOOK: Input: default@employee_part_n0@employeesalary=3000.0/country=USA +POSTHOOK: Input: default@employee_part_n0@employeesalary=3500.0/country=UK +POSTHOOK: Input: default@employee_part_n0@employeesalary=4000.0/country=USA +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=UK +POSTHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=USA +POSTHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=UK +POSTHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=USA +POSTHOOK: Output: default@employee_part_n0@employeesalary=3500.0/country=UK +POSTHOOK: Output: default@employee_part_n0@employeesalary=4000.0/country=USA +#### A masked pattern was here #### +PREHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='3000.0', country='UK') employeeID PREHOOK: type: DESCTABLE -PREHOOK: Input: default@employee_part -POSTHOOK: query: describe formatted Employee_Part partition (employeeSalary='3000.0', country='UK') employeeID +PREHOOK: Input: default@employee_part_n0 +POSTHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='3000.0', country='UK') employeeID POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@employee_part +POSTHOOK: Input: default@employee_part_n0 col_name employeeID data_type int min 16 @@ -424,10 +424,10 @@ num_falses bitVector HL comment from deserializer PREHOOK: query: explain -analyze table Employee_Part partition (employeeSalary,country) compute statistics for columns +analyze table Employee_Part_n0 partition (employeeSalary,country) compute statistics for columns PREHOOK: type: ANALYZE_TABLE 
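[Editor's note] In the analyze hunks around this point, only the employeeSalary key is fixed (or no key at all), so Hive expands the partial partition spec to every matching partition — which is why each PREHOOK/POSTHOOK block enumerates all six (employeeSalary, country) partitions as both Inputs and Outputs. A minimal sketch of the pattern, using the names from this test:

-- Partial spec: stats are computed for every partition under each employeeSalary value.
ANALYZE TABLE Employee_Part_n0 PARTITION (employeeSalary)
COMPUTE STATISTICS FOR COLUMNS employeeID;

-- Per-partition column stats can then be inspected for a fully specified partition:
DESCRIBE FORMATTED Employee_Part_n0
PARTITION (employeeSalary='3000.0', country='UK') employeeID;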
POSTHOOK: query: explain -analyze table Employee_Part partition (employeeSalary,country) compute statistics for columns +analyze table Employee_Part_n0 partition (employeeSalary,country) compute statistics for columns POSTHOOK: type: ANALYZE_TABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -438,7 +438,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: employee_part + alias: employee_part_n0 Statistics: Num rows: 54 Data size: 412 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: employeeid (type: int), employeename (type: string), employeesalary (type: double), country (type: string) @@ -481,48 +481,48 @@ STAGE PLANS: Column Stats Desc: Columns: employeeid, employeename Column Types: int, string - Table: default.employee_part + Table: default.employee_part_n0 -PREHOOK: query: analyze table Employee_Part partition (employeeSalary,country) compute statistics for columns +PREHOOK: query: analyze table Employee_Part_n0 partition (employeeSalary,country) compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@employee_part -PREHOOK: Input: default@employee_part@employeesalary=2000.0/country=UK -PREHOOK: Input: default@employee_part@employeesalary=2000.0/country=USA -PREHOOK: Input: default@employee_part@employeesalary=3000.0/country=UK -PREHOOK: Input: default@employee_part@employeesalary=3000.0/country=USA -PREHOOK: Input: default@employee_part@employeesalary=3500.0/country=UK -PREHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA -PREHOOK: Output: default@employee_part -PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK -PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA -PREHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK -PREHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA -PREHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK -PREHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA -#### A masked pattern was here #### -POSTHOOK: query: analyze table Employee_Part partition (employeeSalary,country) compute statistics for columns +PREHOOK: Input: default@employee_part_n0 +PREHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=UK +PREHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=USA +PREHOOK: Input: default@employee_part_n0@employeesalary=3000.0/country=UK +PREHOOK: Input: default@employee_part_n0@employeesalary=3000.0/country=USA +PREHOOK: Input: default@employee_part_n0@employeesalary=3500.0/country=UK +PREHOOK: Input: default@employee_part_n0@employeesalary=4000.0/country=USA +PREHOOK: Output: default@employee_part_n0 +PREHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=UK +PREHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=USA +PREHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=UK +PREHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=USA +PREHOOK: Output: default@employee_part_n0@employeesalary=3500.0/country=UK +PREHOOK: Output: default@employee_part_n0@employeesalary=4000.0/country=USA +#### A masked pattern was here #### +POSTHOOK: query: analyze table Employee_Part_n0 partition (employeeSalary,country) compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@employee_part -POSTHOOK: Input: default@employee_part@employeesalary=2000.0/country=UK -POSTHOOK: Input: default@employee_part@employeesalary=2000.0/country=USA -POSTHOOK: Input: 
default@employee_part@employeesalary=3000.0/country=UK -POSTHOOK: Input: default@employee_part@employeesalary=3000.0/country=USA -POSTHOOK: Input: default@employee_part@employeesalary=3500.0/country=UK -POSTHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA -POSTHOOK: Output: default@employee_part -POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK -POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA -POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK -POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA -POSTHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK -POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA -#### A masked pattern was here #### -PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='3500.0', country='UK') employeeName +POSTHOOK: Input: default@employee_part_n0 +POSTHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=UK +POSTHOOK: Input: default@employee_part_n0@employeesalary=2000.0/country=USA +POSTHOOK: Input: default@employee_part_n0@employeesalary=3000.0/country=UK +POSTHOOK: Input: default@employee_part_n0@employeesalary=3000.0/country=USA +POSTHOOK: Input: default@employee_part_n0@employeesalary=3500.0/country=UK +POSTHOOK: Input: default@employee_part_n0@employeesalary=4000.0/country=USA +POSTHOOK: Output: default@employee_part_n0 +POSTHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=UK +POSTHOOK: Output: default@employee_part_n0@employeesalary=2000.0/country=USA +POSTHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=UK +POSTHOOK: Output: default@employee_part_n0@employeesalary=3000.0/country=USA +POSTHOOK: Output: default@employee_part_n0@employeesalary=3500.0/country=UK +POSTHOOK: Output: default@employee_part_n0@employeesalary=4000.0/country=USA +#### A masked pattern was here #### +PREHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='3500.0', country='UK') employeeName PREHOOK: type: DESCTABLE -PREHOOK: Input: default@employee_part -POSTHOOK: query: describe formatted Employee_Part partition (employeeSalary='3500.0', country='UK') employeeName +PREHOOK: Input: default@employee_part_n0 +POSTHOOK: query: describe formatted Employee_Part_n0 partition (employeeSalary='3500.0', country='UK') employeeName POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@employee_part +POSTHOOK: Input: default@employee_part_n0 col_name employeeName data_type string min diff --git a/ql/src/test/results/clientpositive/combine2.q.out b/ql/src/test/results/clientpositive/combine2.q.out index f9e6f2abf0..383ef25dd4 100644 --- a/ql/src/test/results/clientpositive/combine2.q.out +++ b/ql/src/test/results/clientpositive/combine2.q.out @@ -4,15 +4,15 @@ PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: create table combine2(key string) partitioned by (value string) +PREHOOK: query: create table combine2_n0(key string) partitioned by (value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@combine2 -POSTHOOK: query: create table combine2(key string) partitioned by (value string) +PREHOOK: Output: default@combine2_n0 +POSTHOOK: query: create table combine2_n0(key string) partitioned by (value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@combine2 -PREHOOK: 
query: insert overwrite table combine2 partition(value) +POSTHOOK: Output: default@combine2_n0 +PREHOOK: query: insert overwrite table combine2_n0 partition(value) select * from ( select key, value from src where key < 10 union all @@ -21,8 +21,8 @@ select * from ( select key, '2010-04-21 09:45:00' value from src where key = 19) s PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@combine2 -POSTHOOK: query: insert overwrite table combine2 partition(value) +PREHOOK: Output: default@combine2_n0 +POSTHOOK: query: insert overwrite table combine2_n0 partition(value) select * from ( select key, value from src where key < 10 union all @@ -31,28 +31,28 @@ select * from ( select key, '2010-04-21 09:45:00' value from src where key = 19) s POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@combine2@value=2010-04-21 09%3A45%3A00 -POSTHOOK: Output: default@combine2@value=val_0 -POSTHOOK: Output: default@combine2@value=val_2 -POSTHOOK: Output: default@combine2@value=val_4 -POSTHOOK: Output: default@combine2@value=val_5 -POSTHOOK: Output: default@combine2@value=val_8 -POSTHOOK: Output: default@combine2@value=val_9 -POSTHOOK: Output: default@combine2@value=| -POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: show partitions combine2 +POSTHOOK: Output: default@combine2_n0@value=2010-04-21 09%3A45%3A00 +POSTHOOK: Output: default@combine2_n0@value=val_0 +POSTHOOK: Output: default@combine2_n0@value=val_2 +POSTHOOK: Output: default@combine2_n0@value=val_4 +POSTHOOK: Output: default@combine2_n0@value=val_5 +POSTHOOK: Output: default@combine2_n0@value=val_8 +POSTHOOK: Output: default@combine2_n0@value=val_9 +POSTHOOK: Output: default@combine2_n0@value=| +POSTHOOK: Lineage: combine2_n0 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: combine2_n0 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: combine2_n0 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: combine2_n0 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: combine2_n0 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: combine2_n0 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] +POSTHOOK: Lineage: combine2_n0 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: combine2_n0 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: show partitions combine2_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@combine2 -POSTHOOK: query: show partitions combine2 +PREHOOK: Input: default@combine2_n0 +POSTHOOK: query: show partitions combine2_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@combine2 +POSTHOOK: Input: default@combine2_n0 value=2010-04-21 09%3A45%3A00 value=val_0 value=val_2 @@ -62,10 +62,10 @@ value=val_8 value=val_9 value=| PREHOOK: query: explain -select key, value from combine2 where value is not null +select key, value from combine2_n0 where value is not null PREHOOK: type: QUERY POSTHOOK: query: explain -select key, value from combine2 where value is not null +select key, value from combine2_n0 where value is not null POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -76,7 +76,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: combine2 + alias: combine2_n0 Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -84,29 +84,29 @@ STAGE PLANS: Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select key, value from combine2 where value is not null +PREHOOK: query: select key, value from combine2_n0 where value is not null PREHOOK: type: QUERY -PREHOOK: Input: default@combine2 -PREHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00 -PREHOOK: Input: default@combine2@value=val_0 -PREHOOK: Input: default@combine2@value=val_2 -PREHOOK: Input: default@combine2@value=val_4 -PREHOOK: Input: default@combine2@value=val_5 -PREHOOK: Input: default@combine2@value=val_8 -PREHOOK: Input: default@combine2@value=val_9 -PREHOOK: Input: default@combine2@value=| +PREHOOK: Input: default@combine2_n0 +PREHOOK: Input: default@combine2_n0@value=2010-04-21 09%3A45%3A00 +PREHOOK: Input: default@combine2_n0@value=val_0 +PREHOOK: Input: default@combine2_n0@value=val_2 +PREHOOK: Input: default@combine2_n0@value=val_4 +PREHOOK: Input: default@combine2_n0@value=val_5 +PREHOOK: Input: default@combine2_n0@value=val_8 +PREHOOK: Input: default@combine2_n0@value=val_9 +PREHOOK: Input: default@combine2_n0@value=| #### A masked pattern was here #### -POSTHOOK: query: select key, value from combine2 where value is not null +POSTHOOK: query: select key, value from combine2_n0 where value is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@combine2 -POSTHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00 -POSTHOOK: Input: default@combine2@value=val_0 -POSTHOOK: Input: default@combine2@value=val_2 -POSTHOOK: Input: default@combine2@value=val_4 -POSTHOOK: Input: default@combine2@value=val_5 -POSTHOOK: Input: default@combine2@value=val_8 -POSTHOOK: Input: default@combine2@value=val_9 -POSTHOOK: Input: default@combine2@value=| +POSTHOOK: Input: default@combine2_n0 +POSTHOOK: Input: default@combine2_n0@value=2010-04-21 09%3A45%3A00 +POSTHOOK: Input: default@combine2_n0@value=val_0 +POSTHOOK: Input: default@combine2_n0@value=val_2 +POSTHOOK: Input: default@combine2_n0@value=val_4 +POSTHOOK: Input: default@combine2_n0@value=val_5 +POSTHOOK: Input: default@combine2_n0@value=val_8 +POSTHOOK: Input: default@combine2_n0@value=val_9 
+POSTHOOK: Input: default@combine2_n0@value=| #### A masked pattern was here #### 0 val_0 0 val_0 @@ -121,10 +121,10 @@ POSTHOOK: Input: default@combine2@value=| 8 val_8 9 val_9 PREHOOK: query: explain extended -select count(1) from combine2 where value is not null +select count(1) from combine2_n0 where value is not null PREHOOK: type: QUERY POSTHOOK: query: explain extended -select count(1) from combine2 where value is not null +select count(1) from combine2_n0 where value is not null POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -136,13 +136,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(1) from combine2 where value is not null +PREHOOK: query: select count(1) from combine2_n0 where value is not null PREHOOK: type: QUERY -PREHOOK: Input: default@combine2 +PREHOOK: Input: default@combine2_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from combine2 where value is not null +POSTHOOK: query: select count(1) from combine2_n0 where value is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@combine2 +POSTHOOK: Input: default@combine2_n0 #### A masked pattern was here #### 12 PREHOOK: query: explain diff --git a/ql/src/test/results/clientpositive/comma_in_column_name.q.out b/ql/src/test/results/clientpositive/comma_in_column_name.q.out index 43357c0a72..ecbcff7fda 100644 --- a/ql/src/test/results/clientpositive/comma_in_column_name.q.out +++ b/ql/src/test/results/clientpositive/comma_in_column_name.q.out @@ -1,60 +1,60 @@ -PREHOOK: query: create table test (`x,y` int) +PREHOOK: query: create table test_n4 (`x,y` int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test -POSTHOOK: query: create table test (`x,y` int) +PREHOOK: Output: default@test_n4 +POSTHOOK: query: create table test_n4 (`x,y` int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test -PREHOOK: query: insert into test values (1),(2) +POSTHOOK: Output: default@test_n4 +PREHOOK: query: insert into test_n4 values (1),(2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test -POSTHOOK: query: insert into test values (1),(2) +PREHOOK: Output: default@test_n4 +POSTHOOK: query: insert into test_n4 values (1),(2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test -POSTHOOK: Lineage: test.x,y SCRIPT [] -PREHOOK: query: select `x,y` from test where `x,y` >=2 +POSTHOOK: Output: default@test_n4 +POSTHOOK: Lineage: test_n4.x,y SCRIPT [] +PREHOOK: query: select `x,y` from test_n4 where `x,y` >=2 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n4 #### A masked pattern was here #### -POSTHOOK: query: select `x,y` from test where `x,y` >=2 +POSTHOOK: query: select `x,y` from test_n4 where `x,y` >=2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n4 #### A masked pattern was here #### 2 -PREHOOK: query: drop table test +PREHOOK: query: drop table test_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@test -PREHOOK: Output: default@test -POSTHOOK: query: drop table test +PREHOOK: Input: default@test_n4 +PREHOOK: Output: default@test_n4 +POSTHOOK: query: drop table test_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@test -POSTHOOK: Output: default@test -PREHOOK: query: create table test (`x,y` int) stored as orc +POSTHOOK: Input: default@test_n4 +POSTHOOK: Output: default@test_n4 +PREHOOK: query: create table test_n4 
(`x,y` int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test -POSTHOOK: query: create table test (`x,y` int) stored as orc +PREHOOK: Output: default@test_n4 +POSTHOOK: query: create table test_n4 (`x,y` int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test -PREHOOK: query: insert into test values (1),(2) +POSTHOOK: Output: default@test_n4 +PREHOOK: query: insert into test_n4 values (1),(2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test -POSTHOOK: query: insert into test values (1),(2) +PREHOOK: Output: default@test_n4 +POSTHOOK: query: insert into test_n4 values (1),(2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test -POSTHOOK: Lineage: test.x,y SCRIPT [] -PREHOOK: query: select `x,y` from test where `x,y` <2 +POSTHOOK: Output: default@test_n4 +POSTHOOK: Lineage: test_n4.x,y SCRIPT [] +PREHOOK: query: select `x,y` from test_n4 where `x,y` <2 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n4 #### A masked pattern was here #### -POSTHOOK: query: select `x,y` from test where `x,y` <2 +POSTHOOK: query: select `x,y` from test_n4 where `x,y` <2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n4 #### A masked pattern was here #### 1 diff --git a/ql/src/test/results/clientpositive/constGby.q.out b/ql/src/test/results/clientpositive/constGby.q.out index e5c60352e2..12c492af18 100644 --- a/ql/src/test/results/clientpositive/constGby.q.out +++ b/ql/src/test/results/clientpositive/constGby.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: create table t1 (a int) +PREHOOK: query: create table t1_n13 (a int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (a int) +PREHOOK: Output: default@t1_n13 +POSTHOOK: query: create table t1_n13 (a int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: analyze table t1 compute statistics +POSTHOOK: Output: default@t1_n13 +PREHOOK: query: analyze table t1_n13 compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: analyze table t1 compute statistics +PREHOOK: Input: default@t1_n13 +PREHOOK: Output: default@t1_n13 +POSTHOOK: query: analyze table t1_n13 compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: analyze table t1 compute statistics for columns +POSTHOOK: Input: default@t1_n13 +POSTHOOK: Output: default@t1_n13 +PREHOOK: query: analyze table t1_n13 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 +PREHOOK: Input: default@t1_n13 +PREHOOK: Output: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: analyze table t1 compute statistics for columns +POSTHOOK: query: analyze table t1_n13 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n13 +POSTHOOK: Output: default@t1_n13 #### A masked pattern was here #### -PREHOOK: query: explain select count(1) from t1 group by 1 +PREHOOK: query: explain select count(1) from t1_n13 group by 1 PREHOOK: type: QUERY -POSTHOOK: query: explain select count(1) from t1 group by 1 +POSTHOOK: query: explain select count(1) from 
t1_n13 group by 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -37,7 +37,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n13 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE @@ -79,26 +79,26 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(1) from t1 group by 1 +PREHOOK: query: select count(1) from t1_n13 group by 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from t1 group by 1 +POSTHOOK: query: select count(1) from t1_n13 group by 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### -PREHOOK: query: select count(1) from t1 +PREHOOK: query: select count(1) from t1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from t1 +POSTHOOK: query: select count(1) from t1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### 0 -PREHOOK: query: explain select count(*) from t1 +PREHOOK: query: explain select count(*) from t1_n13 PREHOOK: type: QUERY -POSTHOOK: query: explain select count(*) from t1 +POSTHOOK: query: explain select count(*) from t1_n13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -110,70 +110,70 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from t1 +PREHOOK: query: select count(*) from t1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from t1 +POSTHOOK: query: select count(*) from t1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### 0 -PREHOOK: query: select count(1) from t1 group by 1=1 +PREHOOK: query: select count(1) from t1_n13 group by 1=1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from t1 group by 1=1 +POSTHOOK: query: select count(1) from t1_n13 group by 1=1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### -PREHOOK: query: select count(1), max(a) from t1 group by 1=1 +PREHOOK: query: select count(1), max(a) from t1_n13 group by 1=1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(1), max(a) from t1 group by 1=1 +POSTHOOK: query: select count(1), max(a) from t1_n13 group by 1=1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### -PREHOOK: query: select count(1) from t1 group by 1 +PREHOOK: query: select count(1) from t1_n13 group by 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from t1 group by 1 +POSTHOOK: query: select count(1) from t1_n13 group by 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### -PREHOOK: query: select count(1) from t1 +PREHOOK: query: select count(1) from t1_n13 
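[Editor's note] The constGby.q output that follows hinges on how Hive treats a constant grouping key on an empty table: `group by 1` here groups by the literal 1 (and `group by 1=1` by a constant boolean), so an empty input yields zero groups and therefore zero result rows, while the ungrouped aggregate always returns exactly one row. A minimal sketch of the contrast, assuming t1_n13 is empty as in the test:

-- Grouped by a constant: no input rows => no groups => no output rows.
SELECT count(1) FROM t1_n13 GROUP BY 1;

-- Ungrouped aggregate: always exactly one output row; on the empty table it is 0.
SELECT count(1) FROM t1_n13;

This matches the golden output below, where the grouped queries print nothing and the ungrouped count(1)/count(*) queries print 0.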
PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from t1 +POSTHOOK: query: select count(1) from t1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### 0 -PREHOOK: query: select count(*) from t1 +PREHOOK: query: select count(*) from t1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from t1 +POSTHOOK: query: select count(*) from t1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### 0 -PREHOOK: query: select count(1) from t1 group by 1=1 +PREHOOK: query: select count(1) from t1_n13 group by 1=1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from t1 group by 1=1 +POSTHOOK: query: select count(1) from t1_n13 group by 1=1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### -PREHOOK: query: select count(1), max(a) from t1 group by 1=1 +PREHOOK: query: select count(1), max(a) from t1_n13 group by 1=1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### -POSTHOOK: query: select count(1), max(a) from t1 group by 1=1 +POSTHOOK: query: select count(1), max(a) from t1_n13 group by 1=1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/constantPropWhen.q.out b/ql/src/test/results/clientpositive/constantPropWhen.q.out index 6cfe3e52b2..82fd74f854 100644 --- a/ql/src/test/results/clientpositive/constantPropWhen.q.out +++ b/ql/src/test/results/clientpositive/constantPropWhen.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: drop table test_1 +PREHOOK: query: drop table test_1_n4 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table test_1 +POSTHOOK: query: drop table test_1_n4 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table test_1 (id int, id2 int) +PREHOOK: query: create table test_1_n4 (id int, id2 int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_1 -POSTHOOK: query: create table test_1 (id int, id2 int) +PREHOOK: Output: default@test_1_n4 +POSTHOOK: query: create table test_1_n4 (id int, id2 int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_1 -PREHOOK: query: insert into table test_1 values (123, NULL), (NULL, NULL), (NULL, 123), (123, 123) +POSTHOOK: Output: default@test_1_n4 +PREHOOK: query: insert into table test_1_n4 values (123, NULL), (NULL, NULL), (NULL, 123), (123, 123) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test_1 -POSTHOOK: query: insert into table test_1 values (123, NULL), (NULL, NULL), (NULL, 123), (123, 123) +PREHOOK: Output: default@test_1_n4 +POSTHOOK: query: insert into table test_1_n4 values (123, NULL), (NULL, NULL), (NULL, 123), (123, 123) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test_1 -POSTHOOK: Lineage: test_1.id SCRIPT [] -POSTHOOK: Lineage: test_1.id2 SCRIPT [] -PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: 
Output: default@test_1_n4 +POSTHOOK: Lineage: test_1_n4.id SCRIPT [] +POSTHOOK: Lineage: test_1_n4.id2 SCRIPT [] +PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -33,7 +33,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n4 Statistics: Num rows: 4 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (not NVL((id = id2),false)) (type: boolean) @@ -54,21 +54,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n4 #### A masked pattern was here #### true true true false -PREHOOK: query: explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -79,7 +79,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n4 Statistics: Num rows: 4 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (not NVL((id = id2),false)) (type: boolean) @@ -100,21 +100,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n4 #### A masked pattern was here #### true true true false -PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: 
QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -125,7 +125,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n4 Statistics: Num rows: 4 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: NVL((id = id2),false) (type: boolean) @@ -146,21 +146,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n4 #### A masked pattern was here #### false false false true -PREHOOK: query: explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -171,7 +171,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n4 Statistics: Num rows: 4 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: NVL((id = id2),false) (type: boolean) @@ -192,21 +192,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n4 #### A masked pattern was here #### false false false true -PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -217,7 +217,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n4 Statistics: Num rows: 4 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (not NVL((id = id2),false)) (type: boolean) @@ -238,21 +238,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: 
SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n4 #### A masked pattern was here #### true true true false -PREHOOK: query: explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: explain SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -263,7 +263,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n4 Statistics: Num rows: 4 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (not NVL((id = id2),false)) (type: boolean) @@ -284,21 +284,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: SELECT cast(CASE id when id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n4 #### A masked pattern was here #### true true true false -PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -309,7 +309,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n4 Statistics: Num rows: 4 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: NVL((id = id2),false) (type: boolean) @@ -330,21 +330,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: SELECT cast(CASE WHEN id = id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: 
Input: default@test_1_n4 #### A masked pattern was here #### false false false true -PREHOOK: query: explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: explain SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -355,7 +355,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n4 Statistics: Num rows: 4 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: NVL((id = id2),false) (type: boolean) @@ -376,13 +376,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +PREHOOK: query: SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1 +POSTHOOK: query: SELECT cast(CASE id when id2 THEN TRUE ELSE FALSE END AS BOOLEAN) AS b FROM test_1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n4 #### A masked pattern was here #### false false diff --git a/ql/src/test/results/clientpositive/constant_prop_1.q.out b/ql/src/test/results/clientpositive/constant_prop_1.q.out index b938983829..fc0453f7eb 100644 --- a/ql/src/test/results/clientpositive/constant_prop_1.q.out +++ b/ql/src/test/results/clientpositive/constant_prop_1.q.out @@ -483,7 +483,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: create table t ( +PREHOOK: query: create table t_n26 ( a int, b int, c int, @@ -492,8 +492,8 @@ e int ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t ( +PREHOOK: Output: default@t_n26 +POSTHOOK: query: create table t_n26 ( a int, b int, c int, @@ -502,16 +502,16 @@ e int ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t +POSTHOOK: Output: default@t_n26 PREHOOK: query: explain select a2 as a3 from (select a1 as a2, c1 as c2 from -(select a as a1, b as b1, c as c1 from t where a=1 and b=2 and c=3)sub1)sub2 +(select a as a1, b as b1, c as c1 from t_n26 where a=1 and b=2 and c=3)sub1)sub2 PREHOOK: type: QUERY POSTHOOK: query: explain select a2 as a3 from (select a1 as a2, c1 as c2 from -(select a as a1, b as b1, c as c1 from t where a=1 and b=2 and c=3)sub1)sub2 +(select a as a1, b as b1, c as c1 from t_n26 where a=1 and b=2 and c=3)sub1)sub2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -522,7 +522,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t + alias: t_n26 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: ((a = 1) and (b = 2) and (c = 3)) (type: boolean) diff --git a/ql/src/test/results/clientpositive/constantfolding.q.out b/ql/src/test/results/clientpositive/constantfolding.q.out index 41297ebc71..9d1111d8b7 100644 --- a/ql/src/test/results/clientpositive/constantfolding.q.out +++ b/ql/src/test/results/clientpositive/constantfolding.q.out @@ -20,101 
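The constant_prop_1.q.out hunks above exercise Hive's ConstantPropagate optimizer: because the innermost subquery filters on a=1, the outer alias a2 can be folded to the literal 1, and the rename flows through the nested aliases unchanged. A minimal sketch of the same pattern (cp_demo is an illustrative table name, not part of this patch):

create table cp_demo (a int, b int, c int, d int, e int);
explain
select a2 as a3 from
  (select a1 as a2, c1 as c2 from
    (select a as a1, b as b1, c as c1
     from cp_demo
     where a = 1 and b = 2 and c = 3) sub1) sub2;
-- With hive.optimize.constant.propagation=true (the default), the Select
-- Operator can project the constant 1 instead of re-reading column a.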
diff --git a/ql/src/test/results/clientpositive/constantfolding.q.out b/ql/src/test/results/clientpositive/constantfolding.q.out
index 41297ebc71..9d1111d8b7 100644
--- a/ql/src/test/results/clientpositive/constantfolding.q.out
+++ b/ql/src/test/results/clientpositive/constantfolding.q.out
@@ -20,101 +20,101 @@ k3
 k3
 k4
 k4
-PREHOOK: query: drop table if exists union_all_bug_test_1
+PREHOOK: query: drop table if exists union_all_bug_test_1_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists union_all_bug_test_1
+POSTHOOK: query: drop table if exists union_all_bug_test_1_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table if exists union_all_bug_test_2
+PREHOOK: query: drop table if exists union_all_bug_test_2_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists union_all_bug_test_2
+POSTHOOK: query: drop table if exists union_all_bug_test_2_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table if not exists union_all_bug_test_1
+PREHOOK: query: create table if not exists union_all_bug_test_1_n0
 (
 f1 int,
 f2 int
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@union_all_bug_test_1
-POSTHOOK: query: create table if not exists union_all_bug_test_1
+PREHOOK: Output: default@union_all_bug_test_1_n0
+POSTHOOK: query: create table if not exists union_all_bug_test_1_n0
 (
 f1 int,
 f2 int
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@union_all_bug_test_1
-PREHOOK: query: create table if not exists union_all_bug_test_2
+POSTHOOK: Output: default@union_all_bug_test_1_n0
+PREHOOK: query: create table if not exists union_all_bug_test_2_n0
 (
 f1 int
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@union_all_bug_test_2
-POSTHOOK: query: create table if not exists union_all_bug_test_2
+PREHOOK: Output: default@union_all_bug_test_2_n0
+POSTHOOK: query: create table if not exists union_all_bug_test_2_n0
 (
 f1 int
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@union_all_bug_test_2
-PREHOOK: query: insert into table union_all_bug_test_1 values (1,1)
+POSTHOOK: Output: default@union_all_bug_test_2_n0
+PREHOOK: query: insert into table union_all_bug_test_1_n0 values (1,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@union_all_bug_test_1
-POSTHOOK: query: insert into table union_all_bug_test_1 values (1,1)
+PREHOOK: Output: default@union_all_bug_test_1_n0
+POSTHOOK: query: insert into table union_all_bug_test_1_n0 values (1,1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@union_all_bug_test_1
-POSTHOOK: Lineage: union_all_bug_test_1.f1 SCRIPT []
-POSTHOOK: Lineage: union_all_bug_test_1.f2 SCRIPT []
-PREHOOK: query: insert into table union_all_bug_test_2 values (1)
+POSTHOOK: Output: default@union_all_bug_test_1_n0
+POSTHOOK: Lineage: union_all_bug_test_1_n0.f1 SCRIPT []
+POSTHOOK: Lineage: union_all_bug_test_1_n0.f2 SCRIPT []
+PREHOOK: query: insert into table union_all_bug_test_2_n0 values (1)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@union_all_bug_test_2
-POSTHOOK: query: insert into table union_all_bug_test_2 values (1)
+PREHOOK: Output: default@union_all_bug_test_2_n0
+POSTHOOK: query: insert into table union_all_bug_test_2_n0 values (1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@union_all_bug_test_2
-POSTHOOK: Lineage: union_all_bug_test_2.f1 SCRIPT []
-PREHOOK: query: insert into table union_all_bug_test_1 values (0,0)
+POSTHOOK: Output: default@union_all_bug_test_2_n0
+POSTHOOK: Lineage: union_all_bug_test_2_n0.f1 SCRIPT []
+PREHOOK: query: insert into table union_all_bug_test_1_n0 values (0,0)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@union_all_bug_test_1
-POSTHOOK: query: insert into table union_all_bug_test_1 values (0,0)
+PREHOOK: Output: default@union_all_bug_test_1_n0
+POSTHOOK: query: insert into table union_all_bug_test_1_n0 values (0,0)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@union_all_bug_test_1
-POSTHOOK: Lineage: union_all_bug_test_1.f1 SCRIPT []
-POSTHOOK: Lineage: union_all_bug_test_1.f2 SCRIPT []
-PREHOOK: query: insert into table union_all_bug_test_2 values (0)
+POSTHOOK: Output: default@union_all_bug_test_1_n0
+POSTHOOK: Lineage: union_all_bug_test_1_n0.f1 SCRIPT []
+POSTHOOK: Lineage: union_all_bug_test_1_n0.f2 SCRIPT []
+PREHOOK: query: insert into table union_all_bug_test_2_n0 values (0)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@union_all_bug_test_2
-POSTHOOK: query: insert into table union_all_bug_test_2 values (0)
+PREHOOK: Output: default@union_all_bug_test_2_n0
+POSTHOOK: query: insert into table union_all_bug_test_2_n0 values (0)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@union_all_bug_test_2
-POSTHOOK: Lineage: union_all_bug_test_2.f1 SCRIPT []
+POSTHOOK: Output: default@union_all_bug_test_2_n0
+POSTHOOK: Lineage: union_all_bug_test_2_n0.f1 SCRIPT []
 PREHOOK: query: SELECT f1
 FROM (
 SELECT
 f1
 , if('helloworld' like '%hello%' ,f1,f2) as filter
-FROM union_all_bug_test_1
+FROM union_all_bug_test_1_n0
 union all
 select
 f1
 , 0 as filter
-from union_all_bug_test_2
+from union_all_bug_test_2_n0
 ) A
 WHERE (filter = 1 and f1 = 1)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@union_all_bug_test_1
-PREHOOK: Input: default@union_all_bug_test_2
+PREHOOK: Input: default@union_all_bug_test_1_n0
+PREHOOK: Input: default@union_all_bug_test_2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT f1
 FROM (
@@ -122,19 +122,19 @@ FROM (
 SELECT
 f1
 , if('helloworld' like '%hello%' ,f1,f2) as filter
-FROM union_all_bug_test_1
+FROM union_all_bug_test_1_n0
 union all
 select
 f1
 , 0 as filter
-from union_all_bug_test_2
+from union_all_bug_test_2_n0
 ) A
 WHERE (filter = 1 and f1 = 1)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@union_all_bug_test_1
-POSTHOOK: Input: default@union_all_bug_test_2
+POSTHOOK: Input: default@union_all_bug_test_1_n0
+POSTHOOK: Input: default@union_all_bug_test_2_n0
 #### A masked pattern was here ####
 1
 PREHOOK: query: select percentile(cast(key as bigint), array()) from src where false
@@ -218,23 +218,23 @@ STAGE PLANS:
         Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE
       ListSink

-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n107(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n107
+POSTHOOK: query: CREATE TABLE dest1_n107(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n107
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n107 SELECT ' abc ' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n107
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n107 SELECT ' abc ' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Output: default@dest1_n107
+POSTHOOK: Lineage: dest1_n107.c1 SIMPLE []
 PREHOOK: query: EXPLAIN
 SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0), LOG(-1),
 ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1),
@@ -243,7 +243,7 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
   POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5),
   POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)),
   POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
-  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
+  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n107
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
@@ -253,7 +253,7 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
   POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5),
   POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)),
   POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
-  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
+  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n107
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -264,7 +264,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: dest1
+          alias: dest1_n107
           Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1.098612288668D (type: double), null (type: double), null (type: double), 1.098612288668D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), 0.47712125472D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), null (type: double), -1.0D (type: double), 7.389056098931D (type: double), 8.0D (type: double), 8.0D (type: double), 0.125D (type: double), 8.0D (type: double), 2.0D (type: double), NaND (type: double), 1.0D (type: double), 1.0D (type: double), 8.0D (type: double), 8.0D (type: double)
@@ -279,9 +279,9 @@ PREHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), L
   POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5),
   POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)),
   POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
-  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
+  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n107
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n107
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
 LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1),
@@ -290,8 +290,8 @@ POSTHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12),
   POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5),
   POWER(-1, 0.5), POWER(-1, 2), POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)),
   POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
-  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
+  POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n107
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n107
 #### A masked pattern was here ####
 1.098612288668	NULL	NULL	1.098612288668	NULL	NULL	1.584962500721	NULL	NULL	0.47712125472	NULL	NULL	1.584962500721	NULL	NULL	NULL	-1.0	7.389056098931	8.0	8.0	0.125	8.0	2.0	NaN	1.0	1.0	8.0	8.0
diff --git a/ql/src/test/results/clientpositive/constprog_dp.q.out b/ql/src/test/results/clientpositive/constprog_dp.q.out
index 4ed3ebf666..b5143a7b3a 100644
--- a/ql/src/test/results/clientpositive/constprog_dp.q.out
+++ b/ql/src/test/results/clientpositive/constprog_dp.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: create table dest(key string, value string) partitioned by (ds string)
+PREHOOK: query: create table dest_n1(key string, value string) partitioned by (ds string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest
-POSTHOOK: query: create table dest(key string, value string) partitioned by (ds string)
+PREHOOK: Output: default@dest_n1
+POSTHOOK: query: create table dest_n1(key string, value string) partitioned by (ds string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest
+POSTHOOK: Output: default@dest_n1
 PREHOOK: query: EXPLAIN
 from srcpart
-insert overwrite table dest partition (ds) select key, value, ds where ds='2008-04-08'
+insert overwrite table dest_n1 partition (ds) select key, value, ds where ds='2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 from srcpart
-insert overwrite table dest partition (ds) select key, value, ds where ds='2008-04-08'
+insert overwrite table dest_n1 partition (ds) select key, value, ds where ds='2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -42,7 +42,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest
+              name: default.dest_n1
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             outputColumnNames: key, value, ds
@@ -97,7 +97,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest
+            name: default.dest_n1

   Stage: Stage-2
     Stats Work
@@ -105,7 +105,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
-          Table: default.dest
+          Table: default.dest_n1

   Stage: Stage-3
     Map Reduce
@@ -117,7 +117,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest
+            name: default.dest_n1

   Stage: Stage-5
     Map Reduce
@@ -129,7 +129,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest
+            name: default.dest_n1

   Stage: Stage-6
     Move Operator
@@ -138,18 +138,18 @@ STAGE PLANS:
 #### A masked pattern was here ####

 PREHOOK: query: from srcpart
-insert overwrite table dest partition (ds) select key, value, ds where ds='2008-04-08'
+insert overwrite table dest_n1 partition (ds) select key, value, ds where ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@dest
+PREHOOK: Output: default@dest_n1
 POSTHOOK: query: from srcpart
-insert overwrite table dest partition (ds) select key, value, ds where ds='2008-04-08'
+insert overwrite table dest_n1 partition (ds) select key, value, ds where ds='2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@dest@ds=2008-04-08
-POSTHOOK: Lineage: dest PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@dest_n1@ds=2008-04-08
+POSTHOOK: Lineage: dest_n1 PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_n1 PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
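constprog_dp.q.out covers constant propagation into a dynamic-partition insert: the filter ds='2008-04-08' pins the dynamic partition column to a single value, which is why the plan and lineage above refer to the one partition dest_n1@ds=2008-04-08. A hedged sketch of the idiom (dest_demo is an illustrative name; srcpart is the standard qtest partitioned source table):

set hive.exec.dynamic.partition.mode=nonstrict;
create table dest_demo (key string, value string) partitioned by (ds string);
explain
from srcpart
insert overwrite table dest_demo partition (ds)
select key, value, ds where ds = '2008-04-08';
-- Although the INSERT uses dynamic partitioning, constant propagation lets
-- Hive resolve the target to the single partition ds=2008-04-08.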
diff --git a/ql/src/test/results/clientpositive/constprog_type.q.out b/ql/src/test/results/clientpositive/constprog_type.q.out
index dc63e5c5ae..137ec252e0 100644
--- a/ql/src/test/results/clientpositive/constprog_type.q.out
+++ b/ql/src/test/results/clientpositive/constprog_type.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(d date, t timestamp)
+PREHOOK: query: CREATE TABLE dest1_n23(d date, t timestamp)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(d date, t timestamp)
+PREHOOK: Output: default@dest1_n23
+POSTHOOK: query: CREATE TABLE dest1_n23(d date, t timestamp)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n23
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n23
 SELECT cast('2013-11-17' as date), cast(cast('1.3041352164485E9' as double) as timestamp)
 FROM src tablesample (1 rows)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n23
 SELECT cast('2013-11-17' as date), cast(cast('1.3041352164485E9' as double) as timestamp)
 FROM src tablesample (1 rows)
 POSTHOOK: type: QUERY
@@ -45,7 +45,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n23
       Execution mode: vectorized

   Stage: Stage-7
@@ -65,7 +65,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n23

   Stage: Stage-2
     Stats Work
@@ -81,7 +81,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n23

   Stage: Stage-5
     Map Reduce
@@ -93,7 +93,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n23

   Stage: Stage-6
     Move Operator
@@ -101,27 +101,27 @@ STAGE PLANS:
       hdfs directory: true
 #### A masked pattern was here ####

-PREHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: query: INSERT OVERWRITE TABLE dest1_n23
 SELECT cast('2013-11-17' as date), cast(cast('1.3041352164485E9' as double) as timestamp)
 FROM src tablesample (1 rows)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: Output: default@dest1_n23
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n23
 SELECT cast('2013-11-17' as date), cast(cast('1.3041352164485E9' as double) as timestamp)
 FROM src tablesample (1 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.d EXPRESSION []
-POSTHOOK: Lineage: dest1.t EXPRESSION []
-PREHOOK: query: SELECT * FROM dest1
+POSTHOOK: Output: default@dest1_n23
+POSTHOOK: Lineage: dest1_n23.d EXPRESSION []
+POSTHOOK: Lineage: dest1_n23.t EXPRESSION []
+PREHOOK: query: SELECT * FROM dest1_n23
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n23
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM dest1
+POSTHOOK: query: SELECT * FROM dest1_n23
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n23
 #### A masked pattern was here ####
 2013-11-17	2011-04-29 20:46:56.4485
 PREHOOK: query: SELECT key, value FROM src WHERE key = cast(86 as double)
diff --git a/ql/src/test/results/clientpositive/correlated_join_keys.q.out b/ql/src/test/results/clientpositive/correlated_join_keys.q.out
index 08c3145600..3c0e6e8bcf 100644
--- a/ql/src/test/results/clientpositive/correlated_join_keys.q.out
+++ b/ql/src/test/results/clientpositive/correlated_join_keys.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: drop table customer_address
+PREHOOK: query: drop table customer_address_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table customer_address
+POSTHOOK: query: drop table customer_address_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table customer_address
+PREHOOK: query: create table customer_address_n0
 (
     ca_address_sk int,
     ca_address_id string,
@@ -21,8 +21,8 @@ PREHOOK: query: create table customer_address
 row format delimited fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@customer_address
-POSTHOOK: query: create table customer_address
+PREHOOK: Output: default@customer_address_n0
+POSTHOOK: query: create table customer_address_n0
 (
     ca_address_sk int,
     ca_address_id string,
@@ -41,36 +41,36 @@ POSTHOOK: query: create table customer_address
 row format delimited fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@customer_address
-PREHOOK: query: load data local inpath '../../data/files/customer_address.txt' overwrite into table customer_address
+POSTHOOK: Output: default@customer_address_n0
+PREHOOK: query: load data local inpath '../../data/files/customer_address.txt' overwrite into table customer_address_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@customer_address
-POSTHOOK: query: load data local inpath '../../data/files/customer_address.txt' overwrite into table customer_address
+PREHOOK: Output: default@customer_address_n0
+POSTHOOK: query: load data local inpath '../../data/files/customer_address.txt' overwrite into table customer_address_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@customer_address
-PREHOOK: query: analyze table customer_address compute statistics
+POSTHOOK: Output: default@customer_address_n0
+PREHOOK: query: analyze table customer_address_n0 compute statistics
 PREHOOK: type: QUERY
-PREHOOK: Input: default@customer_address
-PREHOOK: Output: default@customer_address
-POSTHOOK: query: analyze table customer_address compute statistics
+PREHOOK: Input: default@customer_address_n0
+PREHOOK: Output: default@customer_address_n0
+POSTHOOK: query: analyze table customer_address_n0 compute statistics
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@customer_address
-POSTHOOK: Output: default@customer_address
-PREHOOK: query: analyze table customer_address compute statistics for columns ca_state, ca_zip
+POSTHOOK: Input: default@customer_address_n0
+POSTHOOK: Output: default@customer_address_n0
+PREHOOK: query: analyze table customer_address_n0 compute statistics for columns ca_state, ca_zip
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@customer_address
-PREHOOK: Output: default@customer_address
+PREHOOK: Input: default@customer_address_n0
+PREHOOK: Output: default@customer_address_n0
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table customer_address compute statistics for columns ca_state, ca_zip
+POSTHOOK: query: analyze table customer_address_n0 compute statistics for columns ca_state, ca_zip
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@customer_address
-POSTHOOK: Output: default@customer_address
+POSTHOOK: Input: default@customer_address_n0
+POSTHOOK: Output: default@customer_address_n0
 #### A masked pattern was here ####
-PREHOOK: query: explain select count(*) from customer_address a join customer_address b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
+PREHOOK: query: explain select count(*) from customer_address_n0 a join customer_address_n0 b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from customer_address a join customer_address b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
+POSTHOOK: query: explain select count(*) from customer_address_n0 a join customer_address_n0 b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -160,9 +160,9 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-PREHOOK: query: explain select count(*) from customer_address a join customer_address b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
+PREHOOK: query: explain select count(*) from customer_address_n0 a join customer_address_n0 b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from customer_address a join customer_address b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
+POSTHOOK: query: explain select count(*) from customer_address_n0 a join customer_address_n0 b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -252,11 +252,11 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-PREHOOK: query: drop table customer_address
+PREHOOK: query: drop table customer_address_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@customer_address
-PREHOOK: Output: default@customer_address
-POSTHOOK: query: drop table customer_address
+PREHOOK: Input: default@customer_address_n0
+PREHOOK: Output: default@customer_address_n0
+POSTHOOK: query: drop table customer_address_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@customer_address
-POSTHOOK: Output: default@customer_address
+POSTHOOK: Input: default@customer_address_n0
+POSTHOOK: Output: default@customer_address_n0
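correlated_join_keys.q.out joins a table to itself on two correlated columns (zip code largely determines state) and runs the same EXPLAIN twice after gathering column statistics; the point of the test is how the estimated row counts change when the optimizer accounts for that correlation. The pattern, sketched against the test's own table (the set line is an assumption about the toggle the .q file flips between the two EXPLAINs):

analyze table customer_address_n0 compute statistics for columns ca_state, ca_zip;
-- set hive.stats.correlated.multi.key.joins=true;  -- assumed toggle
explain
select count(*)
from customer_address_n0 a
join customer_address_n0 b
  on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state);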
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer11.q.out b/ql/src/test/results/clientpositive/correlationoptimizer11.q.out
index fa729ea7b6..57db64ae38 100644
--- a/ql/src/test/results/clientpositive/correlationoptimizer11.q.out
+++ b/ql/src/test/results/clientpositive/correlationoptimizer11.q.out
@@ -1,45 +1,45 @@
-PREHOOK: query: CREATE TABLE part_table(key string, value string) PARTITIONED BY (partitionId int)
+PREHOOK: query: CREATE TABLE part_table_n1(key string, value string) PARTITIONED BY (partitionId int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part_table
-POSTHOOK: query: CREATE TABLE part_table(key string, value string) PARTITIONED BY (partitionId int)
+PREHOOK: Output: default@part_table_n1
+POSTHOOK: query: CREATE TABLE part_table_n1(key string, value string) PARTITIONED BY (partitionId int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part_table
-PREHOOK: query: INSERT OVERWRITE TABLE part_table PARTITION (partitionId=1)
+POSTHOOK: Output: default@part_table_n1
+PREHOOK: query: INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=1)
 SELECT key, value FROM src ORDER BY key, value LIMIT 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@part_table@partitionid=1
-POSTHOOK: query: INSERT OVERWRITE TABLE part_table PARTITION (partitionId=1)
+PREHOOK: Output: default@part_table_n1@partitionid=1
+POSTHOOK: query: INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=1)
 SELECT key, value FROM src ORDER BY key, value LIMIT 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@part_table@partitionid=1
-POSTHOOK: Lineage: part_table PARTITION(partitionid=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: part_table PARTITION(partitionid=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE part_table PARTITION (partitionId=2)
+POSTHOOK: Output: default@part_table_n1@partitionid=1
+POSTHOOK: Lineage: part_table_n1 PARTITION(partitionid=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: part_table_n1 PARTITION(partitionid=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=2)
 SELECT key, value FROM src1 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@part_table@partitionid=2
-POSTHOOK: query: INSERT OVERWRITE TABLE part_table PARTITION (partitionId=2)
+PREHOOK: Output: default@part_table_n1@partitionid=2
+POSTHOOK: query: INSERT OVERWRITE TABLE part_table_n1 PARTITION (partitionId=2)
 SELECT key, value FROM src1 ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@part_table@partitionid=2
-POSTHOOK: Lineage: part_table PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: part_table PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@part_table_n1@partitionid=2
+POSTHOOK: Lineage: part_table_n1 PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: part_table_n1 PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
 SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 1 AND y.partitionId = 2
 GROUP BY x.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 1 AND y.partitionId = 2
 GROUP BY x.key
@@ -138,38 +138,38 @@ STAGE PLANS:
         ListSink

 PREHOOK: query: SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 1 AND y.partitionId = 2
 GROUP BY x.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_table
-PREHOOK: Input: default@part_table@partitionid=1
-PREHOOK: Input: default@part_table@partitionid=2
+PREHOOK: Input: default@part_table_n1
+PREHOOK: Input: default@part_table_n1@partitionid=1
+PREHOOK: Input: default@part_table_n1@partitionid=2
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 1 AND y.partitionId = 2
 GROUP BY x.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_table
-POSTHOOK: Input: default@part_table@partitionid=1
-POSTHOOK: Input: default@part_table@partitionid=2
+POSTHOOK: Input: default@part_table_n1
+POSTHOOK: Input: default@part_table_n1@partitionid=1
+POSTHOOK: Input: default@part_table_n1@partitionid=2
 #### A masked pattern was here ####
 128	3
 146	2
 150	1
 PREHOOK: query: EXPLAIN
 SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 1 AND y.partitionId = 2
 GROUP BY x.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 1 AND y.partitionId = 2
 GROUP BY x.key
@@ -246,38 +246,38 @@ STAGE PLANS:
         ListSink

 PREHOOK: query: SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 1 AND y.partitionId = 2
 GROUP BY x.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_table
-PREHOOK: Input: default@part_table@partitionid=1
-PREHOOK: Input: default@part_table@partitionid=2
+PREHOOK: Input: default@part_table_n1
+PREHOOK: Input: default@part_table_n1@partitionid=1
+PREHOOK: Input: default@part_table_n1@partitionid=2
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 1 AND y.partitionId = 2
 GROUP BY x.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_table
-POSTHOOK: Input: default@part_table@partitionid=1
-POSTHOOK: Input: default@part_table@partitionid=2
+POSTHOOK: Input: default@part_table_n1
+POSTHOOK: Input: default@part_table_n1@partitionid=1
+POSTHOOK: Input: default@part_table_n1@partitionid=2
 #### A masked pattern was here ####
 128	3
 146	2
 150	1
 PREHOOK: query: EXPLAIN
 SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 2 AND y.partitionId = 2
 GROUP BY x.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 2 AND y.partitionId = 2
 GROUP BY x.key
@@ -376,22 +376,22 @@ STAGE PLANS:
         ListSink

 PREHOOK: query: SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 2 AND y.partitionId = 2
 GROUP BY x.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_table
-PREHOOK: Input: default@part_table@partitionid=2
+PREHOOK: Input: default@part_table_n1
+PREHOOK: Input: default@part_table_n1@partitionid=2
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 2 AND y.partitionId = 2
 GROUP BY x.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_table
-POSTHOOK: Input: default@part_table@partitionid=2
+POSTHOOK: Input: default@part_table_n1
+POSTHOOK: Input: default@part_table_n1@partitionid=2
 #### A masked pattern was here ####
 	100
 128	1
@@ -411,14 +411,14 @@ POSTHOOK: Input: default@part_table@partitionid=2
 98	1
 PREHOOK: query: EXPLAIN
 SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 2 AND y.partitionId = 2
 GROUP BY x.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 2 AND y.partitionId = 2
 GROUP BY x.key
@@ -495,22 +495,22 @@ STAGE PLANS:
         ListSink

 PREHOOK: query: SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 2 AND y.partitionId = 2
 GROUP BY x.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@part_table
-PREHOOK: Input: default@part_table@partitionid=2
+PREHOOK: Input: default@part_table_n1
+PREHOOK: Input: default@part_table_n1@partitionid=2
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT x.key AS key, count(1) AS cnt
-FROM part_table x JOIN part_table y ON (x.key = y.key)
+FROM part_table_n1 x JOIN part_table_n1 y ON (x.key = y.key)
 WHERE x.partitionId = 2 AND y.partitionId = 2
 GROUP BY x.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_table
-POSTHOOK: Input: default@part_table@partitionid=2
+POSTHOOK: Input: default@part_table_n1
+POSTHOOK: Input: default@part_table_n1@partitionid=2
 #### A masked pattern was here ####
 	100
 128	1
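correlationoptimizer11.q.out pairs each query with two EXPLAINs, which is the usual shape of these tests when the .q file flips hive.optimize.correlation between runs. The interesting case, sketched (the set line is the assumed toggle, not shown in the golden file):

set hive.optimize.correlation=true;
explain
select x.key as key, count(1) as cnt
from part_table_n1 x join part_table_n1 y on (x.key = y.key)
where x.partitionId = 1 and y.partitionId = 2
group by x.key;
-- The join and the aggregation share the key column, so the correlation
-- optimizer can evaluate both in a single shuffle instead of two MR jobs.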
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer5.q.out b/ql/src/test/results/clientpositive/correlationoptimizer5.q.out
index 43a1c874cb..9b0c45611f 100644
--- a/ql/src/test/results/clientpositive/correlationoptimizer5.q.out
+++ b/ql/src/test/results/clientpositive/correlationoptimizer5.q.out
@@ -1,51 +1,51 @@
-PREHOOK: query: CREATE TABLE T1(key INT, val STRING)
+PREHOOK: query: CREATE TABLE T1_n11(key INT, val STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key INT, val STRING)
+PREHOOK: Output: default@T1_n11
+POSTHOOK: query: CREATE TABLE T1_n11(key INT, val STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n11
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n11
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n11
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n11
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key INT, val STRING)
+POSTHOOK: Output: default@t1_n11
+PREHOOK: query: CREATE TABLE T2_n7(key INT, val STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key INT, val STRING)
+PREHOOK: Output: default@T2_n7
+POSTHOOK: query: CREATE TABLE T2_n7(key INT, val STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n7
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T2_n7
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n7
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T2_n7
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: CREATE TABLE T3(key INT, val STRING)
+POSTHOOK: Output: default@t2_n7
+PREHOOK: query: CREATE TABLE T3_n3(key INT, val STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key INT, val STRING)
+PREHOOK: Output: default@T3_n3
+POSTHOOK: query: CREATE TABLE T3_n3(key INT, val STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE T3
+POSTHOOK: Output: default@T3_n3
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE T3_n3
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n3
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE T3_n3
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
+POSTHOOK: Output: default@t3_n3
 PREHOOK: query: CREATE TABLE T4(key INT, val STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -90,18 +90,18 @@ PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE dest_co1
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE dest_co1
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -270,47 +270,47 @@ STAGE PLANS:

 PREHOOK: query: INSERT OVERWRITE TABLE dest_co1
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n11
+PREHOOK: Input: default@t2_n7
+PREHOOK: Input: default@t3_n3
 PREHOOK: Input: default@t4
 PREHOOK: Output: default@dest_co1
 POSTHOOK: query: INSERT OVERWRITE TABLE dest_co1
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n11
+POSTHOOK: Input: default@t2_n7
+POSTHOOK: Input: default@t3_n3
 POSTHOOK: Input: default@t4
 POSTHOOK: Output: default@dest_co1
-POSTHOOK: Lineage: dest_co1.key SIMPLE [(t1)x.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest_co1.key SIMPLE [(t1_n11)x.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: dest_co1.val SIMPLE [(t4)n.FieldSchema(name:val, type:string, comment:null), ]
 PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE dest_co2
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE dest_co2
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -468,47 +468,47 @@ STAGE PLANS:

 PREHOOK: query: INSERT OVERWRITE TABLE dest_co2
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n11
+PREHOOK: Input: default@t2_n7
+PREHOOK: Input: default@t3_n3
 PREHOOK: Input: default@t4
 PREHOOK: Output: default@dest_co2
 POSTHOOK: query: INSERT OVERWRITE TABLE dest_co2
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n11
+POSTHOOK: Input: default@t2_n7
+POSTHOOK: Input: default@t3_n3
 POSTHOOK: Input: default@t4
 POSTHOOK: Output: default@dest_co2
-POSTHOOK: Lineage: dest_co2.key SIMPLE [(t1)x.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest_co2.key SIMPLE [(t1_n11)x.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: dest_co2.val SIMPLE [(t4)n.FieldSchema(name:val, type:string, comment:null), ]
 PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE dest_co3
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE dest_co3
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -780,30 +780,30 @@ STAGE PLANS:

 PREHOOK: query: INSERT OVERWRITE TABLE dest_co3
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n11
+PREHOOK: Input: default@t2_n7
+PREHOOK: Input: default@t3_n3
 PREHOOK: Input: default@t4
 PREHOOK: Output: default@dest_co3
 POSTHOOK: query: INSERT OVERWRITE TABLE dest_co3
 SELECT b.key, d.val
 FROM
-(SELECT x.key, x.val FROM T1 x JOIN T2 y ON (x.key = y.key)) b
+(SELECT x.key, x.val FROM T1_n11 x JOIN T2_n7 y ON (x.key = y.key)) b
 JOIN
-(SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d
+(SELECT m.key, n.val FROM T3_n3 m JOIN T4 n ON (m.key = n.key)) d
 ON b.key = d.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n11
+POSTHOOK: Input: default@t2_n7
+POSTHOOK: Input: default@t3_n3
 POSTHOOK: Input: default@t4
 POSTHOOK: Output: default@dest_co3
-POSTHOOK: Lineage: dest_co3.key SIMPLE [(t1)x.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest_co3.key SIMPLE [(t1_n11)x.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: dest_co3.val SIMPLE [(t4)n.FieldSchema(name:val, type:string, comment:null), ]
 PREHOOK: query: SELECT SUM(HASH(key)), SUM(HASH(val)) FROM dest_co1
 PREHOOK: type: QUERY
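correlationoptimizer5.q.out materializes the same two-level join into dest_co1, dest_co2, and dest_co3 under different optimizer settings and then compares aggregate checksums, which is how these tests assert that plan changes are result-preserving. The checksum idiom, for reference:

select sum(hash(key)), sum(hash(val)) from dest_co1;
-- Equal sums across dest_co1/dest_co2/dest_co3 indicate the optimized and
-- unoptimized plans wrote identical data (an order-independent check).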
@@ -PREHOOK: query: CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING) +PREHOOK: query: CREATE TABLE tmp_n2(c1 INT, c2 INT, c3 STRING, c4 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp -POSTHOOK: query: CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING) +PREHOOK: Output: default@tmp_n2 +POSTHOOK: query: CREATE TABLE tmp_n2(c1 INT, c2 INT, c3 STRING, c4 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp -PREHOOK: query: INSERT OVERWRITE TABLE tmp +POSTHOOK: Output: default@tmp_n2 +PREHOOK: query: INSERT OVERWRITE TABLE tmp_n2 SELECT x.key, y.key, x.value, y.value FROM src x JOIN src y ON (x.key = y.key) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmp -POSTHOOK: query: INSERT OVERWRITE TABLE tmp +PREHOOK: Output: default@tmp_n2 +POSTHOOK: query: INSERT OVERWRITE TABLE tmp_n2 SELECT x.key, y.key, x.value, y.value FROM src x JOIN src y ON (x.key = y.key) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmp -POSTHOOK: Lineage: tmp.c1 EXPRESSION [(src)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmp.c2 EXPRESSION [(src)y.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmp.c3 SIMPLE [(src)x.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tmp.c4 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tmp_n2 +POSTHOOK: Lineage: tmp_n2.c1 EXPRESSION [(src)x.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_n2.c2 EXPRESSION [(src)y.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_n2.c3 SIMPLE [(src)x.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_n2.c4 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: EXPLAIN SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -161,21 +161,21 @@ STAGE PLANS: PREHOOK: query: SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) PREHOOK: type: QUERY -PREHOOK: Input: default@tmp +PREHOOK: Input: default@tmp_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, 
count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp +POSTHOOK: Input: default@tmp_n2 #### A masked pattern was here #### 103 103 4 4 104 104 4 4 @@ -189,17 +189,17 @@ POSTHOOK: Input: default@tmp PREHOOK: query: EXPLAIN SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -312,21 +312,21 @@ STAGE PLANS: PREHOOK: query: SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) PREHOOK: type: QUERY -PREHOOK: Input: default@tmp +PREHOOK: Input: default@tmp_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx +(SELECT x.c1 AS key, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1) xx JOIN -(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy +(SELECT x1.c2 AS key, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp +POSTHOOK: Input: default@tmp_n2 #### A masked pattern was here #### 103 103 4 4 104 104 4 4 @@ -340,17 +340,17 @@ POSTHOOK: Input: default@tmp PREHOOK: query: EXPLAIN SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, 
count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -482,21 +482,21 @@ STAGE PLANS: PREHOOK: query: SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) PREHOOK: type: QUERY -PREHOOK: Input: default@tmp +PREHOOK: Input: default@tmp_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp +POSTHOOK: Input: default@tmp_n2 #### A masked pattern was here #### 103 val_103 103 val_103 4 4 104 val_104 104 val_104 4 4 @@ -510,17 +510,17 @@ POSTHOOK: Input: default@tmp PREHOOK: query: EXPLAIN SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -637,21 +637,21 @@ STAGE PLANS: PREHOOK: query: SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY 
x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) PREHOOK: type: QUERY -PREHOOK: Input: default@tmp +PREHOOK: Input: default@tmp_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM -(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx +(SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp_n2 x WHERE x.c1 < 120 GROUP BY x.c1, x.c3) xx JOIN -(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy +(SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp_n2 x1 WHERE x1.c2 > 100 GROUP BY x1.c1, x1.c3) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp +POSTHOOK: Input: default@tmp_n2 #### A masked pattern was here #### 103 val_103 103 val_103 4 4 104 val_104 104 val_104 4 4 diff --git a/ql/src/test/results/clientpositive/create_1.q.out b/ql/src/test/results/clientpositive/create_1.q.out index 489496aa32..f4a62c51a7 100644 --- a/ql/src/test/results/clientpositive/create_1.q.out +++ b/ql/src/test/results/clientpositive/create_1.q.out @@ -1,55 +1,55 @@ -PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE table1_n4 (a STRING, b STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table1 -POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE +PREHOOK: Output: default@table1_n4 +POSTHOOK: query: CREATE TABLE table1_n4 (a STRING, b STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table1 -PREHOOK: query: DESCRIBE table1 +POSTHOOK: Output: default@table1_n4 +PREHOOK: query: DESCRIBE table1_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table1 -POSTHOOK: query: DESCRIBE table1 +PREHOOK: Input: default@table1_n4 +POSTHOOK: query: DESCRIBE table1_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1_n4 a string b string -PREHOOK: query: DESCRIBE EXTENDED table1 +PREHOOK: query: DESCRIBE EXTENDED table1_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table1 -POSTHOOK: query: DESCRIBE EXTENDED table1 +PREHOOK: Input: default@table1_n4 +POSTHOOK: query: DESCRIBE EXTENDED table1_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1_n4 a string b string #### A masked pattern was here #### -PREHOOK: query: CREATE TABLE IF NOT EXISTS table1 (a STRING, b STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE IF NOT EXISTS table1_n4 (a STRING, b STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE -POSTHOOK: query: CREATE TABLE IF NOT EXISTS table1 (a STRING, b STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE IF NOT EXISTS table1_n4 (a STRING, b STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE -PREHOOK: query: CREATE TABLE IF NOT EXISTS table2 (a STRING, b INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE IF NOT EXISTS table2_n2 (a STRING, b INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table2 -POSTHOOK: query: CREATE TABLE IF NOT EXISTS table2 (a STRING, b INT) STORED AS TEXTFILE +PREHOOK: Output: default@table2_n2 +POSTHOOK: query: CREATE TABLE IF NOT EXISTS table2_n2 (a STRING, 
b INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table2 -PREHOOK: query: DESCRIBE table2 +POSTHOOK: Output: default@table2_n2 +PREHOOK: query: DESCRIBE table2_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table2 -POSTHOOK: query: DESCRIBE table2 +PREHOOK: Input: default@table2_n2 +POSTHOOK: query: DESCRIBE table2_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n2 a string b int -PREHOOK: query: DESCRIBE EXTENDED table2 +PREHOOK: query: DESCRIBE EXTENDED table2_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table2 -POSTHOOK: query: DESCRIBE EXTENDED table2 +PREHOOK: Input: default@table2_n2 +POSTHOOK: query: DESCRIBE EXTENDED table2_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n2 a string b int @@ -114,32 +114,32 @@ a string b string #### A masked pattern was here #### -PREHOOK: query: CREATE TABLE table5 (a STRING, b STRING) +PREHOOK: query: CREATE TABLE table5_n1 (a STRING, b STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table5 -POSTHOOK: query: CREATE TABLE table5 (a STRING, b STRING) +PREHOOK: Output: default@table5_n1 +POSTHOOK: query: CREATE TABLE table5_n1 (a STRING, b STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table5 -PREHOOK: query: DESCRIBE table5 +POSTHOOK: Output: default@table5_n1 +PREHOOK: query: DESCRIBE table5_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table5 -POSTHOOK: query: DESCRIBE table5 +PREHOOK: Input: default@table5_n1 +POSTHOOK: query: DESCRIBE table5_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table5 +POSTHOOK: Input: default@table5_n1 a string b string -PREHOOK: query: DESCRIBE EXTENDED table5 +PREHOOK: query: DESCRIBE EXTENDED table5_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table5 -POSTHOOK: query: DESCRIBE EXTENDED table5 +PREHOOK: Input: default@table5_n1 +POSTHOOK: query: DESCRIBE EXTENDED table5_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table5 +POSTHOOK: Input: default@table5_n1 a string b string diff --git a/ql/src/test/results/clientpositive/create_escape.q.out b/ql/src/test/results/clientpositive/create_escape.q.out index dbabb1439f..18402b7881 100644 --- a/ql/src/test/results/clientpositive/create_escape.q.out +++ b/ql/src/test/results/clientpositive/create_escape.q.out @@ -1,49 +1,49 @@ -PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING) +PREHOOK: query: CREATE TABLE table1_n5 (a STRING, b STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table1 -POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING) +PREHOOK: Output: default@table1_n5 +POSTHOOK: query: CREATE TABLE table1_n5 (a STRING, b STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table1 -PREHOOK: query: DESCRIBE table1 +POSTHOOK: Output: default@table1_n5 +PREHOOK: query: DESCRIBE table1_n5 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table1 -POSTHOOK: query: DESCRIBE table1 +PREHOOK: Input: default@table1_n5 +POSTHOOK: query: DESCRIBE table1_n5 POSTHOOK: type: DESCTABLE -POSTHOOK: 
Input: default@table1 +POSTHOOK: Input: default@table1_n5 a string b string -PREHOOK: query: DESCRIBE EXTENDED table1 +PREHOOK: query: DESCRIBE EXTENDED table1_n5 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table1 -POSTHOOK: query: DESCRIBE EXTENDED table1 +PREHOOK: Input: default@table1_n5 +POSTHOOK: query: DESCRIBE EXTENDED table1_n5 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1_n5 a string b string #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE table1 SELECT key, '\\\t\\' FROM src WHERE key = 86 +PREHOOK: query: INSERT OVERWRITE TABLE table1_n5 SELECT key, '\\\t\\' FROM src WHERE key = 86 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@table1 -POSTHOOK: query: INSERT OVERWRITE TABLE table1 SELECT key, '\\\t\\' FROM src WHERE key = 86 +PREHOOK: Output: default@table1_n5 +POSTHOOK: query: INSERT OVERWRITE TABLE table1_n5 SELECT key, '\\\t\\' FROM src WHERE key = 86 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@table1 -POSTHOOK: Lineage: table1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: table1.b SIMPLE [] -PREHOOK: query: SELECT * FROM table1 +POSTHOOK: Output: default@table1_n5 +POSTHOOK: Lineage: table1_n5.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table1_n5.b SIMPLE [] +PREHOOK: query: SELECT * FROM table1_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@table1 +PREHOOK: Input: default@table1_n5 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM table1 +POSTHOOK: query: SELECT * FROM table1_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1_n5 #### A masked pattern was here #### 86 \ \ diff --git a/ql/src/test/results/clientpositive/create_genericudf.q.out b/ql/src/test/results/clientpositive/create_genericudf.q.out index b7771b232d..c2489c521a 100644 --- a/ql/src/test/results/clientpositive/create_genericudf.q.out +++ b/ql/src/test/results/clientpositive/create_genericudf.q.out @@ -16,16 +16,16 @@ PREHOOK: Output: test_translate POSTHOOK: query: CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: test_translate -PREHOOK: query: CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING) +PREHOOK: query: CREATE TABLE dest1_n95(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING) +PREHOOK: Output: default@dest1_n95 +POSTHOOK: query: CREATE TABLE dest1_n95(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n95 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n95 SELECT test_translate('abc', 'a', 'b'), test_translate('abc', 'ab', 'bc'), @@ -36,9 +36,9 @@ SELECT test_translate('abc', 'a', 'ab') PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n95 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n95 SELECT test_translate('abc', 'a', 'b'), 
test_translate('abc', 'ab', 'bc'), @@ -49,21 +49,21 @@ SELECT test_translate('abc', 'a', 'ab') POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 SIMPLE [] -POSTHOOK: Lineage: dest1.c2 SIMPLE [] -POSTHOOK: Lineage: dest1.c3 SIMPLE [] -POSTHOOK: Lineage: dest1.c4 SIMPLE [] -POSTHOOK: Lineage: dest1.c5 SIMPLE [] -POSTHOOK: Lineage: dest1.c6 SIMPLE [] -POSTHOOK: Lineage: dest1.c7 SIMPLE [] -PREHOOK: query: SELECT dest1.* FROM dest1 LIMIT 1 +POSTHOOK: Output: default@dest1_n95 +POSTHOOK: Lineage: dest1_n95.c1 SIMPLE [] +POSTHOOK: Lineage: dest1_n95.c2 SIMPLE [] +POSTHOOK: Lineage: dest1_n95.c3 SIMPLE [] +POSTHOOK: Lineage: dest1_n95.c4 SIMPLE [] +POSTHOOK: Lineage: dest1_n95.c5 SIMPLE [] +POSTHOOK: Lineage: dest1_n95.c6 SIMPLE [] +POSTHOOK: Lineage: dest1_n95.c7 SIMPLE [] +PREHOOK: query: SELECT dest1_n95.* FROM dest1_n95 LIMIT 1 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n95 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 LIMIT 1 +POSTHOOK: query: SELECT dest1_n95.* FROM dest1_n95 LIMIT 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n95 #### A masked pattern was here #### bbc bcc NULL NULL NULL bc abc PREHOOK: query: DROP TEMPORARY FUNCTION test_translate diff --git a/ql/src/test/results/clientpositive/create_like.q.out b/ql/src/test/results/clientpositive/create_like.q.out index c672a98119..579198c419 100644 --- a/ql/src/test/results/clientpositive/create_like.q.out +++ b/ql/src/test/results/clientpositive/create_like.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE table1_n16 (a STRING, b STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table1 -POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE +PREHOOK: Output: default@table1_n16 +POSTHOOK: query: CREATE TABLE table1_n16 (a STRING, b STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table1 -PREHOOK: query: DESCRIBE FORMATTED table1 +POSTHOOK: Output: default@table1_n16 +PREHOOK: query: DESCRIBE FORMATTED table1_n16 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table1 -POSTHOOK: query: DESCRIBE FORMATTED table1 +PREHOOK: Input: default@table1_n16 +POSTHOOK: query: DESCRIBE FORMATTED table1_n16 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1_n16 # col_name data_type comment a string b string @@ -41,20 +41,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: CREATE TABLE table2 LIKE table1 +PREHOOK: query: CREATE TABLE table2_n12 LIKE table1_n16 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table2 -POSTHOOK: query: CREATE TABLE table2 LIKE table1 +PREHOOK: Output: default@table2_n12 +POSTHOOK: query: CREATE TABLE table2_n12 LIKE table1_n16 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table2 -PREHOOK: query: DESCRIBE FORMATTED table2 +POSTHOOK: Output: default@table2_n12 +PREHOOK: query: DESCRIBE FORMATTED table2_n12 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table2 -POSTHOOK: query: DESCRIBE FORMATTED table2 +PREHOOK: Input: default@table2_n12 +POSTHOOK: query: DESCRIBE FORMATTED table2_n12 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 
+POSTHOOK: Input: default@table2_n12 # col_name data_type comment a string b string @@ -83,28 +83,28 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: CREATE TABLE IF NOT EXISTS table2 LIKE table1 +PREHOOK: query: CREATE TABLE IF NOT EXISTS table2_n12 LIKE table1_n16 PREHOOK: type: CREATETABLE -POSTHOOK: query: CREATE TABLE IF NOT EXISTS table2 LIKE table1 +POSTHOOK: query: CREATE TABLE IF NOT EXISTS table2_n12 LIKE table1_n16 POSTHOOK: type: CREATETABLE -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE table1 +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table2_n12 LIKE table1_n16 PREHOOK: type: CREATETABLE -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE table1 +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table2_n12 LIKE table1_n16 POSTHOOK: type: CREATETABLE -PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE table1 +PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table3_n3 LIKE table1_n16 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table3 -POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE table1 +PREHOOK: Output: default@table3_n3 +POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table3_n3 LIKE table1_n16 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table3 -PREHOOK: query: DESCRIBE FORMATTED table3 +POSTHOOK: Output: default@table3_n3 +PREHOOK: query: DESCRIBE FORMATTED table3_n3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table3 -POSTHOOK: query: DESCRIBE FORMATTED table3 +PREHOOK: Input: default@table3_n3 +POSTHOOK: query: DESCRIBE FORMATTED table3_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table3 +POSTHOOK: Input: default@table3_n3 # col_name data_type comment a string b string @@ -134,42 +134,42 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86 +PREHOOK: query: INSERT OVERWRITE TABLE table1_n16 SELECT key, value FROM src WHERE key = 86 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@table1 -POSTHOOK: query: INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86 +PREHOOK: Output: default@table1_n16 +POSTHOOK: query: INSERT OVERWRITE TABLE table1_n16 SELECT key, value FROM src WHERE key = 86 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@table1 -POSTHOOK: Lineage: table1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: table1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100 +POSTHOOK: Output: default@table1_n16 +POSTHOOK: Lineage: table1_n16.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table1_n16.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: INSERT OVERWRITE TABLE table2_n12 SELECT key, value FROM src WHERE key = 100 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@table2 -POSTHOOK: query: INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100 +PREHOOK: Output: default@table2_n12 +POSTHOOK: query: INSERT OVERWRITE TABLE table2_n12 SELECT key, value FROM src WHERE key = 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@table2 
-POSTHOOK: Lineage: table2.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: table2.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT * FROM table1 +POSTHOOK: Output: default@table2_n12 +POSTHOOK: Lineage: table2_n12.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table2_n12.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT * FROM table1_n16 PREHOOK: type: QUERY -PREHOOK: Input: default@table1 +PREHOOK: Input: default@table1_n16 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM table1 +POSTHOOK: query: SELECT * FROM table1_n16 POSTHOOK: type: QUERY -POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1_n16 #### A masked pattern was here #### 86 val_86 -PREHOOK: query: SELECT * FROM table2 +PREHOOK: query: SELECT * FROM table2_n12 PREHOOK: type: QUERY -PREHOOK: Input: default@table2 +PREHOOK: Input: default@table2_n12 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM table2 +POSTHOOK: query: SELECT * FROM table2_n12 POSTHOOK: type: QUERY -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n12 #### A masked pattern was here #### 100 val_100 100 val_100 @@ -177,29 +177,29 @@ POSTHOOK: Input: default@table2 PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@table4 +PREHOOK: Output: default@table4_n1 #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@table4 +POSTHOOK: Output: default@table4_n1 #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@table5 +PREHOOK: Output: default@table5_n5 #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@table5 -PREHOOK: query: SELECT * FROM table4 +POSTHOOK: Output: default@table5_n5 +PREHOOK: query: SELECT * FROM table4_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@table4 +PREHOOK: Input: default@table4_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM table4 +POSTHOOK: query: SELECT * FROM table4_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@table4 +POSTHOOK: Input: default@table4_n1 #### A masked pattern was here #### 1 2 @@ -207,13 +207,13 @@ POSTHOOK: Input: default@table4 4 5 6 -PREHOOK: query: SELECT * FROM table5 +PREHOOK: query: SELECT * FROM table5_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@table5 +PREHOOK: Input: default@table5_n5 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM table5 +POSTHOOK: query: SELECT * FROM table5_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@table5 +POSTHOOK: Input: default@table5_n5 #### A masked pattern was here #### 1 2 @@ -221,21 +221,21 @@ POSTHOOK: Input: default@table5 4 5 6 -PREHOOK: query: DROP TABLE table5 +PREHOOK: query: DROP TABLE table5_n5 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@table5 -PREHOOK: Output: default@table5 -POSTHOOK: query: DROP TABLE table5 +PREHOOK: Input: default@table5_n5 +PREHOOK: Output: default@table5_n5 +POSTHOOK: query: DROP TABLE table5_n5 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@table5 -POSTHOOK: Output: default@table5 -PREHOOK: query: SELECT * FROM table4 +POSTHOOK: 
Input: default@table5_n5 +POSTHOOK: Output: default@table5_n5 +PREHOOK: query: SELECT * FROM table4_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@table4 +PREHOOK: Input: default@table4_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM table4 +POSTHOOK: query: SELECT * FROM table4_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@table4 +POSTHOOK: Input: default@table4_n1 #### A masked pattern was here #### 1 2 @@ -243,31 +243,31 @@ POSTHOOK: Input: default@table4 4 5 6 -PREHOOK: query: DROP TABLE table4 +PREHOOK: query: DROP TABLE table4_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@table4 -PREHOOK: Output: default@table4 -POSTHOOK: query: DROP TABLE table4 +PREHOOK: Input: default@table4_n1 +PREHOOK: Output: default@table4_n1 +POSTHOOK: query: DROP TABLE table4_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@table4 -POSTHOOK: Output: default@table4 +POSTHOOK: Input: default@table4_n1 +POSTHOOK: Output: default@table4_n1 #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@table4 +PREHOOK: Output: default@table4_n1 #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@table4 -PREHOOK: query: SELECT * FROM table4 +POSTHOOK: Output: default@table4_n1 +PREHOOK: query: SELECT * FROM table4_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@table4 +PREHOOK: Input: default@table4_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM table4 +POSTHOOK: query: SELECT * FROM table4_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@table4 +POSTHOOK: Input: default@table4_n1 #### A masked pattern was here #### 1 2 @@ -275,9 +275,9 @@ POSTHOOK: Input: default@table4 4 5 6 -PREHOOK: query: CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ +PREHOOK: query: CREATE TABLE doctors_n2 STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "doctors", + "name": "doctors_n2", "type": "record", "fields": [ { @@ -299,10 +299,10 @@ PREHOOK: query: CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema. 
}') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@doctors -POSTHOOK: query: CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ +PREHOOK: Output: default@doctors_n2 +POSTHOOK: query: CREATE TABLE doctors_n2 STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", - "name": "doctors", + "name": "doctors_n2", "type": "record", "fields": [ { @@ -324,21 +324,21 @@ POSTHOOK: query: CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema }') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@doctors -PREHOOK: query: alter table doctors set tblproperties ('k1'='v1', 'k2'='v2') +POSTHOOK: Output: default@doctors_n2 +PREHOOK: query: alter table doctors_n2 set tblproperties ('k1'='v1', 'k2'='v2') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@doctors -PREHOOK: Output: default@doctors -POSTHOOK: query: alter table doctors set tblproperties ('k1'='v1', 'k2'='v2') +PREHOOK: Input: default@doctors_n2 +PREHOOK: Output: default@doctors_n2 +POSTHOOK: query: alter table doctors_n2 set tblproperties ('k1'='v1', 'k2'='v2') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@doctors -POSTHOOK: Output: default@doctors -PREHOOK: query: DESCRIBE FORMATTED doctors +POSTHOOK: Input: default@doctors_n2 +POSTHOOK: Output: default@doctors_n2 +PREHOOK: query: DESCRIBE FORMATTED doctors_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@doctors -POSTHOOK: query: DESCRIBE FORMATTED doctors +PREHOOK: Input: default@doctors_n2 +POSTHOOK: query: DESCRIBE FORMATTED doctors_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@doctors +POSTHOOK: Input: default@doctors_n2 # col_name data_type comment number int Order of playing the role first_name string first name of actor playing role @@ -354,7 +354,7 @@ Table Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"first_name\":\"true\",\"last_name\":\"true\",\"number\":\"true\"}} avro.schema.literal { \"namespace\": \"testing.hive.avro.serde\", - \"name\": \"doctors\", + \"name\": \"doctors_n2\", \"type\": \"record\", \"fields\": [ { @@ -394,11 +394,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: CREATE TABLE doctors2 like doctors +PREHOOK: query: CREATE TABLE doctors2 like doctors_n2 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors2 -POSTHOOK: query: CREATE TABLE doctors2 like doctors +POSTHOOK: query: CREATE TABLE doctors2 like doctors_n2 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@doctors2 @@ -423,7 +423,7 @@ Table Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"first_name\":\"true\",\"last_name\":\"true\",\"number\":\"true\"}} avro.schema.literal { \"namespace\": \"testing.hive.avro.serde\", - \"name\": \"doctors\", + \"name\": \"doctors_n2\", \"type\": \"record\", \"fields\": [ { @@ -510,20 +510,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: CREATE TABLE table5(col1 int, col2 string) stored as TEXTFILE +PREHOOK: query: CREATE TABLE table5_n5(col1 int, col2 string) stored as TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table5 -POSTHOOK: query: CREATE TABLE table5(col1 int, col2 string) stored as TEXTFILE +PREHOOK: Output: default@table5_n5 +POSTHOOK: query: CREATE TABLE table5_n5(col1 int, 
col2 string) stored as TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table5 -PREHOOK: query: DESCRIBE FORMATTED table5 +POSTHOOK: Output: default@table5_n5 +PREHOOK: query: DESCRIBE FORMATTED table5_n5 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table5 -POSTHOOK: query: DESCRIBE FORMATTED table5 +PREHOOK: Input: default@table5_n5 +POSTHOOK: query: DESCRIBE FORMATTED table5_n5 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table5 +POSTHOOK: Input: default@table5_n5 # col_name data_type comment col1 int col2 string @@ -553,20 +553,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: CREATE TABLE table6 like table5 stored as RCFILE +PREHOOK: query: CREATE TABLE table6_n4 like table5_n5 stored as RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table6 -POSTHOOK: query: CREATE TABLE table6 like table5 stored as RCFILE +PREHOOK: Output: default@table6_n4 +POSTHOOK: query: CREATE TABLE table6_n4 like table5_n5 stored as RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table6 -PREHOOK: query: DESCRIBE FORMATTED table6 +POSTHOOK: Output: default@table6_n4 +PREHOOK: query: DESCRIBE FORMATTED table6_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table6 -POSTHOOK: query: DESCRIBE FORMATTED table6 +PREHOOK: Input: default@table6_n4 +POSTHOOK: query: DESCRIBE FORMATTED table6_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table6 +POSTHOOK: Input: default@table6_n4 # col_name data_type comment col1 int col2 string @@ -595,30 +595,30 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table table6 +PREHOOK: query: drop table table6_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@table6 -PREHOOK: Output: default@table6 -POSTHOOK: query: drop table table6 +PREHOOK: Input: default@table6_n4 +PREHOOK: Output: default@table6_n4 +POSTHOOK: query: drop table table6_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@table6 -POSTHOOK: Output: default@table6 +POSTHOOK: Input: default@table6_n4 +POSTHOOK: Output: default@table6_n4 #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@table6 +PREHOOK: Output: default@table6_n4 #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@table6 -PREHOOK: query: DESCRIBE FORMATTED table6 +POSTHOOK: Output: default@table6_n4 +PREHOOK: query: DESCRIBE FORMATTED table6_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table6 -POSTHOOK: query: DESCRIBE FORMATTED table6 +PREHOOK: Input: default@table6_n4 +POSTHOOK: query: DESCRIBE FORMATTED table6_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table6 +POSTHOOK: Input: default@table6_n4 # col_name data_type comment col1 int col2 string @@ -642,31 +642,31 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table table5 +PREHOOK: query: drop table table5_n5 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@table5 -PREHOOK: Output: default@table5 -POSTHOOK: query: drop table table5 +PREHOOK: Input: default@table5_n5 +PREHOOK: Output: default@table5_n5 +POSTHOOK: query: drop table table5_n5 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@table5 -POSTHOOK: Output: default@table5 
-PREHOOK: query: create table orc_table ( +POSTHOOK: Input: default@table5_n5 +POSTHOOK: Output: default@table5_n5 +PREHOOK: query: create table orc_table_n0 ( `time` string) stored as ORC tblproperties ("orc.compress"="SNAPPY") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orc_table -POSTHOOK: query: create table orc_table ( +PREHOOK: Output: default@orc_table_n0 +POSTHOOK: query: create table orc_table_n0 ( `time` string) stored as ORC tblproperties ("orc.compress"="SNAPPY") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orc_table -PREHOOK: query: create table orc_table_using_like like orc_table +POSTHOOK: Output: default@orc_table_n0 +PREHOOK: query: create table orc_table_using_like like orc_table_n0 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_table_using_like -POSTHOOK: query: create table orc_table_using_like like orc_table +POSTHOOK: query: create table orc_table_using_like like orc_table_n0 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_table_using_like @@ -712,11 +712,11 @@ POSTHOOK: query: drop table orc_table_using_like POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@orc_table_using_like POSTHOOK: Output: default@orc_table_using_like -PREHOOK: query: drop table orc_table +PREHOOK: query: drop table orc_table_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@orc_table -PREHOOK: Output: default@orc_table -POSTHOOK: query: drop table orc_table +PREHOOK: Input: default@orc_table_n0 +PREHOOK: Output: default@orc_table_n0 +POSTHOOK: query: drop table orc_table_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@orc_table -POSTHOOK: Output: default@orc_table +POSTHOOK: Input: default@orc_table_n0 +POSTHOOK: Output: default@orc_table_n0 diff --git a/ql/src/test/results/clientpositive/create_like2.q.out b/ql/src/test/results/clientpositive/create_like2.q.out index 601b929a2d..325e216853 100644 --- a/ql/src/test/results/clientpositive/create_like2.q.out +++ b/ql/src/test/results/clientpositive/create_like2.q.out @@ -1,33 +1,33 @@ -PREHOOK: query: CREATE TABLE table1(a INT, b STRING) +PREHOOK: query: CREATE TABLE table1_n19(a INT, b STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table1 -POSTHOOK: query: CREATE TABLE table1(a INT, b STRING) +PREHOOK: Output: default@table1_n19 +POSTHOOK: query: CREATE TABLE table1_n19(a INT, b STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table1 -PREHOOK: query: ALTER TABLE table1 SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd' = '4') +POSTHOOK: Output: default@table1_n19 +PREHOOK: query: ALTER TABLE table1_n19 SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd' = '4') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@table1 -PREHOOK: Output: default@table1 -POSTHOOK: query: ALTER TABLE table1 SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd' = '4') +PREHOOK: Input: default@table1_n19 +PREHOOK: Output: default@table1_n19 +POSTHOOK: query: ALTER TABLE table1_n19 SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd' = '4') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@table1 -POSTHOOK: Output: default@table1 -PREHOOK: query: CREATE TABLE table2 LIKE table1 +POSTHOOK: Input: default@table1_n19 +POSTHOOK: Output: default@table1_n19 +PREHOOK: query: CREATE TABLE table2_n14 LIKE table1_n19 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default 
-PREHOOK: Output: default@table2 -POSTHOOK: query: CREATE TABLE table2 LIKE table1 +PREHOOK: Output: default@table2_n14 +POSTHOOK: query: CREATE TABLE table2_n14 LIKE table1_n19 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table2 -PREHOOK: query: DESC FORMATTED table2 +POSTHOOK: Output: default@table2_n14 +PREHOOK: query: DESC FORMATTED table2_n14 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table2 -POSTHOOK: query: DESC FORMATTED table2 +PREHOOK: Input: default@table2_n14 +POSTHOOK: query: DESC FORMATTED table2_n14 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n14 # col_name data_type comment a int b string diff --git a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out index 675f5c29d6..4d11fc3c9e 100644 --- a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out +++ b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value') +PREHOOK: query: CREATE TABLE test_table_n15 LIKE src TBLPROPERTIES('key'='value') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table -POSTHOOK: query: CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value') +PREHOOK: Output: default@test_table_n15 +POSTHOOK: query: CREATE TABLE test_table_n15 LIKE src TBLPROPERTIES('key'='value') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table -PREHOOK: query: DESC FORMATTED test_table +POSTHOOK: Output: default@test_table_n15 +PREHOOK: query: DESC FORMATTED test_table_n15 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: DESC FORMATTED test_table +PREHOOK: Input: default@test_table_n15 +POSTHOOK: query: DESC FORMATTED test_table_n15 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n15 # col_name data_type comment key string default value string default @@ -41,20 +41,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: CREATE TABLE test_table1 LIKE src +PREHOOK: query: CREATE TABLE test_table1_n18 LIKE src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table1 -POSTHOOK: query: CREATE TABLE test_table1 LIKE src +PREHOOK: Output: default@test_table1_n18 +POSTHOOK: query: CREATE TABLE test_table1_n18 LIKE src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table1 -PREHOOK: query: DESC FORMATTED test_table1 +POSTHOOK: Output: default@test_table1_n18 +PREHOOK: query: DESC FORMATTED test_table1_n18 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table1 -POSTHOOK: query: DESC FORMATTED test_table1 +PREHOOK: Input: default@test_table1_n18 +POSTHOOK: query: DESC FORMATTED test_table1_n18 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1_n18 # col_name data_type comment key string default value string default @@ -84,20 +84,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2') +PREHOOK: query: CREATE TABLE test_table2_n17 LIKE src TBLPROPERTIES('key2' = 'value2') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table2 -POSTHOOK: 
query: CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2') +PREHOOK: Output: default@test_table2_n17 +POSTHOOK: query: CREATE TABLE test_table2_n17 LIKE src TBLPROPERTIES('key2' = 'value2') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table2 -PREHOOK: query: DESC FORMATTED test_table2 +POSTHOOK: Output: default@test_table2_n17 +PREHOOK: query: DESC FORMATTED test_table2_n17 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table2 -POSTHOOK: query: DESC FORMATTED test_table2 +PREHOOK: Input: default@test_table2_n17 +POSTHOOK: query: DESC FORMATTED test_table2_n17 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table2 +POSTHOOK: Input: default@test_table2_n17 # col_name data_type comment key string default value string default @@ -128,20 +128,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3') +PREHOOK: query: CREATE TABLE test_table3_n9 LIKE test_table2_n17 TBLPROPERTIES('key2' = 'value3') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table3 -POSTHOOK: query: CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3') +PREHOOK: Output: default@test_table3_n9 +POSTHOOK: query: CREATE TABLE test_table3_n9 LIKE test_table2_n17 TBLPROPERTIES('key2' = 'value3') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table3 -PREHOOK: query: DESC FORMATTED test_table3 +POSTHOOK: Output: default@test_table3_n9 +PREHOOK: query: DESC FORMATTED test_table3_n9 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table3 -POSTHOOK: query: DESC FORMATTED test_table3 +PREHOOK: Input: default@test_table3_n9 +POSTHOOK: query: DESC FORMATTED test_table3_n9 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3_n9 # col_name data_type comment key string default value string default @@ -184,20 +184,20 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@test_view POSTHOOK: Lineage: test_view.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_view.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE TABLE test_table4 LIKE test_view TBLPROPERTIES('key'='value') +PREHOOK: query: CREATE TABLE test_table4_n2 LIKE test_view TBLPROPERTIES('key'='value') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table4 -POSTHOOK: query: CREATE TABLE test_table4 LIKE test_view TBLPROPERTIES('key'='value') +PREHOOK: Output: default@test_table4_n2 +POSTHOOK: query: CREATE TABLE test_table4_n2 LIKE test_view TBLPROPERTIES('key'='value') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table4 -PREHOOK: query: DESC FORMATTED test_table4 +POSTHOOK: Output: default@test_table4_n2 +PREHOOK: query: DESC FORMATTED test_table4_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table4 -POSTHOOK: query: DESC FORMATTED test_table4 +PREHOOK: Input: default@test_table4_n2 +POSTHOOK: query: DESC FORMATTED test_table4_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table4 +POSTHOOK: Input: default@test_table4_n2 # col_name data_type comment key string value string diff --git a/ql/src/test/results/clientpositive/create_like_view.q.out 
b/ql/src/test/results/clientpositive/create_like_view.q.out
index a955c3f964..0932c2d7fe 100644
--- a/ql/src/test/results/clientpositive/create_like_view.q.out
+++ b/ql/src/test/results/clientpositive/create_like_view.q.out
@@ -1,41 +1,41 @@
-PREHOOK: query: DROP TABLE IF EXISTS table1
+PREHOOK: query: DROP TABLE IF EXISTS table1_n13
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS table1
+POSTHOOK: query: DROP TABLE IF EXISTS table1_n13
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS table2
+PREHOOK: query: DROP TABLE IF EXISTS table2_n9
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS table2
+POSTHOOK: query: DROP TABLE IF EXISTS table2_n9
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS table3
+PREHOOK: query: DROP TABLE IF EXISTS table3_n2
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS table3
+POSTHOOK: query: DROP TABLE IF EXISTS table3_n2
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP VIEW IF EXISTS view1
+PREHOOK: query: DROP VIEW IF EXISTS view1_n1
 PREHOOK: type: DROPVIEW
-POSTHOOK: query: DROP VIEW IF EXISTS view1
+POSTHOOK: query: DROP VIEW IF EXISTS view1_n1
 POSTHOOK: type: DROPVIEW
-PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE table1_n13 (a STRING, b STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@table1_n13
+POSTHOOK: query: CREATE TABLE table1_n13 (a STRING, b STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: DESCRIBE table1
+POSTHOOK: Output: default@table1_n13
+PREHOOK: query: DESCRIBE table1_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE table1
+PREHOOK: Input: default@table1_n13
+POSTHOOK: query: DESCRIBE table1_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n13
 a string
 b string
-PREHOOK: query: DESCRIBE FORMATTED table1
+PREHOOK: query: DESCRIBE FORMATTED table1_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE FORMATTED table1
+PREHOOK: Input: default@table1_n13
+POSTHOOK: query: DESCRIBE FORMATTED table1_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n13
 # col_name data_type comment
 a string
 b string
@@ -65,40 +65,40 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: CREATE VIEW view1 AS SELECT * FROM table1
+PREHOOK: query: CREATE VIEW view1_n1 AS SELECT * FROM table1_n13
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n13
 PREHOOK: Output: database:default
-PREHOOK: Output: default@view1
-POSTHOOK: query: CREATE VIEW view1 AS SELECT * FROM table1
+PREHOOK: Output: default@view1_n1
+POSTHOOK: query: CREATE VIEW view1_n1 AS SELECT * FROM table1_n13
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n13
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@view1
-POSTHOOK: Lineage: view1.a SIMPLE [(table1)table1.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: view1.b SIMPLE [(table1)table1.FieldSchema(name:b, type:string, comment:null), ]
-PREHOOK: query: CREATE TABLE table2 LIKE view1
+POSTHOOK: Output: default@view1_n1
+POSTHOOK: Lineage: view1_n1.a SIMPLE [(table1_n13)table1_n13.FieldSchema(name:a, type:string, comment:null), ]
+POSTHOOK: Lineage: view1_n1.b SIMPLE [(table1_n13)table1_n13.FieldSchema(name:b, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE table2_n9 LIKE view1_n1
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table2
-POSTHOOK: query: CREATE TABLE table2 LIKE view1
+PREHOOK: Output: default@table2_n9
+POSTHOOK: query: CREATE TABLE table2_n9 LIKE view1_n1
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table2
-PREHOOK: query: DESCRIBE table2
+POSTHOOK: Output: default@table2_n9
+PREHOOK: query: DESCRIBE table2_n9
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table2
-POSTHOOK: query: DESCRIBE table2
+PREHOOK: Input: default@table2_n9
+POSTHOOK: query: DESCRIBE table2_n9
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table2
+POSTHOOK: Input: default@table2_n9
 a string
 b string
-PREHOOK: query: DESCRIBE FORMATTED table2
+PREHOOK: query: DESCRIBE FORMATTED table2_n9
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table2
-POSTHOOK: query: DESCRIBE FORMATTED table2
+PREHOOK: Input: default@table2_n9
+POSTHOOK: query: DESCRIBE FORMATTED table2_n9
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table2
+POSTHOOK: Input: default@table2_n9
 # col_name data_type comment
 a string
 b string
@@ -128,36 +128,36 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: CREATE TABLE IF NOT EXISTS table2 LIKE view1
+PREHOOK: query: CREATE TABLE IF NOT EXISTS table2_n9 LIKE view1_n1
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE IF NOT EXISTS table2 LIKE view1
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS table2_n9 LIKE view1_n1
 POSTHOOK: type: CREATETABLE
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE view1
+PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table2_n9 LIKE view1_n1
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE view1
+POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table2_n9 LIKE view1_n1
 POSTHOOK: type: CREATETABLE
-PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE view1
+PREHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table3_n2 LIKE view1_n1
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table3
-POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE view1
+PREHOOK: Output: default@table3_n2
+POSTHOOK: query: CREATE EXTERNAL TABLE IF NOT EXISTS table3_n2 LIKE view1_n1
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table3
-PREHOOK: query: DESCRIBE table3
+POSTHOOK: Output: default@table3_n2
+PREHOOK: query: DESCRIBE table3_n2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table3
-POSTHOOK: query: DESCRIBE table3
+PREHOOK: Input: default@table3_n2
+POSTHOOK: query: DESCRIBE table3_n2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table3
+POSTHOOK: Input: default@table3_n2
 a string
 b string
-PREHOOK: query: DESCRIBE FORMATTED table3
+PREHOOK: query: DESCRIBE FORMATTED table3_n2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table3
-POSTHOOK: query: DESCRIBE FORMATTED table3
+PREHOOK: Input: default@table3_n2
+POSTHOOK: query: DESCRIBE FORMATTED table3_n2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table3
+POSTHOOK: Input: default@table3_n2
 # col_name data_type comment
 a string
 b string
@@ -188,95 +188,95 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86
+PREHOOK: query: INSERT OVERWRITE TABLE table1_n13 SELECT key, value FROM src WHERE key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@table1
-POSTHOOK: query: INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86
+PREHOOK: Output: default@table1_n13
+POSTHOOK: query: INSERT OVERWRITE TABLE table1_n13 SELECT key, value FROM src WHERE key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: table1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100
+POSTHOOK: Output: default@table1_n13
+POSTHOOK: Lineage: table1_n13.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table1_n13.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE table2_n9 SELECT key, value FROM src WHERE key = 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@table2
-POSTHOOK: query: INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100
+PREHOOK: Output: default@table2_n9
+POSTHOOK: query: INSERT OVERWRITE TABLE table2_n9 SELECT key, value FROM src WHERE key = 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table2
-POSTHOOK: Lineage: table2.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: table2.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM table1
+POSTHOOK: Output: default@table2_n9
+POSTHOOK: Lineage: table2_n9.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table2_n9.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM table1_n13
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n13
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM table1
+POSTHOOK: query: SELECT * FROM table1_n13
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n13
 #### A masked pattern was here ####
 86 val_86
-PREHOOK: query: SELECT * FROM table2
+PREHOOK: query: SELECT * FROM table2_n9
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table2
+PREHOOK: Input: default@table2_n9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM table2
+POSTHOOK: query: SELECT * FROM table2_n9
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table2
+POSTHOOK: Input: default@table2_n9
 #### A masked pattern was here ####
 100 val_100
 100 val_100
-PREHOOK: query: DROP TABLE table1
+PREHOOK: query: DROP TABLE table1_n13
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: DROP TABLE table1
+PREHOOK: Input: default@table1_n13
+PREHOOK: Output: default@table1_n13
+POSTHOOK: query: DROP TABLE table1_n13
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: DROP TABLE table2
+POSTHOOK: Input: default@table1_n13
+POSTHOOK: Output: default@table1_n13
+PREHOOK: query: DROP TABLE table2_n9
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table2
-PREHOOK: Output: default@table2
-POSTHOOK: query: DROP TABLE table2
+PREHOOK: Input: default@table2_n9
+PREHOOK: Output: default@table2_n9
+POSTHOOK: query: DROP TABLE table2_n9
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table2
-POSTHOOK: Output: default@table2
-PREHOOK: query: DROP VIEW view1
+POSTHOOK: Input: default@table2_n9
+POSTHOOK: Output: default@table2_n9
+PREHOOK: query: DROP VIEW view1_n1
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@view1
-PREHOOK: Output: default@view1
-POSTHOOK: query: DROP VIEW view1
+PREHOOK: Input: default@view1_n1
+PREHOOK: Output: default@view1_n1
+POSTHOOK: query: DROP VIEW view1_n1
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@view1
-POSTHOOK: Output: default@view1
-PREHOOK: query: create view view1 partitioned on (ds, hr) as select * from srcpart
+POSTHOOK: Input: default@view1_n1
+POSTHOOK: Output: default@view1_n1
+PREHOOK: query: create view view1_n1 partitioned on (ds, hr) as select * from srcpart
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@srcpart
 PREHOOK: Output: database:default
-PREHOOK: Output: default@view1
-POSTHOOK: query: create view view1 partitioned on (ds, hr) as select * from srcpart
+PREHOOK: Output: default@view1_n1
+POSTHOOK: query: create view view1_n1 partitioned on (ds, hr) as select * from srcpart
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@view1
-POSTHOOK: Lineage: view1.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: view1.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table table1 like view1
+POSTHOOK: Output: default@view1_n1
+POSTHOOK: Lineage: view1_n1.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: view1_n1.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table table1_n13 like view1_n1
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: create table table1 like view1
+PREHOOK: Output: default@table1_n13
+POSTHOOK: query: create table table1_n13 like view1_n1
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: describe formatted table1
+POSTHOOK: Output: default@table1_n13
+PREHOOK: query: describe formatted table1_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: describe formatted table1
+PREHOOK: Input: default@table1_n13
+POSTHOOK: query: describe formatted table1_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n13
 # col_name data_type comment
 key string
 value string
@@ -312,19 +312,19 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: DROP TABLE table1
+PREHOOK: query: DROP TABLE table1_n13
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: DROP TABLE table1
+PREHOOK: Input: default@table1_n13
+PREHOOK: Output: default@table1_n13
+POSTHOOK: query: DROP TABLE table1_n13
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: DROP VIEW view1
+POSTHOOK: Input: default@table1_n13
+POSTHOOK: Output: default@table1_n13
+PREHOOK: query: DROP VIEW view1_n1
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@view1
-PREHOOK: Output: default@view1
-POSTHOOK: query: DROP VIEW view1
+PREHOOK: Input: default@view1_n1
+PREHOOK: Output: default@view1_n1
+POSTHOOK: query: DROP VIEW view1_n1
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@view1
-POSTHOOK: Output: default@view1
+POSTHOOK: Input: default@view1_n1
+POSTHOOK: Output: default@view1_n1
diff --git a/ql/src/test/results/clientpositive/create_merge_compressed.q.out b/ql/src/test/results/clientpositive/create_merge_compressed.q.out
index 58f517c895..64e20f796c 100644
--- a/ql/src/test/results/clientpositive/create_merge_compressed.q.out
+++ b/ql/src/test/results/clientpositive/create_merge_compressed.q.out
@@ -1,52 +1,52 @@
-PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: query: create table src_rc_merge_test_n1(key int, value string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@src_rc_merge_test
-POSTHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: Output: default@src_rc_merge_test_n1
+POSTHOOK: query: create table src_rc_merge_test_n1(key int, value string) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_rc_merge_test
-PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test
+POSTHOOK: Output: default@src_rc_merge_test_n1
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@src_rc_merge_test
-POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test
+PREHOOK: Output: default@src_rc_merge_test_n1
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@src_rc_merge_test
-PREHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile
+POSTHOOK: Output: default@src_rc_merge_test_n1
+PREHOOK: query: create table tgt_rc_merge_test_n1(key int, value string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tgt_rc_merge_test
-POSTHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: Output: default@tgt_rc_merge_test_n1
+POSTHOOK: query: create table tgt_rc_merge_test_n1(key int, value string) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tgt_rc_merge_test
-PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test_n1
+PREHOOK: query: insert into table tgt_rc_merge_test_n1 select * from src_rc_merge_test_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_merge_test
-PREHOOK: Output: default@tgt_rc_merge_test
-POSTHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+PREHOOK: Input: default@src_rc_merge_test_n1
+PREHOOK: Output: default@tgt_rc_merge_test_n1
+POSTHOOK: query: insert into table tgt_rc_merge_test_n1 select * from src_rc_merge_test_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_merge_test
-POSTHOOK: Output: default@tgt_rc_merge_test
-POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+POSTHOOK: Input: default@src_rc_merge_test_n1
+POSTHOOK: Output: default@tgt_rc_merge_test_n1
+POSTHOOK: Lineage: tgt_rc_merge_test_n1.key SIMPLE [(src_rc_merge_test_n1)src_rc_merge_test_n1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tgt_rc_merge_test_n1.value SIMPLE [(src_rc_merge_test_n1)src_rc_merge_test_n1.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert into table tgt_rc_merge_test_n1 select * from src_rc_merge_test_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_rc_merge_test
-PREHOOK: Output: default@tgt_rc_merge_test
-POSTHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+PREHOOK: Input: default@src_rc_merge_test_n1
+PREHOOK: Output: default@tgt_rc_merge_test_n1
+POSTHOOK: query: insert into table tgt_rc_merge_test_n1 select * from src_rc_merge_test_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_rc_merge_test
-POSTHOOK: Output: default@tgt_rc_merge_test
-POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: show table extended like `tgt_rc_merge_test`
+POSTHOOK: Input: default@src_rc_merge_test_n1
+POSTHOOK: Output: default@tgt_rc_merge_test_n1
+POSTHOOK: Lineage: tgt_rc_merge_test_n1.key SIMPLE [(src_rc_merge_test_n1)src_rc_merge_test_n1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tgt_rc_merge_test_n1.value SIMPLE [(src_rc_merge_test_n1)src_rc_merge_test_n1.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: show table extended like `tgt_rc_merge_test_n1`
 PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `tgt_rc_merge_test`
+POSTHOOK: query: show table extended like `tgt_rc_merge_test_n1`
 POSTHOOK: type: SHOW_TABLESTATUS
-tableName:tgt_rc_merge_test
+tableName:tgt_rc_merge_test_n1
 #### A masked pattern was here ####
 inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -59,37 +59,37 @@ maxFileSize:171
 minFileSize:171
 #### A masked pattern was here ####
-PREHOOK: query: select count(1) from tgt_rc_merge_test
+PREHOOK: query: select count(1) from tgt_rc_merge_test_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tgt_rc_merge_test
+PREHOOK: Input: default@tgt_rc_merge_test_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from tgt_rc_merge_test
+POSTHOOK: query: select count(1) from tgt_rc_merge_test_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tgt_rc_merge_test
+POSTHOOK: Input: default@tgt_rc_merge_test_n1
 #### A masked pattern was here ####
 10
-PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tgt_rc_merge_test
+PREHOOK: Input: default@tgt_rc_merge_test_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tgt_rc_merge_test
+POSTHOOK: Input: default@tgt_rc_merge_test_n1
 #### A masked pattern was here ####
 46 -751895388
-PREHOOK: query: alter table tgt_rc_merge_test concatenate
+PREHOOK: query: alter table tgt_rc_merge_test_n1 concatenate
 PREHOOK: type: ALTER_TABLE_MERGE
-PREHOOK: Input: default@tgt_rc_merge_test
-PREHOOK: Output: default@tgt_rc_merge_test
-POSTHOOK: query: alter table tgt_rc_merge_test concatenate
+PREHOOK: Input: default@tgt_rc_merge_test_n1
+PREHOOK: Output: default@tgt_rc_merge_test_n1
+POSTHOOK: query: alter table tgt_rc_merge_test_n1 concatenate
 POSTHOOK: type: ALTER_TABLE_MERGE
-POSTHOOK: Input: default@tgt_rc_merge_test
-POSTHOOK: Output: default@tgt_rc_merge_test
-PREHOOK: query: show table extended like `tgt_rc_merge_test`
+POSTHOOK: Input: default@tgt_rc_merge_test_n1
+POSTHOOK: Output: default@tgt_rc_merge_test_n1
+PREHOOK: query: show table extended like `tgt_rc_merge_test_n1`
 PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `tgt_rc_merge_test`
+POSTHOOK: query: show table extended like `tgt_rc_merge_test_n1`
 POSTHOOK: type: SHOW_TABLESTATUS
-tableName:tgt_rc_merge_test
+tableName:tgt_rc_merge_test_n1
 #### A masked pattern was here ####
 inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -102,37 +102,37 @@ maxFileSize:243
 minFileSize:243
 #### A masked pattern was here ####
-PREHOOK: query: select count(1) from tgt_rc_merge_test
+PREHOOK: query: select count(1) from tgt_rc_merge_test_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tgt_rc_merge_test
+PREHOOK: Input: default@tgt_rc_merge_test_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from tgt_rc_merge_test
+POSTHOOK: query: select count(1) from tgt_rc_merge_test_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tgt_rc_merge_test
+POSTHOOK: Input: default@tgt_rc_merge_test_n1
 #### A masked pattern was here ####
 10
-PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tgt_rc_merge_test
+PREHOOK: Input: default@tgt_rc_merge_test_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tgt_rc_merge_test
+POSTHOOK: Input: default@tgt_rc_merge_test_n1
 #### A masked pattern was here ####
 46 -751895388
-PREHOOK: query: drop table src_rc_merge_test
+PREHOOK: query: drop table src_rc_merge_test_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@src_rc_merge_test
-PREHOOK: Output: default@src_rc_merge_test
-POSTHOOK: query: drop table src_rc_merge_test
+PREHOOK: Input: default@src_rc_merge_test_n1
+PREHOOK: Output: default@src_rc_merge_test_n1
+POSTHOOK: query: drop table src_rc_merge_test_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@src_rc_merge_test
-POSTHOOK: Output: default@src_rc_merge_test
-PREHOOK: query: drop table tgt_rc_merge_test
+POSTHOOK: Input: default@src_rc_merge_test_n1
+POSTHOOK: Output: default@src_rc_merge_test_n1
+PREHOOK: query: drop table tgt_rc_merge_test_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tgt_rc_merge_test
-PREHOOK: Output: default@tgt_rc_merge_test
-POSTHOOK: query: drop table tgt_rc_merge_test
+PREHOOK: Input: default@tgt_rc_merge_test_n1
+PREHOOK: Output: default@tgt_rc_merge_test_n1
+POSTHOOK: query: drop table tgt_rc_merge_test_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tgt_rc_merge_test
-POSTHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: Input: default@tgt_rc_merge_test_n1
+POSTHOOK: Output: default@tgt_rc_merge_test_n1
diff --git a/ql/src/test/results/clientpositive/create_nested_type.q.out b/ql/src/test/results/clientpositive/create_nested_type.q.out
index e338f3fdcd..bad902deb9 100644
--- a/ql/src/test/results/clientpositive/create_nested_type.q.out
+++ b/ql/src/test/results/clientpositive/create_nested_type.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE table1 (
+PREHOOK: query: CREATE TABLE table1_n2 (
 a STRING,
 b ARRAY<STRING>,
 c ARRAY<MAP<STRING,STRING>>,
@@ -6,8 +6,8 @@ PREHOOK: query: CREATE TABLE table1 (
 ) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: CREATE TABLE table1 (
+PREHOOK: Output: default@table1_n2
+POSTHOOK: query: CREATE TABLE table1_n2 (
 a STRING,
 b ARRAY<STRING>,
 c ARRAY<MAP<STRING,STRING>>,
@@ -15,44 +15,44 @@ POSTHOOK: query: CREATE TABLE table1 (
 ) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: DESCRIBE table1
+POSTHOOK: Output: default@table1_n2
+PREHOOK: query: DESCRIBE table1_n2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE table1
+PREHOOK: Input: default@table1_n2
+POSTHOOK: query: DESCRIBE table1_n2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n2
 a string
 b array<string>
 c array<map<string,string>>
 d map<string,array<string>>
-PREHOOK: query: DESCRIBE EXTENDED table1
+PREHOOK: query: DESCRIBE EXTENDED table1_n2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE EXTENDED table1
+PREHOOK: Input: default@table1_n2
+POSTHOOK: query: DESCRIBE EXTENDED table1_n2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n2
 a string
 b array<string>
 c array<map<string,string>>
 d map<string,array<string>>
 #### A masked pattern was here ####
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table1_n2
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@table1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table1
+PREHOOK: Output: default@table1_n2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table1_n2
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@table1
-PREHOOK: query: SELECT * from table1
+POSTHOOK: Output: default@table1_n2
+PREHOOK: query: SELECT * from table1_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n2
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * from table1
+POSTHOOK: query: SELECT * from table1_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n2
 #### A masked pattern was here ####
 a0 ["b00","b01"] [{"c001":"C001","c002":"C002"},{"c011":null,"c012":"C012"}] {"d01":["d011","d012"],"d02":["d021","d022"]}
 a1 ["b10"] [{"c001":"C001","c002":"C002"}] {"d01":["d011","d012"],"d02":null}
diff --git a/ql/src/test/results/clientpositive/create_table_like_stats.q.out b/ql/src/test/results/clientpositive/create_table_like_stats.q.out
index 7740fd7d9b..8982bdcd3e 100644
--- a/ql/src/test/results/clientpositive/create_table_like_stats.q.out
+++ b/ql/src/test/results/clientpositive/create_table_like_stats.q.out
@@ -1,21 +1,21 @@
-PREHOOK: query: drop table a
+PREHOOK: query: drop table a_n10
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table a
+POSTHOOK: query: drop table a_n10
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table a like src
+PREHOOK: query: create table a_n10 like src
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@a
-POSTHOOK: query: create table a like src
+PREHOOK: Output: default@a_n10
+POSTHOOK: query: create table a_n10 like src
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a
-PREHOOK: query: desc formatted a
+POSTHOOK: Output: default@a_n10
+PREHOOK: query: desc formatted a_n10
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@a
-POSTHOOK: query: desc formatted a
+PREHOOK: Input: default@a_n10
+POSTHOOK: query: desc formatted a_n10
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@a
+POSTHOOK: Input: default@a_n10
 # col_name data_type comment
 key string default
 value string default
@@ -44,30 +44,30 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: drop table a
+PREHOOK: query: drop table a_n10
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@a
-PREHOOK: Output: default@a
-POSTHOOK: query: drop table a
+PREHOOK: Input: default@a_n10
+PREHOOK: Output: default@a_n10
+POSTHOOK: query: drop table a_n10
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@a
-POSTHOOK: Output: default@a
+POSTHOOK: Input: default@a_n10
+POSTHOOK: Output: default@a_n10
 #### A masked pattern was here ####
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@a
+PREHOOK: Output: default@a_n10
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a
-PREHOOK: query: desc formatted a
+POSTHOOK: Output: default@a_n10
+PREHOOK: query: desc formatted a_n10
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@a
-POSTHOOK: query: desc formatted a
+PREHOOK: Input: default@a_n10
+POSTHOOK: query: desc formatted a_n10
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@a
+POSTHOOK: Input: default@a_n10
 # col_name data_type comment
 key string default
 value string default
@@ -93,32 +93,32 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: drop table a
+PREHOOK: query: drop table a_n10
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@a
-PREHOOK: Output: default@a
-POSTHOOK: query: drop table a
+PREHOOK: Input: default@a_n10
+PREHOOK: Output: default@a_n10
+POSTHOOK: query: drop table a_n10
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@a
-POSTHOOK: Output: default@a
-PREHOOK: query: create table a (key STRING COMMENT 'default', value STRING COMMENT 'default')
+POSTHOOK: Input: default@a_n10
+POSTHOOK: Output: default@a_n10
+PREHOOK: query: create table a_n10 (key STRING COMMENT 'default', value STRING COMMENT 'default')
 PARTITIONED BY (ds STRING, hr STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@a
-POSTHOOK: query: create table a (key STRING COMMENT 'default', value STRING COMMENT 'default')
+PREHOOK: Output: default@a_n10
+POSTHOOK: query: create table a_n10 (key STRING COMMENT 'default', value STRING COMMENT 'default')
 PARTITIONED BY (ds STRING, hr STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a
-PREHOOK: query: desc formatted a
+POSTHOOK: Output: default@a_n10
+PREHOOK: query: desc formatted a_n10
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@a
-POSTHOOK: query: desc formatted a
+PREHOOK: Input: default@a_n10
+POSTHOOK: query: desc formatted a_n10
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@a
+POSTHOOK: Input: default@a_n10
 # col_name data_type comment
 key string default
 value string default
@@ -154,28 +154,28 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: drop table a
+PREHOOK: query: drop table a_n10
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@a
-PREHOOK: Output: default@a
-POSTHOOK: query: drop table a
+PREHOOK: Input: default@a_n10
+PREHOOK: Output: default@a_n10
+POSTHOOK: query: drop table a_n10
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@a
-POSTHOOK: Output: default@a
-PREHOOK: query: create table a like srcpart
+POSTHOOK: Input: default@a_n10
+POSTHOOK: Output: default@a_n10
+PREHOOK: query: create table a_n10 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@a
-POSTHOOK: query: create table a like srcpart
+PREHOOK: Output: default@a_n10
+POSTHOOK: query: create table a_n10 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a
-PREHOOK: query: desc formatted a
+POSTHOOK: Output: default@a_n10
+PREHOOK: query: desc formatted a_n10
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@a
-POSTHOOK: query: desc formatted a
+PREHOOK: Input: default@a_n10
+POSTHOOK: query: desc formatted a_n10
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@a
+POSTHOOK: Input: default@a_n10
 # col_name data_type comment
 key string default
 value string default
diff --git a/ql/src/test/results/clientpositive/create_udaf.q.out b/ql/src/test/results/clientpositive/create_udaf.q.out
index 6959b47faf..7e647a8ccc 100644
--- a/ql/src/test/results/clientpositive/create_udaf.q.out
+++ b/ql/src/test/results/clientpositive/create_udaf.q.out
@@ -16,30 +16,30 @@ PREHOOK: Output: test_max
 POSTHOOK: query: CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax'
 POSTHOOK: type: CREATEFUNCTION
 POSTHOOK: Output: test_max
-PREHOOK: query: CREATE TABLE dest1(col INT)
+PREHOOK: query: CREATE TABLE dest1_n30(col INT)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(col INT)
+PREHOOK: Output: default@dest1_n30
+POSTHOOK: query: CREATE TABLE dest1_n30(col INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT test_max(length(src.value))
+POSTHOOK: Output: default@dest1_n30
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n30 SELECT test_max(length(src.value))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT test_max(length(src.value))
+PREHOOK: Output: default@dest1_n30
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n30 SELECT test_max(length(src.value))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.col EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n30
+POSTHOOK: Lineage: dest1_n30.col EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n30.* FROM dest1_n30
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n30
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n30.* FROM dest1_n30
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n30
 #### A masked pattern was here ####
 7
 PREHOOK: query: SELECT test_max(CAST(length(src.value) AS SMALLINT)) FROM src
diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out
index 3165642ba7..70e5980133 100644
--- a/ql/src/test/results/clientpositive/create_view.q.out
+++ b/ql/src/test/results/clientpositive/create_view.q.out
@@ -418,20 +418,20 @@ Sort Columns: []
 View Original Text: SELECT upper(value) FROM src WHERE key=86
 View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view3`
 View Rewrite Enabled: No
-PREHOOK: query: CREATE TABLE table1 (key int)
+PREHOOK: query: CREATE TABLE table1_n3 (key int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: CREATE TABLE table1 (key int)
+PREHOOK: Output: default@table1_n3
+POSTHOOK: query: CREATE TABLE table1_n3 (key int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: DESCRIBE EXTENDED table1
+POSTHOOK: Output: default@table1_n3
+PREHOOK: query: DESCRIBE EXTENDED table1_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE EXTENDED table1
+PREHOOK: Input: default@table1_n3
+POSTHOOK: query: DESCRIBE EXTENDED table1_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 key int
 #### A masked pattern was here ####
@@ -445,52 +445,52 @@ key string default
 value string default
 #### A masked pattern was here ####
-PREHOOK: query: DESCRIBE EXTENDED table1
+PREHOOK: query: DESCRIBE EXTENDED table1_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE EXTENDED table1
+PREHOOK: Input: default@table1_n3
+POSTHOOK: query: DESCRIBE EXTENDED table1_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 key int
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE table1 SELECT key FROM src WHERE key = 86
+PREHOOK: query: INSERT OVERWRITE TABLE table1_n3 SELECT key FROM src WHERE key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@table1
-POSTHOOK: query: INSERT OVERWRITE TABLE table1 SELECT key FROM src WHERE key = 86
+PREHOOK: Output: default@table1_n3
+POSTHOOK: query: INSERT OVERWRITE TABLE table1_n3 SELECT key FROM src WHERE key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM table1
+POSTHOOK: Output: default@table1_n3
+POSTHOOK: Lineage: table1_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM table1_n3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM table1
+POSTHOOK: query: SELECT * FROM table1_n3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 #### A masked pattern was here ####
 86
-PREHOOK: query: CREATE VIEW view4 AS SELECT * FROM table1
+PREHOOK: query: CREATE VIEW view4 AS SELECT * FROM table1_n3
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Output: database:default
 PREHOOK: Output: default@view4
-POSTHOOK: query: CREATE VIEW view4 AS SELECT * FROM table1
+POSTHOOK: query: CREATE VIEW view4 AS SELECT * FROM table1_n3
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@view4
-POSTHOOK: Lineage: view4.key SIMPLE [(table1)table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: view4.key SIMPLE [(table1_n3)table1_n3.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: SELECT * FROM view4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Input: default@view4
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM view4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Input: default@view4
 #### A masked pattern was here ####
 86
@@ -501,40 +501,40 @@ POSTHOOK: query: DESCRIBE view4
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: default@view4
 key int
-PREHOOK: query: ALTER TABLE table1 ADD COLUMNS (value STRING)
+PREHOOK: query: ALTER TABLE table1_n3 ADD COLUMNS (value STRING)
 PREHOOK: type: ALTERTABLE_ADDCOLS
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: ALTER TABLE table1 ADD COLUMNS (value STRING)
+PREHOOK: Input: default@table1_n3
+PREHOOK: Output: default@table1_n3
+POSTHOOK: query: ALTER TABLE table1_n3 ADD COLUMNS (value STRING)
 POSTHOOK: type: ALTERTABLE_ADDCOLS
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: SELECT * FROM table1
+POSTHOOK: Input: default@table1_n3
+POSTHOOK: Output: default@table1_n3
+PREHOOK: query: SELECT * FROM table1_n3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM table1
+POSTHOOK: query: SELECT * FROM table1_n3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 #### A masked pattern was here ####
 86 NULL
 PREHOOK: query: SELECT * FROM view4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Input: default@view4
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM view4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Input: default@view4
 #### A masked pattern was here ####
 86
-PREHOOK: query: DESCRIBE table1
+PREHOOK: query: DESCRIBE table1_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE table1
+PREHOOK: Input: default@table1_n3
+POSTHOOK: query: DESCRIBE table1_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 key int
 value string
 PREHOOK: query: DESCRIBE view4
@@ -547,29 +547,29 @@ key int
 PREHOOK: query: CREATE VIEW view5 AS SELECT v1.key as key1, v2.key as key2
 FROM view4 v1 join view4 v2
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Input: default@view4
 PREHOOK: Output: database:default
 PREHOOK: Output: default@view5
 POSTHOOK: query: CREATE VIEW view5 AS SELECT v1.key as key1, v2.key as key2
 FROM view4 v1 join view4 v2
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Input: default@view4
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@view5
-POSTHOOK: Lineage: view5.key1 SIMPLE [(table1)table1.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: view5.key2 SIMPLE [(table1)table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: view5.key1 SIMPLE [(table1_n3)table1_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: view5.key2 SIMPLE [(table1_n3)table1_n3.FieldSchema(name:key, type:int, comment:null), ]
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT * FROM view5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Input: default@view4
 PREHOOK: Input: default@view5
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM view5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Input: default@view4
 POSTHOOK: Input: default@view5
 #### A masked pattern was here ####
@@ -707,16 +707,16 @@ POSTHOOK: type: CREATEFUNCTION
 POSTHOOK: Output: test_translate
 PREHOOK: query: CREATE VIEW view8(c) AS
 SELECT test_translate('abc', 'a', 'b')
-FROM table1
+FROM table1_n3
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Output: database:default
 PREHOOK: Output: default@view8
 POSTHOOK: query: CREATE VIEW view8(c) AS
 SELECT test_translate('abc', 'a', 'b')
-FROM table1
+FROM table1_n3
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@view8
 POSTHOOK: Lineage: view8.c SIMPLE []
@@ -758,18 +758,18 @@ Sort Columns: []
 # View Information
 View Original Text: SELECT test_translate('abc', 'a', 'b')
- FROM table1
+ FROM table1_n3
 View Expanded Text: SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b')
- FROM `default`.`table1`) `default.view8`
+ FROM `default`.`table1_n3`) `default.view8`
 View Rewrite Enabled: No
 PREHOOK: query: SELECT * FROM view8
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Input: default@view8
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM view8
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Input: default@view8
 #### A masked pattern was here ####
 bbc
@@ -1002,16 +1002,16 @@ POSTHOOK: type: CREATEFUNCTION
 POSTHOOK: Output: test_explode
 PREHOOK: query: CREATE VIEW view11 AS
 SELECT test_explode(array(1,2,3)) AS (boom)
-FROM table1
+FROM table1_n3
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Output: database:default
 PREHOOK: Output: default@view11
 POSTHOOK: query: CREATE VIEW view11 AS
 SELECT test_explode(array(1,2,3)) AS (boom)
-FROM table1
+FROM table1_n3
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@view11
 POSTHOOK: Lineage: view11.boom SCRIPT []
@@ -1053,18 +1053,18 @@ Sort Columns: []
 # View Information
 View Original Text: SELECT test_explode(array(1,2,3)) AS (boom)
- FROM table1
+ FROM table1_n3
 View Expanded Text: SELECT `test_explode`(array(1,2,3)) AS (`boom`)
- FROM `default`.`table1`
+ FROM `default`.`table1_n3`
 View Rewrite Enabled: No
 PREHOOK: query: SELECT * FROM view11
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n3
 PREHOOK: Input: default@view11
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM view11
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 POSTHOOK: Input: default@view11
 #### A masked pattern was here ####
 1
@@ -1553,20 +1553,20 @@ POSTHOOK: query: DESCRIBE view16
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: default@view16
 value string
-PREHOOK: query: DROP VIEW IF EXISTS table1
+PREHOOK: query: DROP VIEW IF EXISTS table1_n3
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: DROP VIEW IF EXISTS table1
+PREHOOK: Input: default@table1_n3
+PREHOOK: Output: default@table1_n3
+POSTHOOK: query: DROP VIEW IF EXISTS table1_n3
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: DESCRIBE table1
+POSTHOOK: Input: default@table1_n3
+POSTHOOK: Output: default@table1_n3
+PREHOOK: query: DESCRIBE table1_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE table1
+PREHOOK: Input: default@table1_n3
+POSTHOOK: query: DESCRIBE table1_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n3
 key int
 value string
 PREHOOK: query: DROP VIEW view1
diff --git a/ql/src/test/results/clientpositive/create_view_partitioned.q.out b/ql/src/test/results/clientpositive/create_view_partitioned.q.out
index a337e86211..2173484abb 100644
--- a/ql/src/test/results/clientpositive/create_view_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/create_view_partitioned.q.out
@@ -443,48 +443,48 @@ POSTHOOK: query: DROP VIEW vp3
 POSTHOOK: type: DROPVIEW
 POSTHOOK: Input: default@vp3
 POSTHOOK: Output: default@vp3
-PREHOOK: query: CREATE TABLE table1 (id int) PARTITIONED BY (year int)
+PREHOOK: query: CREATE TABLE table1_n11 (id int) PARTITIONED BY (year int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: CREATE TABLE table1 (id int) PARTITIONED BY (year int)
+PREHOOK: Output: default@table1_n11
+POSTHOOK: query: CREATE TABLE table1_n11 (id int) PARTITIONED BY (year int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: CREATE VIEW view1 partitioned on (year) as select id, year from table1
+POSTHOOK: Output: default@table1_n11
+PREHOOK: query: CREATE VIEW view1_n0 partitioned on (year) as select id, year from table1_n11
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@table1
+PREHOOK: Input: default@table1_n11
 PREHOOK: Output: database:default
-PREHOOK: Output: default@view1
-POSTHOOK: query: CREATE VIEW view1 partitioned on (year) as select id, year from table1
+PREHOOK: Output: default@view1_n0
+POSTHOOK: query: CREATE VIEW view1_n0 partitioned on (year) as select id, year from table1_n11
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n11
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@view1
-POSTHOOK: Lineage: view1.id SIMPLE [(table1)table1.FieldSchema(name:id, type:int, comment:null), ]
-PREHOOK: query: select year from view1
+POSTHOOK: Output: default@view1_n0
+POSTHOOK: Lineage: view1_n0.id SIMPLE [(table1_n11)table1_n11.FieldSchema(name:id, type:int, comment:null), ]
+PREHOOK: query: select year from view1_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-PREHOOK: Input: default@view1
+PREHOOK: Input: default@table1_n11
+PREHOOK: Input: default@view1_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select year from view1
+POSTHOOK: query: select year from view1_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-POSTHOOK: Input: default@view1
+POSTHOOK: Input: default@table1_n11
+POSTHOOK: Input: default@view1_n0
 #### A masked pattern was here ####
-PREHOOK: query: Drop view view1
+PREHOOK: query: Drop view view1_n0
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@view1
-PREHOOK: Output: default@view1
-POSTHOOK: query: Drop view view1
+PREHOOK: Input: default@view1_n0
+PREHOOK: Output: default@view1_n0
+POSTHOOK: query: Drop view view1_n0
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@view1
-POSTHOOK: Output: default@view1
-PREHOOK: query: drop table table1
+POSTHOOK: Input: default@view1_n0
+POSTHOOK: Output: default@view1_n0
+PREHOOK: query: drop table table1_n11
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: drop table table1
+PREHOOK: Input: default@table1_n11
+PREHOOK: Output: default@table1_n11
+POSTHOOK: query: drop table table1_n11
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
+POSTHOOK: Input: default@table1_n11
+POSTHOOK: Output: default@table1_n11
diff --git a/ql/src/test/results/clientpositive/create_view_translate.q.out b/ql/src/test/results/clientpositive/create_view_translate.q.out
index 7c746ea109..cb9e536deb 100644
--- a/ql/src/test/results/clientpositive/create_view_translate.q.out
+++ b/ql/src/test/results/clientpositive/create_view_translate.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: drop view if exists v
+PREHOOK: query: drop view if exists v_n7
 PREHOOK: type: DROPVIEW
-POSTHOOK: query: drop view if exists v
+POSTHOOK: query: drop view if exists v_n7
 POSTHOOK: type: DROPVIEW
 PREHOOK: query: drop view if exists w
 PREHOOK: type: DROPVIEW
 POSTHOOK: query: drop view if exists w
 POSTHOOK: type: DROPVIEW
-PREHOOK: query: create view v as select cast(key as string) from src
+PREHOOK: query: create view v_n7 as select cast(key as string) from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as select cast(key as string) from src
+PREHOOK: Output: default@v_n7
+POSTHOOK: query: create view v_n7 as select cast(key as string) from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: describe formatted v
+POSTHOOK: Output: default@v_n7
+POSTHOOK: Lineage: v_n7.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted v_n7
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: describe formatted v
+PREHOOK: Input: default@v_n7
+POSTHOOK: query: describe formatted v_n7
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n7
 # col_name data_type comment
 key string
@@ -100,14 +100,14 @@ View Expanded Text: select `a`.`key`, `a`.`value` from (
 select `src`.`key`, `src`.`value` from `default`.`src`
 ) `a`
 View Rewrite Enabled: No
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n7
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@v
-PREHOOK: Output: default@v
-POSTHOOK: query: drop view v
+PREHOOK: Input: default@v_n7
+PREHOOK: Output: default@v_n7
+POSTHOOK: query: drop view v_n7
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@v
-POSTHOOK: Output: default@v
+POSTHOOK: Input: default@v_n7
+POSTHOOK: Output: default@v_n7
 PREHOOK: query: drop view w
 PREHOOK: type: DROPVIEW
 PREHOOK: Input: default@w
diff --git a/ql/src/test/results/clientpositive/create_with_constraints.q.out b/ql/src/test/results/clientpositive/create_with_constraints.q.out
index 88976631d7..6aebda3ac5 100644
--- a/ql/src/test/results/clientpositive/create_with_constraints.q.out
+++ b/ql/src/test/results/clientpositive/create_with_constraints.q.out
@@ -1,63 +1,63 @@
-PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, PRIMARY KEY (a) DISABLE)
+PREHOOK: query: CREATE TABLE table1_n12 (a STRING, b STRING, PRIMARY KEY (a) DISABLE)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING, PRIMARY KEY (a) DISABLE)
+PREHOOK: Output: default@table1_n12
+POSTHOOK: query: CREATE TABLE table1_n12 (a STRING, b STRING, PRIMARY KEY (a) DISABLE)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, CONSTRAINT pk1 PRIMARY KEY (a) DISABLE)
+POSTHOOK: Output: default@table1_n12
+PREHOOK: query: CREATE TABLE table2_n8 (a STRING, b STRING, CONSTRAINT pk1 PRIMARY KEY (a) DISABLE)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table2
-POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, CONSTRAINT pk1 PRIMARY KEY (a) DISABLE)
+PREHOOK: Output: default@table2_n8
+POSTHOOK: query: CREATE TABLE table2_n8 (a STRING, b STRING, CONSTRAINT pk1 PRIMARY KEY (a) DISABLE)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table2
-PREHOOK: query: CREATE TABLE table3 (x string NOT NULL DISABLE, PRIMARY KEY (x) DISABLE, CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2(a) DISABLE)
+POSTHOOK: Output: default@table2_n8
+PREHOOK: query: CREATE TABLE table3_n1 (x string NOT NULL DISABLE, PRIMARY KEY (x) DISABLE, CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table3
-POSTHOOK: query: CREATE TABLE table3 (x string NOT NULL DISABLE, PRIMARY KEY (x) DISABLE, CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2(a) DISABLE)
+PREHOOK: Output: default@table3_n1
+POSTHOOK: query: CREATE TABLE table3_n1 (x string NOT NULL DISABLE, PRIMARY KEY (x) DISABLE, CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table3
-PREHOOK: query: CREATE TABLE table4 (x string CONSTRAINT nn4_1 NOT NULL DISABLE, y string CONSTRAINT nn4_2 NOT NULL DISABLE, UNIQUE (x) DISABLE, CONSTRAINT fk2 FOREIGN KEY (x) REFERENCES table2(a) DISABLE,
-CONSTRAINT fk3 FOREIGN KEY (y) REFERENCES table2(a) DISABLE)
+POSTHOOK: Output: default@table3_n1
+PREHOOK: query: CREATE TABLE table4_n0 (x string CONSTRAINT nn4_1 NOT NULL DISABLE, y string CONSTRAINT nn4_2 NOT NULL DISABLE, UNIQUE (x) DISABLE, CONSTRAINT fk2 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE,
+CONSTRAINT fk3 FOREIGN KEY (y) REFERENCES table2_n8(a) DISABLE)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table4
-POSTHOOK: query: CREATE TABLE table4 (x string CONSTRAINT nn4_1 NOT NULL DISABLE, y string CONSTRAINT nn4_2 NOT NULL DISABLE, UNIQUE (x) DISABLE, CONSTRAINT fk2 FOREIGN KEY (x) REFERENCES table2(a) DISABLE,
-CONSTRAINT fk3 FOREIGN KEY (y) REFERENCES table2(a) DISABLE)
+PREHOOK: Output: default@table4_n0
+POSTHOOK: query: CREATE TABLE table4_n0 (x string CONSTRAINT nn4_1 NOT NULL DISABLE, y string CONSTRAINT nn4_2 NOT NULL DISABLE, UNIQUE (x) DISABLE, CONSTRAINT fk2 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE,
+CONSTRAINT fk3 FOREIGN KEY (y) REFERENCES table2_n8(a) DISABLE)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table4
-PREHOOK: query: CREATE TABLE table5 (x string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2(a) DISABLE)
+POSTHOOK: Output: default@table4_n0
+PREHOOK: query: CREATE TABLE table5_n4 (x string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table5
-POSTHOOK: query: CREATE TABLE table5 (x string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2(a) DISABLE)
+PREHOOK: Output: default@table5_n4
+POSTHOOK: query: CREATE TABLE table5_n4 (x string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table5
-PREHOOK: query: CREATE TABLE table6 (x string, y string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2(a) DISABLE,
-CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1(a) DISABLE)
+POSTHOOK: Output: default@table5_n4
+PREHOOK: query: CREATE TABLE table6_n3 (x string, y string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE,
+CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1_n12(a) DISABLE)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table6
-POSTHOOK: query: CREATE TABLE table6 (x string, y string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2(a) DISABLE,
-CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1(a) DISABLE)
+PREHOOK: Output: default@table6_n3
+POSTHOOK: query: CREATE TABLE table6_n3 (x string, y string, PRIMARY KEY (x) DISABLE, FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE,
+CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1_n12(a) DISABLE)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table6
-PREHOOK: query: CREATE TABLE table7 (a STRING, b STRING, PRIMARY KEY (a) DISABLE RELY)
+POSTHOOK: Output: default@table6_n3
+PREHOOK: query: CREATE TABLE table7_n3 (a STRING, b STRING, PRIMARY KEY (a) DISABLE RELY)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@table7
-POSTHOOK: query: CREATE TABLE table7 (a STRING, b STRING, PRIMARY KEY (a) DISABLE RELY)
+PREHOOK: Output: default@table7_n3
+POSTHOOK: query: CREATE TABLE table7_n3 (a STRING, b STRING, PRIMARY KEY (a) DISABLE RELY)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table7
+POSTHOOK: Output: default@table7_n3
 PREHOOK: query: CREATE TABLE table8 (a STRING, b STRING, CONSTRAINT pk8 PRIMARY KEY (a) DISABLE NORELY)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -83,12 +83,12 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table10
 PREHOOK: query: CREATE TABLE table11 (a STRING, b STRING, c STRING, CONSTRAINT pk11 PRIMARY KEY (a) DISABLE RELY, CONSTRAINT fk11_1 FOREIGN KEY (a, b) REFERENCES table9(a, b) DISABLE,
-CONSTRAINT fk11_2 FOREIGN KEY (c) REFERENCES table4(x) DISABLE)
+CONSTRAINT fk11_2 FOREIGN KEY (c) REFERENCES table4_n0(x) DISABLE)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table11
 POSTHOOK: query: CREATE TABLE table11 (a STRING, b STRING, c STRING, CONSTRAINT pk11 PRIMARY KEY (a) DISABLE RELY, CONSTRAINT fk11_1 FOREIGN KEY (a, b) REFERENCES table9(a, b) DISABLE,
-CONSTRAINT fk11_2 FOREIGN KEY (c) REFERENCES table4(x) DISABLE)
+CONSTRAINT fk11_2 FOREIGN KEY (c) REFERENCES table4_n0(x) DISABLE)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table11
@@ -116,19 +116,19 @@ POSTHOOK: query: CREATE TABLE table14 (a STRING CONSTRAINT nn14_1 NOT NULL DISAB
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table14
-PREHOOK: query: CREATE TABLE table15 (a STRING REFERENCES table4(x) DISABLE, b STRING)
+PREHOOK: query: CREATE TABLE table15 (a STRING REFERENCES table4_n0(x) DISABLE, b STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table15
-POSTHOOK: query: CREATE TABLE table15 (a STRING REFERENCES table4(x) DISABLE, b STRING)
+POSTHOOK: query: CREATE TABLE table15 (a STRING REFERENCES table4_n0(x) DISABLE, b STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table15
-PREHOOK: query: CREATE TABLE table16 (a STRING CONSTRAINT nn16_1 REFERENCES table4(x) DISABLE RELY, b STRING)
+PREHOOK: query: CREATE TABLE table16 (a STRING CONSTRAINT nn16_1 REFERENCES table4_n0(x) DISABLE RELY, b STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table16
-POSTHOOK: query: CREATE TABLE table16 (a STRING CONSTRAINT nn16_1 REFERENCES table4(x) DISABLE RELY, b STRING)
+POSTHOOK: query: CREATE TABLE table16 (a STRING CONSTRAINT nn16_1 REFERENCES table4_n0(x) DISABLE RELY, b STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table16
@@ -180,87 +180,87 @@ POSTHOOK: query: CREATE TABLE table22 (a STRING, b STRING, CONSTRAINT fk22_1 FOR
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table22
-PREHOOK: query: DESCRIBE EXTENDED table1
+PREHOOK: query: DESCRIBE EXTENDED table1_n12
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE EXTENDED table1
+PREHOOK: Input: default@table1_n12
+POSTHOOK: query: DESCRIBE EXTENDED table1_n12
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n12
 a string
 b string
 #### A masked pattern was here ####
-Constraints Primary Key for default.table1:[a], Constraint Name: #### A masked pattern was here ####
-PREHOOK: query: DESCRIBE EXTENDED table2
+Constraints Primary Key for default.table1_n12:[a], Constraint Name: #### A masked pattern was here ####
+PREHOOK: query: DESCRIBE EXTENDED table2_n8
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table2
-POSTHOOK: query: DESCRIBE EXTENDED table2
+PREHOOK: Input: default@table2_n8
+POSTHOOK: query: DESCRIBE EXTENDED table2_n8
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table2
+POSTHOOK: Input: default@table2_n8
 a string
 b string
 #### A masked pattern was here ####
-Constraints Primary Key for default.table2:[a], Constraint Name: pk1
-PREHOOK: query: DESCRIBE EXTENDED table3
+Constraints Primary Key for default.table2_n8:[a], Constraint Name: pk1
+PREHOOK: query: DESCRIBE EXTENDED table3_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table3
-POSTHOOK: query: DESCRIBE EXTENDED table3
+PREHOOK: Input: default@table3_n1
+POSTHOOK: query: DESCRIBE EXTENDED table3_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table3
+POSTHOOK: Input: default@table3_n1
 x string
 #### A masked pattern was here ####
-Constraints Primary Key for default.table3:[x], Constraint Name: #### A masked pattern was here ####
-Foreign Keys for default.table3:[ {Constraint Name: fk1, (Parent Column Name: default.table2.a, Column Name: x, Key Sequence: 1)}]
-Not Null Constraints for default.table3:[ {Constraint Name: #### A masked pattern was here ####, Column Name: x}]
-PREHOOK: query: DESCRIBE EXTENDED table4
+Constraints Primary Key for default.table3_n1:[x], Constraint Name: #### A masked pattern was here ####
+Foreign Keys for default.table3_n1:[ {Constraint Name: fk1, (Parent Column Name: default.table2_n8.a, Column Name: x, Key Sequence: 1)}]
+Not Null Constraints for default.table3_n1:[ {Constraint Name: #### A masked pattern was here ####, Column Name: x}]
+PREHOOK: query: DESCRIBE EXTENDED table4_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table4
-POSTHOOK: query: DESCRIBE EXTENDED table4
+PREHOOK: Input: default@table4_n0
+POSTHOOK: query: DESCRIBE EXTENDED table4_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table4
+POSTHOOK: Input: default@table4_n0
 x string
 y string
 #### A masked pattern was here ####
-Constraints Foreign Keys for default.table4:[ {Constraint Name: fk2, (Parent Column Name: default.table2.a, Column Name: x, Key Sequence: 1)}, {Constraint Name: fk3, (Parent Column Name: default.table2.a, Column Name: y, Key Sequence: 1)}]
-Unique Constraints for default.table4:[ {Constraint Name: #### A masked pattern was here ####, (Column Name: x, Key Sequence: 1)}]
-Not Null Constraints for default.table4:[ {Constraint Name: nn4_1, Column Name: x}, {Constraint Name: nn4_2, Column Name: y}]
-PREHOOK: query: DESCRIBE EXTENDED table5
+Constraints Foreign Keys for default.table4_n0:[ {Constraint Name: fk2, (Parent Column Name: default.table2_n8.a, Column Name: x, Key Sequence: 1)}, {Constraint Name: fk3, (Parent Column Name: default.table2_n8.a, Column Name: y, Key Sequence: 1)}]
+Unique Constraints for default.table4_n0:[ {Constraint Name: #### A masked pattern was here ####, (Column Name: x, Key Sequence: 1)}]
+Not Null Constraints for default.table4_n0:[ {Constraint Name: nn4_1, Column Name: x}, {Constraint Name: nn4_2, Column Name: y}]
+PREHOOK: query: DESCRIBE EXTENDED table5_n4
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table5
-POSTHOOK: query: DESCRIBE EXTENDED table5
+PREHOOK: Input: default@table5_n4
+POSTHOOK: query: DESCRIBE EXTENDED table5_n4
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table5
+POSTHOOK: Input: default@table5_n4
 x string
 #### A masked pattern was here ####
-Constraints Primary Key for default.table5:[x], Constraint Name: #### A masked pattern was here ####
-Foreign Keys for default.table5:[ {Constraint Name: #### A masked pattern was here ####, (Parent Column Name: default.table2.a, Column Name: x, Key Sequence: 1)}]
-PREHOOK: query: DESCRIBE EXTENDED table6
+Constraints Primary Key for default.table5_n4:[x], Constraint Name: #### A masked pattern was here ####
+Foreign Keys for default.table5_n4:[ {Constraint Name: #### A masked pattern was here ####, (Parent Column Name: default.table2_n8.a, Column Name: x, Key Sequence: 1)}]
+PREHOOK: query: DESCRIBE EXTENDED table6_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table6
-POSTHOOK: query: DESCRIBE EXTENDED table6
+PREHOOK: Input: default@table6_n3
+POSTHOOK: query: DESCRIBE EXTENDED table6_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table6
+POSTHOOK: Input: default@table6_n3
 x string
 y string
 #### A masked pattern was here ####
-Constraints Primary Key for default.table6:[x], Constraint Name: #### A masked pattern was here ####
-Foreign Keys for default.table6:[ {Constraint Name: fk4, (Parent Column Name: default.table1.a, Column Name: y, Key Sequence: 1)}, {Constraint Name: #### A masked pattern was here ####, (Parent Column Name: default.table2.a, Column Name: x, Key Sequence: 1)}]
-PREHOOK: query: DESCRIBE EXTENDED table7
+Constraints Primary Key for default.table6_n3:[x], Constraint Name: #### A masked pattern was here ####
+Foreign Keys for default.table6_n3:[ {Constraint Name: fk4, (Parent Column Name: default.table1_n12.a, Column Name: y, Key Sequence: 1)}, {Constraint Name: #### A masked pattern was here ####, (Parent Column Name: default.table2_n8.a, Column Name: x, Key Sequence: 1)}]
+PREHOOK: query: DESCRIBE EXTENDED table7_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table7
-POSTHOOK: query: DESCRIBE EXTENDED table7
+PREHOOK: Input: default@table7_n3
+POSTHOOK: query: DESCRIBE EXTENDED table7_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table7
+POSTHOOK: Input: default@table7_n3
 a string
 b string
 #### A masked pattern was here ####
-Constraints Primary Key for default.table7:[a], Constraint Name: #### A masked pattern was here ####
+Constraints Primary Key for default.table7_n3:[a], Constraint Name: #### A masked pattern was here ####
 PREHOOK: query: DESCRIBE EXTENDED table8
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@table8
@@ -439,12 +439,12 @@ a string
 b string
 #### A masked pattern was here ####
-PREHOOK: query: DESCRIBE FORMATTED table1
+PREHOOK: query: DESCRIBE FORMATTED table1_n12
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table1
-POSTHOOK: query: DESCRIBE FORMATTED table1
+PREHOOK: Input: default@table1_n12
+POSTHOOK: query: DESCRIBE FORMATTED table1_n12
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n12
 # col_name data_type comment
 a string
 b string
@@ -478,15 +478,15 @@ Storage Desc Params:
 # Constraints
 # Primary Key
-Table: default.table1
+Table: default.table1_n12
 Constraint Name: #### A masked pattern was here ####
 Column Names: a
-PREHOOK: query: DESCRIBE FORMATTED table2
+PREHOOK: query: DESCRIBE FORMATTED table2_n8
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table2
-POSTHOOK: query: DESCRIBE FORMATTED table2
+PREHOOK: Input: default@table2_n8
+POSTHOOK: query: DESCRIBE FORMATTED table2_n8
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table2
+POSTHOOK: Input: default@table2_n8
 # col_name data_type comment
 a string
 b string
@@ -520,15 +520,15 @@ Storage Desc Params:
 # Constraints
 # Primary Key
-Table: default.table2
+Table: default.table2_n8
 Constraint Name: pk1
 Column Names: a
-PREHOOK: query: DESCRIBE FORMATTED table3
+PREHOOK: query: DESCRIBE FORMATTED table3_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table3
-POSTHOOK: query: DESCRIBE FORMATTED table3
+PREHOOK: Input: default@table3_n1
+POSTHOOK: query: DESCRIBE FORMATTED table3_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table3
+POSTHOOK: Input: default@table3_n1
 # col_name data_type comment
 x string
@@ -561,27 +561,27 @@ Storage Desc Params:
 # Constraints
 # Primary Key
-Table: default.table3
+Table: default.table3_n1
 Constraint Name: #### A masked pattern was here ####
 Column Names: x
 # Foreign Keys
-Table: default.table3
+Table: default.table3_n1
 Constraint Name: fk1
-Parent Column Name:default.table2.a Column Name:x Key Sequence:1
+Parent Column Name:default.table2_n8.a Column Name:x Key Sequence:1
 # Not Null Constraints
-Table: default.table3
+Table: default.table3_n1
 Constraint Name: #### A masked pattern was here ####
 Column Name: x
-PREHOOK: query: DESCRIBE FORMATTED table4
+PREHOOK: query: DESCRIBE FORMATTED table4_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table4
-POSTHOOK: query: DESCRIBE FORMATTED table4
+PREHOOK: Input: default@table4_n0
+POSTHOOK: query: DESCRIBE FORMATTED table4_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table4
+POSTHOOK: Input: default@table4_n0
 # col_name data_type comment
 x string
 y string
@@ -615,34 +615,34 @@ Storage Desc Params:
 # Constraints
 # Foreign Keys
-Table: default.table4
+Table: default.table4_n0
 Constraint Name: fk2
-Parent Column Name:default.table2.a Column Name:x Key Sequence:1
+Parent Column Name:default.table2_n8.a Column Name:x Key Sequence:1
 Constraint Name: fk3
-Parent Column Name:default.table2.a Column Name:y Key Sequence:1
+Parent Column Name:default.table2_n8.a Column Name:y Key Sequence:1
 # Unique Constraints
-Table: default.table4
+Table: default.table4_n0
 Constraint Name: #### A masked pattern was here ####
 Column Name:x Key Sequence:1
 # Not Null Constraints
-Table: default.table4
+Table: default.table4_n0
 Constraint Name: nn4_1
 Column Name: x
 Constraint Name: nn4_2
 Column Name: y
-PREHOOK: query: DESCRIBE FORMATTED table5
+PREHOOK: query: DESCRIBE FORMATTED table5_n4
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table5
-POSTHOOK: query: DESCRIBE FORMATTED table5
+PREHOOK: Input: default@table5_n4
+POSTHOOK: query: DESCRIBE FORMATTED table5_n4
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table5
+POSTHOOK: Input: default@table5_n4
 # col_name data_type comment
 x string
@@ -675,21 +675,21 @@ Storage Desc Params:
 # Constraints
 # Primary Key
-Table: default.table5
+Table: default.table5_n4
 Constraint Name: #### A masked pattern was here ####
 Column Names: x
 # Foreign Keys
-Table: default.table5
+Table: default.table5_n4
 Constraint Name: #### A masked pattern was here ####
-Parent Column Name:default.table2.a Column Name:x Key Sequence:1
+Parent Column Name:default.table2_n8.a Column Name:x Key Sequence:1
-PREHOOK: query: DESCRIBE FORMATTED table6
+PREHOOK: query: DESCRIBE FORMATTED table6_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@table6
-POSTHOOK: query: DESCRIBE FORMATTED table6
+PREHOOK: Input: default@table6_n3
+POSTHOOK: query: DESCRIBE FORMATTED table6_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@table6
+POSTHOOK: Input: default@table6_n3
 # col_name data_type comment
 x string
 y string
@@ -723,24 +723,24 @@ Storage Desc Params:
 # Constraints
 # Primary Key
-Table: default.table6
+Table: default.table6_n3
 Constraint Name: #### A masked pattern was here ####
 Column Names: x
 # Foreign Keys
-Table: default.table6
+Table: default.table6_n3
 Constraint Name: fk4
-Parent Column Name:default.table1.a Column Name:y Key Sequence:1
+Parent Column Name:default.table1_n12.a Column Name:y Key Sequence:1
 Constraint Name: #### A masked pattern was here ####
-Parent Column Name:default.table2.a Column Name:x Key Sequence:1
+Parent Column Name:default.table2_n8.a Column Name:x Key Sequence:1
-PREHOOK: query: DESCRIBE FORMATTED table7
+PREHOOK: query: DESCRIBE FORMATTED table7_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input:
default@table7_n3 +POSTHOOK: query: DESCRIBE FORMATTED table7_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table7 +POSTHOOK: Input: default@table7_n3 # col_name data_type comment a string b string @@ -774,7 +774,7 @@ Storage Desc Params: # Constraints # Primary Key -Table: default.table7 +Table: default.table7_n3 Constraint Name: #### A masked pattern was here #### Column Names: a PREHOOK: query: DESCRIBE FORMATTED table8 @@ -1427,21 +1427,21 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk1 +PREHOOK: query: ALTER TABLE table2_n8 DROP CONSTRAINT pk1 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -POSTHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk1 +POSTHOOK: query: ALTER TABLE table2_n8 DROP CONSTRAINT pk1 POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT -PREHOOK: query: ALTER TABLE table3 DROP CONSTRAINT fk1 +PREHOOK: query: ALTER TABLE table3_n1 DROP CONSTRAINT fk1 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -POSTHOOK: query: ALTER TABLE table3 DROP CONSTRAINT fk1 +POSTHOOK: query: ALTER TABLE table3_n1 DROP CONSTRAINT fk1 POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT -PREHOOK: query: ALTER TABLE table4 DROP CONSTRAINT nn4_1 +PREHOOK: query: ALTER TABLE table4_n0 DROP CONSTRAINT nn4_1 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -POSTHOOK: query: ALTER TABLE table4 DROP CONSTRAINT nn4_1 +POSTHOOK: query: ALTER TABLE table4_n0 DROP CONSTRAINT nn4_1 POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT -PREHOOK: query: ALTER TABLE table6 DROP CONSTRAINT fk4 +PREHOOK: query: ALTER TABLE table6_n3 DROP CONSTRAINT fk4 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT -POSTHOOK: query: ALTER TABLE table6 DROP CONSTRAINT fk4 +POSTHOOK: query: ALTER TABLE table6_n3 DROP CONSTRAINT fk4 POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT PREHOOK: query: ALTER TABLE table8 DROP CONSTRAINT pk8 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT @@ -1455,50 +1455,50 @@ PREHOOK: query: ALTER TABLE table18 DROP CONSTRAINT uk18_1 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT POSTHOOK: query: ALTER TABLE table18 DROP CONSTRAINT uk18_1 POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT -PREHOOK: query: DESCRIBE EXTENDED table2 +PREHOOK: query: DESCRIBE EXTENDED table2_n8 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table2 -POSTHOOK: query: DESCRIBE EXTENDED table2 +PREHOOK: Input: default@table2_n8 +POSTHOOK: query: DESCRIBE EXTENDED table2_n8 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n8 a string b string #### A masked pattern was here #### -PREHOOK: query: DESCRIBE EXTENDED table3 +PREHOOK: query: DESCRIBE EXTENDED table3_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table3 -POSTHOOK: query: DESCRIBE EXTENDED table3 +PREHOOK: Input: default@table3_n1 +POSTHOOK: query: DESCRIBE EXTENDED table3_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table3 +POSTHOOK: Input: default@table3_n1 x string #### A masked pattern was here #### -Constraints Primary Key for default.table3:[x], Constraint Name: #### A masked pattern was here #### -Not Null Constraints for default.table3:[ {Constraint Name: #### A masked pattern was here ####, Column Name: x}] -PREHOOK: query: DESCRIBE EXTENDED table4 +Constraints Primary Key for default.table3_n1:[x], Constraint Name: #### A masked pattern was here #### +Not Null Constraints for default.table3_n1:[ {Constraint Name: #### A masked pattern was here ####, Column Name: x}] +PREHOOK: query: DESCRIBE EXTENDED table4_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table4 -POSTHOOK: 
query: DESCRIBE EXTENDED table4 +PREHOOK: Input: default@table4_n0 +POSTHOOK: query: DESCRIBE EXTENDED table4_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table4 +POSTHOOK: Input: default@table4_n0 x string y string #### A masked pattern was here #### -Constraints Unique Constraints for default.table4:[ {Constraint Name: #### A masked pattern was here ####, (Column Name: x, Key Sequence: 1)}] -Not Null Constraints for default.table4:[ {Constraint Name: nn4_2, Column Name: y}] -PREHOOK: query: DESCRIBE EXTENDED table6 +Constraints Unique Constraints for default.table4_n0:[ {Constraint Name: #### A masked pattern was here ####, (Column Name: x, Key Sequence: 1)}] +Not Null Constraints for default.table4_n0:[ {Constraint Name: nn4_2, Column Name: y}] +PREHOOK: query: DESCRIBE EXTENDED table6_n3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table6 -POSTHOOK: query: DESCRIBE EXTENDED table6 +PREHOOK: Input: default@table6_n3 +POSTHOOK: query: DESCRIBE EXTENDED table6_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table6 +POSTHOOK: Input: default@table6_n3 x string y string #### A masked pattern was here #### -Constraints Primary Key for default.table6:[x], Constraint Name: #### A masked pattern was here #### +Constraints Primary Key for default.table6_n3:[x], Constraint Name: #### A masked pattern was here #### PREHOOK: query: DESCRIBE EXTENDED table8 PREHOOK: type: DESCTABLE PREHOOK: Input: default@table8 @@ -1533,12 +1533,12 @@ b string b string #### A masked pattern was here #### -PREHOOK: query: DESCRIBE FORMATTED table2 +PREHOOK: query: DESCRIBE FORMATTED table2_n8 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table2 -POSTHOOK: query: DESCRIBE FORMATTED table2 +PREHOOK: Input: default@table2_n8 +POSTHOOK: query: DESCRIBE FORMATTED table2_n8 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n8 # col_name data_type comment a string b string @@ -1568,12 +1568,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: DESCRIBE FORMATTED table3 +PREHOOK: query: DESCRIBE FORMATTED table3_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table3 -POSTHOOK: query: DESCRIBE FORMATTED table3 +PREHOOK: Input: default@table3_n1 +POSTHOOK: query: DESCRIBE FORMATTED table3_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table3 +POSTHOOK: Input: default@table3_n1 # col_name data_type comment x string @@ -1606,21 +1606,21 @@ Storage Desc Params: # Constraints # Primary Key -Table: default.table3 +Table: default.table3_n1 Constraint Name: #### A masked pattern was here #### Column Names: x # Not Null Constraints -Table: default.table3 +Table: default.table3_n1 Constraint Name: #### A masked pattern was here #### Column Name: x -PREHOOK: query: DESCRIBE FORMATTED table4 +PREHOOK: query: DESCRIBE FORMATTED table4_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table4 -POSTHOOK: query: DESCRIBE FORMATTED table4 +PREHOOK: Input: default@table4_n0 +POSTHOOK: query: DESCRIBE FORMATTED table4_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table4 +POSTHOOK: Input: default@table4_n0 # col_name data_type comment x string y string @@ -1654,22 +1654,22 @@ Storage Desc Params: # Constraints # Unique Constraints -Table: default.table4 +Table: default.table4_n0 Constraint Name: #### A masked pattern was here #### Column Name:x Key Sequence:1 # Not Null Constraints -Table: default.table4 +Table: default.table4_n0 Constraint Name: nn4_2 Column Name: y -PREHOOK: query: DESCRIBE FORMATTED 
table6 +PREHOOK: query: DESCRIBE FORMATTED table6_n3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table6 -POSTHOOK: query: DESCRIBE FORMATTED table6 +PREHOOK: Input: default@table6_n3 +POSTHOOK: query: DESCRIBE FORMATTED table6_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table6 +POSTHOOK: Input: default@table6_n3 # col_name data_type comment x string y string @@ -1703,7 +1703,7 @@ Storage Desc Params: # Constraints # Primary Key -Table: default.table6 +Table: default.table6_n3 Constraint Name: #### A masked pattern was here #### Column Names: x PREHOOK: query: DESCRIBE FORMATTED table8 @@ -1815,27 +1815,27 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: ALTER TABLE table2 ADD CONSTRAINT pkt2 PRIMARY KEY (a) DISABLE NOVALIDATE +PREHOOK: query: ALTER TABLE table2_n8 ADD CONSTRAINT pkt2 PRIMARY KEY (a) DISABLE NOVALIDATE PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -POSTHOOK: query: ALTER TABLE table2 ADD CONSTRAINT pkt2 PRIMARY KEY (a) DISABLE NOVALIDATE +POSTHOOK: query: ALTER TABLE table2_n8 ADD CONSTRAINT pkt2 PRIMARY KEY (a) DISABLE NOVALIDATE POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT -PREHOOK: query: ALTER TABLE table3 ADD CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2(a) DISABLE NOVALIDATE RELY +PREHOOK: query: ALTER TABLE table3_n1 ADD CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE NOVALIDATE RELY PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -POSTHOOK: query: ALTER TABLE table3 ADD CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2(a) DISABLE NOVALIDATE RELY +POSTHOOK: query: ALTER TABLE table3_n1 ADD CONSTRAINT fk1 FOREIGN KEY (x) REFERENCES table2_n8(a) DISABLE NOVALIDATE RELY POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT -PREHOOK: query: ALTER TABLE table6 ADD CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1(a) DISABLE NOVALIDATE +PREHOOK: query: ALTER TABLE table6_n3 ADD CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1_n12(a) DISABLE NOVALIDATE PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -POSTHOOK: query: ALTER TABLE table6 ADD CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1(a) DISABLE NOVALIDATE +POSTHOOK: query: ALTER TABLE table6_n3 ADD CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1_n12(a) DISABLE NOVALIDATE POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT PREHOOK: query: ALTER TABLE table8 ADD CONSTRAINT pk8_2 PRIMARY KEY (a, b) DISABLE NOVALIDATE RELY PREHOOK: type: ALTERTABLE_ADDCONSTRAINT POSTHOOK: query: ALTER TABLE table8 ADD CONSTRAINT pk8_2 PRIMARY KEY (a, b) DISABLE NOVALIDATE RELY POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT -PREHOOK: query: ALTER TABLE table16 CHANGE a a STRING REFERENCES table4(x) DISABLE NOVALIDATE +PREHOOK: query: ALTER TABLE table16 CHANGE a a STRING REFERENCES table4_n0(x) DISABLE NOVALIDATE PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@table16 PREHOOK: Output: default@table16 -POSTHOOK: query: ALTER TABLE table16 CHANGE a a STRING REFERENCES table4(x) DISABLE NOVALIDATE +POSTHOOK: query: ALTER TABLE table16 CHANGE a a STRING REFERENCES table4_n0(x) DISABLE NOVALIDATE POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@table16 POSTHOOK: Output: default@table16 @@ -1843,12 +1843,12 @@ PREHOOK: query: ALTER TABLE table18 ADD CONSTRAINT uk18_2 UNIQUE (a, b) DISABLE PREHOOK: type: ALTERTABLE_ADDCONSTRAINT POSTHOOK: query: ALTER TABLE table18 ADD CONSTRAINT uk18_2 UNIQUE (a, b) DISABLE NOVALIDATE POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT -PREHOOK: query: DESCRIBE FORMATTED table2 +PREHOOK: query: DESCRIBE FORMATTED table2_n8 PREHOOK: type: DESCTABLE -PREHOOK: Input: 
default@table2 -POSTHOOK: query: DESCRIBE FORMATTED table2 +PREHOOK: Input: default@table2_n8 +POSTHOOK: query: DESCRIBE FORMATTED table2_n8 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n8 # col_name data_type comment a string b string @@ -1882,15 +1882,15 @@ Storage Desc Params: # Constraints # Primary Key -Table: default.table2 +Table: default.table2_n8 Constraint Name: pkt2 Column Names: a -PREHOOK: query: DESCRIBE FORMATTED table3 +PREHOOK: query: DESCRIBE FORMATTED table3_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table3 -POSTHOOK: query: DESCRIBE FORMATTED table3 +PREHOOK: Input: default@table3_n1 +POSTHOOK: query: DESCRIBE FORMATTED table3_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table3 +POSTHOOK: Input: default@table3_n1 # col_name data_type comment x string @@ -1923,27 +1923,27 @@ Storage Desc Params: # Constraints # Primary Key -Table: default.table3 +Table: default.table3_n1 Constraint Name: #### A masked pattern was here #### Column Names: x # Foreign Keys -Table: default.table3 +Table: default.table3_n1 Constraint Name: fk1 -Parent Column Name:default.table2.a Column Name:x Key Sequence:1 +Parent Column Name:default.table2_n8.a Column Name:x Key Sequence:1 # Not Null Constraints -Table: default.table3 +Table: default.table3_n1 Constraint Name: #### A masked pattern was here #### Column Name: x -PREHOOK: query: DESCRIBE FORMATTED table6 +PREHOOK: query: DESCRIBE FORMATTED table6_n3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table6 -POSTHOOK: query: DESCRIBE FORMATTED table6 +PREHOOK: Input: default@table6_n3 +POSTHOOK: query: DESCRIBE FORMATTED table6_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table6 +POSTHOOK: Input: default@table6_n3 # col_name data_type comment x string y string @@ -1977,17 +1977,17 @@ Storage Desc Params: # Constraints # Primary Key -Table: default.table6 +Table: default.table6_n3 Constraint Name: #### A masked pattern was here #### Column Names: x # Foreign Keys -Table: default.table6 +Table: default.table6_n3 Constraint Name: fk4 -Parent Column Name:default.table1.a Column Name:y Key Sequence:1 +Parent Column Name:default.table1_n12.a Column Name:y Key Sequence:1 Constraint Name: #### A masked pattern was here #### -Parent Column Name:default.table2.a Column Name:x Key Sequence:1 +Parent Column Name:default.table2_n8.a Column Name:x Key Sequence:1 PREHOOK: query: DESCRIBE FORMATTED table8 PREHOOK: type: DESCTABLE @@ -2473,9 +2473,9 @@ Table: dbconstraint.table2 Constraint Name: #### A masked pattern was here #### Column Name: b -PREHOOK: query: ALTER TABLE DbConstraint.Table2 ADD CONSTRAINT fkx FOREIGN KEY (b) REFERENCES table1(a) DISABLE NOVALIDATE +PREHOOK: query: ALTER TABLE DbConstraint.Table2 ADD CONSTRAINT fkx FOREIGN KEY (b) REFERENCES table1_n12(a) DISABLE NOVALIDATE PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -POSTHOOK: query: ALTER TABLE DbConstraint.Table2 ADD CONSTRAINT fkx FOREIGN KEY (b) REFERENCES table1(a) DISABLE NOVALIDATE +POSTHOOK: query: ALTER TABLE DbConstraint.Table2 ADD CONSTRAINT fkx FOREIGN KEY (b) REFERENCES table1_n12(a) DISABLE NOVALIDATE POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT PREHOOK: query: DESCRIBE FORMATTED DbConstraint.Table2 PREHOOK: type: DESCTABLE @@ -2523,7 +2523,7 @@ Column Names: a # Foreign Keys Table: dbconstraint.table2 Constraint Name: fkx -Parent Column Name:default.table1.a Column Name:b Key Sequence:1 +Parent Column Name:default.table1_n12.a Column Name:b Key Sequence:1 # Not Null Constraints @@ -2593,40 +2593,40 @@ 
Storage Desc Params: Table: default.table23 Constraint Name: pk23_1 Column Names: b -PREHOOK: query: CREATE TABLE numericDataType(a TINYINT, b SMALLINT NOT NULL ENABLE, c INT, +PREHOOK: query: CREATE TABLE numericDataType_n0(a TINYINT, b SMALLINT NOT NULL ENABLE, c INT, d BIGINT , e DOUBLE , f DECIMAL(9,2)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@numericDataType -POSTHOOK: query: CREATE TABLE numericDataType(a TINYINT, b SMALLINT NOT NULL ENABLE, c INT, +PREHOOK: Output: default@numericDataType_n0 +POSTHOOK: query: CREATE TABLE numericDataType_n0(a TINYINT, b SMALLINT NOT NULL ENABLE, c INT, d BIGINT , e DOUBLE , f DECIMAL(9,2)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@numericDataType -PREHOOK: query: INSERT INTO numericDataType values(2,45,5667,67890,5.6,678.5) +POSTHOOK: Output: default@numericDataType_n0 +PREHOOK: query: INSERT INTO numericDataType_n0 values(2,45,5667,67890,5.6,678.5) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@numericdatatype -POSTHOOK: query: INSERT INTO numericDataType values(2,45,5667,67890,5.6,678.5) +PREHOOK: Output: default@numericdatatype_n0 +POSTHOOK: query: INSERT INTO numericDataType_n0 values(2,45,5667,67890,5.6,678.5) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@numericdatatype -POSTHOOK: Lineage: numericdatatype.a SCRIPT [] -POSTHOOK: Lineage: numericdatatype.b SCRIPT [] -POSTHOOK: Lineage: numericdatatype.c SCRIPT [] -POSTHOOK: Lineage: numericdatatype.d SCRIPT [] -POSTHOOK: Lineage: numericdatatype.e SCRIPT [] -POSTHOOK: Lineage: numericdatatype.f SCRIPT [] -PREHOOK: query: ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE +POSTHOOK: Output: default@numericdatatype_n0 +POSTHOOK: Lineage: numericdatatype_n0.a SCRIPT [] +POSTHOOK: Lineage: numericdatatype_n0.b SCRIPT [] +POSTHOOK: Lineage: numericdatatype_n0.c SCRIPT [] +POSTHOOK: Lineage: numericdatatype_n0.d SCRIPT [] +POSTHOOK: Lineage: numericdatatype_n0.e SCRIPT [] +POSTHOOK: Lineage: numericdatatype_n0.f SCRIPT [] +PREHOOK: query: ALTER TABLE numericDataType_n0 ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -POSTHOOK: query: ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE +POSTHOOK: query: ALTER TABLE numericDataType_n0 ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT -PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: query: DESC FORMATTED numericDataType_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@numericdatatype -POSTHOOK: query: DESC FORMATTED numericDataType +PREHOOK: Input: default@numericdatatype_n0 +POSTHOOK: query: DESC FORMATTED numericDataType_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@numericdatatype +POSTHOOK: Input: default@numericdatatype_n0 # col_name data_type comment a tinyint b smallint @@ -2664,22 +2664,22 @@ Storage Desc Params: # Constraints # Unique Constraints -Table: default.numericdatatype +Table: default.numericdatatype_n0 Constraint Name: uk1 Column Name:a Key Sequence:1 Column Name:b Key Sequence:2 # Not Null Constraints -Table: default.numericdatatype +Table: default.numericdatatype_n0 Constraint Name: #### A masked pattern was here #### Column Name: b -PREHOOK: query: DROP TABLE numericDataType +PREHOOK: query: DROP TABLE numericDataType_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@numericdatatype 
-PREHOOK: Output: default@numericdatatype -POSTHOOK: query: DROP TABLE numericDataType +PREHOOK: Input: default@numericdatatype_n0 +PREHOOK: Output: default@numericdatatype_n0 +POSTHOOK: query: DROP TABLE numericDataType_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@numericdatatype -POSTHOOK: Output: default@numericdatatype +POSTHOOK: Input: default@numericdatatype_n0 +POSTHOOK: Output: default@numericdatatype_n0 diff --git a/ql/src/test/results/clientpositive/create_with_constraints2.q.out b/ql/src/test/results/clientpositive/create_with_constraints2.q.out index 9f600b2f34..b703ee6fbf 100644 --- a/ql/src/test/results/clientpositive/create_with_constraints2.q.out +++ b/ql/src/test/results/clientpositive/create_with_constraints2.q.out @@ -1,47 +1,47 @@ -PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED) +PREHOOK: query: CREATE TABLE table1_n10 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table1 -POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED) +PREHOOK: Output: default@table1_n10 +POSTHOOK: query: CREATE TABLE table1_n10 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table1 -PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED RELY) +POSTHOOK: Output: default@table1_n10 +PREHOOK: query: CREATE TABLE table2_n7 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED RELY) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table2 -POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED RELY) +PREHOOK: Output: default@table2_n7 +POSTHOOK: query: CREATE TABLE table2_n7 (a STRING, b STRING, PRIMARY KEY (a) NOT ENFORCED RELY) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table2 -PREHOOK: query: DESCRIBE EXTENDED table1 +POSTHOOK: Output: default@table2_n7 +PREHOOK: query: DESCRIBE EXTENDED table1_n10 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table1 -POSTHOOK: query: DESCRIBE EXTENDED table1 +PREHOOK: Input: default@table1_n10 +POSTHOOK: query: DESCRIBE EXTENDED table1_n10 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1_n10 a string b string #### A masked pattern was here #### -Constraints Primary Key for default.table1:[a], Constraint Name: #### A masked pattern was here #### -PREHOOK: query: DESCRIBE EXTENDED table2 +Constraints Primary Key for default.table1_n10:[a], Constraint Name: #### A masked pattern was here #### +PREHOOK: query: DESCRIBE EXTENDED table2_n7 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table2 -POSTHOOK: query: DESCRIBE EXTENDED table2 +PREHOOK: Input: default@table2_n7 +POSTHOOK: query: DESCRIBE EXTENDED table2_n7 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n7 a string b string #### A masked pattern was here #### -Constraints Primary Key for default.table2:[a], Constraint Name: #### A masked pattern was here #### -PREHOOK: query: DESCRIBE FORMATTED table1 +Constraints Primary Key for default.table2_n7:[a], Constraint Name: #### A masked pattern was here #### +PREHOOK: query: DESCRIBE FORMATTED table1_n10 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table1 -POSTHOOK: query: DESCRIBE FORMATTED table1 +PREHOOK: Input: default@table1_n10 +POSTHOOK: query: DESCRIBE 
FORMATTED table1_n10 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1_n10 # col_name data_type comment a string b string @@ -75,15 +75,15 @@ Storage Desc Params: # Constraints # Primary Key -Table: default.table1 +Table: default.table1_n10 Constraint Name: #### A masked pattern was here #### Column Names: a -PREHOOK: query: DESCRIBE FORMATTED table2 +PREHOOK: query: DESCRIBE FORMATTED table2_n7 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@table2 -POSTHOOK: query: DESCRIBE FORMATTED table2 +PREHOOK: Input: default@table2_n7 +POSTHOOK: query: DESCRIBE FORMATTED table2_n7 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2_n7 # col_name data_type comment a string b string @@ -117,6 +117,6 @@ Storage Desc Params: # Constraints # Primary Key -Table: default.table2 +Table: default.table2_n7 Constraint Name: #### A masked pattern was here #### Column Names: a diff --git a/ql/src/test/results/clientpositive/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/cross_product_check_1.q.out index 32b6d2ede8..e560853d0b 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_1.q.out @@ -1,37 +1,37 @@ -PREHOOK: query: create table A as +PREHOOK: query: create table A_n0 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@A -POSTHOOK: query: create table A as +PREHOOK: Output: default@A_n0 +POSTHOOK: query: create table A_n0 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@A -POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table B as +POSTHOOK: Output: default@A_n0 +POSTHOOK: Lineage: a_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table B_n0 as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@B -POSTHOOK: query: create table B as +PREHOOK: Output: default@B_n0 +POSTHOOK: query: create table B_n0 as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@B -POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@B_n0 +POSTHOOK: Lineage: b_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: explain select * from A join B +PREHOOK: query: explain select * from A_n0 join B_n0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from A join B +POSTHOOK: query: explain select * from A_n0 join B_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -42,7 +42,7 @@ STAGE PLANS: 
Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -53,7 +53,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) TableScan - alias: b + alias: b_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -87,9 +87,9 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A +PREHOOK: query: explain select * from B_n0 d1 join B_n0 d2 on d1.key = d2.key join A_n0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A +POSTHOOK: query: explain select * from B_n0 d1 join B_n0 d2 on d1.key = d2.key join A_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -157,7 +157,7 @@ STAGE PLANS: Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) TableScan - alias: a + alias: a_n0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -191,14 +191,14 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: explain select * from A join +PREHOOK: query: explain select * from A_n0 join (select d1.key - from B d1 join B d2 on d1.key = d2.key + from B_n0 d1 join B_n0 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from A join +POSTHOOK: query: explain select * from A_n0 join (select d1.key - from B d1 join B d2 on d1.key = d2.key + from B_n0 d1 join B_n0 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -289,7 +289,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -329,9 +329,9 @@ STAGE PLANS: Warning: Shuffle Join JOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product Warning: Shuffle Join JOIN[8][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 +PREHOOK: query: explain select * from A_n0 join (select d1.key from B_n0 d1 join B_n0 d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 +POSTHOOK: query: explain select * from A_n0 join (select d1.key from B_n0 d1 join B_n0 d2 where 1 = 1 group by d1.key) od1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -410,7 +410,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ 
-450,12 +450,12 @@ STAGE PLANS: Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: explain select * from -(select A.key from A group by key) ss join -(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 +(select A_n0.key from A_n0 group by key) ss join +(select d1.key from B_n0 d1 join B_n0 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from -(select A.key from A group by key) ss join -(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 +(select A_n0.key from A_n0 group by key) ss join +(select d1.key from B_n0 d1 join B_n0 d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -469,7 +469,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n0 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/cteViews.q.out b/ql/src/test/results/clientpositive/cteViews.q.out index 1f1e862ffd..4ead80a27e 100644 --- a/ql/src/test/results/clientpositive/cteViews.q.out +++ b/ql/src/test/results/clientpositive/cteViews.q.out @@ -19,29 +19,29 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n0 PREHOOK: type: DROPVIEW -POSTHOOK: query: drop view v +POSTHOOK: query: drop view v_n0 POSTHOOK: type: DROPVIEW -PREHOOK: query: create view v as with cte as (select key, value from src order by key limit 5) +PREHOOK: query: create view v_n0 as with cte as (select key, value from src order by key limit 5) select key from cte PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as with cte as (select key, value from src order by key limit 5) +PREHOOK: Output: default@v_n0 +POSTHOOK: query: create view v_n0 as with cte as (select key, value from src order by key limit 5) select key from cte POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: describe extended v +POSTHOOK: Output: default@v_n0 +POSTHOOK: Lineage: v_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe extended v_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe extended v +PREHOOK: Input: default@v_n0 +POSTHOOK: query: describe extended v_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n0 key string #### A masked pattern was here #### @@ -57,15 +57,15 @@ PREHOOK: Input: database:bug POSTHOOK: query: use bug POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:bug -PREHOOK: query: select * from default.v +PREHOOK: query: select * from default.v_n0 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from default.v +POSTHOOK: query: select * from default.v_n0 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n0 #### A masked pattern was here 
#### 0 0 @@ -86,34 +86,34 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n0 +PREHOOK: Output: default@v_n0 +POSTHOOK: query: drop view v_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as with cte as (select * from src order by key limit 5) +POSTHOOK: Input: default@v_n0 +POSTHOOK: Output: default@v_n0 +PREHOOK: query: create view v_n0 as with cte as (select * from src order by key limit 5) select * from cte PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as with cte as (select * from src order by key limit 5) +PREHOOK: Output: default@v_n0 +POSTHOOK: query: create view v_n0 as with cte as (select * from src order by key limit 5) select * from cte POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: v.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe extended v +POSTHOOK: Output: default@v_n0 +POSTHOOK: Lineage: v_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: v_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe extended v_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe extended v +PREHOOK: Input: default@v_n0 +POSTHOOK: query: describe extended v_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n0 key string value string @@ -130,15 +130,15 @@ PREHOOK: Input: database:bug POSTHOOK: query: use bug POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:bug -PREHOOK: query: select * from default.v +PREHOOK: query: select * from default.v_n0 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from default.v +POSTHOOK: query: select * from default.v_n0 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n0 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -159,33 +159,33 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n0 +PREHOOK: Output: default@v_n0 +POSTHOOK: query: drop view v_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as with src1 as (select key from src order by key limit 5) +POSTHOOK: Input: default@v_n0 +POSTHOOK: Output: default@v_n0 +PREHOOK: query: create view v_n0 as with src1 as (select key from src order by key limit 5) select * from src1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as with src1 as (select 
key from src order by key limit 5) +PREHOOK: Output: default@v_n0 +POSTHOOK: query: create view v_n0 as with src1 as (select key from src order by key limit 5) select * from src1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: describe extended v +POSTHOOK: Output: default@v_n0 +POSTHOOK: Lineage: v_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe extended v_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe extended v +PREHOOK: Input: default@v_n0 +POSTHOOK: query: describe extended v_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n0 key string #### A masked pattern was here #### @@ -201,15 +201,15 @@ PREHOOK: Input: database:bug POSTHOOK: query: use bug POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:bug -PREHOOK: query: select * from default.v +PREHOOK: query: select * from default.v_n0 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from default.v +POSTHOOK: query: select * from default.v_n0 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n0 #### A masked pattern was here #### 0 0 @@ -222,59 +222,59 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n0 +PREHOOK: Output: default@v_n0 +POSTHOOK: query: drop view v_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as with src1 as (select key from src order by key limit 5) +POSTHOOK: Input: default@v_n0 +POSTHOOK: Output: default@v_n0 +PREHOOK: query: create view v_n0 as with src1 as (select key from src order by key limit 5) select * from src1 a where a.key is not null PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as with src1 as (select key from src order by key limit 5) +PREHOOK: Output: default@v_n0 +POSTHOOK: query: create view v_n0 as with src1 as (select key from src order by key limit 5) select * from src1 a where a.key is not null POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: describe extended v +POSTHOOK: Output: default@v_n0 +POSTHOOK: Lineage: v_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: describe extended v_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe extended v +PREHOOK: Input: default@v_n0 +POSTHOOK: query: describe extended v_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n0 key string #### A masked pattern was here #### -PREHOOK: query: select * from v +PREHOOK: query: select * from v_n0 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: 
default@v_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from v +POSTHOOK: query: select * from v_n0 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n0 #### A masked pattern was here #### 0 0 0 10 100 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n0 +PREHOOK: Output: default@v_n0 +POSTHOOK: query: drop view v_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v +POSTHOOK: Input: default@v_n0 +POSTHOOK: Output: default@v_n0 PREHOOK: query: drop database bug PREHOOK: type: DROPDATABASE PREHOOK: Input: database:bug diff --git a/ql/src/test/results/clientpositive/cte_2.q.out b/ql/src/test/results/clientpositive/cte_2.q.out index 4c9af870a5..f5ebd4a236 100644 --- a/ql/src/test/results/clientpositive/cte_2.q.out +++ b/ql/src/test/results/clientpositive/cte_2.q.out @@ -1,11 +1,11 @@ PREHOOK: query: with q1 as (select * from src where key= '5'), -q2 as (select * from src s2 where key = '4') +q2 as (select * from src s2_n0 where key = '4') select * from q1 union all select * from q2 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### POSTHOOK: query: with q1 as (select * from src where key= '5'), -q2 as (select * from src s2 where key = '4') +q2 as (select * from src s2_n0 where key = '4') select * from q1 union all select * from q2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -14,49 +14,49 @@ POSTHOOK: Input: default@src 5 val_5 5 val_5 5 val_5 -PREHOOK: query: create table s1 like src +PREHOOK: query: create table s1_n1 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@s1 -POSTHOOK: query: create table s1 like src +PREHOOK: Output: default@s1_n1 +POSTHOOK: query: create table s1_n1 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@s1 +POSTHOOK: Output: default@s1_n1 PREHOOK: query: with q1 as ( select key, value from src where key = '5') from q1 -insert overwrite table s1 +insert overwrite table s1_n1 select * PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@s1 +PREHOOK: Output: default@s1_n1 POSTHOOK: query: with q1 as ( select key, value from src where key = '5') from q1 -insert overwrite table s1 +insert overwrite table s1_n1 select * POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@s1 -POSTHOOK: Lineage: s1.key SIMPLE [] -POSTHOOK: Lineage: s1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from s1 +POSTHOOK: Output: default@s1_n1 +POSTHOOK: Lineage: s1_n1.key SIMPLE [] +POSTHOOK: Lineage: s1_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from s1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@s1 +PREHOOK: Input: default@s1_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from s1 +POSTHOOK: query: select * from s1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@s1 +POSTHOOK: Input: default@s1_n1 #### A masked pattern was here #### 5 val_5 5 val_5 5 val_5 -PREHOOK: query: drop table s1 +PREHOOK: query: drop table s1_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@s1 -PREHOOK: Output: default@s1 -POSTHOOK: query: drop table s1 +PREHOOK: Input: default@s1_n1 +PREHOOK: Output: default@s1_n1 +POSTHOOK: query: 
drop table s1_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@s1 -POSTHOOK: Output: default@s1 +POSTHOOK: Input: default@s1_n1 +POSTHOOK: Output: default@s1_n1 PREHOOK: query: with q1 as (select * from src where key= '5') from q1 select * @@ -72,112 +72,112 @@ POSTHOOK: Input: default@src 5 val_5 5 val_5 5 val_5 -PREHOOK: query: create table s2 as +PREHOOK: query: create table s2_n0 as with q1 as ( select key from src where key = '4') select * from q1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@s2 -POSTHOOK: query: create table s2 as +PREHOOK: Output: default@s2_n0 +POSTHOOK: query: create table s2_n0 as with q1 as ( select key from src where key = '4') select * from q1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@s2 -POSTHOOK: Lineage: s2.key SIMPLE [] -PREHOOK: query: select * from s2 +POSTHOOK: Output: default@s2_n0 +POSTHOOK: Lineage: s2_n0.key SIMPLE [] +PREHOOK: query: select * from s2_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@s2 +PREHOOK: Input: default@s2_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from s2 +POSTHOOK: query: select * from s2_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@s2 +POSTHOOK: Input: default@s2_n0 #### A masked pattern was here #### 4 -PREHOOK: query: drop table s2 +PREHOOK: query: drop table s2_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@s2 -PREHOOK: Output: default@s2 -POSTHOOK: query: drop table s2 +PREHOOK: Input: default@s2_n0 +PREHOOK: Output: default@s2_n0 +POSTHOOK: query: drop table s2_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@s2 -POSTHOOK: Output: default@s2 -PREHOOK: query: create view v1 as +POSTHOOK: Input: default@s2_n0 +POSTHOOK: Output: default@s2_n0 +PREHOOK: query: create view v1_n10 as with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as +PREHOOK: Output: default@v1_n10 +POSTHOOK: query: create view v1_n10 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [] -PREHOOK: query: select * from v1 +POSTHOOK: Output: default@v1_n10 +POSTHOOK: Lineage: v1_n10.key SIMPLE [] +PREHOOK: query: select * from v1_n10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v1 +PREHOOK: Input: default@v1_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from v1 +POSTHOOK: query: select * from v1_n10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v1_n10 #### A masked pattern was here #### 5 5 5 -PREHOOK: query: drop view v1 +PREHOOK: query: drop view v1_n10 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: drop view v1 +PREHOOK: Input: default@v1_n10 +PREHOOK: Output: default@v1_n10 +POSTHOOK: query: drop view v1_n10 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 -PREHOOK: query: create view v1 as +POSTHOOK: Input: default@v1_n10 +POSTHOOK: Output: default@v1_n10 +PREHOOK: query: create view v1_n10 as with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src 
PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as +PREHOOK: Output: default@v1_n10 +POSTHOOK: query: create view v1_n10 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [] +POSTHOOK: Output: default@v1_n10 +POSTHOOK: Lineage: v1_n10.key SIMPLE [] PREHOOK: query: with q1 as ( select key from src where key = '4') -select * from v1 +select * from v1_n10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v1 +PREHOOK: Input: default@v1_n10 #### A masked pattern was here #### POSTHOOK: query: with q1 as ( select key from src where key = '4') -select * from v1 +select * from v1_n10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v1_n10 #### A masked pattern was here #### 5 5 5 -PREHOOK: query: drop view v1 +PREHOOK: query: drop view v1_n10 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: drop view v1 +PREHOOK: Input: default@v1_n10 +PREHOOK: Output: default@v1_n10 +POSTHOOK: query: drop view v1_n10 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 +POSTHOOK: Input: default@v1_n10 +POSTHOOK: Output: default@v1_n10 PREHOOK: query: drop view if exists view_3 PREHOOK: type: DROPVIEW POSTHOOK: query: drop view if exists view_3 diff --git a/ql/src/test/results/clientpositive/cte_4.q.out b/ql/src/test/results/clientpositive/cte_4.q.out index dbe4140aaf..4daa1ebac9 100644 --- a/ql/src/test/results/clientpositive/cte_4.q.out +++ b/ql/src/test/results/clientpositive/cte_4.q.out @@ -24,57 +24,57 @@ POSTHOOK: Output: default@q2 5 val_5 5 val_5 4 val_4 -PREHOOK: query: create table s1 like src +PREHOOK: query: create table s1_n0 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@s1 -POSTHOOK: query: create table s1 like src +PREHOOK: Output: default@s1_n0 +POSTHOOK: query: create table s1_n0 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@s1 +POSTHOOK: Output: default@s1_n0 PREHOOK: query: with q1 as ( select key, value from src where key = '5') from q1 -insert overwrite table s1 +insert overwrite table s1_n0 select * PREHOOK: type: QUERY PREHOOK: Input: default@q1 PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@q1 -PREHOOK: Output: default@s1 +PREHOOK: Output: default@s1_n0 #### A masked pattern was here #### POSTHOOK: query: with q1 as ( select key, value from src where key = '5') from q1 -insert overwrite table s1 +insert overwrite table s1_n0 select * POSTHOOK: type: QUERY POSTHOOK: Input: default@q1 POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@q1 -POSTHOOK: Output: default@s1 +POSTHOOK: Output: default@s1_n0 #### A masked pattern was here #### -POSTHOOK: Lineage: s1.key SIMPLE [(q1)q1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: s1.value SIMPLE [(q1)q1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select * from s1 +POSTHOOK: Lineage: s1_n0.key SIMPLE [(q1)q1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: s1_n0.value SIMPLE [(q1)q1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from s1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@s1 
+PREHOOK: Input: default@s1_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from s1 +POSTHOOK: query: select * from s1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@s1 +POSTHOOK: Input: default@s1_n0 #### A masked pattern was here #### 5 val_5 5 val_5 5 val_5 -PREHOOK: query: drop table s1 +PREHOOK: query: drop table s1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@s1 -PREHOOK: Output: default@s1 -POSTHOOK: query: drop table s1 +PREHOOK: Input: default@s1_n0 +PREHOOK: Output: default@s1_n0 +POSTHOOK: query: drop table s1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@s1 -POSTHOOK: Output: default@s1 +POSTHOOK: Input: default@s1_n0 +POSTHOOK: Output: default@s1_n0 PREHOOK: query: with q1 as (select * from src where key= '5') from q1 select * @@ -134,77 +134,77 @@ POSTHOOK: query: drop table s2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@s2 POSTHOOK: Output: default@s2 -PREHOOK: query: create view v1 as +PREHOOK: query: create view v1_n2 as with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as +PREHOOK: Output: default@v1_n2 +POSTHOOK: query: create view v1_n2 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [] -PREHOOK: query: select * from v1 +POSTHOOK: Output: default@v1_n2 +POSTHOOK: Lineage: v1_n2.key SIMPLE [] +PREHOOK: query: select * from v1_n2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v1 +PREHOOK: Input: default@v1_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from v1 +POSTHOOK: query: select * from v1_n2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v1_n2 #### A masked pattern was here #### 5 5 5 -PREHOOK: query: drop view v1 +PREHOOK: query: drop view v1_n2 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: drop view v1 +PREHOOK: Input: default@v1_n2 +PREHOOK: Output: default@v1_n2 +POSTHOOK: query: drop view v1_n2 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 -PREHOOK: query: create view v1 as +POSTHOOK: Input: default@v1_n2 +POSTHOOK: Output: default@v1_n2 +PREHOOK: query: create view v1_n2 as with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v1 -POSTHOOK: query: create view v1 as +PREHOOK: Output: default@v1_n2 +POSTHOOK: query: create view v1_n2 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v1 -POSTHOOK: Lineage: v1.key SIMPLE [] +POSTHOOK: Output: default@v1_n2 +POSTHOOK: Lineage: v1_n2.key SIMPLE [] PREHOOK: query: with q1 as ( select key from src where key = '4') -select * from v1 +select * from v1_n2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v1 +PREHOOK: Input: default@v1_n2 #### A masked pattern was here #### POSTHOOK: query: with q1 as ( select key from src where key = '4') -select * from v1 +select * from v1_n2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v1 
+POSTHOOK: Input: default@v1_n2 #### A masked pattern was here #### 5 5 5 -PREHOOK: query: drop view v1 +PREHOOK: query: drop view v1_n2 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: drop view v1 +PREHOOK: Input: default@v1_n2 +PREHOOK: Output: default@v1_n2 +POSTHOOK: query: drop view v1_n2 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 +POSTHOOK: Input: default@v1_n2 +POSTHOOK: Output: default@v1_n2 diff --git a/ql/src/test/results/clientpositive/cte_5.q.out b/ql/src/test/results/clientpositive/cte_5.q.out index 83d47c5de9..2eb59ca334 100644 --- a/ql/src/test/results/clientpositive/cte_5.q.out +++ b/ql/src/test/results/clientpositive/cte_5.q.out @@ -10,24 +10,24 @@ PREHOOK: Input: database:mydb POSTHOOK: query: use mydb POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:mydb -PREHOOK: query: create table q1 (colnum int, colstring string) +PREHOOK: query: create table q1_n0 (colnum int, colstring string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:mydb -PREHOOK: Output: mydb@q1 -POSTHOOK: query: create table q1 (colnum int, colstring string) +PREHOOK: Output: mydb@q1_n0 +POSTHOOK: query: create table q1_n0 (colnum int, colstring string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:mydb -POSTHOOK: Output: mydb@q1 -PREHOOK: query: insert into q1 values (5, 'A') +POSTHOOK: Output: mydb@q1_n0 +PREHOOK: query: insert into q1_n0 values (5, 'A') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: mydb@q1 -POSTHOOK: query: insert into q1 values (5, 'A') +PREHOOK: Output: mydb@q1_n0 +POSTHOOK: query: insert into q1_n0 values (5, 'A') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: mydb@q1 -POSTHOOK: Lineage: q1.colnum SCRIPT [] -POSTHOOK: Lineage: q1.colstring SCRIPT [] +POSTHOOK: Output: mydb@q1_n0 +POSTHOOK: Lineage: q1_n0.colnum SCRIPT [] +POSTHOOK: Lineage: q1_n0.colstring SCRIPT [] PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default @@ -40,18 +40,18 @@ PREHOOK: Input: database:mydb POSTHOOK: query: show tables in mydb POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:mydb -q1 +q1_n0 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain -with q1 as (select * from src where key= '5') +with q1_n0 as (select * from src where key= '5') select a.colnum -from mydb.q1 as a join q1 as b +from mydb.q1_n0 as a join q1_n0 as b on a.colnum=b.key PREHOOK: type: QUERY POSTHOOK: query: explain -with q1 as (select * from src where key= '5') +with q1_n0 as (select * from src where key= '5') select a.colnum -from mydb.q1 as a join q1 as b +from mydb.q1_n0 as a join q1_n0 as b on a.colnum=b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -111,21 +111,21 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: with q1 as (select * from src where key= '5') +PREHOOK: query: with q1_n0 as (select * from src where key= '5') select a.colnum -from mydb.q1 as a join q1 as b +from mydb.q1_n0 as a join q1_n0 as b on a.colnum=b.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: mydb@q1 +PREHOOK: Input: mydb@q1_n0 #### A masked pattern was here #### -POSTHOOK: query: with q1 as (select * from src where key= '5') +POSTHOOK: query: with q1_n0 as (select * from src where key= '5') select a.colnum -from mydb.q1 as a join q1 
as b +from mydb.q1_n0 as a join q1_n0 as b on a.colnum=b.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: mydb@q1 +POSTHOOK: Input: mydb@q1_n0 #### A masked pattern was here #### 5 5 diff --git a/ql/src/test/results/clientpositive/cte_7.q.out b/ql/src/test/results/clientpositive/cte_7.q.out index 6f82dd4f7d..6bc9289975 100644 --- a/ql/src/test/results/clientpositive/cte_7.q.out +++ b/ql/src/test/results/clientpositive/cte_7.q.out @@ -1,55 +1,55 @@ -PREHOOK: query: create table t (i int,a string,b string) +PREHOOK: query: create table t_n1 (i int,a string,b string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (i int,a string,b string) +PREHOOK: Output: default@t_n1 +POSTHOOK: query: create table t_n1 (i int,a string,b string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1,'hello','world'),(2,'bye',null) +POSTHOOK: Output: default@t_n1 +PREHOOK: query: insert into t_n1 values (1,'hello','world'),(2,'bye',null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1,'hello','world'),(2,'bye',null) +PREHOOK: Output: default@t_n1 +POSTHOOK: query: insert into t_n1 values (1,'hello','world'),(2,'bye',null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -POSTHOOK: Lineage: t.b SCRIPT [] -POSTHOOK: Lineage: t.i SCRIPT [] -PREHOOK: query: select * from t where t.b is null +POSTHOOK: Output: default@t_n1 +POSTHOOK: Lineage: t_n1.a SCRIPT [] +POSTHOOK: Lineage: t_n1.b SCRIPT [] +POSTHOOK: Lineage: t_n1.i SCRIPT [] +PREHOOK: query: select * from t_n1 where t_n1.b is null PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from t where t.b is null +POSTHOOK: query: select * from t_n1 where t_n1.b is null POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n1 #### A masked pattern was here #### 2 bye NULL -PREHOOK: query: with cte as (select t.a as a,t.a as b,t.a as c from t where t.b is null) select * from cte +PREHOOK: query: with cte as (select t_n1.a as a,t_n1.a as b,t_n1.a as c from t_n1 where t_n1.b is null) select * from cte PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n1 #### A masked pattern was here #### -POSTHOOK: query: with cte as (select t.a as a,t.a as b,t.a as c from t where t.b is null) select * from cte +POSTHOOK: query: with cte as (select t_n1.a as a,t_n1.a as b,t_n1.a as c from t_n1 where t_n1.b is null) select * from cte POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n1 #### A masked pattern was here #### bye bye bye -PREHOOK: query: select t.a as a,t.a as b,t.a as c from t where t.b is null +PREHOOK: query: select t_n1.a as a,t_n1.a as b,t_n1.a as c from t_n1 where t_n1.b is null PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n1 #### A masked pattern was here #### -POSTHOOK: query: select t.a as a,t.a as b,t.a as c from t where t.b is null +POSTHOOK: query: select t_n1.a as a,t_n1.a as b,t_n1.a as c from t_n1 where t_n1.b is null POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n1 #### A masked pattern was here #### bye bye bye -PREHOOK: query: with cte as (select t.a as a,t.a as b,t.a as c from t where t.b 
is not null) select * from cte +PREHOOK: query: with cte as (select t_n1.a as a,t_n1.a as b,t_n1.a as c from t_n1 where t_n1.b is not null) select * from cte PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n1 #### A masked pattern was here #### -POSTHOOK: query: with cte as (select t.a as a,t.a as b,t.a as c from t where t.b is not null) select * from cte +POSTHOOK: query: with cte as (select t_n1.a as a,t_n1.a as b,t_n1.a as c from t_n1 where t_n1.b is not null) select * from cte POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n1 #### A masked pattern was here #### hello hello hello diff --git a/ql/src/test/results/clientpositive/database_drop.q.out b/ql/src/test/results/clientpositive/database_drop.q.out index dac2e9792b..b43535bfff 100644 --- a/ql/src/test/results/clientpositive/database_drop.q.out +++ b/ql/src/test/results/clientpositive/database_drop.q.out @@ -17,64 +17,64 @@ POSTHOOK: query: USE db5 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:db5 #### A masked pattern was here #### -PREHOOK: query: CREATE TABLE temp_tbl (id INT, name STRING) +PREHOOK: query: CREATE TABLE temp_tbl_n0 (id INT, name STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db5 -PREHOOK: Output: db5@temp_tbl -POSTHOOK: query: CREATE TABLE temp_tbl (id INT, name STRING) +PREHOOK: Output: db5@temp_tbl_n0 +POSTHOOK: query: CREATE TABLE temp_tbl_n0 (id INT, name STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db5 -POSTHOOK: Output: db5@temp_tbl -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE temp_tbl +POSTHOOK: Output: db5@temp_tbl_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE temp_tbl_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: db5@temp_tbl -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE temp_tbl +PREHOOK: Output: db5@temp_tbl_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE temp_tbl_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: db5@temp_tbl -PREHOOK: query: CREATE VIEW temp_tbl_view AS SELECT * FROM temp_tbl +POSTHOOK: Output: db5@temp_tbl_n0 +PREHOOK: query: CREATE VIEW temp_tbl_view AS SELECT * FROM temp_tbl_n0 PREHOOK: type: CREATEVIEW -PREHOOK: Input: db5@temp_tbl +PREHOOK: Input: db5@temp_tbl_n0 PREHOOK: Output: database:db5 PREHOOK: Output: db5@temp_tbl_view -POSTHOOK: query: CREATE VIEW temp_tbl_view AS SELECT * FROM temp_tbl +POSTHOOK: query: CREATE VIEW temp_tbl_view AS SELECT * FROM temp_tbl_n0 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db5@temp_tbl +POSTHOOK: Input: db5@temp_tbl_n0 POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@temp_tbl_view -POSTHOOK: Lineage: temp_tbl_view.id SIMPLE [(temp_tbl)temp_tbl.FieldSchema(name:id, type:int, comment:null), ] -POSTHOOK: Lineage: temp_tbl_view.name SIMPLE [(temp_tbl)temp_tbl.FieldSchema(name:name, type:string, comment:null), ] +POSTHOOK: Lineage: temp_tbl_view.id SIMPLE [(temp_tbl_n0)temp_tbl_n0.FieldSchema(name:id, type:int, comment:null), ] +POSTHOOK: Lineage: temp_tbl_view.name SIMPLE [(temp_tbl_n0)temp_tbl_n0.FieldSchema(name:name, type:string, comment:null), ] #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:db5 -PREHOOK: Output: db5@temp_tbl2 +PREHOOK: Output: db5@temp_tbl2_n0 #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: 
Output: database:db5 -POSTHOOK: Output: db5@temp_tbl2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl2 +POSTHOOK: Output: db5@temp_tbl2_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl2_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: db5@temp_tbl2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl2 +PREHOOK: Output: db5@temp_tbl2_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl2_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: db5@temp_tbl2 -PREHOOK: query: CREATE VIEW temp_tbl2_view AS SELECT * FROM temp_tbl2 +POSTHOOK: Output: db5@temp_tbl2_n0 +PREHOOK: query: CREATE VIEW temp_tbl2_view AS SELECT * FROM temp_tbl2_n0 PREHOOK: type: CREATEVIEW -PREHOOK: Input: db5@temp_tbl2 +PREHOOK: Input: db5@temp_tbl2_n0 PREHOOK: Output: database:db5 PREHOOK: Output: db5@temp_tbl2_view -POSTHOOK: query: CREATE VIEW temp_tbl2_view AS SELECT * FROM temp_tbl2 +POSTHOOK: query: CREATE VIEW temp_tbl2_view AS SELECT * FROM temp_tbl2_n0 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db5@temp_tbl2 +POSTHOOK: Input: db5@temp_tbl2_n0 POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@temp_tbl2_view -POSTHOOK: Lineage: temp_tbl2_view.id SIMPLE [(temp_tbl2)temp_tbl2.FieldSchema(name:id, type:int, comment:null), ] -POSTHOOK: Lineage: temp_tbl2_view.name SIMPLE [(temp_tbl2)temp_tbl2.FieldSchema(name:name, type:string, comment:null), ] +POSTHOOK: Lineage: temp_tbl2_view.id SIMPLE [(temp_tbl2_n0)temp_tbl2_n0.FieldSchema(name:id, type:int, comment:null), ] +POSTHOOK: Lineage: temp_tbl2_view.name SIMPLE [(temp_tbl2_n0)temp_tbl2_n0.FieldSchema(name:name, type:string, comment:null), ] PREHOOK: query: CREATE TABLE part_tab (id INT, name STRING) PARTITIONED BY (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db5 @@ -212,10 +212,10 @@ PREHOOK: Output: db5@extab1 PREHOOK: Output: db5@part_tab PREHOOK: Output: db5@part_tab2 PREHOOK: Output: db5@part_tab3 -PREHOOK: Output: db5@temp_tbl -PREHOOK: Output: db5@temp_tbl2 +PREHOOK: Output: db5@temp_tbl2_n0 PREHOOK: Output: db5@temp_tbl2_view PREHOOK: Output: db5@temp_tbl3 +PREHOOK: Output: db5@temp_tbl_n0 PREHOOK: Output: db5@temp_tbl_view POSTHOOK: query: DROP DATABASE db5 CASCADE POSTHOOK: type: DROPDATABASE @@ -225,9 +225,9 @@ POSTHOOK: Output: db5@extab1 POSTHOOK: Output: db5@part_tab POSTHOOK: Output: db5@part_tab2 POSTHOOK: Output: db5@part_tab3 -POSTHOOK: Output: db5@temp_tbl -POSTHOOK: Output: db5@temp_tbl2 +POSTHOOK: Output: db5@temp_tbl2_n0 POSTHOOK: Output: db5@temp_tbl2_view POSTHOOK: Output: db5@temp_tbl3 +POSTHOOK: Output: db5@temp_tbl_n0 POSTHOOK: Output: db5@temp_tbl_view #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/database_location.q.out b/ql/src/test/results/clientpositive/database_location.q.out index 13fd01e316..3d00e091ce 100644 --- a/ql/src/test/results/clientpositive/database_location.q.out +++ b/ql/src/test/results/clientpositive/database_location.q.out @@ -17,20 +17,20 @@ PREHOOK: Input: database:db1 POSTHOOK: query: USE db1 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:db1 -PREHOOK: query: CREATE TABLE table_db1 (name STRING, value INT) +PREHOOK: query: CREATE TABLE table_db1_n0 (name STRING, value INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@table_db1 -POSTHOOK: query: CREATE TABLE table_db1 (name STRING, value INT) +PREHOOK: Output: 
db1@table_db1_n0 +POSTHOOK: query: CREATE TABLE table_db1_n0 (name STRING, value INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@table_db1 -PREHOOK: query: DESCRIBE FORMATTED table_db1 +POSTHOOK: Output: db1@table_db1_n0 +PREHOOK: query: DESCRIBE FORMATTED table_db1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: db1@table_db1 -POSTHOOK: query: DESCRIBE FORMATTED table_db1 +PREHOOK: Input: db1@table_db1_n0 +POSTHOOK: query: DESCRIBE FORMATTED table_db1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: db1@table_db1 +POSTHOOK: Input: db1@table_db1_n0 # col_name data_type comment name string value int @@ -66,7 +66,7 @@ PREHOOK: Input: database:db1 POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:db1 -table_db1 +table_db1_n0 PREHOOK: query: CREATE DATABASE db2 COMMENT 'database 2' #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out index 0dad32d525..d4c203e7d6 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table T1_n97(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: Output: default@T1_n97 +POSTHOOK: query: create table T1_n97(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: alter table T1 compact 'major' +POSTHOOK: Output: default@T1_n97 +PREHOOK: query: alter table T1_n97 compact 'major' PREHOOK: type: ALTERTABLE_COMPACT -POSTHOOK: query: alter table T1 compact 'major' +POSTHOOK: query: alter table T1_n97 compact 'major' POSTHOOK: type: ALTERTABLE_COMPACT -PREHOOK: query: alter table T1 compact 'minor' +PREHOOK: query: alter table T1_n97 compact 'minor' PREHOOK: type: ALTERTABLE_COMPACT -POSTHOOK: query: alter table T1 compact 'minor' +POSTHOOK: query: alter table T1_n97 compact 'minor' POSTHOOK: type: ALTERTABLE_COMPACT -PREHOOK: query: drop table T1 +PREHOOK: query: drop table T1_n97 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: default@t1_n97 +PREHOOK: Output: default@t1_n97 +POSTHOOK: query: drop table T1_n97 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n97 +POSTHOOK: Output: default@t1_n97 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out index 2114575d8c..06d7b1dd23 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: create table T1(key string, val string) partitioned by (ds string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table T1_n67(key 
string, val string) partitioned by (ds string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: create table T1(key string, val string) partitioned by (ds string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: Output: default@T1_n67 +POSTHOOK: query: create table T1_n67(key string, val string) partitioned by (ds string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: alter table T1 add partition (ds = 'today') +POSTHOOK: Output: default@T1_n67 +PREHOOK: query: alter table T1_n67 add partition (ds = 'today') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@t1 -POSTHOOK: query: alter table T1 add partition (ds = 'today') +PREHOOK: Output: default@t1_n67 +POSTHOOK: query: alter table T1_n67 add partition (ds = 'today') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@ds=today -PREHOOK: query: alter table T1 add partition (ds = 'yesterday') +POSTHOOK: Output: default@t1_n67 +POSTHOOK: Output: default@t1_n67@ds=today +PREHOOK: query: alter table T1_n67 add partition (ds = 'yesterday') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@t1 -POSTHOOK: query: alter table T1 add partition (ds = 'yesterday') +PREHOOK: Output: default@t1_n67 +POSTHOOK: query: alter table T1_n67 add partition (ds = 'yesterday') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@ds=yesterday -PREHOOK: query: alter table T1 partition (ds = 'today') compact 'major' +POSTHOOK: Output: default@t1_n67 +POSTHOOK: Output: default@t1_n67@ds=yesterday +PREHOOK: query: alter table T1_n67 partition (ds = 'today') compact 'major' PREHOOK: type: ALTERTABLE_COMPACT -POSTHOOK: query: alter table T1 partition (ds = 'today') compact 'major' +POSTHOOK: query: alter table T1_n67 partition (ds = 'today') compact 'major' POSTHOOK: type: ALTERTABLE_COMPACT -PREHOOK: query: alter table T1 partition (ds = 'yesterday') compact 'minor' +PREHOOK: query: alter table T1_n67 partition (ds = 'yesterday') compact 'minor' PREHOOK: type: ALTERTABLE_COMPACT -POSTHOOK: query: alter table T1 partition (ds = 'yesterday') compact 'minor' +POSTHOOK: query: alter table T1_n67 partition (ds = 'yesterday') compact 'minor' POSTHOOK: type: ALTERTABLE_COMPACT -PREHOOK: query: drop table T1 +PREHOOK: query: drop table T1_n67 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: default@t1_n67 +PREHOOK: Output: default@t1_n67 +POSTHOOK: query: drop table T1_n67 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n67 +POSTHOOK: Output: default@t1_n67 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out index 1fa090faf7..32b484d547 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out @@ -10,27 +10,27 @@ PREHOOK: Input: database:d1 POSTHOOK: query: use D1 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:d1 -PREHOOK: query: create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC 
TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table T1_n43(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE -PREHOOK: Output: D1@T1 +PREHOOK: Output: D1@T1_n43 PREHOOK: Output: database:d1 -POSTHOOK: query: create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: create table T1_n43(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE -POSTHOOK: Output: D1@T1 +POSTHOOK: Output: D1@T1_n43 POSTHOOK: Output: database:d1 -PREHOOK: query: alter table T1 compact 'major' +PREHOOK: query: alter table T1_n43 compact 'major' PREHOOK: type: ALTERTABLE_COMPACT -POSTHOOK: query: alter table T1 compact 'major' +POSTHOOK: query: alter table T1_n43 compact 'major' POSTHOOK: type: ALTERTABLE_COMPACT -PREHOOK: query: alter table T1 compact 'minor' +PREHOOK: query: alter table T1_n43 compact 'minor' PREHOOK: type: ALTERTABLE_COMPACT -POSTHOOK: query: alter table T1 compact 'minor' +POSTHOOK: query: alter table T1_n43 compact 'minor' POSTHOOK: type: ALTERTABLE_COMPACT -PREHOOK: query: drop table T1 +PREHOOK: query: drop table T1_n43 PREHOOK: type: DROPTABLE -PREHOOK: Input: d1@t1 -PREHOOK: Output: d1@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: d1@t1_n43 +PREHOOK: Output: d1@t1_n43 +POSTHOOK: query: drop table T1_n43 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: d1@t1 -POSTHOOK: Output: d1@t1 +POSTHOOK: Input: d1@t1_n43 +POSTHOOK: Output: d1@t1_n43 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out index 836ab3fb85..a32dd2c95a 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out @@ -18,37 +18,37 @@ POSTHOOK: query: drop database D1 POSTHOOK: type: DROPDATABASE POSTHOOK: Input: database:d1 POSTHOOK: Output: database:d1 -PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: query: create table T1_n32(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: Output: default@T1_n32 +POSTHOOK: query: create table T1_n32(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: create table T2 like T1 +POSTHOOK: Output: default@T1_n32 +PREHOOK: query: create table T2_n20 like T1_n32 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: create table T2 like T1 +PREHOOK: Output: default@T2_n20 +POSTHOOK: query: create table T2_n20 like T1_n32 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T2_n20 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n32 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n32 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n32 POSTHOOK: type: LOAD #### A masked pattern was 
here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: select * from T1 +POSTHOOK: Output: default@t1_n32 +PREHOOK: query: select * from T1_n32 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n32 #### A masked pattern was here #### -POSTHOOK: query: select * from T1 +POSTHOOK: query: select * from T1_n32 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n32 #### A masked pattern was here #### 1 11 2 12 @@ -56,105 +56,105 @@ POSTHOOK: Input: default@t1 7 17 8 18 8 28 -PREHOOK: query: create table T3 as select * from T1 +PREHOOK: query: create table T3_n8 as select * from T1_n32 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n32 PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: create table T3 as select * from T1 +PREHOOK: Output: default@T3_n8 +POSTHOOK: query: create table T3_n8 as select * from T1_n32 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n32 POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 -POSTHOOK: Lineage: t3.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t3.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: create table T4 (key char(10), val decimal(5,2), b int) +POSTHOOK: Output: default@T3_n8 +POSTHOOK: Lineage: t3_n8.key SIMPLE [(t1_n32)t1_n32.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t3_n8.val SIMPLE [(t1_n32)t1_n32.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: create table T4_n1 (key char(10), val decimal(5,2), b int) partitioned by (ds string) clustered by (b) into 10 buckets stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T4 -POSTHOOK: query: create table T4 (key char(10), val decimal(5,2), b int) +PREHOOK: Output: default@T4_n1 +POSTHOOK: query: create table T4_n1 (key char(10), val decimal(5,2), b int) partitioned by (ds string) clustered by (b) into 10 buckets stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T4 -PREHOOK: query: alter table T3 rename to newT3 +POSTHOOK: Output: default@T4_n1 +PREHOOK: query: alter table T3_n8 rename to newT3_n8 PREHOOK: type: ALTERTABLE_RENAME -PREHOOK: Input: default@t3 -PREHOOK: Output: default@t3 -POSTHOOK: query: alter table T3 rename to newT3 +PREHOOK: Input: default@t3_n8 +PREHOOK: Output: default@t3_n8 +POSTHOOK: query: alter table T3_n8 rename to newT3_n8 POSTHOOK: type: ALTERTABLE_RENAME -POSTHOOK: Input: default@t3 -POSTHOOK: Output: default@newT3 -POSTHOOK: Output: default@t3 -PREHOOK: query: alter table T2 set tblproperties ('test'='thisisatest') +POSTHOOK: Input: default@t3_n8 +POSTHOOK: Output: default@newT3_n8 +POSTHOOK: Output: default@t3_n8 +PREHOOK: query: alter table T2_n20 set tblproperties ('test'='thisisatest') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: alter table T2 set tblproperties ('test'='thisisatest') +PREHOOK: Input: default@t2_n20 +PREHOOK: Output: default@t2_n20 +POSTHOOK: query: alter table T2_n20 set tblproperties ('test'='thisisatest') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' +POSTHOOK: Input: default@t2_n20 +POSTHOOK: Output: default@t2_n20 +PREHOOK: 
query: alter table T2_n20 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' PREHOOK: type: ALTERTABLE_SERIALIZER -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' +PREHOOK: Input: default@t2_n20 +PREHOOK: Output: default@t2_n20 +POSTHOOK: query: alter table T2_n20 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' POSTHOOK: type: ALTERTABLE_SERIALIZER -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: alter table T2 set serdeproperties ('test'='thisisatest') +POSTHOOK: Input: default@t2_n20 +POSTHOOK: Output: default@t2_n20 +PREHOOK: query: alter table T2_n20 set serdeproperties ('test'='thisisatest') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: alter table T2 set serdeproperties ('test'='thisisatest') +PREHOOK: Input: default@t2_n20 +PREHOOK: Output: default@t2_n20 +POSTHOOK: query: alter table T2_n20 set serdeproperties ('test'='thisisatest') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: alter table T2 clustered by (key) into 32 buckets +POSTHOOK: Input: default@t2_n20 +POSTHOOK: Output: default@t2_n20 +PREHOOK: query: alter table T2_n20 clustered by (key) into 32 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: alter table T2 clustered by (key) into 32 buckets +PREHOOK: Input: default@t2_n20 +PREHOOK: Output: default@t2_n20 +POSTHOOK: query: alter table T2_n20 clustered by (key) into 32 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: alter table T4 add partition (ds='today') +POSTHOOK: Input: default@t2_n20 +POSTHOOK: Output: default@t2_n20 +PREHOOK: query: alter table T4_n1 add partition (ds='today') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@t4 -POSTHOOK: query: alter table T4 add partition (ds='today') +PREHOOK: Output: default@t4_n1 +POSTHOOK: query: alter table T4_n1 add partition (ds='today') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@t4 -POSTHOOK: Output: default@t4@ds=today -PREHOOK: query: alter table T4 partition (ds='today') rename to partition(ds='yesterday') +POSTHOOK: Output: default@t4_n1 +POSTHOOK: Output: default@t4_n1@ds=today +PREHOOK: query: alter table T4_n1 partition (ds='today') rename to partition(ds='yesterday') PREHOOK: type: ALTERTABLE_RENAMEPART -PREHOOK: Input: default@t4 -PREHOOK: Output: default@t4@ds=today -POSTHOOK: query: alter table T4 partition (ds='today') rename to partition(ds='yesterday') +PREHOOK: Input: default@t4_n1 +PREHOOK: Output: default@t4_n1@ds=today +POSTHOOK: query: alter table T4_n1 partition (ds='today') rename to partition(ds='yesterday') POSTHOOK: type: ALTERTABLE_RENAMEPART -POSTHOOK: Input: default@t4 -POSTHOOK: Input: default@t4@ds=today -POSTHOOK: Output: default@t4@ds=today -POSTHOOK: Output: default@t4@ds=yesterday -PREHOOK: query: alter table T4 drop partition (ds='yesterday') +POSTHOOK: Input: default@t4_n1 +POSTHOOK: Input: default@t4_n1@ds=today +POSTHOOK: Output: default@t4_n1@ds=today +POSTHOOK: Output: default@t4_n1@ds=yesterday +PREHOOK: query: alter table T4_n1 drop partition (ds='yesterday') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@t4 -PREHOOK: Output: default@t4@ds=yesterday -POSTHOOK: query: alter table T4 drop partition (ds='yesterday') 
+PREHOOK: Input: default@t4_n1 +PREHOOK: Output: default@t4_n1@ds=yesterday +POSTHOOK: query: alter table T4_n1 drop partition (ds='yesterday') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@t4 -POSTHOOK: Output: default@t4@ds=yesterday -PREHOOK: query: alter table T4 add partition (ds='tomorrow') +POSTHOOK: Input: default@t4_n1 +POSTHOOK: Output: default@t4_n1@ds=yesterday +PREHOOK: query: alter table T4_n1 add partition (ds='tomorrow') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@t4 -POSTHOOK: query: alter table T4 add partition (ds='tomorrow') +PREHOOK: Output: default@t4_n1 +POSTHOOK: query: alter table T4_n1 add partition (ds='tomorrow') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@t4 -POSTHOOK: Output: default@t4@ds=tomorrow +POSTHOOK: Output: default@t4_n1 +POSTHOOK: Output: default@t4_n1@ds=tomorrow PREHOOK: query: create table T5 (a string, b int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -189,79 +189,79 @@ POSTHOOK: Input: default@t7 POSTHOOK: Output: default@t7 #### A masked pattern was here #### PREHOOK: type: ALTERPARTITION_LOCATION -PREHOOK: Input: default@t4 -PREHOOK: Output: default@t4@ds=tomorrow +PREHOOK: Input: default@t4_n1 +PREHOOK: Output: default@t4_n1@ds=tomorrow #### A masked pattern was here #### POSTHOOK: type: ALTERPARTITION_LOCATION -POSTHOOK: Input: default@t4 -POSTHOOK: Input: default@t4@ds=tomorrow -POSTHOOK: Output: default@t4@ds=tomorrow +POSTHOOK: Input: default@t4_n1 +POSTHOOK: Input: default@t4_n1@ds=tomorrow +POSTHOOK: Output: default@t4_n1@ds=tomorrow #### A masked pattern was here #### -PREHOOK: query: alter table T2 touch +PREHOOK: query: alter table T2_n20 touch PREHOOK: type: ALTERTABLE_TOUCH -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: alter table T2 touch +PREHOOK: Input: default@t2_n20 +PREHOOK: Output: default@t2_n20 +POSTHOOK: query: alter table T2_n20 touch POSTHOOK: type: ALTERTABLE_TOUCH -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: alter table T4 touch partition (ds='tomorrow') +POSTHOOK: Input: default@t2_n20 +POSTHOOK: Output: default@t2_n20 +PREHOOK: query: alter table T4_n1 touch partition (ds='tomorrow') PREHOOK: type: ALTERTABLE_TOUCH -PREHOOK: Input: default@t4 -PREHOOK: Output: default@t4@ds=tomorrow -POSTHOOK: query: alter table T4 touch partition (ds='tomorrow') +PREHOOK: Input: default@t4_n1 +PREHOOK: Output: default@t4_n1@ds=tomorrow +POSTHOOK: query: alter table T4_n1 touch partition (ds='tomorrow') POSTHOOK: type: ALTERTABLE_TOUCH -POSTHOOK: Input: default@t4 -POSTHOOK: Input: default@t4@ds=tomorrow -POSTHOOK: Output: default@t4@ds=tomorrow -PREHOOK: query: create view V1 as select key from T1 +POSTHOOK: Input: default@t4_n1 +POSTHOOK: Input: default@t4_n1@ds=tomorrow +POSTHOOK: Output: default@t4_n1@ds=tomorrow +PREHOOK: query: create view V1_n1 as select key from T1_n32 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n32 PREHOOK: Output: database:default -PREHOOK: Output: default@V1 -POSTHOOK: query: create view V1 as select key from T1 +PREHOOK: Output: default@V1_n1 +POSTHOOK: query: create view V1_n1 as select key from T1_n32 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n32 POSTHOOK: Output: database:default -POSTHOOK: Output: default@V1 -POSTHOOK: Lineage: V1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: alter view V1 set tblproperties ('test'='thisisatest') +POSTHOOK: 
Output: default@V1_n1 +POSTHOOK: Lineage: V1_n1.key SIMPLE [(t1_n32)t1_n32.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: alter view V1_n1 set tblproperties ('test'='thisisatest') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: alter view V1 set tblproperties ('test'='thisisatest') +PREHOOK: Input: default@v1_n1 +PREHOOK: Output: default@v1_n1 +POSTHOOK: query: alter view V1_n1 set tblproperties ('test'='thisisatest') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 -PREHOOK: query: drop view V1 +POSTHOOK: Input: default@v1_n1 +POSTHOOK: Output: default@v1_n1 +PREHOOK: query: drop view V1_n1 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v1 -PREHOOK: Output: default@v1 -POSTHOOK: query: drop view V1 +PREHOOK: Input: default@v1_n1 +PREHOOK: Output: default@v1_n1 +POSTHOOK: query: drop view V1_n1 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v1 -POSTHOOK: Output: default@v1 -PREHOOK: query: drop table T1 +POSTHOOK: Input: default@v1_n1 +POSTHOOK: Output: default@v1_n1 +PREHOOK: query: drop table T1_n32 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: default@t1_n32 +PREHOOK: Output: default@t1_n32 +POSTHOOK: query: drop table T1_n32 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: drop table T2 +POSTHOOK: Input: default@t1_n32 +POSTHOOK: Output: default@t1_n32 +PREHOOK: query: drop table T2_n20 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: drop table T2 +PREHOOK: Input: default@t2_n20 +PREHOOK: Output: default@t2_n20 +POSTHOOK: query: drop table T2_n20 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: drop table newT3 +POSTHOOK: Input: default@t2_n20 +POSTHOOK: Output: default@t2_n20 +PREHOOK: query: drop table newT3_n8 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@newt3 -PREHOOK: Output: default@newt3 -POSTHOOK: query: drop table newT3 +PREHOOK: Input: default@newt3_n8 +PREHOOK: Output: default@newt3_n8 +POSTHOOK: query: drop table newT3_n8 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@newt3 -POSTHOOK: Output: default@newt3 +POSTHOOK: Input: default@newt3_n8 +POSTHOOK: Output: default@newt3_n8 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_query1.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_query1.q.out index 2e14e8ddaf..66084e3c68 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_query1.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_query1.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: query: create table T1_n12(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: Output: default@T1_n12 +POSTHOOK: query: create table T1_n12(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n12 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n12 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: 
query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n12 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n12 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: select * from T1 +POSTHOOK: Output: default@t1_n12 +PREHOOK: query: select * from T1_n12 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n12 #### A masked pattern was here #### -POSTHOOK: query: select * from T1 +POSTHOOK: query: select * from T1_n12 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n12 #### A masked pattern was here #### 1 11 2 12 @@ -28,31 +28,31 @@ POSTHOOK: Input: default@t1 7 17 8 18 8 28 -PREHOOK: query: create table T2(key string, val string) stored as textfile +PREHOOK: query: create table T2_n8(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: create table T2(key string, val string) stored as textfile +PREHOOK: Output: default@T2_n8 +POSTHOOK: query: create table T2_n8(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: insert into table T2 select * from T1 +POSTHOOK: Output: default@T2_n8 +PREHOOK: query: insert into table T2_n8 select * from T1_n12 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: insert into table T2 select * from T1 +PREHOOK: Input: default@t1_n12 +PREHOOK: Output: default@t2_n8 +POSTHOOK: query: insert into table T2_n8 select * from T1_n12 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from T2 +POSTHOOK: Input: default@t1_n12 +POSTHOOK: Output: default@t2_n8 +POSTHOOK: Lineage: t2_n8.key SIMPLE [(t1_n12)t1_n12.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n8.val SIMPLE [(t1_n12)t1_n12.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: select * from T2_n8 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from T2 +POSTHOOK: query: select * from T2_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2_n8 #### A masked pattern was here #### 1 11 2 12 @@ -60,19 +60,19 @@ POSTHOOK: Input: default@t2 7 17 8 18 8 28 -PREHOOK: query: drop table T1 +PREHOOK: query: drop table T1_n12 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: default@t1_n12 +PREHOOK: Output: default@t1_n12 +POSTHOOK: query: drop table T1_n12 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: drop table T2 +POSTHOOK: Input: default@t1_n12 +POSTHOOK: Output: default@t1_n12 +PREHOOK: query: drop table T2_n8 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: drop table T2 +PREHOOK: Input: default@t2_n8 +PREHOOK: Output: default@t2_n8 +POSTHOOK: query: drop table T2_n8 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 +POSTHOOK: Input: default@t2_n8 +POSTHOOK: Output: 
default@t2_n8 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_query2.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_query2.q.out index ad19a82418..ea4e077a3a 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_query2.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_query2.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: query: create table T1_n45(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: Output: default@T1_n45 +POSTHOOK: query: create table T1_n45(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n45 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n45 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n45 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n45 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: select * from T1 +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: select * from T1_n45 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n45 #### A masked pattern was here #### -POSTHOOK: query: select * from T1 +POSTHOOK: query: select * from T1_n45 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n45 #### A masked pattern was here #### 1 11 2 12 @@ -28,31 +28,31 @@ POSTHOOK: Input: default@t1 7 17 8 18 8 28 -PREHOOK: query: create table T2(key string, val string) stored as textfile +PREHOOK: query: create table T2_n28(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: create table T2(key string, val string) stored as textfile +PREHOOK: Output: default@T2_n28 +POSTHOOK: query: create table T2_n28(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: insert overwrite table T2 select * from T1 +POSTHOOK: Output: default@T2_n28 +PREHOOK: query: insert overwrite table T2_n28 select * from T1_n45 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: insert overwrite table T2 select * from T1 +PREHOOK: Input: default@t1_n45 +PREHOOK: Output: default@t2_n28 +POSTHOOK: query: insert overwrite table T2_n28 select * from T1_n45 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from T2 +POSTHOOK: Input: default@t1_n45 +POSTHOOK: Output: default@t2_n28 +POSTHOOK: Lineage: t2_n28.key SIMPLE [(t1_n45)t1_n45.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n28.val SIMPLE [(t1_n45)t1_n45.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: select * from T2_n28 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 
+PREHOOK: Input: default@t2_n28 #### A masked pattern was here #### -POSTHOOK: query: select * from T2 +POSTHOOK: query: select * from T2_n28 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2_n28 #### A masked pattern was here #### 1 11 2 12 @@ -60,19 +60,19 @@ POSTHOOK: Input: default@t2 7 17 8 18 8 28 -PREHOOK: query: drop table T1 +PREHOOK: query: drop table T1_n45 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: default@t1_n45 +PREHOOK: Output: default@t1_n45 +POSTHOOK: query: drop table T1_n45 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: drop table T2 +POSTHOOK: Input: default@t1_n45 +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: drop table T2_n28 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: drop table T2 +PREHOOK: Input: default@t2_n28 +PREHOOK: Output: default@t2_n28 +POSTHOOK: query: drop table T2_n28 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 +POSTHOOK: Input: default@t2_n28 +POSTHOOK: Output: default@t2_n28 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_query3.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_query3.q.out index a306d7a91b..02009106f6 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_query3.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_query3.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: query: create table T1_n71(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: Output: default@T1_n71 +POSTHOOK: query: create table T1_n71(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n71 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n71 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n71 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n71 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: select * from T1 +POSTHOOK: Output: default@t1_n71 +PREHOOK: query: select * from T1_n71 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n71 #### A masked pattern was here #### -POSTHOOK: query: select * from T1 +POSTHOOK: query: select * from T1_n71 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n71 #### A masked pattern was here #### 1 11 2 12 @@ -28,33 +28,33 @@ POSTHOOK: Input: default@t1 7 17 8 18 8 28 -PREHOOK: query: create table T2(key string, val string) partitioned by (pval string) stored as textfile +PREHOOK: query: create table T2_n43(key string, val string) partitioned by (pval string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: create table T2(key string, val string) partitioned by (pval string) stored as textfile +PREHOOK: Output: default@T2_n43 +POSTHOOK: 
query: create table T2_n43(key string, val string) partitioned by (pval string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: insert into table T2 partition (pval = '1') select * from T1 +POSTHOOK: Output: default@T2_n43 +PREHOOK: query: insert into table T2_n43 partition (pval = '1') select * from T1_n71 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2@pval=1 -POSTHOOK: query: insert into table T2 partition (pval = '1') select * from T1 +PREHOOK: Input: default@t1_n71 +PREHOOK: Output: default@t2_n43@pval=1 +POSTHOOK: query: insert into table T2_n43 partition (pval = '1') select * from T1_n71 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2@pval=1 -POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from T2 +POSTHOOK: Input: default@t1_n71 +POSTHOOK: Output: default@t2_n43@pval=1 +POSTHOOK: Lineage: t2_n43 PARTITION(pval=1).key SIMPLE [(t1_n71)t1_n71.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n43 PARTITION(pval=1).val SIMPLE [(t1_n71)t1_n71.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: select * from T2_n43 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@pval=1 +PREHOOK: Input: default@t2_n43 +PREHOOK: Input: default@t2_n43@pval=1 #### A masked pattern was here #### -POSTHOOK: query: select * from T2 +POSTHOOK: query: select * from T2_n43 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@pval=1 +POSTHOOK: Input: default@t2_n43 +POSTHOOK: Input: default@t2_n43@pval=1 #### A masked pattern was here #### 1 11 1 2 12 1 @@ -62,25 +62,25 @@ POSTHOOK: Input: default@t2@pval=1 7 17 1 8 18 1 8 28 1 -PREHOOK: query: insert overwrite table T2 partition (pval = '1') select * from T1 +PREHOOK: query: insert overwrite table T2_n43 partition (pval = '1') select * from T1_n71 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2@pval=1 -POSTHOOK: query: insert overwrite table T2 partition (pval = '1') select * from T1 +PREHOOK: Input: default@t1_n71 +PREHOOK: Output: default@t2_n43@pval=1 +POSTHOOK: query: insert overwrite table T2_n43 partition (pval = '1') select * from T1_n71 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2@pval=1 -POSTHOOK: Lineage: t2 PARTITION(pval=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(pval=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from T2 +POSTHOOK: Input: default@t1_n71 +POSTHOOK: Output: default@t2_n43@pval=1 +POSTHOOK: Lineage: t2_n43 PARTITION(pval=1).key SIMPLE [(t1_n71)t1_n71.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n43 PARTITION(pval=1).val SIMPLE [(t1_n71)t1_n71.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: select * from T2_n43 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@pval=1 +PREHOOK: Input: default@t2_n43 +PREHOOK: Input: default@t2_n43@pval=1 #### A masked pattern was here #### -POSTHOOK: query: select * from T2 +POSTHOOK: query: select * from T2_n43 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@pval=1 +POSTHOOK: Input: 
default@t2_n43 +POSTHOOK: Input: default@t2_n43@pval=1 #### A masked pattern was here #### 1 11 1 2 12 1 @@ -88,19 +88,19 @@ POSTHOOK: Input: default@t2@pval=1 7 17 1 8 18 1 8 28 1 -PREHOOK: query: drop table T1 +PREHOOK: query: drop table T1_n71 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: default@t1_n71 +PREHOOK: Output: default@t1_n71 +POSTHOOK: query: drop table T1_n71 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: drop table T2 +POSTHOOK: Input: default@t1_n71 +POSTHOOK: Output: default@t1_n71 +PREHOOK: query: drop table T2_n43 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: drop table T2 +PREHOOK: Input: default@t2_n43 +PREHOOK: Output: default@t2_n43 +POSTHOOK: query: drop table T2_n43 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 +POSTHOOK: Input: default@t2_n43 +POSTHOOK: Output: default@t2_n43 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_query4.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_query4.q.out index 92e59bbe88..e11fd6eb5f 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_query4.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_query4.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: query: create table T1_n105(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: create table T1(key string, val string) stored as textfile +PREHOOK: Output: default@T1_n105 +POSTHOOK: query: create table T1_n105(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n105 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n105 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n105 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n105 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: select * from T1 +POSTHOOK: Output: default@t1_n105 +PREHOOK: query: select * from T1_n105 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n105 #### A masked pattern was here #### -POSTHOOK: query: select * from T1 +POSTHOOK: query: select * from T1_n105 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n105 #### A masked pattern was here #### 1 11 2 12 @@ -28,52 +28,52 @@ POSTHOOK: Input: default@t1 7 17 8 18 8 28 -PREHOOK: query: create table T2(key string) partitioned by (val string) stored as textfile +PREHOOK: query: create table T2_n64(key string) partitioned by (val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: create table T2(key string) partitioned by (val string) stored as textfile +PREHOOK: Output: default@T2_n64 +POSTHOOK: query: create table T2_n64(key string) partitioned by (val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 
-PREHOOK: query: insert overwrite table T2 partition (val) select key, val from T1 +POSTHOOK: Output: default@T2_n64 +PREHOOK: query: insert overwrite table T2_n64 partition (val) select key, val from T1_n105 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: insert overwrite table T2 partition (val) select key, val from T1 +PREHOOK: Input: default@t1_n105 +PREHOOK: Output: default@t2_n64 +POSTHOOK: query: insert overwrite table T2_n64 partition (val) select key, val from T1_n105 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2@val=11 -POSTHOOK: Output: default@t2@val=12 -POSTHOOK: Output: default@t2@val=13 -POSTHOOK: Output: default@t2@val=17 -POSTHOOK: Output: default@t2@val=18 -POSTHOOK: Output: default@t2@val=28 -POSTHOOK: Lineage: t2 PARTITION(val=11).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(val=12).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(val=13).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(val=17).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(val=18).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(val=28).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: select * from T2 +POSTHOOK: Input: default@t1_n105 +POSTHOOK: Output: default@t2_n64@val=11 +POSTHOOK: Output: default@t2_n64@val=12 +POSTHOOK: Output: default@t2_n64@val=13 +POSTHOOK: Output: default@t2_n64@val=17 +POSTHOOK: Output: default@t2_n64@val=18 +POSTHOOK: Output: default@t2_n64@val=28 +POSTHOOK: Lineage: t2_n64 PARTITION(val=11).key SIMPLE [(t1_n105)t1_n105.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n64 PARTITION(val=12).key SIMPLE [(t1_n105)t1_n105.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n64 PARTITION(val=13).key SIMPLE [(t1_n105)t1_n105.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n64 PARTITION(val=17).key SIMPLE [(t1_n105)t1_n105.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n64 PARTITION(val=18).key SIMPLE [(t1_n105)t1_n105.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n64 PARTITION(val=28).key SIMPLE [(t1_n105)t1_n105.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: select * from T2_n64 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@val=11 -PREHOOK: Input: default@t2@val=12 -PREHOOK: Input: default@t2@val=13 -PREHOOK: Input: default@t2@val=17 -PREHOOK: Input: default@t2@val=18 -PREHOOK: Input: default@t2@val=28 +PREHOOK: Input: default@t2_n64 +PREHOOK: Input: default@t2_n64@val=11 +PREHOOK: Input: default@t2_n64@val=12 +PREHOOK: Input: default@t2_n64@val=13 +PREHOOK: Input: default@t2_n64@val=17 +PREHOOK: Input: default@t2_n64@val=18 +PREHOOK: Input: default@t2_n64@val=28 #### A masked pattern was here #### -POSTHOOK: query: select * from T2 +POSTHOOK: query: select * from T2_n64 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@val=11 -POSTHOOK: Input: default@t2@val=12 -POSTHOOK: Input: default@t2@val=13 -POSTHOOK: Input: default@t2@val=17 -POSTHOOK: Input: default@t2@val=18 -POSTHOOK: Input: default@t2@val=28 +POSTHOOK: Input: default@t2_n64 +POSTHOOK: Input: 
default@t2_n64@val=11 +POSTHOOK: Input: default@t2_n64@val=12 +POSTHOOK: Input: default@t2_n64@val=13 +POSTHOOK: Input: default@t2_n64@val=17 +POSTHOOK: Input: default@t2_n64@val=18 +POSTHOOK: Input: default@t2_n64@val=28 #### A masked pattern was here #### 1 11 2 12 @@ -81,19 +81,19 @@ POSTHOOK: Input: default@t2@val=28 7 17 8 18 8 28 -PREHOOK: query: drop table T1 +PREHOOK: query: drop table T1_n105 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: default@t1_n105 +PREHOOK: Output: default@t1_n105 +POSTHOOK: query: drop table T1_n105 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: drop table T2 +POSTHOOK: Input: default@t1_n105 +POSTHOOK: Output: default@t1_n105 +PREHOOK: query: drop table T2_n64 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: drop table T2 +PREHOOK: Input: default@t2_n64 +PREHOOK: Output: default@t2_n64 +POSTHOOK: query: drop table T2_n64 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 +POSTHOOK: Input: default@t2_n64 +POSTHOOK: Output: default@t2_n64 diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out index 1d7206f31e..cd36de5a95 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out @@ -10,46 +10,46 @@ PREHOOK: Input: database:foo POSTHOOK: query: use foo POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:foo -PREHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile +PREHOOK: query: create table T1_n25(key string, val string) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:foo -PREHOOK: Output: foo@T1 -POSTHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile +PREHOOK: Output: foo@T1_n25 +POSTHOOK: query: create table T1_n25(key string, val string) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:foo -POSTHOOK: Output: foo@T1 -PREHOOK: query: alter table T1 add partition (ds='today') +POSTHOOK: Output: foo@T1_n25 +PREHOOK: query: alter table T1_n25 add partition (ds='today') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: foo@t1 -POSTHOOK: query: alter table T1 add partition (ds='today') +PREHOOK: Output: foo@t1_n25 +POSTHOOK: query: alter table T1_n25 add partition (ds='today') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: foo@t1 -POSTHOOK: Output: foo@t1@ds=today -PREHOOK: query: create view V1 as select key from T1 +POSTHOOK: Output: foo@t1_n25 +POSTHOOK: Output: foo@t1_n25@ds=today +PREHOOK: query: create view V1_n0 as select key from T1_n25 PREHOOK: type: CREATEVIEW -PREHOOK: Input: foo@t1 +PREHOOK: Input: foo@t1_n25 PREHOOK: Output: database:foo -PREHOOK: Output: foo@V1 -POSTHOOK: query: create view V1 as select key from T1 +PREHOOK: Output: foo@V1_n0 +POSTHOOK: query: create view V1_n0 as select key from T1_n25 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: foo@t1 +POSTHOOK: Input: foo@t1_n25 POSTHOOK: Output: database:foo -POSTHOOK: Output: foo@V1 -POSTHOOK: Lineage: V1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Output: foo@V1_n0 +POSTHOOK: Lineage: V1_n0.key SIMPLE [(t1_n25)t1_n25.FieldSchema(name:key, type:string, comment:null), ] PREHOOK: 
query: show tables PREHOOK: type: SHOWTABLES PREHOOK: Input: database:foo POSTHOOK: query: show tables POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:foo -t1 -v1 -PREHOOK: query: describe T1 +t1_n25 +v1_n0 +PREHOOK: query: describe T1_n25 PREHOOK: type: DESCTABLE -PREHOOK: Input: foo@t1 -POSTHOOK: query: describe T1 +PREHOOK: Input: foo@t1_n25 +POSTHOOK: query: describe T1_n25 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: foo@t1 +POSTHOOK: Input: foo@t1_n25 key string val string ds string @@ -57,22 +57,22 @@ ds string # Partition Information # col_name data_type comment ds string -PREHOOK: query: drop view V1 +PREHOOK: query: drop view V1_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: foo@v1 -PREHOOK: Output: foo@v1 -POSTHOOK: query: drop view V1 +PREHOOK: Input: foo@v1_n0 +PREHOOK: Output: foo@v1_n0 +POSTHOOK: query: drop view V1_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: foo@v1 -POSTHOOK: Output: foo@v1 -PREHOOK: query: drop table T1 +POSTHOOK: Input: foo@v1_n0 +POSTHOOK: Output: foo@v1_n0 +PREHOOK: query: drop table T1_n25 PREHOOK: type: DROPTABLE -PREHOOK: Input: foo@t1 -PREHOOK: Output: foo@t1 -POSTHOOK: query: drop table T1 +PREHOOK: Input: foo@t1_n25 +PREHOOK: Output: foo@t1_n25 +POSTHOOK: query: drop table T1_n25 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: foo@t1 -POSTHOOK: Output: foo@t1 +POSTHOOK: Input: foo@t1_n25 +POSTHOOK: Output: foo@t1_n25 PREHOOK: query: show databases PREHOOK: type: SHOWDATABASES POSTHOOK: query: show databases diff --git a/ql/src/test/results/clientpositive/decimal_1.q.out b/ql/src/test/results/clientpositive/decimal_1.q.out index 17090f7059..f5c92f3d2c 100644 --- a/ql/src/test/results/clientpositive/decimal_1.q.out +++ b/ql/src/test/results/clientpositive/decimal_1.q.out @@ -1,131 +1,131 @@ -PREHOOK: query: drop table if exists decimal_1 +PREHOOK: query: drop table if exists decimal_1_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists decimal_1 +POSTHOOK: query: drop table if exists decimal_1_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table decimal_1 (t decimal(4,2), u decimal(5), v decimal) +PREHOOK: query: create table decimal_1_n0 (t decimal(4,2), u decimal(5), v decimal) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@decimal_1 -POSTHOOK: query: create table decimal_1 (t decimal(4,2), u decimal(5), v decimal) +PREHOOK: Output: default@decimal_1_n0 +POSTHOOK: query: create table decimal_1_n0 (t decimal(4,2), u decimal(5), v decimal) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@decimal_1 -PREHOOK: query: alter table decimal_1 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' +POSTHOOK: Output: default@decimal_1_n0 +PREHOOK: query: alter table decimal_1_n0 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' PREHOOK: type: ALTERTABLE_SERIALIZER -PREHOOK: Input: default@decimal_1 -PREHOOK: Output: default@decimal_1 -POSTHOOK: query: alter table decimal_1 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' +PREHOOK: Input: default@decimal_1_n0 +PREHOOK: Output: default@decimal_1_n0 +POSTHOOK: query: alter table decimal_1_n0 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER -POSTHOOK: Input: default@decimal_1 -POSTHOOK: Output: default@decimal_1 -PREHOOK: query: desc decimal_1 +POSTHOOK: Input: default@decimal_1_n0 +POSTHOOK: Output: default@decimal_1_n0 +PREHOOK: query: desc decimal_1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@decimal_1 -POSTHOOK: 
query: desc decimal_1 +PREHOOK: Input: default@decimal_1_n0 +POSTHOOK: query: desc decimal_1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 t decimal(4,2) u decimal(5,0) v decimal(10,0) -PREHOOK: query: insert overwrite table decimal_1 +PREHOOK: query: insert overwrite table decimal_1_n0 select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@decimal_1 -POSTHOOK: query: insert overwrite table decimal_1 +PREHOOK: Output: default@decimal_1_n0 +POSTHOOK: query: insert overwrite table decimal_1_n0 select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@decimal_1 -POSTHOOK: Lineage: decimal_1.t EXPRESSION [] -POSTHOOK: Lineage: decimal_1.u EXPRESSION [] -POSTHOOK: Lineage: decimal_1.v EXPRESSION [] -PREHOOK: query: select cast(t as boolean) from decimal_1 +POSTHOOK: Output: default@decimal_1_n0 +POSTHOOK: Lineage: decimal_1_n0.t EXPRESSION [] +POSTHOOK: Lineage: decimal_1_n0.u EXPRESSION [] +POSTHOOK: Lineage: decimal_1_n0.v EXPRESSION [] +PREHOOK: query: select cast(t as boolean) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as boolean) from decimal_1 +POSTHOOK: query: select cast(t as boolean) from decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### true -PREHOOK: query: select cast(t as tinyint) from decimal_1 +PREHOOK: query: select cast(t as tinyint) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as tinyint) from decimal_1 +POSTHOOK: query: select cast(t as tinyint) from decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### 17 -PREHOOK: query: select cast(t as smallint) from decimal_1 +PREHOOK: query: select cast(t as smallint) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as smallint) from decimal_1 +POSTHOOK: query: select cast(t as smallint) from decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### 17 -PREHOOK: query: select cast(t as int) from decimal_1 +PREHOOK: query: select cast(t as int) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as int) from decimal_1 +POSTHOOK: query: select cast(t as int) from decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### 17 -PREHOOK: query: select cast(t as bigint) from decimal_1 +PREHOOK: query: select cast(t as bigint) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as bigint) from decimal_1 +POSTHOOK: query: select cast(t as bigint) from 
decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### 17 -PREHOOK: query: select cast(t as float) from decimal_1 +PREHOOK: query: select cast(t as float) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as float) from decimal_1 +POSTHOOK: query: select cast(t as float) from decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### 17.29 -PREHOOK: query: select cast(t as double) from decimal_1 +PREHOOK: query: select cast(t as double) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as double) from decimal_1 +POSTHOOK: query: select cast(t as double) from decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### 17.29 -PREHOOK: query: select cast(t as string) from decimal_1 +PREHOOK: query: select cast(t as string) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as string) from decimal_1 +POSTHOOK: query: select cast(t as string) from decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### 17.29 -PREHOOK: query: select cast(t as timestamp) from decimal_1 +PREHOOK: query: select cast(t as timestamp) from decimal_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_1 +PREHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as timestamp) from decimal_1 +POSTHOOK: query: select cast(t as timestamp) from decimal_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 #### A masked pattern was here #### 1969-12-31 16:00:17.29 -PREHOOK: query: drop table decimal_1 +PREHOOK: query: drop table decimal_1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal_1 -PREHOOK: Output: default@decimal_1 -POSTHOOK: query: drop table decimal_1 +PREHOOK: Input: default@decimal_1_n0 +PREHOOK: Output: default@decimal_1_n0 +POSTHOOK: query: drop table decimal_1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal_1 -POSTHOOK: Output: default@decimal_1 +POSTHOOK: Input: default@decimal_1_n0 +POSTHOOK: Output: default@decimal_1_n0 diff --git a/ql/src/test/results/clientpositive/decimal_10_0.q.out b/ql/src/test/results/clientpositive/decimal_10_0.q.out index 6b891b13ae..fb65b1b7b2 100644 --- a/ql/src/test/results/clientpositive/decimal_10_0.q.out +++ b/ql/src/test/results/clientpositive/decimal_10_0.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: DROP TABLE IF EXISTS `DECIMAL` +PREHOOK: query: DROP TABLE IF EXISTS `DECIMAL_n0` PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS `DECIMAL` +POSTHOOK: query: DROP TABLE IF EXISTS `DECIMAL_n0` POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE `DECIMAL` (`dec` decimal) +PREHOOK: query: CREATE TABLE `DECIMAL_n0` (`dec` decimal) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL -POSTHOOK: query: CREATE TABLE `DECIMAL` (`dec` decimal) +PREHOOK: Output: 
default@DECIMAL_n0 +POSTHOOK: query: CREATE TABLE `DECIMAL_n0` (`dec` decimal) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE `DECIMAL` +POSTHOOK: Output: default@DECIMAL_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE `DECIMAL_n0` PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@decimal -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE `DECIMAL` +PREHOOK: Output: default@decimal_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE `DECIMAL_n0` POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@decimal -PREHOOK: query: SELECT `dec` FROM `DECIMAL` +POSTHOOK: Output: default@decimal_n0 +PREHOOK: query: SELECT `dec` FROM `DECIMAL_n0` PREHOOK: type: QUERY -PREHOOK: Input: default@decimal +PREHOOK: Input: default@decimal_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT `dec` FROM `DECIMAL` +POSTHOOK: query: SELECT `dec` FROM `DECIMAL_n0` POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal +POSTHOOK: Input: default@decimal_n0 #### A masked pattern was here #### 1000000000 NULL -PREHOOK: query: DROP TABLE `DECIMAL` +PREHOOK: query: DROP TABLE `DECIMAL_n0` PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal -PREHOOK: Output: default@decimal -POSTHOOK: query: DROP TABLE `DECIMAL` +PREHOOK: Input: default@decimal_n0 +PREHOOK: Output: default@decimal_n0 +POSTHOOK: query: DROP TABLE `DECIMAL_n0` POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal -POSTHOOK: Output: default@decimal +POSTHOOK: Input: default@decimal_n0 +POSTHOOK: Output: default@decimal_n0 diff --git a/ql/src/test/results/clientpositive/decimal_2.q.out b/ql/src/test/results/clientpositive/decimal_2.q.out index f3168f6f50..56e08d735d 100644 --- a/ql/src/test/results/clientpositive/decimal_2.q.out +++ b/ql/src/test/results/clientpositive/decimal_2.q.out @@ -1,284 +1,284 @@ -PREHOOK: query: drop table decimal_2 +PREHOOK: query: drop table decimal_2_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table decimal_2 +POSTHOOK: query: drop table decimal_2_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table decimal_2 (t decimal(18,9)) +PREHOOK: query: create table decimal_2_n1 (t decimal(18,9)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@decimal_2 -POSTHOOK: query: create table decimal_2 (t decimal(18,9)) +PREHOOK: Output: default@decimal_2_n1 +POSTHOOK: query: create table decimal_2_n1 (t decimal(18,9)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@decimal_2 -PREHOOK: query: alter table decimal_2 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' +POSTHOOK: Output: default@decimal_2_n1 +PREHOOK: query: alter table decimal_2_n1 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' PREHOOK: type: ALTERTABLE_SERIALIZER -PREHOOK: Input: default@decimal_2 -PREHOOK: Output: default@decimal_2 -POSTHOOK: query: alter table decimal_2 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' +PREHOOK: Input: default@decimal_2_n1 +PREHOOK: Output: default@decimal_2_n1 +POSTHOOK: query: alter table decimal_2_n1 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER -POSTHOOK: 
Input: default@decimal_2 -POSTHOOK: Output: default@decimal_2 -PREHOOK: query: insert overwrite table decimal_2 +POSTHOOK: Input: default@decimal_2_n1 +POSTHOOK: Output: default@decimal_2_n1 +PREHOOK: query: insert overwrite table decimal_2_n1 select cast('17.29' as decimal(4,2)) from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@decimal_2 -POSTHOOK: query: insert overwrite table decimal_2 +PREHOOK: Output: default@decimal_2_n1 +POSTHOOK: query: insert overwrite table decimal_2_n1 select cast('17.29' as decimal(4,2)) from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@decimal_2 -POSTHOOK: Lineage: decimal_2.t EXPRESSION [] -PREHOOK: query: select cast(t as boolean) from decimal_2 +POSTHOOK: Output: default@decimal_2_n1 +POSTHOOK: Lineage: decimal_2_n1.t EXPRESSION [] +PREHOOK: query: select cast(t as boolean) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as boolean) from decimal_2 +POSTHOOK: query: select cast(t as boolean) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### true -PREHOOK: query: select cast(t as tinyint) from decimal_2 +PREHOOK: query: select cast(t as tinyint) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as tinyint) from decimal_2 +POSTHOOK: query: select cast(t as tinyint) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 17 -PREHOOK: query: select cast(t as smallint) from decimal_2 +PREHOOK: query: select cast(t as smallint) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as smallint) from decimal_2 +POSTHOOK: query: select cast(t as smallint) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 17 -PREHOOK: query: select cast(t as int) from decimal_2 +PREHOOK: query: select cast(t as int) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as int) from decimal_2 +POSTHOOK: query: select cast(t as int) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 17 -PREHOOK: query: select cast(t as bigint) from decimal_2 +PREHOOK: query: select cast(t as bigint) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as bigint) from decimal_2 +POSTHOOK: query: select cast(t as bigint) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 17 -PREHOOK: query: select cast(t as float) from decimal_2 +PREHOOK: query: select cast(t as float) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 
#### A masked pattern was here #### -POSTHOOK: query: select cast(t as float) from decimal_2 +POSTHOOK: query: select cast(t as float) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 17.29 -PREHOOK: query: select cast(t as double) from decimal_2 +PREHOOK: query: select cast(t as double) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as double) from decimal_2 +POSTHOOK: query: select cast(t as double) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 17.29 -PREHOOK: query: select cast(t as string) from decimal_2 +PREHOOK: query: select cast(t as string) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as string) from decimal_2 +POSTHOOK: query: select cast(t as string) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 17.29 -PREHOOK: query: insert overwrite table decimal_2 +PREHOOK: query: insert overwrite table decimal_2_n1 select cast('3404045.5044003' as decimal(18,9)) from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@decimal_2 -POSTHOOK: query: insert overwrite table decimal_2 +PREHOOK: Output: default@decimal_2_n1 +POSTHOOK: query: insert overwrite table decimal_2_n1 select cast('3404045.5044003' as decimal(18,9)) from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@decimal_2 -POSTHOOK: Lineage: decimal_2.t EXPRESSION [] -PREHOOK: query: select cast(t as boolean) from decimal_2 +POSTHOOK: Output: default@decimal_2_n1 +POSTHOOK: Lineage: decimal_2_n1.t EXPRESSION [] +PREHOOK: query: select cast(t as boolean) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as boolean) from decimal_2 +POSTHOOK: query: select cast(t as boolean) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### true -PREHOOK: query: select cast(t as tinyint) from decimal_2 +PREHOOK: query: select cast(t as tinyint) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as tinyint) from decimal_2 +POSTHOOK: query: select cast(t as tinyint) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### NULL -PREHOOK: query: select cast(t as smallint) from decimal_2 +PREHOOK: query: select cast(t as smallint) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as smallint) from decimal_2 +POSTHOOK: query: select cast(t as smallint) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### NULL -PREHOOK: 
query: select cast(t as int) from decimal_2 +PREHOOK: query: select cast(t as int) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as int) from decimal_2 +POSTHOOK: query: select cast(t as int) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3404045 -PREHOOK: query: select cast(t as bigint) from decimal_2 +PREHOOK: query: select cast(t as bigint) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as bigint) from decimal_2 +POSTHOOK: query: select cast(t as bigint) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3404045 -PREHOOK: query: select cast(t as float) from decimal_2 +PREHOOK: query: select cast(t as float) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as float) from decimal_2 +POSTHOOK: query: select cast(t as float) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3404045.5 -PREHOOK: query: select cast(t as double) from decimal_2 +PREHOOK: query: select cast(t as double) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as double) from decimal_2 +POSTHOOK: query: select cast(t as double) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3404045.5044003 -PREHOOK: query: select cast(t as string) from decimal_2 +PREHOOK: query: select cast(t as string) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(t as string) from decimal_2 +POSTHOOK: query: select cast(t as string) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3404045.5044003 -PREHOOK: query: select cast(3.14 as decimal(4,2)) from decimal_2 +PREHOOK: query: select cast(3.14 as decimal(4,2)) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(3.14 as decimal(4,2)) from decimal_2 +POSTHOOK: query: select cast(3.14 as decimal(4,2)) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3.14 -PREHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) from decimal_2 +PREHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) from decimal_2 +POSTHOOK: query: select cast(cast(3.14 as float) as decimal(4,2)) from decimal_2_n1 POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3.14 -PREHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) from decimal_2 +PREHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) from decimal_2 +POSTHOOK: query: select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 1355944339.1234567 -PREHOOK: query: select cast(true as decimal) from decimal_2 +PREHOOK: query: select cast(true as decimal) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(true as decimal) from decimal_2 +POSTHOOK: query: select cast(true as decimal) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 1 -PREHOOK: query: select cast(3Y as decimal) from decimal_2 +PREHOOK: query: select cast(3Y as decimal) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(3Y as decimal) from decimal_2 +POSTHOOK: query: select cast(3Y as decimal) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3 -PREHOOK: query: select cast(3S as decimal) from decimal_2 +PREHOOK: query: select cast(3S as decimal) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(3S as decimal) from decimal_2 +POSTHOOK: query: select cast(3S as decimal) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3 -PREHOOK: query: select cast(cast(3 as int) as decimal) from decimal_2 +PREHOOK: query: select cast(cast(3 as int) as decimal) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(cast(3 as int) as decimal) from decimal_2 +POSTHOOK: query: select cast(cast(3 as int) as decimal) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3 -PREHOOK: query: select cast(3L as decimal) from decimal_2 +PREHOOK: query: select cast(3L as decimal) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(3L as decimal) from decimal_2 +POSTHOOK: query: select cast(3L as decimal) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 3 -PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) from decimal_2 +PREHOOK: query: 
select cast(0.99999999999999999999 as decimal(20,19)) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) from decimal_2 +POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 1 -PREHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) from decimal_2 +PREHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) from decimal_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) from decimal_2 +POSTHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) from decimal_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 #### A masked pattern was here #### 0.99999999999999999999 -PREHOOK: query: drop table decimal_2 +PREHOOK: query: drop table decimal_2_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal_2 -PREHOOK: Output: default@decimal_2 -POSTHOOK: query: drop table decimal_2 +PREHOOK: Input: default@decimal_2_n1 +PREHOOK: Output: default@decimal_2_n1 +POSTHOOK: query: drop table decimal_2_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal_2 -POSTHOOK: Output: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n1 +POSTHOOK: Output: default@decimal_2_n1 diff --git a/ql/src/test/results/clientpositive/decimal_5.q.out b/ql/src/test/results/clientpositive/decimal_5.q.out index 0c465381f2..d94f5f2e32 100644 --- a/ql/src/test/results/clientpositive/decimal_5.q.out +++ b/ql/src/test/results/clientpositive/decimal_5.q.out @@ -1,36 +1,36 @@ -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_5 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_5_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_5 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_5_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE DECIMAL_5(key decimal(10,5), value int) +PREHOOK: query: CREATE TABLE DECIMAL_5_n0(key decimal(10,5), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL_5 -POSTHOOK: query: CREATE TABLE DECIMAL_5(key decimal(10,5), value int) +PREHOOK: Output: default@DECIMAL_5_n0 +POSTHOOK: query: CREATE TABLE DECIMAL_5_n0(key decimal(10,5), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL_5 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5 +POSTHOOK: Output: default@DECIMAL_5_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@decimal_5 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5 +PREHOOK: Output: default@decimal_5_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@decimal_5 -PREHOOK: query: SELECT key FROM DECIMAL_5 ORDER BY key +POSTHOOK: 
Output: default@decimal_5_n0 +PREHOOK: query: SELECT key FROM DECIMAL_5_n0 ORDER BY key PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_5 +PREHOOK: Input: default@decimal_5_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT key FROM DECIMAL_5 ORDER BY key +POSTHOOK: query: SELECT key FROM DECIMAL_5_n0 ORDER BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_5 +POSTHOOK: Input: default@decimal_5_n0 #### A masked pattern was here #### NULL NULL @@ -70,13 +70,13 @@ NULL 124.00000 125.20000 200.00000 -PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key +PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5_n0 ORDER BY key PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_5 +PREHOOK: Input: default@decimal_5_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key +POSTHOOK: query: SELECT DISTINCT key FROM DECIMAL_5_n0 ORDER BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_5 +POSTHOOK: Input: default@decimal_5_n0 #### A masked pattern was here #### NULL -4400.00000 @@ -105,13 +105,13 @@ NULL 124.00000 125.20000 200.00000 -PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 +PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_5 +PREHOOK: Input: default@decimal_5_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 +POSTHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_5 +POSTHOOK: Input: default@decimal_5_n0 #### A masked pattern was here #### -4400 NULL @@ -151,13 +151,13 @@ NULL 1 NULL NULL -PREHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5 +PREHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_5 +PREHOOK: Input: default@decimal_5_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5 +POSTHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_5 +POSTHOOK: Input: default@decimal_5_n0 #### A masked pattern was here #### NULL NULL @@ -197,11 +197,11 @@ NULL 1.000 NULL NULL -PREHOOK: query: DROP TABLE DECIMAL_5 +PREHOOK: query: DROP TABLE DECIMAL_5_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal_5 -PREHOOK: Output: default@decimal_5 -POSTHOOK: query: DROP TABLE DECIMAL_5 +PREHOOK: Input: default@decimal_5_n0 +PREHOOK: Output: default@decimal_5_n0 +POSTHOOK: query: DROP TABLE DECIMAL_5_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal_5 -POSTHOOK: Output: default@decimal_5 +POSTHOOK: Input: default@decimal_5_n0 +POSTHOOK: Output: default@decimal_5_n0 diff --git a/ql/src/test/results/clientpositive/decimal_6.q.out b/ql/src/test/results/clientpositive/decimal_6.q.out index 8504f2b8d2..1959dd9f37 100644 --- a/ql/src/test/results/clientpositive/decimal_6.q.out +++ b/ql/src/test/results/clientpositive/decimal_6.q.out @@ -1,76 +1,76 @@ -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2 +POSTHOOK: query: DROP TABLE IF 
EXISTS DECIMAL_6_2_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE DECIMAL_6_1(key decimal(10,5), value int) +PREHOOK: query: CREATE TABLE DECIMAL_6_1_n0(key decimal(10,5), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL_6_1 -POSTHOOK: query: CREATE TABLE DECIMAL_6_1(key decimal(10,5), value int) +PREHOOK: Output: default@DECIMAL_6_1_n0 +POSTHOOK: query: CREATE TABLE DECIMAL_6_1_n0(key decimal(10,5), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL_6_1 -PREHOOK: query: CREATE TABLE DECIMAL_6_2(key decimal(17,4), value int) +POSTHOOK: Output: default@DECIMAL_6_1_n0 +PREHOOK: query: CREATE TABLE DECIMAL_6_2_n0(key decimal(17,4), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL_6_2 -POSTHOOK: query: CREATE TABLE DECIMAL_6_2(key decimal(17,4), value int) +PREHOOK: Output: default@DECIMAL_6_2_n0 +POSTHOOK: query: CREATE TABLE DECIMAL_6_2_n0(key decimal(17,4), value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL_6_2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1 +POSTHOOK: Output: default@DECIMAL_6_2_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@decimal_6_1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1 +PREHOOK: Output: default@decimal_6_1_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@decimal_6_1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2 +POSTHOOK: Output: default@decimal_6_1_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@decimal_6_2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2 +PREHOOK: Output: default@decimal_6_2_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@decimal_6_2 +POSTHOOK: Output: default@decimal_6_2_n0 PREHOOK: query: SELECT T.key from ( - SELECT key, value from DECIMAL_6_1 + SELECT key, value from DECIMAL_6_1_n0 UNION ALL - SELECT key, value from DECIMAL_6_2 + SELECT key, value from DECIMAL_6_2_n0 ) T order by T.key PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_6_1 -PREHOOK: Input: default@decimal_6_2 +PREHOOK: Input: default@decimal_6_1_n0 +PREHOOK: Input: default@decimal_6_2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT T.key from ( - SELECT key, value from DECIMAL_6_1 + SELECT key, value from DECIMAL_6_1_n0 
UNION ALL - SELECT key, value from DECIMAL_6_2 + SELECT key, value from DECIMAL_6_2_n0 ) T order by T.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_6_1 -POSTHOOK: Input: default@decimal_6_2 +POSTHOOK: Input: default@decimal_6_1_n0 +POSTHOOK: Input: default@decimal_6_2_n0 #### A masked pattern was here #### NULL NULL @@ -126,23 +126,23 @@ NULL 2389432.23750 2389432.23750 1234567890.12350 -PREHOOK: query: CREATE TABLE DECIMAL_6_3 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v +PREHOOK: query: CREATE TABLE DECIMAL_6_3_n0 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1_n0 ORDER BY v PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@decimal_6_1 +PREHOOK: Input: default@decimal_6_1_n0 PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL_6_3 -POSTHOOK: query: CREATE TABLE DECIMAL_6_3 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v +PREHOOK: Output: default@DECIMAL_6_3_n0 +POSTHOOK: query: CREATE TABLE DECIMAL_6_3_n0 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1_n0 ORDER BY v POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@decimal_6_1 +POSTHOOK: Input: default@decimal_6_1_n0 POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL_6_3 -POSTHOOK: Lineage: decimal_6_3.k EXPRESSION [(decimal_6_1)decimal_6_1.FieldSchema(name:key, type:decimal(10,5), comment:null), ] -POSTHOOK: Lineage: decimal_6_3.v EXPRESSION [(decimal_6_1)decimal_6_1.FieldSchema(name:value, type:int, comment:null), ] -PREHOOK: query: desc DECIMAL_6_3 +POSTHOOK: Output: default@DECIMAL_6_3_n0 +POSTHOOK: Lineage: decimal_6_3_n0.k EXPRESSION [(decimal_6_1_n0)decimal_6_1_n0.FieldSchema(name:key, type:decimal(10,5), comment:null), ] +POSTHOOK: Lineage: decimal_6_3_n0.v EXPRESSION [(decimal_6_1_n0)decimal_6_1_n0.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: desc DECIMAL_6_3_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@decimal_6_3 -POSTHOOK: query: desc DECIMAL_6_3 +PREHOOK: Input: default@decimal_6_3_n0 +POSTHOOK: query: desc DECIMAL_6_3_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@decimal_6_3 +POSTHOOK: Input: default@decimal_6_3_n0 k decimal(11,5) v int diff --git a/ql/src/test/results/clientpositive/decimal_join2.q.out b/ql/src/test/results/clientpositive/decimal_join2.q.out index 0be3993b26..ce452740a5 100644 --- a/ql/src/test/results/clientpositive/decimal_join2.q.out +++ b/ql/src/test/results/clientpositive/decimal_join2.q.out @@ -2,9 +2,9 @@ PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_n0 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) ROW FORMAT DELIMITED @@ -28,23 +28,23 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DE POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@decimal_3_txt -PREHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt +PREHOOK: query: CREATE TABLE DECIMAL_3_n0 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@decimal_3_txt PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL_3 -POSTHOOK: query: 
CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt +PREHOOK: Output: default@DECIMAL_3_n0 +POSTHOOK: query: CREATE TABLE DECIMAL_3_n0 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@decimal_3_txt POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL_3 -POSTHOOK: Lineage: decimal_3.key SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:key, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: decimal_3.value SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:value, type:int, comment:null), ] +POSTHOOK: Output: default@DECIMAL_3_n0 +POSTHOOK: Lineage: decimal_3_n0.key SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:key, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: decimal_3_n0.value SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:value, type:int, comment:null), ] PREHOOK: query: EXPLAIN -SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +SELECT * FROM DECIMAL_3_n0 a JOIN DECIMAL_3_n0 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +SELECT * FROM DECIMAL_3_n0 a JOIN DECIMAL_3_n0 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -131,13 +131,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +PREHOOK: query: SELECT * FROM DECIMAL_3_n0 a JOIN DECIMAL_3_n0 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_3 +PREHOOK: Input: default@decimal_3_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +POSTHOOK: query: SELECT * FROM DECIMAL_3_n0 a JOIN DECIMAL_3_n0 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_3 +POSTHOOK: Input: default@decimal_3_n0 #### A masked pattern was here #### -1234567890.123456789000000000 -1234567890 -1234567890.123456789000000000 -1234567890 -4400.000000000000000000 4400 -4400.000000000000000000 4400 @@ -205,10 +205,10 @@ POSTHOOK: Input: default@decimal_3 200.000000000000000000 200 200.000000000000000000 200 1234567890.123456780000000000 1234567890 1234567890.123456780000000000 1234567890 PREHOOK: query: EXPLAIN -SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +SELECT * FROM DECIMAL_3_n0 a JOIN DECIMAL_3_n0 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +SELECT * FROM DECIMAL_3_n0 a JOIN DECIMAL_3_n0 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage @@ -286,13 +286,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +PREHOOK: query: SELECT * FROM DECIMAL_3_n0 a JOIN DECIMAL_3_n0 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_3 +PREHOOK: Input: 
default@decimal_3_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +POSTHOOK: query: SELECT * FROM DECIMAL_3_n0 a JOIN DECIMAL_3_n0 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_3 +POSTHOOK: Input: default@decimal_3_n0 #### A masked pattern was here #### -1234567890.123456789000000000 -1234567890 -1234567890.123456789000000000 -1234567890 -4400.000000000000000000 4400 -4400.000000000000000000 4400 @@ -367,11 +367,11 @@ POSTHOOK: query: DROP TABLE DECIMAL_3_txt POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@decimal_3_txt POSTHOOK: Output: default@decimal_3_txt -PREHOOK: query: DROP TABLE DECIMAL_3 +PREHOOK: query: DROP TABLE DECIMAL_3_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal_3 -PREHOOK: Output: default@decimal_3 -POSTHOOK: query: DROP TABLE DECIMAL_3 +PREHOOK: Input: default@decimal_3_n0 +PREHOOK: Output: default@decimal_3_n0 +POSTHOOK: query: DROP TABLE DECIMAL_3_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal_3 -POSTHOOK: Output: default@decimal_3 +POSTHOOK: Input: default@decimal_3_n0 +POSTHOOK: Output: default@decimal_3_n0 diff --git a/ql/src/test/results/clientpositive/decimal_precision.q.out b/ql/src/test/results/clientpositive/decimal_precision.q.out index bf168b093a..921d86bff9 100644 --- a/ql/src/test/results/clientpositive/decimal_precision.q.out +++ b/ql/src/test/results/clientpositive/decimal_precision.q.out @@ -1,36 +1,36 @@ -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_PRECISION_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE DECIMAL_PRECISION(`dec` decimal(20,10)) +PREHOOK: query: CREATE TABLE DECIMAL_PRECISION_n0(`dec` decimal(20,10)) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL_PRECISION -POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION(`dec` decimal(20,10)) +PREHOOK: Output: default@DECIMAL_PRECISION_n0 +POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION_n0(`dec` decimal(20,10)) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL_PRECISION -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION +POSTHOOK: Output: default@DECIMAL_PRECISION_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@decimal_precision -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION +PREHOOK: Output: default@decimal_precision_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@decimal_precision -PREHOOK: query: SELECT * FROM DECIMAL_PRECISION ORDER BY `dec` +POSTHOOK: Output: default@decimal_precision_n0 +PREHOOK: query: SELECT * FROM DECIMAL_PRECISION_n0 ORDER BY `dec` PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_precision +PREHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### 
-POSTHOOK: query: SELECT * FROM DECIMAL_PRECISION ORDER BY `dec` +POSTHOOK: query: SELECT * FROM DECIMAL_PRECISION_n0 ORDER BY `dec` POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_precision +POSTHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### NULL NULL @@ -107,13 +107,13 @@ NULL 123456789.0123456789 1234567890.1234560000 1234567890.1234567890 -PREHOOK: query: SELECT `dec`, `dec` + 1, `dec` - 1 FROM DECIMAL_PRECISION ORDER BY `dec` +PREHOOK: query: SELECT `dec`, `dec` + 1, `dec` - 1 FROM DECIMAL_PRECISION_n0 ORDER BY `dec` PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_precision +PREHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT `dec`, `dec` + 1, `dec` - 1 FROM DECIMAL_PRECISION ORDER BY `dec` +POSTHOOK: query: SELECT `dec`, `dec` + 1, `dec` - 1 FROM DECIMAL_PRECISION_n0 ORDER BY `dec` POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_precision +POSTHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### NULL NULL NULL NULL NULL NULL @@ -190,13 +190,13 @@ NULL NULL NULL 123456789.0123456789 123456790.0123456789 123456788.0123456789 1234567890.1234560000 1234567891.1234560000 1234567889.1234560000 1234567890.1234567890 1234567891.1234567890 1234567889.1234567890 -PREHOOK: query: SELECT `dec`, `dec` * 2, `dec` / 3 FROM DECIMAL_PRECISION ORDER BY `dec` +PREHOOK: query: SELECT `dec`, `dec` * 2, `dec` / 3 FROM DECIMAL_PRECISION_n0 ORDER BY `dec` PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_precision +PREHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT `dec`, `dec` * 2, `dec` / 3 FROM DECIMAL_PRECISION ORDER BY `dec` +POSTHOOK: query: SELECT `dec`, `dec` * 2, `dec` / 3 FROM DECIMAL_PRECISION_n0 ORDER BY `dec` POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_precision +POSTHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### NULL NULL NULL NULL NULL NULL @@ -273,13 +273,13 @@ NULL NULL NULL 123456789.0123456789 246913578.0246913578 41152263.004115226300 1234567890.1234560000 2469135780.2469120000 411522630.041152000000 1234567890.1234567890 2469135780.2469135780 411522630.041152263000 -PREHOOK: query: SELECT `dec`, `dec` / 9 FROM DECIMAL_PRECISION ORDER BY `dec` +PREHOOK: query: SELECT `dec`, `dec` / 9 FROM DECIMAL_PRECISION_n0 ORDER BY `dec` PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_precision +PREHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT `dec`, `dec` / 9 FROM DECIMAL_PRECISION ORDER BY `dec` +POSTHOOK: query: SELECT `dec`, `dec` / 9 FROM DECIMAL_PRECISION_n0 ORDER BY `dec` POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_precision +POSTHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### NULL NULL NULL NULL @@ -356,13 +356,13 @@ NULL NULL 123456789.0123456789 13717421.001371742100 1234567890.1234560000 137174210.013717333333 1234567890.1234567890 137174210.013717421000 -PREHOOK: query: SELECT `dec`, `dec` / 27 FROM DECIMAL_PRECISION ORDER BY `dec` +PREHOOK: query: SELECT `dec`, `dec` / 27 FROM DECIMAL_PRECISION_n0 ORDER BY `dec` PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_precision +PREHOOK: Input: default@decimal_precision_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT `dec`, `dec` / 27 FROM DECIMAL_PRECISION ORDER BY `dec` +POSTHOOK: query: SELECT `dec`, `dec` / 27 FROM DECIMAL_PRECISION_n0 ORDER BY `dec` POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
 NULL	NULL
 NULL	NULL
@@ -439,13 +439,13 @@ NULL	NULL
 123456789.0123456789	4572473.6671239140333
 1234567890.1234560000	45724736.6712391111111
 1234567890.1234567890	45724736.6712391403333
-PREHOOK: query: SELECT `dec`, `dec` * `dec` FROM DECIMAL_PRECISION ORDER BY `dec`
+PREHOOK: query: SELECT `dec`, `dec` * `dec` FROM DECIMAL_PRECISION_n0 ORDER BY `dec`
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_precision
+PREHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT `dec`, `dec` * `dec` FROM DECIMAL_PRECISION ORDER BY `dec`
+POSTHOOK: query: SELECT `dec`, `dec` * `dec` FROM DECIMAL_PRECISION_n0 ORDER BY `dec`
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
 NULL	NULL
 NULL	NULL
@@ -522,9 +522,9 @@ NULL	NULL
 123456789.0123456789	15241578753238836.75019051998750191
 1234567890.1234560000	1524157875323881726.87092138393600000
 1234567890.1234567890	1524157875323883675.01905199875019052
-PREHOOK: query: EXPLAIN SELECT avg(`dec`), sum(`dec`) FROM DECIMAL_PRECISION
+PREHOOK: query: EXPLAIN SELECT avg(`dec`), sum(`dec`) FROM DECIMAL_PRECISION_n0
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT avg(`dec`), sum(`dec`) FROM DECIMAL_PRECISION
+POSTHOOK: query: EXPLAIN SELECT avg(`dec`), sum(`dec`) FROM DECIMAL_PRECISION_n0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -535,7 +535,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: decimal_precision
+            alias: decimal_precision_n0
             Statistics: Num rows: 1 Data size: 26610 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: dec (type: decimal(20,10))
@@ -575,98 +575,98 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-PREHOOK: query: SELECT avg(`dec`), sum(`dec`) FROM DECIMAL_PRECISION
+PREHOOK: query: SELECT avg(`dec`), sum(`dec`) FROM DECIMAL_PRECISION_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_precision
+PREHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT avg(`dec`), sum(`dec`) FROM DECIMAL_PRECISION
+POSTHOOK: query: SELECT avg(`dec`), sum(`dec`) FROM DECIMAL_PRECISION_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
 88499534.575865762206451613	2743485571.8518386284
-PREHOOK: query: SELECT `dec` * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1
+PREHOOK: query: SELECT `dec` * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION_n0 LIMIT 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_precision
+PREHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT `dec` * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1
+POSTHOOK: query: SELECT `dec` * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION_n0 LIMIT 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
 NULL
-PREHOOK: query: SELECT * from DECIMAL_PRECISION WHERE `dec` > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1
+PREHOOK: query: SELECT * from DECIMAL_PRECISION_n0 WHERE `dec` > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_precision
+PREHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * from DECIMAL_PRECISION WHERE `dec` > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1
+POSTHOOK: query: SELECT * from DECIMAL_PRECISION_n0 WHERE `dec` > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-PREHOOK: query: SELECT `dec` * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1
+PREHOOK: query: SELECT `dec` * 12345678901234567890.12345678 FROM DECIMAL_PRECISION_n0 LIMIT 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_precision
+PREHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT `dec` * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1
+POSTHOOK: query: SELECT `dec` * 12345678901234567890.12345678 FROM DECIMAL_PRECISION_n0 LIMIT 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
 NULL
-PREHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
+PREHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_precision
+PREHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
+POSTHOOK: query: SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
 12345678901234567890.123456780000000000
-PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
+PREHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_precision
+PREHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION
+POSTHOOK: query: SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
 75
-PREHOOK: query: DROP TABLE DECIMAL_PRECISION
+PREHOOK: query: DROP TABLE DECIMAL_PRECISION_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_precision
-PREHOOK: Output: default@decimal_precision
-POSTHOOK: query: DROP TABLE DECIMAL_PRECISION
+PREHOOK: Input: default@decimal_precision_n0
+PREHOOK: Output: default@decimal_precision_n0
+POSTHOOK: query: DROP TABLE DECIMAL_PRECISION_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_precision
-POSTHOOK: Output: default@decimal_precision
-PREHOOK: query: CREATE TABLE DECIMAL_PRECISION(`dec` decimal(38,18))
+POSTHOOK: Input: default@decimal_precision_n0
+POSTHOOK: Output: default@decimal_precision_n0
+PREHOOK: query: CREATE TABLE DECIMAL_PRECISION_n0(`dec` decimal(38,18))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_PRECISION
-POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION(`dec` decimal(38,18))
+PREHOOK: Output: default@DECIMAL_PRECISION_n0
+POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION_n0(`dec` decimal(38,18))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_PRECISION
-PREHOOK: query: INSERT INTO DECIMAL_PRECISION VALUES(98765432109876543210.12345), (98765432109876543210.12345)
+POSTHOOK: Output: default@DECIMAL_PRECISION_n0
+PREHOOK: query: INSERT INTO DECIMAL_PRECISION_n0 VALUES(98765432109876543210.12345), (98765432109876543210.12345)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@decimal_precision
-POSTHOOK: query: INSERT INTO DECIMAL_PRECISION VALUES(98765432109876543210.12345), (98765432109876543210.12345)
+PREHOOK: Output: default@decimal_precision_n0
+POSTHOOK: query: INSERT INTO DECIMAL_PRECISION_n0 VALUES(98765432109876543210.12345), (98765432109876543210.12345)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@decimal_precision
-POSTHOOK: Lineage: decimal_precision.dec SCRIPT []
-PREHOOK: query: SELECT SUM(`dec`) FROM DECIMAL_PRECISION
+POSTHOOK: Output: default@decimal_precision_n0
+POSTHOOK: Lineage: decimal_precision_n0.dec SCRIPT []
+PREHOOK: query: SELECT SUM(`dec`) FROM DECIMAL_PRECISION_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_precision
+PREHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT SUM(`dec`) FROM DECIMAL_PRECISION
+POSTHOOK: query: SELECT SUM(`dec`) FROM DECIMAL_PRECISION_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
 #### A masked pattern was here ####
 NULL
-PREHOOK: query: DROP TABLE DECIMAL_PRECISION
+PREHOOK: query: DROP TABLE DECIMAL_PRECISION_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_precision
-PREHOOK: Output: default@decimal_precision
-POSTHOOK: query: DROP TABLE DECIMAL_PRECISION
+PREHOOK: Input: default@decimal_precision_n0
+PREHOOK: Output: default@decimal_precision_n0
+POSTHOOK: query: DROP TABLE DECIMAL_PRECISION_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_precision
-POSTHOOK: Output: default@decimal_precision
+POSTHOOK: Input: default@decimal_precision_n0
+POSTHOOK: Output: default@decimal_precision_n0
diff --git a/ql/src/test/results/clientpositive/decimal_stats.q.out b/ql/src/test/results/clientpositive/decimal_stats.q.out
index 63b16bc3a1..de3b66daaa 100644
--- a/ql/src/test/results/clientpositive/decimal_stats.q.out
+++ b/ql/src/test/results/clientpositive/decimal_stats.q.out
@@ -1,53 +1,53 @@
-PREHOOK: query: drop table if exists decimal_1
+PREHOOK: query: drop table if exists decimal_1_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists decimal_1
+POSTHOOK: query: drop table if exists decimal_1_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table decimal_1 (t decimal(4,2), u decimal(5), v decimal)
+PREHOOK: query: create table decimal_1_n1 (t decimal(4,2), u decimal(5), v decimal)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@decimal_1
-POSTHOOK: query: create table decimal_1 (t decimal(4,2), u decimal(5), v decimal)
+PREHOOK: Output: default@decimal_1_n1
+POSTHOOK: query: create table decimal_1_n1 (t decimal(4,2), u decimal(5), v decimal)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@decimal_1
-PREHOOK: query: desc decimal_1
+POSTHOOK: Output: default@decimal_1_n1
+PREHOOK: query: desc decimal_1_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@decimal_1
-POSTHOOK: query: desc decimal_1
+PREHOOK: Input: default@decimal_1_n1
+POSTHOOK: query: desc decimal_1_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@decimal_1
+POSTHOOK: Input: default@decimal_1_n1
 t	decimal(4,2)
 u	decimal(5,0)
 v	decimal(10,0)
-PREHOOK: query: insert overwrite table decimal_1
+PREHOOK: query: insert overwrite table decimal_1_n1
 select cast('17.29' as decimal(4,2)), 3.1415926BD, null from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@decimal_1
-POSTHOOK: query: insert overwrite table decimal_1
+PREHOOK: Output: default@decimal_1_n1
+POSTHOOK: query: insert overwrite table decimal_1_n1
 select cast('17.29' as decimal(4,2)), 3.1415926BD, null from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@decimal_1
-POSTHOOK: Lineage: decimal_1.t SIMPLE []
-POSTHOOK: Lineage: decimal_1.u EXPRESSION []
-POSTHOOK: Lineage: decimal_1.v EXPRESSION []
-PREHOOK: query: analyze table decimal_1 compute statistics for columns
+POSTHOOK: Output: default@decimal_1_n1
+POSTHOOK: Lineage: decimal_1_n1.t SIMPLE []
+POSTHOOK: Lineage: decimal_1_n1.u EXPRESSION []
+POSTHOOK: Lineage: decimal_1_n1.v EXPRESSION []
+PREHOOK: query: analyze table decimal_1_n1 compute statistics for columns
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@decimal_1
-PREHOOK: Output: default@decimal_1
+PREHOOK: Input: default@decimal_1_n1
+PREHOOK: Output: default@decimal_1_n1
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table decimal_1 compute statistics for columns
+POSTHOOK: query: analyze table decimal_1_n1 compute statistics for columns
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@decimal_1
-POSTHOOK: Output: default@decimal_1
+POSTHOOK: Input: default@decimal_1_n1
+POSTHOOK: Output: default@decimal_1_n1
 #### A masked pattern was here ####
-PREHOOK: query: desc formatted decimal_1 v
+PREHOOK: query: desc formatted decimal_1_n1 v
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@decimal_1
-POSTHOOK: query: desc formatted decimal_1 v
+PREHOOK: Input: default@decimal_1_n1
+POSTHOOK: query: desc formatted decimal_1_n1 v
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@decimal_1
+POSTHOOK: Input: default@decimal_1_n1
 col_name	v
 data_type	decimal(10,0)
 min	
@@ -61,9 +61,9 @@ num_falses
 bitVector	HL
 comment	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"t\":\"true\",\"u\":\"true\",\"v\":\"true\"}}
-PREHOOK: query: explain select * from decimal_1 order by t limit 100
+PREHOOK: query: explain select * from decimal_1_n1 order by t limit 100
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from decimal_1 order by t limit 100
+POSTHOOK: query: explain select * from decimal_1_n1 order by t limit 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -74,7 +74,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: decimal_1
+            alias: decimal_1_n1
            Statistics: Num rows: 500 Data size: 112112 Basic stats: COMPLETE Column stats: COMPLETE
            Select Operator
              expressions: t (type: decimal(4,2)), u (type: decimal(5,0)), v (type: decimal(10,0))
@@ -109,11 +109,11 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-PREHOOK: query: drop table decimal_1
+PREHOOK: query: drop table decimal_1_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_1
-PREHOOK: Output: default@decimal_1
-POSTHOOK: query: drop table decimal_1
+PREHOOK: Input: default@decimal_1_n1
+PREHOOK: Output: default@decimal_1_n1
+POSTHOOK: query: drop table decimal_1_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_1
-POSTHOOK: Output: default@decimal_1
+POSTHOOK: Input: default@decimal_1_n1
+POSTHOOK: Output: default@decimal_1_n1
diff --git a/ql/src/test/results/clientpositive/decimal_trailing.q.out b/ql/src/test/results/clientpositive/decimal_trailing.q.out
index 1b70737819..070de2b0ba 100644
--- a/ql/src/test/results/clientpositive/decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/decimal_trailing.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING
+PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING
+POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE DECIMAL_TRAILING (
+PREHOOK: query: CREATE TABLE DECIMAL_TRAILING_n0 (
 id int,
 a decimal(10,4),
 b decimal(15,8)
@@ -12,8 +12,8 @@ ROW FORMAT DELIMITED
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_TRAILING
-POSTHOOK: query: CREATE TABLE DECIMAL_TRAILING (
+PREHOOK: Output: default@DECIMAL_TRAILING_n0
+POSTHOOK: query: CREATE TABLE DECIMAL_TRAILING_n0 (
 id int,
 a decimal(10,4),
 b decimal(15,8)
@@ -23,22 +23,22 @@ ROW FORMAT DELIMITED
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_TRAILING
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING
+POSTHOOK: Output: default@DECIMAL_TRAILING_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@decimal_trailing
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING
+PREHOOK: Output: default@decimal_trailing_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@decimal_trailing
-PREHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id
+POSTHOOK: Output: default@decimal_trailing_n0
+PREHOOK: query: SELECT * FROM DECIMAL_TRAILING_n0 ORDER BY id
 PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_trailing
+PREHOOK: Input: default@decimal_trailing_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id
+POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING_n0 ORDER BY id
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_trailing
+POSTHOOK: Input: default@decimal_trailing_n0
 #### A masked pattern was here ####
 0	0.0000	0.00000000
 1	0.0000	0.00000000
@@ -70,11 +70,11 @@ POSTHOOK: Input: default@decimal_trailing
 27	0.0000	0.00000000
 28	12313.2000	134134.31252500
 29	99999.9990	134134.31242553
-PREHOOK: query: DROP TABLE DECIMAL_TRAILING
+PREHOOK: query: DROP TABLE DECIMAL_TRAILING_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_trailing
-PREHOOK: Output: default@decimal_trailing
-POSTHOOK: query: DROP TABLE DECIMAL_TRAILING
+PREHOOK: Input: default@decimal_trailing_n0
+PREHOOK: Output: default@decimal_trailing_n0
+POSTHOOK: query: DROP TABLE DECIMAL_TRAILING_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_trailing
-POSTHOOK: Output: default@decimal_trailing
+POSTHOOK: Input: default@decimal_trailing_n0
+POSTHOOK: Output: default@decimal_trailing_n0
diff --git a/ql/src/test/results/clientpositive/default_file_format.q.out b/ql/src/test/results/clientpositive/default_file_format.q.out
index 11909e15a2..0adf5ae741 100644
--- a/ql/src/test/results/clientpositive/default_file_format.q.out
+++ b/ql/src/test/results/clientpositive/default_file_format.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: create table t (c int)
+PREHOOK: query: create table t_n2 (c int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t
-POSTHOOK: query: create table t (c int)
+PREHOOK: Output: default@t_n2
+POSTHOOK: query: create table t_n2 (c int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t
+POSTHOOK: Output: default@t_n2
 PREHOOK: query: create table o (c int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -18,22 +18,22 @@ POSTHOOK: Output: default@o
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@e
+PREHOOK: Output: default@e_n1
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@e
+POSTHOOK: Output: default@e_n1
 #### A masked pattern was here ####
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@i
+PREHOOK: Output: default@i_n0
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@i
+POSTHOOK: Output: default@i_n0
 PREHOOK: query: create table io (c int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -52,12 +52,12 @@ POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@e2
-PREHOOK: query: describe formatted t
+PREHOOK: query: describe formatted t_n2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t
-POSTHOOK: query: describe formatted t
+PREHOOK: Input: default@t_n2
+POSTHOOK: query: describe formatted t_n2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t
+POSTHOOK: Input: default@t_n2
 # col_name	data_type	comment
 c	int
@@ -154,12 +154,12 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: describe formatted e
+PREHOOK: query: describe formatted e_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@e
-POSTHOOK: query: describe formatted e
+PREHOOK: Input: default@e_n1
+POSTHOOK: query: describe formatted e_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@e
+POSTHOOK: Input: default@e_n1
 # col_name	data_type	comment
 c	int
@@ -184,12 +184,12 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: describe formatted i
+PREHOOK: query: describe formatted i_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@i
-POSTHOOK: query: describe formatted i
+PREHOOK: Input: default@i_n0
+POSTHOOK: query: describe formatted i_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@i
+POSTHOOK: Input: default@i_n0
 # col_name	data_type	comment
 c	int
@@ -250,14 +250,14 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: drop table t
+PREHOOK: query: drop table t_n2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t
-PREHOOK: Output: default@t
-POSTHOOK: query: drop table t
+PREHOOK: Input: default@t_n2
+PREHOOK: Output: default@t_n2
+POSTHOOK: query: drop table t_n2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t
-POSTHOOK: Output: default@t
+POSTHOOK: Input: default@t_n2
+POSTHOOK: Output: default@t_n2
 PREHOOK: query: drop table o
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@o
@@ -274,22 +274,22 @@ POSTHOOK: query: drop table io
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@io
 POSTHOOK: Output: default@io
-PREHOOK: query: drop table e
+PREHOOK: query: drop table e_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@e
-PREHOOK: Output: default@e
-POSTHOOK: query: drop table e
+PREHOOK: Input: default@e_n1
+PREHOOK: Output: default@e_n1
+POSTHOOK: query: drop table e_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@e
-POSTHOOK: Output: default@e
-PREHOOK: query: drop table i
+POSTHOOK: Input: default@e_n1
+POSTHOOK: Output: default@e_n1
+PREHOOK: query: drop table i_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
-POSTHOOK: query: drop table i
+PREHOOK: Input: default@i_n0
+PREHOOK: Output: default@i_n0
+POSTHOOK: query: drop table i_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
+POSTHOOK: Input: default@i_n0
+POSTHOOK: Output: default@i_n0
 PREHOOK: query: drop table e2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@e2
@@ -298,14 +298,14 @@ POSTHOOK: query: drop table e2
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@e2
 POSTHOOK: Output: default@e2
-PREHOOK: query: create table t (c int)
+PREHOOK: query: create table t_n2 (c int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t
-POSTHOOK: query: create table t (c int)
+PREHOOK: Output: default@t_n2
+POSTHOOK: query: create table t_n2 (c int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t
+POSTHOOK: Output: default@t_n2
 PREHOOK: query: create table o (c int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -318,22 +318,22 @@ POSTHOOK: Output: default@o
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@e
+PREHOOK: Output: default@e_n1
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@e
+POSTHOOK: Output: default@e_n1
 #### A masked pattern was here ####
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@i
+PREHOOK: Output: default@i_n0
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@i
+POSTHOOK: Output: default@i_n0
 PREHOOK: query: create table io (c int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -352,12 +352,12 @@ POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@e2
-PREHOOK: query: describe formatted t
+PREHOOK: query: describe formatted t_n2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t
-POSTHOOK: query: describe formatted t
+PREHOOK: Input: default@t_n2
+POSTHOOK: query: describe formatted t_n2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t
+POSTHOOK: Input: default@t_n2
 # col_name	data_type	comment
 c	int
@@ -454,12 +454,12 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: describe formatted e
+PREHOOK: query: describe formatted e_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@e
-POSTHOOK: query: describe formatted e
+PREHOOK: Input: default@e_n1
+POSTHOOK: query: describe formatted e_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@e
+POSTHOOK: Input: default@e_n1
 # col_name	data_type	comment
 c	int
@@ -486,12 +486,12 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: describe formatted i
+PREHOOK: query: describe formatted i_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@i
-POSTHOOK: query: describe formatted i
+PREHOOK: Input: default@i_n0
+POSTHOOK: query: describe formatted i_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@i
+POSTHOOK: Input: default@i_n0
 # col_name	data_type	comment
 c	int
@@ -552,14 +552,14 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: drop table t
+PREHOOK: query: drop table t_n2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t
-PREHOOK: Output: default@t
-POSTHOOK: query: drop table t
+PREHOOK: Input: default@t_n2
+PREHOOK: Output: default@t_n2
+POSTHOOK: query: drop table t_n2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t
-POSTHOOK: Output: default@t
+POSTHOOK: Input: default@t_n2
+POSTHOOK: Output: default@t_n2
 PREHOOK: query: drop table o
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@o
@@ -576,22 +576,22 @@ POSTHOOK: query: drop table io
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@io
 POSTHOOK: Output: default@io
-PREHOOK: query: drop table e
+PREHOOK: query: drop table e_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@e
-PREHOOK: Output: default@e
-POSTHOOK: query: drop table e
+PREHOOK: Input: default@e_n1
+PREHOOK: Output: default@e_n1
+POSTHOOK: query: drop table e_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@e
-POSTHOOK: Output: default@e
-PREHOOK: query: drop table i
+POSTHOOK: Input: default@e_n1
+POSTHOOK: Output: default@e_n1
+PREHOOK: query: drop table i_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
-POSTHOOK: query: drop table i
+PREHOOK: Input: default@i_n0
+PREHOOK: Output: default@i_n0
+POSTHOOK: query: drop table i_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
+POSTHOOK: Input: default@i_n0
+POSTHOOK: Output: default@i_n0
 PREHOOK: query: drop table e2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@e2
diff --git a/ql/src/test/results/clientpositive/deleteAnalyze.q.out b/ql/src/test/results/clientpositive/deleteAnalyze.q.out
index 4925a12e48..d98114bff1 100644
--- a/ql/src/test/results/clientpositive/deleteAnalyze.q.out
+++ b/ql/src/test/results/clientpositive/deleteAnalyze.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table testdeci2(
+PREHOOK: query: create table testdeci2_n0(
 id int,
 amount decimal(10,3),
 sales_tax decimal(10,3),
@@ -7,8 +7,8 @@ item string)
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@testdeci2
-POSTHOOK: query: create table testdeci2(
+PREHOOK: Output: default@testdeci2_n0
+POSTHOOK: query: create table testdeci2_n0(
 id int,
 amount decimal(10,3),
 sales_tax decimal(10,3),
@@ -17,25 +17,25 @@ item string)
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@testdeci2
-PREHOOK: query: insert into table testdeci2 values(1,12.123,12345.123,'desk1'),(2,123.123,1234.123,'desk2')
+POSTHOOK: Output: default@testdeci2_n0
+PREHOOK: query: insert into table testdeci2_n0 values(1,12.123,12345.123,'desk1'),(2,123.123,1234.123,'desk2')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@testdeci2
-POSTHOOK: query: insert into table testdeci2 values(1,12.123,12345.123,'desk1'),(2,123.123,1234.123,'desk2')
+PREHOOK: Output: default@testdeci2_n0
+POSTHOOK: query: insert into table testdeci2_n0 values(1,12.123,12345.123,'desk1'),(2,123.123,1234.123,'desk2')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@testdeci2
-POSTHOOK: Lineage: testdeci2.amount SCRIPT []
-POSTHOOK: Lineage: testdeci2.id SCRIPT []
-POSTHOOK: Lineage: testdeci2.item SCRIPT []
-POSTHOOK: Lineage: testdeci2.sales_tax SCRIPT []
-PREHOOK: query: describe formatted testdeci2
+POSTHOOK: Output: default@testdeci2_n0
+POSTHOOK: Lineage: testdeci2_n0.amount SCRIPT []
+POSTHOOK: Lineage: testdeci2_n0.id SCRIPT []
+POSTHOOK: Lineage: testdeci2_n0.item SCRIPT []
+POSTHOOK: Lineage: testdeci2_n0.sales_tax SCRIPT []
+PREHOOK: query: describe formatted testdeci2_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@testdeci2
-POSTHOOK: query: describe formatted testdeci2
+PREHOOK: Input: default@testdeci2_n0
+POSTHOOK: query: describe formatted testdeci2_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@testdeci2
+POSTHOOK: Input: default@testdeci2_n0
 # col_name	data_type	comment
 id	int
 amount	decimal(10,3)
@@ -68,12 +68,12 @@ Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
 #### A masked pattern was here ####
-PREHOOK: query: describe formatted testdeci2 amount
+PREHOOK: query: describe formatted testdeci2_n0 amount
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@testdeci2
-POSTHOOK: query: describe formatted testdeci2 amount
+PREHOOK: Input: default@testdeci2_n0
+POSTHOOK: query: describe formatted testdeci2_n0 amount
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@testdeci2
+POSTHOOK: Input: default@testdeci2_n0
 col_name	amount
 data_type	decimal(10,3)
 min	12.123
@@ -87,22 +87,22 @@ num_falses
 bitVector	HL
 comment	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"amount\":\"true\",\"id\":\"true\",\"item\":\"true\",\"sales_tax\":\"true\"}}
-PREHOOK: query: analyze table testdeci2 compute statistics for columns
+PREHOOK: query: analyze table testdeci2_n0 compute statistics for columns
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@testdeci2
-PREHOOK: Output: default@testdeci2
+PREHOOK: Input: default@testdeci2_n0
+PREHOOK: Output: default@testdeci2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table testdeci2 compute statistics for columns
+POSTHOOK: query: analyze table testdeci2_n0 compute statistics for columns
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@testdeci2
-POSTHOOK: Output: default@testdeci2
+POSTHOOK: Input: default@testdeci2_n0
+POSTHOOK: Output: default@testdeci2_n0
 #### A masked pattern was here ####
-PREHOOK: query: describe formatted testdeci2
+PREHOOK: query: describe formatted testdeci2_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@testdeci2
-POSTHOOK: query: describe formatted testdeci2
+PREHOOK: Input: default@testdeci2_n0
+POSTHOOK: query: describe formatted testdeci2_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@testdeci2
+POSTHOOK: Input: default@testdeci2_n0
 # col_name	data_type	comment
 id	int
 amount	decimal(10,3)
@@ -134,28 +134,28 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: analyze table testdeci2 compute statistics for columns
+PREHOOK: query: analyze table testdeci2_n0 compute statistics for columns
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@testdeci2
-PREHOOK: Output: default@testdeci2
+PREHOOK: Input: default@testdeci2_n0
+PREHOOK: Output: default@testdeci2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table testdeci2 compute statistics for columns
+POSTHOOK: query: analyze table testdeci2_n0 compute statistics for columns
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@testdeci2
-POSTHOOK: Output: default@testdeci2
+POSTHOOK: Input: default@testdeci2_n0
+POSTHOOK: Output: default@testdeci2_n0
 #### A masked pattern was here ####
 PREHOOK: query: explain select s.id, coalesce(d.amount,0) as sales, coalesce(d.sales_tax,0) as tax
-from testdeci2 s join testdeci2 d
+from testdeci2_n0 s join testdeci2_n0 d
 on s.item=d.item and d.id=2
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select s.id, coalesce(d.amount,0) as sales, coalesce(d.sales_tax,0) as tax
-from testdeci2 s join testdeci2 d
+from testdeci2_n0 s join testdeci2_n0 d
 on s.item=d.item and d.id=2
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
diff --git a/ql/src/test/results/clientpositive/desc_tbl_part_cols.q.out b/ql/src/test/results/clientpositive/desc_tbl_part_cols.q.out
index 196f85b528..aae6b5ef93 100644
--- a/ql/src/test/results/clientpositive/desc_tbl_part_cols.q.out
+++ b/ql/src/test/results/clientpositive/desc_tbl_part_cols.q.out
@@ -1,17 +1,17 @@
-PREHOOK: query: create table t1 (a int, b string) partitioned by (c int, d string)
+PREHOOK: query: create table t1_n23 (a int, b string) partitioned by (c int, d string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 (a int, b string) partitioned by (c int, d string)
+PREHOOK: Output: default@t1_n23
+POSTHOOK: query: create table t1_n23 (a int, b string) partitioned by (c int, d string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: describe t1
+POSTHOOK: Output: default@t1_n23
+PREHOOK: query: describe t1_n23
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: describe t1
+PREHOOK: Input: default@t1_n23
+POSTHOOK: query: describe t1_n23
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n23
 a	int
 b	string
 c	int
@@ -21,12 +21,12 @@ d	string
 # col_name	data_type	comment
 c	int
 d	string
-PREHOOK: query: describe t1
+PREHOOK: query: describe t1_n23
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: describe t1
+PREHOOK: Input: default@t1_n23
+POSTHOOK: query: describe t1_n23
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n23
 a	int
 b	string
 c	int
diff --git a/ql/src/test/results/clientpositive/describe_comment_indent.q.out b/ql/src/test/results/clientpositive/describe_comment_indent.q.out
index d88ed12639..36c14c1041 100644
--- a/ql/src/test/results/clientpositive/describe_comment_indent.q.out
+++ b/ql/src/test/results/clientpositive/describe_comment_indent.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE test_table(
+PREHOOK: query: CREATE TABLE test_table_n13(
 col1 INT COMMENT 'col1 one line comment',
 col2 STRING COMMENT 'col2
 two lines comment',
@@ -9,8 +9,8 @@ COMMENT 'table comment
 two lines'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table
-POSTHOOK: query: CREATE TABLE test_table(
+PREHOOK: Output: default@test_table_n13
+POSTHOOK: query: CREATE TABLE test_table_n13(
 col1 INT COMMENT 'col1 one line comment',
 col2 STRING COMMENT 'col2
 two lines comment',
@@ -21,25 +21,25 @@ COMMENT 'table comment
 two lines'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table
-PREHOOK: query: DESCRIBE test_table
+POSTHOOK: Output: default@test_table_n13
+PREHOOK: query: DESCRIBE test_table_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE test_table
+PREHOOK: Input: default@test_table_n13
+POSTHOOK: query: DESCRIBE test_table_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n13
 col1	int	col1 one line comment
 col2	string	col2
 two lines comment
 col3	string	col3
 three lines comment
-PREHOOK: query: DESCRIBE FORMATTED test_table
+PREHOOK: query: DESCRIBE FORMATTED test_table_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table
+PREHOOK: Input: default@test_table_n13
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n13
 # col_name	data_type	comment
 col1	int	col1 one line comment
 col2	string	col2
diff --git a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out
index 001abb6305..41c59006a1 100644
--- a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out
+++ b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: DROP VIEW view_partitioned
+PREHOOK: query: DROP VIEW view_partitioned_n0
 PREHOOK: type: DROPVIEW
-POSTHOOK: query: DROP VIEW view_partitioned
+POSTHOOK: query: DROP VIEW view_partitioned_n0
 POSTHOOK: type: DROPVIEW
-PREHOOK: query: CREATE VIEW view_partitioned
+PREHOOK: query: CREATE VIEW view_partitioned_n0
 PARTITIONED ON (value)
 AS
 SELECT key, value
@@ -11,8 +11,8 @@ WHERE key=86
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@view_partitioned
-POSTHOOK: query: CREATE VIEW view_partitioned
+PREHOOK: Output: default@view_partitioned_n0
+POSTHOOK: query: CREATE VIEW view_partitioned_n0
 PARTITIONED ON (value)
 AS
 SELECT key, value
@@ -21,33 +21,33 @@ WHERE key=86
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@view_partitioned
-POSTHOOK: Lineage: view_partitioned.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER VIEW view_partitioned
+POSTHOOK: Output: default@view_partitioned_n0
+POSTHOOK: Lineage: view_partitioned_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: ALTER VIEW view_partitioned_n0
 ADD PARTITION (value='val_86')
 PREHOOK: type: ALTERTABLE_ADDPARTS
 PREHOOK: Input: default@src
-PREHOOK: Input: default@view_partitioned
-PREHOOK: Output: default@view_partitioned
-POSTHOOK: query: ALTER VIEW view_partitioned
+PREHOOK: Input: default@view_partitioned_n0
+PREHOOK: Output: default@view_partitioned_n0
+POSTHOOK: query: ALTER VIEW view_partitioned_n0
 ADD PARTITION (value='val_86')
 POSTHOOK: type: ALTERTABLE_ADDPARTS
 POSTHOOK: Input: default@src
-POSTHOOK: Input: default@view_partitioned
-POSTHOOK: Output: default@view_partitioned
-POSTHOOK: Output: default@view_partitioned@value=val_86
+POSTHOOK: Input: default@view_partitioned_n0
+POSTHOOK: Output: default@view_partitioned_n0
+POSTHOOK: Output: default@view_partitioned_n0@value=val_86
-PREHOOK: query: DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86')
+PREHOOK: query: DESCRIBE FORMATTED view_partitioned_n0 PARTITION (value='val_86')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@view_partitioned
-POSTHOOK: query: DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86')
+PREHOOK: Input: default@view_partitioned_n0
+POSTHOOK: query: DESCRIBE FORMATTED view_partitioned_n0 PARTITION (value='val_86')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@view_partitioned
+POSTHOOK: Input: default@view_partitioned_n0
 {"columns":[{"name":"key","type":"string"}]}
-PREHOOK: query: DROP VIEW view_partitioned
+PREHOOK: query: DROP VIEW view_partitioned_n0
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@view_partitioned
-PREHOOK: Output: default@view_partitioned
-POSTHOOK: query: DROP VIEW view_partitioned
+PREHOOK: Input: default@view_partitioned_n0
+PREHOOK: Output: default@view_partitioned_n0
+POSTHOOK: query: DROP VIEW view_partitioned_n0
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@view_partitioned
-POSTHOOK: Output: default@view_partitioned
+POSTHOOK: Input: default@view_partitioned_n0
+POSTHOOK: Output: default@view_partitioned_n0
diff --git a/ql/src/test/results/clientpositive/describe_table.q.out b/ql/src/test/results/clientpositive/describe_table.q.out
index 23236853ab..363fabe072 100644
--- a/ql/src/test/results/clientpositive/describe_table.q.out
+++ b/ql/src/test/results/clientpositive/describe_table.q.out
@@ -528,14 +528,14 @@ PREHOOK: Input: database:name2
 POSTHOOK: query: use name2
 POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:name2
-PREHOOK: query: CREATE TABLE IF NOT EXISTS table1 (col1 int, col2 string)
+PREHOOK: query: CREATE TABLE IF NOT EXISTS table1_n17 (col1 int, col2 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:name2
-PREHOOK: Output: name2@table1
-POSTHOOK: query: CREATE TABLE IF NOT EXISTS table1 (col1 int, col2 string)
+PREHOOK: Output: name2@table1_n17
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS table1_n17 (col1 int, col2 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:name2
-POSTHOOK: Output: name2@table1
+POSTHOOK: Output: name2@table1_n17
 PREHOOK: query: use default
 PREHOOK: type: SWITCHDATABASE
 PREHOOK: Input: database:default
@@ -719,28 +719,28 @@ POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: name1@name2
 name4	string	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"name3\":\"true\",\"name4\":\"true\"}}
-PREHOOK: query: DESCRIBE name2.table1
+PREHOOK: query: DESCRIBE name2.table1_n17
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE name2.table1
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE name2.table1_n17
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col1	int
 col2	string
-PREHOOK: query: DESCRIBE name2.table1 col1
+PREHOOK: query: DESCRIBE name2.table1_n17 col1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE name2.table1 col1
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE name2.table1_n17 col1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col1	int	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\"}}
-PREHOOK: query: DESCRIBE name2.table1 col2
+PREHOOK: query: DESCRIBE name2.table1_n17 col2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE name2.table1 col2
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE name2.table1_n17 col2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col2	string	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\"}}
 PREHOOK: query: use name2
@@ -749,62 +749,62 @@ PREHOOK: Input: database:name2
 POSTHOOK: query: use name2
 POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:name2
-PREHOOK: query: DESCRIBE table1
+PREHOOK: query: DESCRIBE table1_n17
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE table1
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE table1_n17
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col1	int
 col2	string
-PREHOOK: query: DESCRIBE table1 col1
+PREHOOK: query: DESCRIBE table1_n17 col1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE table1 col1
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE table1_n17 col1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col1	int	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\"}}
-PREHOOK: query: DESCRIBE table1 col2
+PREHOOK: query: DESCRIBE table1_n17 col2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE table1 col2
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE table1_n17 col2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col2	string	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\"}}
-PREHOOK: query: DESCRIBE name2.table1
+PREHOOK: query: DESCRIBE name2.table1_n17
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE name2.table1
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE name2.table1_n17
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col1	int
 col2	string
-PREHOOK: query: DESCRIBE name2.table1 col1
+PREHOOK: query: DESCRIBE name2.table1_n17 col1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE name2.table1 col1
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE name2.table1_n17 col1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col1	int	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\"}}
-PREHOOK: query: DESCRIBE name2.table1 col2
+PREHOOK: query: DESCRIBE name2.table1_n17 col2
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: name2@table1
-POSTHOOK: query: DESCRIBE name2.table1 col2
+PREHOOK: Input: name2@table1_n17
+POSTHOOK: query: DESCRIBE name2.table1_n17 col2
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: name2@table1
+POSTHOOK: Input: name2@table1_n17
 col2	string	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\"}}
-PREHOOK: query: DROP TABLE IF EXISTS table1
+PREHOOK: query: DROP TABLE IF EXISTS table1_n17
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: name2@table1
-PREHOOK: Output: name2@table1
-POSTHOOK: query: DROP TABLE IF EXISTS table1
+PREHOOK: Input: name2@table1_n17
+PREHOOK: Output: name2@table1_n17
+POSTHOOK: query: DROP TABLE IF EXISTS table1_n17
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: name2@table1
-POSTHOOK: Output: name2@table1
+POSTHOOK: Input: name2@table1_n17
+POSTHOOK: Output: name2@table1_n17
 PREHOOK: query: use name1
 PREHOOK: type: SWITCHDATABASE
 PREHOOK: Input: database:name1
@@ -833,9 +833,9 @@ PREHOOK: Input: database:name2
 POSTHOOK: query: use name2
 POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:name2
-PREHOOK: query: DROP TABLE IF EXISTS table1
+PREHOOK: query: DROP TABLE IF EXISTS table1_n17
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS table1
+POSTHOOK: query: DROP TABLE IF EXISTS table1_n17
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: DROP DATABASE IF EXISTS name1
 PREHOOK: type: DROPDATABASE
diff --git a/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out b/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out
index 51cc2778a4..4d5144c835 100644
--- a/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out
+++ b/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: DROP TABLE IF EXISTS UserVisits_web_text_none
+PREHOOK: query: DROP TABLE IF EXISTS UserVisits_web_text_none_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS UserVisits_web_text_none
+POSTHOOK: query: DROP TABLE IF EXISTS UserVisits_web_text_none_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE UserVisits_web_text_none (
+PREHOOK: query: CREATE TABLE UserVisits_web_text_none_n0 (
 sourceIP string,
 destURL string,
 visitDate string,
@@ -15,8 +15,8 @@ PREHOOK: query: CREATE TABLE UserVisits_web_text_none (
 row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@UserVisits_web_text_none
-POSTHOOK: query: CREATE TABLE UserVisits_web_text_none (
+PREHOOK: Output: default@UserVisits_web_text_none_n0
+POSTHOOK: query: CREATE TABLE UserVisits_web_text_none_n0 (
 sourceIP string,
 destURL string,
 visitDate string,
@@ -29,28 +29,28 @@ POSTHOOK: query: CREATE TABLE UserVisits_web_text_none (
 row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@UserVisits_web_text_none
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none
+POSTHOOK: Output: default@UserVisits_web_text_none_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@uservisits_web_text_none
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none
+PREHOOK: Output: default@uservisits_web_text_none_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@uservisits_web_text_none
-PREHOOK: query: desc extended UserVisits_web_text_none sourceIP
+POSTHOOK: Output: default@uservisits_web_text_none_n0
+PREHOOK: query: desc extended UserVisits_web_text_none_n0 sourceIP
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@uservisits_web_text_none
-POSTHOOK: query: desc extended UserVisits_web_text_none sourceIP
+PREHOOK: Input: default@uservisits_web_text_none_n0
+POSTHOOK: query: desc extended UserVisits_web_text_none_n0 sourceIP
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Input: default@uservisits_web_text_none_n0
 sourceIP	string	from deserializer
-PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP
+PREHOOK: query: desc formatted UserVisits_web_text_none_n0 sourceIP
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@uservisits_web_text_none
-POSTHOOK: query: desc formatted UserVisits_web_text_none sourceIP
+PREHOOK: Input: default@uservisits_web_text_none_n0
+POSTHOOK: query: desc formatted UserVisits_web_text_none_n0 sourceIP
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Input: default@uservisits_web_text_none_n0
 col_name	sourceIP
 data_type	string
 min	
@@ -64,10 +64,10 @@ num_falses
 bitVector	
 comment	from deserializer
 PREHOOK: query: explain
-analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
+analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 PREHOOK: type: ANALYZE_TABLE
 POSTHOOK: query: explain
-analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
+analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 POSTHOOK: type: ANALYZE_TABLE
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -78,7 +78,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: uservisits_web_text_none
+            alias: uservisits_web_text_none_n0
            Statistics: Num rows: 1 Data size: 70600 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: sourceip (type: string), adrevenue (type: float), avgtimeonsite (type: int)
@@ -113,13 +113,13 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
-          Table: default.uservisits_web_text_none
+          Table: default.uservisits_web_text_none_n0

 PREHOOK: query: explain extended
-analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
+analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 PREHOOK: type: ANALYZE_TABLE
 POSTHOOK: query: explain extended
-analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
+analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 POSTHOOK: type: ANALYZE_TABLE
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -130,9 +130,9 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: uservisits_web_text_none
+            alias: uservisits_web_text_none_n0
            Statistics: Num rows: 1 Data size: 70600 Basic stats: COMPLETE Column stats: NONE
-            Statistics Aggregation Key Prefix: default.uservisits_web_text_none/
+            Statistics Aggregation Key Prefix: default.uservisits_web_text_none_n0/
            GatherStats: true
            Select Operator
              expressions: sourceip (type: string), adrevenue (type: float), avgtimeonsite (type: int)
@@ -155,7 +155,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: uservisits_web_text_none
+            base file name: uservisits_web_text_none_n0
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -167,11 +167,11 @@ STAGE PLANS:
              columns.types string:string:string:float:string:string:string:string:int
              field.delim |
 #### A masked pattern was here ####
-              name default.uservisits_web_text_none
+              name default.uservisits_web_text_none_n0
              numFiles 1
              numRows 0
              rawDataSize 0
-              serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite}
+              serialization.ddl struct uservisits_web_text_none_n0 { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite}
              serialization.format |
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 7060
@@ -189,20 +189,20 @@ STAGE PLANS:
                columns.types string:string:string:float:string:string:string:string:int
                field.delim |
 #### A masked pattern was here ####
-                name default.uservisits_web_text_none
+                name default.uservisits_web_text_none_n0
                numFiles 1
                numRows 0
                rawDataSize 0
-                serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite}
+                serialization.ddl struct uservisits_web_text_none_n0 { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite}
                serialization.format |
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 7060
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.uservisits_web_text_none
-            name: default.uservisits_web_text_none
+              name: default.uservisits_web_text_none_n0
+            name: default.uservisits_web_text_none_n0
       Truncated Path -> Alias:
-        /uservisits_web_text_none [uservisits_web_text_none]
+        /uservisits_web_text_none_n0 [uservisits_web_text_none_n0]
       Needs Tagging: false
       Reduce Operator Tree:
        Group By Operator
@@ -236,29 +236,29 @@ STAGE PLANS:
  Stage: Stage-1
    Stats Work
      Basic Stats Work:
-          Stats Aggregation Key Prefix: default.uservisits_web_text_none/
+          Stats Aggregation Key Prefix: default.uservisits_web_text_none_n0/
      Column Stats Desc:
          Columns: sourceIP, avgTimeOnSite, adRevenue
          Column Types: string, int, float
-          Table: default.uservisits_web_text_none
+          Table: default.uservisits_web_text_none_n0
      Is Table Level Stats: true
-PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
+PREHOOK: query: analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@uservisits_web_text_none
-PREHOOK: Output: default@uservisits_web_text_none
+PREHOOK: Input: default@uservisits_web_text_none_n0
+PREHOOK: Output: default@uservisits_web_text_none_n0
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
+POSTHOOK: query: analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@uservisits_web_text_none
-POSTHOOK: Output: default@uservisits_web_text_none
+POSTHOOK: Input: default@uservisits_web_text_none_n0
+POSTHOOK: Output: default@uservisits_web_text_none_n0
 #### A masked pattern was here ####
-PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP
+PREHOOK: query: desc formatted UserVisits_web_text_none_n0 sourceIP
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@uservisits_web_text_none
-POSTHOOK: query: desc formatted UserVisits_web_text_none sourceIP
+PREHOOK: Input: default@uservisits_web_text_none_n0
+POSTHOOK: query: desc formatted UserVisits_web_text_none_n0 sourceIP
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Input: default@uservisits_web_text_none_n0
 col_name	sourceIP
 data_type	string
 min	
@@ -272,12 +272,12 @@ num_falses
 bitVector	HL
 comment	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}
-PREHOOK: query: desc formatted UserVisits_web_text_none avgTimeOnSite
+PREHOOK: query: desc formatted UserVisits_web_text_none_n0 avgTimeOnSite
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@uservisits_web_text_none
-POSTHOOK: query: desc formatted UserVisits_web_text_none avgTimeOnSite
+PREHOOK: Input: default@uservisits_web_text_none_n0
+POSTHOOK: query: desc formatted UserVisits_web_text_none_n0 avgTimeOnSite
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Input: default@uservisits_web_text_none_n0
 col_name	avgTimeOnSite
 data_type	int
 min	1
@@ -291,12 +291,12 @@ num_falses
 bitVector	HL
 comment	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}
-PREHOOK: query: desc formatted UserVisits_web_text_none adRevenue
+PREHOOK: query: desc formatted UserVisits_web_text_none_n0 adRevenue
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@uservisits_web_text_none
-POSTHOOK: query: desc formatted UserVisits_web_text_none adRevenue
+PREHOOK: Input: default@uservisits_web_text_none_n0
+POSTHOOK: query: desc formatted UserVisits_web_text_none_n0 adRevenue
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Input: default@uservisits_web_text_none_n0
 col_name	adRevenue
 data_type	float
 min	13.099044799804688
@@ -310,7 +310,7 @@ num_falses
 bitVector	HL
 comment	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}
-PREHOOK: query: CREATE TABLE empty_tab(
+PREHOOK: query: CREATE TABLE empty_tab_n0(
 a int,
 b double,
 c string,
@@ -319,8 +319,8 @@ PREHOOK: query: CREATE TABLE empty_tab(
 row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@empty_tab
-POSTHOOK: query: CREATE TABLE empty_tab(
+PREHOOK: Output: default@empty_tab_n0
+POSTHOOK: query: CREATE TABLE empty_tab_n0(
 a int,
 b double,
 c string,
@@ -329,13 +329,13 @@ POSTHOOK: query: CREATE TABLE empty_tab(
 row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@empty_tab
-PREHOOK: query: desc formatted empty_tab a
+POSTHOOK: Output: default@empty_tab_n0
+PREHOOK: query: desc formatted empty_tab_n0 a
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@empty_tab
-POSTHOOK: query: desc formatted empty_tab a
+PREHOOK: Input: default@empty_tab_n0
+POSTHOOK: query: desc formatted empty_tab_n0 a
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@empty_tab
+POSTHOOK: Input: default@empty_tab_n0
 col_name	a
 data_type	int
 min	
@@ -350,10 +350,10 @@ bitVector
 comment	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\",\"e\":\"true\"}}
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\",\"e\":\"true\"}} PREHOOK: query: explain -analyze table empty_tab compute statistics for columns a,b,c,d,e +analyze table empty_tab_n0 compute statistics for columns a,b,c,d,e PREHOOK: type: ANALYZE_TABLE POSTHOOK: query: explain -analyze table empty_tab compute statistics for columns a,b,c,d,e +analyze table empty_tab_n0 compute statistics for columns a,b,c,d,e POSTHOOK: type: ANALYZE_TABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -364,7 +364,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: empty_tab + alias: empty_tab_n0 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: a (type: int), b (type: double), c (type: string), d (type: boolean), e (type: binary) @@ -399,24 +399,24 @@ STAGE PLANS: Column Stats Desc: Columns: a, b, c, d, e Column Types: int, double, string, boolean, binary - Table: default.empty_tab + Table: default.empty_tab_n0 -PREHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e +PREHOOK: query: analyze table empty_tab_n0 compute statistics for columns a,b,c,d,e PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@empty_tab -PREHOOK: Output: default@empty_tab +PREHOOK: Input: default@empty_tab_n0 +PREHOOK: Output: default@empty_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e +POSTHOOK: query: analyze table empty_tab_n0 compute statistics for columns a,b,c,d,e POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@empty_tab -POSTHOOK: Output: default@empty_tab +POSTHOOK: Input: default@empty_tab_n0 +POSTHOOK: Output: default@empty_tab_n0 #### A masked pattern was here #### -PREHOOK: query: desc formatted empty_tab a +PREHOOK: query: desc formatted empty_tab_n0 a PREHOOK: type: DESCTABLE -PREHOOK: Input: default@empty_tab -POSTHOOK: query: desc formatted empty_tab a +PREHOOK: Input: default@empty_tab_n0 +POSTHOOK: query: desc formatted empty_tab_n0 a POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@empty_tab +POSTHOOK: Input: default@empty_tab_n0 col_name a data_type int min 0 @@ -430,12 +430,12 @@ num_falses bitVector comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\",\"e\":\"true\"}} -PREHOOK: query: desc formatted empty_tab b +PREHOOK: query: desc formatted empty_tab_n0 b PREHOOK: type: DESCTABLE -PREHOOK: Input: default@empty_tab -POSTHOOK: query: desc formatted empty_tab b +PREHOOK: Input: default@empty_tab_n0 +POSTHOOK: query: desc formatted empty_tab_n0 b POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@empty_tab +POSTHOOK: Input: default@empty_tab_n0 col_name b data_type double min 0.0 @@ -461,7 +461,7 @@ PREHOOK: Input: database:test POSTHOOK: query: USE test POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test -PREHOOK: query: CREATE TABLE UserVisits_web_text_none ( +PREHOOK: query: CREATE TABLE UserVisits_web_text_none_n0 ( sourceIP string, destURL string, visitDate string, @@ -474,8 +474,8 @@ PREHOOK: query: CREATE TABLE UserVisits_web_text_none ( row format delimited fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:test -PREHOOK: Output: test@UserVisits_web_text_none -POSTHOOK: query: CREATE TABLE UserVisits_web_text_none ( +PREHOOK: Output: test@UserVisits_web_text_none_n0 +POSTHOOK: query: CREATE TABLE 
UserVisits_web_text_none_n0 ( sourceIP string, destURL string, visitDate string, @@ -488,43 +488,43 @@ POSTHOOK: query: CREATE TABLE UserVisits_web_text_none ( row format delimited fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test -POSTHOOK: Output: test@UserVisits_web_text_none -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none +POSTHOOK: Output: test@UserVisits_web_text_none_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: test@uservisits_web_text_none -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none +PREHOOK: Output: test@uservisits_web_text_none_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: test@uservisits_web_text_none -PREHOOK: query: desc extended UserVisits_web_text_none sourceIP +POSTHOOK: Output: test@uservisits_web_text_none_n0 +PREHOOK: query: desc extended UserVisits_web_text_none_n0 sourceIP PREHOOK: type: DESCTABLE -PREHOOK: Input: test@uservisits_web_text_none -POSTHOOK: query: desc extended UserVisits_web_text_none sourceIP +PREHOOK: Input: test@uservisits_web_text_none_n0 +POSTHOOK: query: desc extended UserVisits_web_text_none_n0 sourceIP POSTHOOK: type: DESCTABLE -POSTHOOK: Input: test@uservisits_web_text_none +POSTHOOK: Input: test@uservisits_web_text_none_n0 sourceIP string from deserializer -PREHOOK: query: desc extended test.UserVisits_web_text_none sourceIP +PREHOOK: query: desc extended test.UserVisits_web_text_none_n0 sourceIP PREHOOK: type: DESCTABLE -PREHOOK: Input: test@uservisits_web_text_none -POSTHOOK: query: desc extended test.UserVisits_web_text_none sourceIP +PREHOOK: Input: test@uservisits_web_text_none_n0 +POSTHOOK: query: desc extended test.UserVisits_web_text_none_n0 sourceIP POSTHOOK: type: DESCTABLE -POSTHOOK: Input: test@uservisits_web_text_none +POSTHOOK: Input: test@uservisits_web_text_none_n0 sourceIP string from deserializer -PREHOOK: query: desc extended default.UserVisits_web_text_none sourceIP +PREHOOK: query: desc extended default.UserVisits_web_text_none_n0 sourceIP PREHOOK: type: DESCTABLE -PREHOOK: Input: default@uservisits_web_text_none -POSTHOOK: query: desc extended default.UserVisits_web_text_none sourceIP +PREHOOK: Input: default@uservisits_web_text_none_n0 +POSTHOOK: query: desc extended default.UserVisits_web_text_none_n0 sourceIP POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@uservisits_web_text_none +POSTHOOK: Input: default@uservisits_web_text_none_n0 sourceIP string from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}} -PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP +PREHOOK: query: desc formatted UserVisits_web_text_none_n0 sourceIP PREHOOK: type: DESCTABLE -PREHOOK: Input: test@uservisits_web_text_none -POSTHOOK: query: desc formatted UserVisits_web_text_none sourceIP +PREHOOK: Input: test@uservisits_web_text_none_n0 +POSTHOOK: query: desc formatted UserVisits_web_text_none_n0 sourceIP POSTHOOK: type: DESCTABLE -POSTHOOK: Input: test@uservisits_web_text_none +POSTHOOK: Input: test@uservisits_web_text_none_n0 col_name sourceIP data_type string 
min @@ -537,12 +537,12 @@ num_trues num_falses bitVector comment from deserializer -PREHOOK: query: desc formatted test.UserVisits_web_text_none sourceIP +PREHOOK: query: desc formatted test.UserVisits_web_text_none_n0 sourceIP PREHOOK: type: DESCTABLE -PREHOOK: Input: test@uservisits_web_text_none -POSTHOOK: query: desc formatted test.UserVisits_web_text_none sourceIP +PREHOOK: Input: test@uservisits_web_text_none_n0 +POSTHOOK: query: desc formatted test.UserVisits_web_text_none_n0 sourceIP POSTHOOK: type: DESCTABLE -POSTHOOK: Input: test@uservisits_web_text_none +POSTHOOK: Input: test@uservisits_web_text_none_n0 col_name sourceIP data_type string min @@ -555,12 +555,12 @@ num_trues num_falses bitVector comment from deserializer -PREHOOK: query: desc formatted default.UserVisits_web_text_none sourceIP +PREHOOK: query: desc formatted default.UserVisits_web_text_none_n0 sourceIP PREHOOK: type: DESCTABLE -PREHOOK: Input: default@uservisits_web_text_none -POSTHOOK: query: desc formatted default.UserVisits_web_text_none sourceIP +PREHOOK: Input: default@uservisits_web_text_none_n0 +POSTHOOK: query: desc formatted default.UserVisits_web_text_none_n0 sourceIP POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@uservisits_web_text_none +POSTHOOK: Input: default@uservisits_web_text_none_n0 col_name sourceIP data_type string min @@ -574,30 +574,30 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}} -PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sKeyword +PREHOOK: query: analyze table UserVisits_web_text_none_n0 compute statistics for columns sKeyword PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: test@uservisits_web_text_none +PREHOOK: Input: test@uservisits_web_text_none_n0 #### A masked pattern was here #### -PREHOOK: Output: test@uservisits_web_text_none -POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sKeyword +PREHOOK: Output: test@uservisits_web_text_none_n0 +POSTHOOK: query: analyze table UserVisits_web_text_none_n0 compute statistics for columns sKeyword POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: test@uservisits_web_text_none +POSTHOOK: Input: test@uservisits_web_text_none_n0 #### A masked pattern was here #### -POSTHOOK: Output: test@uservisits_web_text_none -PREHOOK: query: desc extended UserVisits_web_text_none sKeyword +POSTHOOK: Output: test@uservisits_web_text_none_n0 +PREHOOK: query: desc extended UserVisits_web_text_none_n0 sKeyword PREHOOK: type: DESCTABLE -PREHOOK: Input: test@uservisits_web_text_none -POSTHOOK: query: desc extended UserVisits_web_text_none sKeyword +PREHOOK: Input: test@uservisits_web_text_none_n0 +POSTHOOK: query: desc extended UserVisits_web_text_none_n0 sKeyword POSTHOOK: type: DESCTABLE -POSTHOOK: Input: test@uservisits_web_text_none +POSTHOOK: Input: test@uservisits_web_text_none_n0 sKeyword string from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"skeyword\":\"true\"}} -PREHOOK: query: desc formatted UserVisits_web_text_none sKeyword +PREHOOK: query: desc formatted UserVisits_web_text_none_n0 sKeyword PREHOOK: type: DESCTABLE -PREHOOK: Input: test@uservisits_web_text_none -POSTHOOK: query: desc formatted UserVisits_web_text_none sKeyword +PREHOOK: Input: test@uservisits_web_text_none_n0 +POSTHOOK: query: desc formatted UserVisits_web_text_none_n0 sKeyword POSTHOOK: type: DESCTABLE -POSTHOOK: Input: 
test@uservisits_web_text_none +POSTHOOK: Input: test@uservisits_web_text_none_n0 col_name sKeyword data_type string min @@ -611,12 +611,12 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"skeyword\":\"true\"}} -PREHOOK: query: desc formatted test.UserVisits_web_text_none sKeyword +PREHOOK: query: desc formatted test.UserVisits_web_text_none_n0 sKeyword PREHOOK: type: DESCTABLE -PREHOOK: Input: test@uservisits_web_text_none -POSTHOOK: query: desc formatted test.UserVisits_web_text_none sKeyword +PREHOOK: Input: test@uservisits_web_text_none_n0 +POSTHOOK: query: desc formatted test.UserVisits_web_text_none_n0 sKeyword POSTHOOK: type: DESCTABLE -POSTHOOK: Input: test@uservisits_web_text_none +POSTHOOK: Input: test@uservisits_web_text_none_n0 col_name sKeyword data_type string min diff --git a/ql/src/test/results/clientpositive/distinct_stats.q.out b/ql/src/test/results/clientpositive/distinct_stats.q.out index eac0bb94eb..f72710d28d 100644 --- a/ql/src/test/results/clientpositive/distinct_stats.q.out +++ b/ql/src/test/results/clientpositive/distinct_stats.q.out @@ -1,36 +1,36 @@ -PREHOOK: query: create table t1 (a string, b string) +PREHOOK: query: create table t1_n4 (a string, b string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (a string, b string) +PREHOOK: Output: default@t1_n4 +POSTHOOK: query: create table t1_n4 (a string, b string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: insert into table t1 select * from src +POSTHOOK: Output: default@t1_n4 +PREHOOK: query: insert into table t1_n4 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@t1 -POSTHOOK: query: insert into table t1 select * from src +PREHOOK: Output: default@t1_n4 +POSTHOOK: query: insert into table t1_n4 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: analyze table t1 compute statistics for columns a,b +POSTHOOK: Output: default@t1_n4 +POSTHOOK: Lineage: t1_n4.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n4.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: analyze table t1_n4 compute statistics for columns a,b PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 +PREHOOK: Input: default@t1_n4 +PREHOOK: Output: default@t1_n4 #### A masked pattern was here #### -POSTHOOK: query: analyze table t1 compute statistics for columns a,b +POSTHOOK: query: analyze table t1_n4 compute statistics for columns a,b POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n4 +POSTHOOK: Output: default@t1_n4 #### A masked pattern was here #### PREHOOK: query: explain -select count(distinct b) from t1 group by a +select count(distinct b) from t1_n4 group by a PREHOOK: type: QUERY POSTHOOK: query: explain -select count(distinct b) from t1 group by a +select count(distinct b) from t1_n4 group by a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -41,7 +41,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 
+ alias: t1_n4 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -84,10 +84,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select distinct(b) from t1 +select distinct(b) from t1_n4 PREHOOK: type: QUERY POSTHOOK: query: explain -select distinct(b) from t1 +select distinct(b) from t1_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -98,7 +98,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n4 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: b (type: string) @@ -136,10 +136,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select a, count(*) from t1 group by a +select a, count(*) from t1_n4 group by a PREHOOK: type: QUERY POSTHOOK: query: explain -select a, count(*) from t1 group by a +select a, count(*) from t1_n4 group by a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -150,7 +150,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n4 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string) @@ -190,11 +190,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n4 +PREHOOK: Output: default@t1_n4 +POSTHOOK: query: drop table t1_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n4 +POSTHOOK: Output: default@t1_n4 diff --git a/ql/src/test/results/clientpositive/distinct_windowing.q.out b/ql/src/test/results/clientpositive/distinct_windowing.q.out index 51f165c243..39b87e8b87 100644 --- a/ql/src/test/results/clientpositive/distinct_windowing.q.out +++ b/ql/src/test/results/clientpositive/distinct_windowing.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table over10k +PREHOOK: query: drop table over10k_n15 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table over10k +POSTHOOK: query: drop table over10k_n15 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n15( t tinyint, si smallint, i int, @@ -18,8 +18,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n15 +POSTHOOK: query: create table over10k_n15( t tinyint, si smallint, i int, @@ -35,20 +35,20 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n15 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n15 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n15 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n15 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k +POSTHOOK: Output: default@over10k_n15 PREHOOK: 
query: explain -select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10 +select distinct first_value(t) over ( partition by si order by i ) from over10k_n15 limit 10 PREHOOK: type: QUERY POSTHOOK: query: explain -select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10 +select distinct first_value(t) over ( partition by si order by i ) from over10k_n15 limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -60,7 +60,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: over10k + alias: over10k_n15 Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: si (type: smallint), i (type: int) @@ -144,13 +144,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10 +PREHOOK: query: select distinct first_value(t) over ( partition by si order by i ) from over10k_n15 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n15 #### A masked pattern was here #### -POSTHOOK: query: select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10 +POSTHOOK: query: select distinct first_value(t) over ( partition by si order by i ) from over10k_n15 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n15 #### A masked pattern was here #### -2 -1 @@ -164,11 +164,11 @@ POSTHOOK: Input: default@over10k 8 PREHOOK: query: explain select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10 +from over10k_n15 limit 10 PREHOOK: type: QUERY POSTHOOK: query: explain select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10 +from over10k_n15 limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -180,7 +180,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: over10k + alias: over10k_n15 Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: si (type: smallint), i (type: int) @@ -264,14 +264,14 @@ STAGE PLANS: ListSink PREHOOK: query: select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10 +from over10k_n15 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n15 #### A masked pattern was here #### POSTHOOK: query: select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10 +from over10k_n15 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n15 #### A masked pattern was here #### 65536 65537 @@ -286,12 +286,12 @@ POSTHOOK: Input: default@over10k PREHOOK: query: explain select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50 +from over10k_n15 limit 50 PREHOOK: type: QUERY POSTHOOK: query: explain select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50 +from over10k_n15 limit 50 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -303,7 +303,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: over10k + alias: over10k_n15 Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key 
expressions: si (type: smallint), i (type: int) @@ -395,15 +395,15 @@ STAGE PLANS: PREHOOK: query: select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50 +from over10k_n15 limit 50 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n15 #### A masked pattern was here #### POSTHOOK: query: select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50 +from over10k_n15 limit 50 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n15 #### A masked pattern was here #### 65536 -2 65536 2 diff --git a/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out b/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out index 7093e448c8..c1fe3a2ece 100644 --- a/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out +++ b/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table over10k +PREHOOK: query: drop table over10k_n14 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table over10k +POSTHOOK: query: drop table over10k_n14 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n14( t tinyint, si smallint, i int, @@ -18,8 +18,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n14 +POSTHOOK: query: create table over10k_n14( t tinyint, si smallint, i int, @@ -35,20 +35,20 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n14 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n14 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n14 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n14 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k +POSTHOOK: Output: default@over10k_n14 PREHOOK: query: explain -select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10 +select distinct first_value(t) over ( partition by si order by i ) from over10k_n14 limit 10 PREHOOK: type: QUERY POSTHOOK: query: explain -select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10 +select distinct first_value(t) over ( partition by si order by i ) from over10k_n14 limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -60,7 +60,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: over10k + alias: over10k_n14 Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: si (type: smallint), i (type: int) @@ -144,13 +144,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10 +PREHOOK: query: select distinct first_value(t) over ( 
partition by si order by i ) from over10k_n14 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n14 #### A masked pattern was here #### -POSTHOOK: query: select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10 +POSTHOOK: query: select distinct first_value(t) over ( partition by si order by i ) from over10k_n14 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n14 #### A masked pattern was here #### -2 -1 @@ -164,11 +164,11 @@ POSTHOOK: Input: default@over10k 8 PREHOOK: query: explain select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10 +from over10k_n14 limit 10 PREHOOK: type: QUERY POSTHOOK: query: explain select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10 +from over10k_n14 limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -180,7 +180,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: over10k + alias: over10k_n14 Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: si (type: smallint), i (type: int) @@ -264,14 +264,14 @@ STAGE PLANS: ListSink PREHOOK: query: select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10 +from over10k_n14 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n14 #### A masked pattern was here #### POSTHOOK: query: select distinct last_value(i) over ( partition by si order by i ) -from over10k limit 10 +from over10k_n14 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n14 #### A masked pattern was here #### 65536 65537 @@ -286,12 +286,12 @@ POSTHOOK: Input: default@over10k PREHOOK: query: explain select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50 +from over10k_n14 limit 50 PREHOOK: type: QUERY POSTHOOK: query: explain select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50 +from over10k_n14 limit 50 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -303,7 +303,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: over10k + alias: over10k_n14 Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: si (type: smallint), i (type: int) @@ -395,15 +395,15 @@ STAGE PLANS: PREHOOK: query: select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50 +from over10k_n14 limit 50 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n14 #### A masked pattern was here #### POSTHOOK: query: select distinct last_value(i) over ( partition by si order by i ), first_value(t) over ( partition by si order by i ) -from over10k limit 50 +from over10k_n14 limit 50 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n14 #### A masked pattern was here #### 65536 -2 65536 2 @@ -457,14 +457,14 @@ POSTHOOK: Input: default@over10k 65537 59 PREHOOK: query: explain select si, max(f) mf, rank() over ( partition by si order by mf ) -FROM over10k +FROM over10k_n14 GROUP BY si HAVING max(f) > 0 limit 50 PREHOOK: type: QUERY POSTHOOK: query: 
explain select si, max(f) mf, rank() over ( partition by si order by mf ) -FROM over10k +FROM over10k_n14 GROUP BY si HAVING max(f) > 0 limit 50 @@ -479,7 +479,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: over10k + alias: over10k_n14 Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: si (type: smallint), f (type: float) @@ -573,20 +573,20 @@ STAGE PLANS: ListSink PREHOOK: query: select si, max(f) mf, rank() over ( partition by si order by mf ) -FROM over10k +FROM over10k_n14 GROUP BY si HAVING max(f) > 0 limit 50 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n14 #### A masked pattern was here #### POSTHOOK: query: select si, max(f) mf, rank() over ( partition by si order by mf ) -FROM over10k +FROM over10k_n14 GROUP BY si HAVING max(f) > 0 limit 50 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n14 #### A masked pattern was here #### 256 94.87 1 257 98.0 1 @@ -640,12 +640,12 @@ POSTHOOK: Input: default@over10k 305 96.1 1 PREHOOK: query: explain select distinct si, rank() over ( partition by si order by i ) -FROM over10k +FROM over10k_n14 limit 50 PREHOOK: type: QUERY POSTHOOK: query: explain select distinct si, rank() over ( partition by si order by i ) -FROM over10k +FROM over10k_n14 limit 50 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -658,7 +658,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: over10k + alias: over10k_n14 Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: si (type: smallint), i (type: int) @@ -743,16 +743,16 @@ STAGE PLANS: ListSink PREHOOK: query: select distinct si, rank() over ( partition by si order by i ) -FROM over10k +FROM over10k_n14 limit 50 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n14 #### A masked pattern was here #### POSTHOOK: query: select distinct si, rank() over ( partition by si order by i ) -FROM over10k +FROM over10k_n14 limit 50 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n14 #### A masked pattern was here #### 256 1 256 2 diff --git a/ql/src/test/results/clientpositive/drop_database_removes_partition_dirs.q.out b/ql/src/test/results/clientpositive/drop_database_removes_partition_dirs.q.out index cb578e130d..a96b60b2d5 100644 --- a/ql/src/test/results/clientpositive/drop_database_removes_partition_dirs.q.out +++ b/ql/src/test/results/clientpositive/drop_database_removes_partition_dirs.q.out @@ -10,45 +10,45 @@ PREHOOK: Input: database:test_database POSTHOOK: query: USE test_database POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test_database -PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) +PREHOOK: query: CREATE TABLE test_table_n12 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:test_database -PREHOOK: Output: test_database@test_table -POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) +PREHOOK: Output: test_database@test_table_n12 +POSTHOOK: query: CREATE TABLE test_table_n12 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:test_database -POSTHOOK: Output: 
test_database@test_table -PREHOOK: query: ALTER TABLE test_table ADD PARTITION (part = '1') +POSTHOOK: Output: test_database@test_table_n12 +PREHOOK: query: ALTER TABLE test_table_n12 ADD PARTITION (part = '1') #### A masked pattern was here #### PREHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### -PREHOOK: Output: test_database@test_table -POSTHOOK: query: ALTER TABLE test_table ADD PARTITION (part = '1') +PREHOOK: Output: test_database@test_table_n12 +POSTHOOK: query: ALTER TABLE test_table_n12 ADD PARTITION (part = '1') #### A masked pattern was here #### POSTHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### -POSTHOOK: Output: test_database@test_table -POSTHOOK: Output: test_database@test_table@part=1 -PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: Output: test_database@test_table_n12 +POSTHOOK: Output: test_database@test_table_n12@part=1 +PREHOOK: query: INSERT OVERWRITE TABLE test_table_n12 PARTITION (part = '1') SELECT * FROM default.src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: test_database@test_table@part=1 -POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: Output: test_database@test_table_n12@part=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n12 PARTITION (part = '1') SELECT * FROM default.src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: test_database@test_table@part=1 -POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: test_database@test_table_n12@part=1 +POSTHOOK: Lineage: test_table_n12 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table_n12 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### PREHOOK: query: USE default @@ -61,10 +61,10 @@ PREHOOK: query: DROP DATABASE test_database CASCADE PREHOOK: type: DROPDATABASE PREHOOK: Input: database:test_database PREHOOK: Output: database:test_database -PREHOOK: Output: test_database@test_table +PREHOOK: Output: test_database@test_table_n12 POSTHOOK: query: DROP DATABASE test_database CASCADE POSTHOOK: type: DROPDATABASE POSTHOOK: Input: database:test_database POSTHOOK: Output: database:test_database -POSTHOOK: Output: test_database@test_table +POSTHOOK: Output: test_database@test_table_n12 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/drop_multi_partitions.q.out b/ql/src/test/results/clientpositive/drop_multi_partitions.q.out index 5a0f84a226..d1f01160c6 100644 --- a/ql/src/test/results/clientpositive/drop_multi_partitions.q.out +++ b/ql/src/test/results/clientpositive/drop_multi_partitions.q.out @@ -4,47 +4,47 @@ PREHOOK: Output: database:dmp POSTHOOK: query: create database dmp POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:dmp -PREHOOK: query: create table dmp.mp (a string) partitioned by (b string, c string) +PREHOOK: query: create table dmp.mp_n0 (a string) partitioned by (b string, c string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:dmp -PREHOOK: Output: dmp@mp -POSTHOOK: query: create table dmp.mp (a string) partitioned by (b string, c string) +PREHOOK: Output: dmp@mp_n0 +POSTHOOK: query: create table dmp.mp_n0 (a 
string) partitioned by (b string, c string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:dmp -POSTHOOK: Output: dmp@mp -PREHOOK: query: alter table dmp.mp add partition (b='1', c='1') +POSTHOOK: Output: dmp@mp_n0 +PREHOOK: query: alter table dmp.mp_n0 add partition (b='1', c='1') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: dmp@mp -POSTHOOK: query: alter table dmp.mp add partition (b='1', c='1') +PREHOOK: Output: dmp@mp_n0 +POSTHOOK: query: alter table dmp.mp_n0 add partition (b='1', c='1') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: dmp@mp -POSTHOOK: Output: dmp@mp@b=1/c=1 -PREHOOK: query: alter table dmp.mp add partition (b='1', c='2') +POSTHOOK: Output: dmp@mp_n0 +POSTHOOK: Output: dmp@mp_n0@b=1/c=1 +PREHOOK: query: alter table dmp.mp_n0 add partition (b='1', c='2') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: dmp@mp -POSTHOOK: query: alter table dmp.mp add partition (b='1', c='2') +PREHOOK: Output: dmp@mp_n0 +POSTHOOK: query: alter table dmp.mp_n0 add partition (b='1', c='2') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: dmp@mp -POSTHOOK: Output: dmp@mp@b=1/c=2 -PREHOOK: query: alter table dmp.mp add partition (b='2', c='2') +POSTHOOK: Output: dmp@mp_n0 +POSTHOOK: Output: dmp@mp_n0@b=1/c=2 +PREHOOK: query: alter table dmp.mp_n0 add partition (b='2', c='2') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: dmp@mp -POSTHOOK: query: alter table dmp.mp add partition (b='2', c='2') +PREHOOK: Output: dmp@mp_n0 +POSTHOOK: query: alter table dmp.mp_n0 add partition (b='2', c='2') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: dmp@mp -POSTHOOK: Output: dmp@mp@b=2/c=2 -PREHOOK: query: show partitions dmp.mp +POSTHOOK: Output: dmp@mp_n0 +POSTHOOK: Output: dmp@mp_n0@b=2/c=2 +PREHOOK: query: show partitions dmp.mp_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: dmp@mp -POSTHOOK: query: show partitions dmp.mp +PREHOOK: Input: dmp@mp_n0 +POSTHOOK: query: show partitions dmp.mp_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: dmp@mp +POSTHOOK: Input: dmp@mp_n0 b=1/c=1 b=1/c=2 b=2/c=2 -PREHOOK: query: explain extended alter table dmp.mp drop partition (b='1') +PREHOOK: query: explain extended alter table dmp.mp_n0 drop partition (b='1') PREHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: query: explain extended alter table dmp.mp drop partition (b='1') +POSTHOOK: query: explain extended alter table dmp.mp_n0 drop partition (b='1') POSTHOOK: type: ALTERTABLE_DROPPARTS STAGE DEPENDENCIES: Stage-0 is a root stage @@ -53,46 +53,46 @@ STAGE PLANS: Stage: Stage-0 Drop Table Operator: Drop Table - table: dmp.mp + table: dmp.mp_n0 -PREHOOK: query: alter table dmp.mp drop partition (b='1') +PREHOOK: query: alter table dmp.mp_n0 drop partition (b='1') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: dmp@mp -PREHOOK: Output: dmp@mp@b=1/c=1 -PREHOOK: Output: dmp@mp@b=1/c=2 -POSTHOOK: query: alter table dmp.mp drop partition (b='1') +PREHOOK: Input: dmp@mp_n0 +PREHOOK: Output: dmp@mp_n0@b=1/c=1 +PREHOOK: Output: dmp@mp_n0@b=1/c=2 +POSTHOOK: query: alter table dmp.mp_n0 drop partition (b='1') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: dmp@mp -POSTHOOK: Output: dmp@mp@b=1/c=1 -POSTHOOK: Output: dmp@mp@b=1/c=2 -PREHOOK: query: show partitions dmp.mp +POSTHOOK: Input: dmp@mp_n0 +POSTHOOK: Output: dmp@mp_n0@b=1/c=1 +POSTHOOK: Output: dmp@mp_n0@b=1/c=2 +PREHOOK: query: show partitions dmp.mp_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: dmp@mp -POSTHOOK: query: show partitions dmp.mp +PREHOOK: Input: dmp@mp_n0 +POSTHOOK: query: show partitions 
dmp.mp_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: dmp@mp +POSTHOOK: Input: dmp@mp_n0 b=2/c=2 -PREHOOK: query: alter table dmp.mp drop if exists partition (b='3') +PREHOOK: query: alter table dmp.mp_n0 drop if exists partition (b='3') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: dmp@mp -POSTHOOK: query: alter table dmp.mp drop if exists partition (b='3') +PREHOOK: Input: dmp@mp_n0 +POSTHOOK: query: alter table dmp.mp_n0 drop if exists partition (b='3') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: dmp@mp -PREHOOK: query: show partitions dmp.mp +POSTHOOK: Input: dmp@mp_n0 +PREHOOK: query: show partitions dmp.mp_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: dmp@mp -POSTHOOK: query: show partitions dmp.mp +PREHOOK: Input: dmp@mp_n0 +POSTHOOK: query: show partitions dmp.mp_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: dmp@mp +POSTHOOK: Input: dmp@mp_n0 b=2/c=2 -PREHOOK: query: drop table dmp.mp +PREHOOK: query: drop table dmp.mp_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: dmp@mp -PREHOOK: Output: dmp@mp -POSTHOOK: query: drop table dmp.mp +PREHOOK: Input: dmp@mp_n0 +PREHOOK: Output: dmp@mp_n0 +POSTHOOK: query: drop table dmp.mp_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: dmp@mp -POSTHOOK: Output: dmp@mp +POSTHOOK: Input: dmp@mp_n0 +POSTHOOK: Output: dmp@mp_n0 PREHOOK: query: drop database dmp PREHOOK: type: DROPDATABASE PREHOOK: Input: database:dmp diff --git a/ql/src/test/results/clientpositive/drop_partitions_filter.q.out b/ql/src/test/results/clientpositive/drop_partitions_filter.q.out index 4f607da1ab..2cbc05da5e 100644 --- a/ql/src/test/results/clientpositive/drop_partitions_filter.q.out +++ b/ql/src/test/results/clientpositive/drop_partitions_filter.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: create table ptestfilter (a string, b int) partitioned by (c string, d string) +PREHOOK: query: create table ptestfilter_n1 (a string, b int) partitioned by (c string, d string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: create table ptestfilter (a string, b int) partitioned by (c string, d string) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: create table ptestfilter_n1 (a string, b int) partitioned by (c string, d string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@ptestfilter -PREHOOK: query: describe ptestfilter +POSTHOOK: Output: default@ptestfilter_n1 +PREHOOK: query: describe ptestfilter_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: describe ptestfilter +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: describe ptestfilter_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 a string b int c string @@ -21,75 +21,75 @@ d string # col_name data_type comment c string d string -PREHOOK: query: alter table ptestfilter add partition (c='US', d=1) +PREHOOK: query: alter table ptestfilter_n1 add partition (c='US', d=1) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestfilter add partition (c='US', d=1) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 add partition (c='US', d=1) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=US/d=1 -PREHOOK: query: alter table ptestfilter add partition (c='US', d=2) +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: 
default@ptestfilter_n1@c=US/d=1 +PREHOOK: query: alter table ptestfilter_n1 add partition (c='US', d=2) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestfilter add partition (c='US', d=2) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 add partition (c='US', d=2) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=US/d=2 -PREHOOK: query: alter table ptestFilter add partition (c='Uganda', d=2) +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=US/d=2 +PREHOOK: query: alter table ptestFilter_n1 add partition (c='Uganda', d=2) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestFilter add partition (c='Uganda', d=2) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestFilter_n1 add partition (c='Uganda', d=2) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=Uganda/d=2 -PREHOOK: query: alter table ptestfilter add partition (c='Germany', d=2) +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Uganda/d=2 +PREHOOK: query: alter table ptestfilter_n1 add partition (c='Germany', d=2) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestfilter add partition (c='Germany', d=2) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 add partition (c='Germany', d=2) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=Germany/d=2 -PREHOOK: query: alter table ptestfilter add partition (c='Canada', d=3) +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Germany/d=2 +PREHOOK: query: alter table ptestfilter_n1 add partition (c='Canada', d=3) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestfilter add partition (c='Canada', d=3) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 add partition (c='Canada', d=3) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=Canada/d=3 -PREHOOK: query: alter table ptestfilter add partition (c='Russia', d=3) +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Canada/d=3 +PREHOOK: query: alter table ptestfilter_n1 add partition (c='Russia', d=3) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestfilter add partition (c='Russia', d=3) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 add partition (c='Russia', d=3) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=Russia/d=3 -PREHOOK: query: alter table ptestfilter add partition (c='Greece', d=2) +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Russia/d=3 +PREHOOK: query: alter table ptestfilter_n1 add partition (c='Greece', d=2) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestfilter add partition (c='Greece', d=2) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 add partition (c='Greece', d=2) 
POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=Greece/d=2 -PREHOOK: query: alter table ptestfilter add partition (c='India', d=3) +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Greece/d=2 +PREHOOK: query: alter table ptestfilter_n1 add partition (c='India', d=3) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestfilter add partition (c='India', d=3) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 add partition (c='India', d=3) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=India/d=3 -PREHOOK: query: alter table ptestfilter add partition (c='France', d=4) +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=India/d=3 +PREHOOK: query: alter table ptestfilter_n1 add partition (c='France', d=4) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: alter table ptestfilter add partition (c='France', d=4) +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 add partition (c='France', d=4) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=France/d=4 -PREHOOK: query: show partitions ptestfilter +POSTHOOK: Output: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=France/d=4 +PREHOOK: query: show partitions ptestfilter_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: show partitions ptestfilter +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: show partitions ptestfilter_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 c=Canada/d=3 c=France/d=4 c=Germany/d=2 @@ -99,20 +99,20 @@ c=Russia/d=3 c=US/d=1 c=US/d=2 c=Uganda/d=2 -PREHOOK: query: alter table ptestfilter drop partition (c='US', d<'2') +PREHOOK: query: alter table ptestfilter_n1 drop partition (c='US', d<'2') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@ptestfilter -PREHOOK: Output: default@ptestfilter@c=US/d=1 -POSTHOOK: query: alter table ptestfilter drop partition (c='US', d<'2') +PREHOOK: Input: default@ptestfilter_n1 +PREHOOK: Output: default@ptestfilter_n1@c=US/d=1 +POSTHOOK: query: alter table ptestfilter_n1 drop partition (c='US', d<'2') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=US/d=1 -PREHOOK: query: show partitions ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=US/d=1 +PREHOOK: query: show partitions ptestfilter_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: show partitions ptestfilter +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: show partitions ptestfilter_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 c=Canada/d=3 c=France/d=4 c=Germany/d=2 @@ -121,103 +121,103 @@ c=India/d=3 c=Russia/d=3 c=US/d=2 c=Uganda/d=2 -PREHOOK: query: alter table ptestfilter drop partition (c>='US', d<='2') +PREHOOK: query: alter table ptestfilter_n1 drop partition (c>='US', d<='2') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@ptestfilter -PREHOOK: Output: default@ptestfilter@c=US/d=2 -PREHOOK: Output: 
default@ptestfilter@c=Uganda/d=2 -POSTHOOK: query: alter table ptestfilter drop partition (c>='US', d<='2') +PREHOOK: Input: default@ptestfilter_n1 +PREHOOK: Output: default@ptestfilter_n1@c=US/d=2 +PREHOOK: Output: default@ptestfilter_n1@c=Uganda/d=2 +POSTHOOK: query: alter table ptestfilter_n1 drop partition (c>='US', d<='2') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=US/d=2 -POSTHOOK: Output: default@ptestfilter@c=Uganda/d=2 -PREHOOK: query: show partitions ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=US/d=2 +POSTHOOK: Output: default@ptestfilter_n1@c=Uganda/d=2 +PREHOOK: query: show partitions ptestfilter_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: show partitions ptestfilter +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: show partitions ptestfilter_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 c=Canada/d=3 c=France/d=4 c=Germany/d=2 c=Greece/d=2 c=India/d=3 c=Russia/d=3 -PREHOOK: query: alter table ptestfilter drop partition (c >'India') +PREHOOK: query: alter table ptestfilter_n1 drop partition (c >'India') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@ptestfilter -PREHOOK: Output: default@ptestfilter@c=Russia/d=3 -POSTHOOK: query: alter table ptestfilter drop partition (c >'India') +PREHOOK: Input: default@ptestfilter_n1 +PREHOOK: Output: default@ptestfilter_n1@c=Russia/d=3 +POSTHOOK: query: alter table ptestfilter_n1 drop partition (c >'India') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=Russia/d=3 -PREHOOK: query: show partitions ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Russia/d=3 +PREHOOK: query: show partitions ptestfilter_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: show partitions ptestfilter +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: show partitions ptestfilter_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 c=Canada/d=3 c=France/d=4 c=Germany/d=2 c=Greece/d=2 c=India/d=3 -PREHOOK: query: alter table ptestfilter drop partition (c >='India'), +PREHOOK: query: alter table ptestfilter_n1 drop partition (c >='India'), partition (c='Greece', d='2') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@ptestfilter -PREHOOK: Output: default@ptestfilter@c=Greece/d=2 -PREHOOK: Output: default@ptestfilter@c=India/d=3 -POSTHOOK: query: alter table ptestfilter drop partition (c >='India'), +PREHOOK: Input: default@ptestfilter_n1 +PREHOOK: Output: default@ptestfilter_n1@c=Greece/d=2 +PREHOOK: Output: default@ptestfilter_n1@c=India/d=3 +POSTHOOK: query: alter table ptestfilter_n1 drop partition (c >='India'), partition (c='Greece', d='2') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=Greece/d=2 -POSTHOOK: Output: default@ptestfilter@c=India/d=3 -PREHOOK: query: show partitions ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Greece/d=2 +POSTHOOK: Output: default@ptestfilter_n1@c=India/d=3 +PREHOOK: query: show partitions ptestfilter_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: show partitions ptestfilter +PREHOOK: Input: 
default@ptestfilter_n1 +POSTHOOK: query: show partitions ptestfilter_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 c=Canada/d=3 c=France/d=4 c=Germany/d=2 -PREHOOK: query: alter table ptestfilter drop partition (c != 'France') +PREHOOK: query: alter table ptestfilter_n1 drop partition (c != 'France') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@ptestfilter -PREHOOK: Output: default@ptestfilter@c=Canada/d=3 -PREHOOK: Output: default@ptestfilter@c=Germany/d=2 -POSTHOOK: query: alter table ptestfilter drop partition (c != 'France') +PREHOOK: Input: default@ptestfilter_n1 +PREHOOK: Output: default@ptestfilter_n1@c=Canada/d=3 +PREHOOK: Output: default@ptestfilter_n1@c=Germany/d=2 +POSTHOOK: query: alter table ptestfilter_n1 drop partition (c != 'France') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@ptestfilter -POSTHOOK: Output: default@ptestfilter@c=Canada/d=3 -POSTHOOK: Output: default@ptestfilter@c=Germany/d=2 -PREHOOK: query: show partitions ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1@c=Canada/d=3 +POSTHOOK: Output: default@ptestfilter_n1@c=Germany/d=2 +PREHOOK: query: show partitions ptestfilter_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: show partitions ptestfilter +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: show partitions ptestfilter_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 c=France/d=4 -PREHOOK: query: alter table ptestfilter drop if exists partition (c='US') +PREHOOK: query: alter table ptestfilter_n1 drop if exists partition (c='US') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: alter table ptestfilter drop if exists partition (c='US') +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: alter table ptestfilter_n1 drop if exists partition (c='US') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@ptestfilter -PREHOOK: query: show partitions ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 +PREHOOK: query: show partitions ptestfilter_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@ptestfilter -POSTHOOK: query: show partitions ptestfilter +PREHOOK: Input: default@ptestfilter_n1 +POSTHOOK: query: show partitions ptestfilter_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 c=France/d=4 -PREHOOK: query: drop table ptestfilter +PREHOOK: query: drop table ptestfilter_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@ptestfilter -PREHOOK: Output: default@ptestfilter -POSTHOOK: query: drop table ptestfilter +PREHOOK: Input: default@ptestfilter_n1 +PREHOOK: Output: default@ptestfilter_n1 +POSTHOOK: query: drop table ptestfilter_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@ptestfilter -POSTHOOK: Output: default@ptestfilter +POSTHOOK: Input: default@ptestfilter_n1 +POSTHOOK: Output: default@ptestfilter_n1 diff --git a/ql/src/test/results/clientpositive/drop_partitions_filter2.q.out b/ql/src/test/results/clientpositive/drop_partitions_filter2.q.out index fd95b448ab..1ebf38a096 100644 --- a/ql/src/test/results/clientpositive/drop_partitions_filter2.q.out +++ b/ql/src/test/results/clientpositive/drop_partitions_filter2.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: create table ptestfilter (a string, b int) partitioned by (c int, d int) +PREHOOK: query: create table 
ptestfilter_n0 (a string, b int) partitioned by (c int, d int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: create table ptestfilter (a string, b int) partitioned by (c int, d int)
+PREHOOK: Output: default@ptestfilter_n0
+POSTHOOK: query: create table ptestfilter_n0 (a string, b int) partitioned by (c int, d int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ptestfilter
-PREHOOK: query: describe ptestfilter
+POSTHOOK: Output: default@ptestfilter_n0
+PREHOOK: query: describe ptestfilter_n0
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: describe ptestfilter
+PREHOOK: Input: default@ptestfilter_n0
+POSTHOOK: query: describe ptestfilter_n0
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
a string
b int
c int
@@ -21,120 +21,120 @@ d int
# col_name data_type comment
c int
d int
-PREHOOK: query: alter table ptestfilter add partition (c=1, d=1)
+PREHOOK: query: alter table ptestfilter_n0 add partition (c=1, d=1)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c=1, d=1)
+PREHOOK: Output: default@ptestfilter_n0
+POSTHOOK: query: alter table ptestfilter_n0 add partition (c=1, d=1)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=1/d=1
+POSTHOOK: Output: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=1/d=1
-PREHOOK: query: alter table ptestfilter add partition (c=1, d=2)
+PREHOOK: query: alter table ptestfilter_n0 add partition (c=1, d=2)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c=1, d=2)
+PREHOOK: Output: default@ptestfilter_n0
+POSTHOOK: query: alter table ptestfilter_n0 add partition (c=1, d=2)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=1/d=2
+POSTHOOK: Output: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=1/d=2
-PREHOOK: query: alter table ptestFilter add partition (c=2, d=1)
+PREHOOK: query: alter table ptestFilter_n0 add partition (c=2, d=1)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestFilter add partition (c=2, d=1)
+PREHOOK: Output: default@ptestfilter_n0
+POSTHOOK: query: alter table ptestFilter_n0 add partition (c=2, d=1)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=2/d=1
+POSTHOOK: Output: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=2/d=1
-PREHOOK: query: alter table ptestfilter add partition (c=2, d=2)
+PREHOOK: query: alter table ptestfilter_n0 add partition (c=2, d=2)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c=2, d=2)
+PREHOOK: Output: default@ptestfilter_n0
+POSTHOOK: query: alter table ptestfilter_n0 add partition (c=2, d=2)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=2/d=2
+POSTHOOK: Output: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=2/d=2
-PREHOOK: query: alter table ptestfilter add partition (c=3, d=1)
+PREHOOK: query: alter table ptestfilter_n0 add partition (c=3, d=1)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c=3, d=1)
+PREHOOK: Output: default@ptestfilter_n0
+POSTHOOK: query: alter table ptestfilter_n0 add partition (c=3, d=1)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=3/d=1
-PREHOOK: query: alter table ptestfilter add partition (c=30, d=2)
+POSTHOOK: Output: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=3/d=1
+PREHOOK: query: alter table ptestfilter_n0 add partition (c=30, d=2)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c=30, d=2)
+PREHOOK: Output: default@ptestfilter_n0
+POSTHOOK: query: alter table ptestfilter_n0 add partition (c=30, d=2)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=30/d=2
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Output: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=30/d=2
+PREHOOK: query: show partitions ptestfilter_n0
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n0
+POSTHOOK: query: show partitions ptestfilter_n0
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
c=1/d=1
c=1/d=2
c=2/d=1
c=2/d=2
c=3/d=1
c=30/d=2
-PREHOOK: query: alter table ptestfilter drop partition (c=1, d=1)
+PREHOOK: query: alter table ptestfilter_n0 drop partition (c=1, d=1)
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=1/d=1
-POSTHOOK: query: alter table ptestfilter drop partition (c=1, d=1)
+PREHOOK: Input: default@ptestfilter_n0
+PREHOOK: Output: default@ptestfilter_n0@c=1/d=1
+POSTHOOK: query: alter table ptestfilter_n0 drop partition (c=1, d=1)
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=1/d=1
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=1/d=1
+PREHOOK: query: show partitions ptestfilter_n0
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n0
+POSTHOOK: query: show partitions ptestfilter_n0
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
c=1/d=2
c=2/d=1
c=2/d=2
c=3/d=1
c=30/d=2
-PREHOOK: query: alter table ptestfilter drop partition (c=2)
+PREHOOK: query: alter table ptestfilter_n0 drop partition (c=2)
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=2/d=1
-PREHOOK: Output: default@ptestfilter@c=2/d=2
-POSTHOOK: query: alter table ptestfilter drop partition (c=2)
+PREHOOK: Input: default@ptestfilter_n0
+PREHOOK: Output: default@ptestfilter_n0@c=2/d=1
+PREHOOK: Output: default@ptestfilter_n0@c=2/d=2
+POSTHOOK: query: alter table ptestfilter_n0 drop partition (c=2)
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=2/d=1
-POSTHOOK: Output: default@ptestfilter@c=2/d=2
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=2/d=1
+POSTHOOK: Output: default@ptestfilter_n0@c=2/d=2
+PREHOOK: query: show partitions ptestfilter_n0
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n0
+POSTHOOK: query: show partitions ptestfilter_n0
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
c=1/d=2
c=3/d=1
c=30/d=2
-PREHOOK: query: alter table ptestfilter drop partition (c<4)
+PREHOOK: query: alter table ptestfilter_n0 drop partition (c<4)
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=1/d=2
-PREHOOK: Output: default@ptestfilter@c=3/d=1
-POSTHOOK: query: alter table ptestfilter drop partition (c<4)
+PREHOOK: Input: default@ptestfilter_n0
+PREHOOK: Output: default@ptestfilter_n0@c=1/d=2
+PREHOOK: Output: default@ptestfilter_n0@c=3/d=1
+POSTHOOK: query: alter table ptestfilter_n0 drop partition (c<4)
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=1/d=2
-POSTHOOK: Output: default@ptestfilter@c=3/d=1
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0@c=1/d=2
+POSTHOOK: Output: default@ptestfilter_n0@c=3/d=1
+PREHOOK: query: show partitions ptestfilter_n0
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n0
+POSTHOOK: query: show partitions ptestfilter_n0
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
c=30/d=2
-PREHOOK: query: drop table ptestfilter
+PREHOOK: query: drop table ptestfilter_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: drop table ptestfilter
+PREHOOK: Input: default@ptestfilter_n0
+PREHOOK: Output: default@ptestfilter_n0
+POSTHOOK: query: drop table ptestfilter_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n0
+POSTHOOK: Output: default@ptestfilter_n0
diff --git a/ql/src/test/results/clientpositive/drop_partitions_filter3.q.out b/ql/src/test/results/clientpositive/drop_partitions_filter3.q.out
index 2ba9ec888b..6520aaf7c1 100644
--- a/ql/src/test/results/clientpositive/drop_partitions_filter3.q.out
+++ b/ql/src/test/results/clientpositive/drop_partitions_filter3.q.out
@@ -1,17 +1,17 @@
-PREHOOK: query: create table ptestfilter (a string, b int) partitioned by (c string, d int)
+PREHOOK: query: create table ptestfilter_n3 (a string, b int) partitioned by (c string, d int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: create table ptestfilter (a string, b int) partitioned by (c string, d int)
+PREHOOK: Output: default@ptestfilter_n3
+POSTHOOK: query: create table ptestfilter_n3 (a string, b int) partitioned by (c string, d int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ptestfilter
-PREHOOK: query: describe ptestfilter
+POSTHOOK: Output: default@ptestfilter_n3
+PREHOOK: query: describe ptestfilter_n3
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: describe ptestfilter
+PREHOOK: Input: default@ptestfilter_n3
+POSTHOOK: query: describe ptestfilter_n3
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n3
a string
b int
c string
@@ -21,103 +21,103 @@ d int
# col_name data_type comment
c string
d int
-PREHOOK: query: alter table ptestfilter add partition (c='1', d=1)
+PREHOOK: query: alter table ptestfilter_n3 add partition (c='1', d=1)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c='1', d=1)
+PREHOOK: Output: default@ptestfilter_n3
+POSTHOOK: query: alter table ptestfilter_n3 add partition (c='1', d=1)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=1/d=1
+POSTHOOK: Output: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3@c=1/d=1
-PREHOOK: query: alter table ptestfilter add partition (c='1', d=2)
+PREHOOK: query: alter table ptestfilter_n3 add partition (c='1', d=2)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c='1', d=2)
+PREHOOK: Output: default@ptestfilter_n3
+POSTHOOK: query: alter table ptestfilter_n3 add partition (c='1', d=2)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=1/d=2
+POSTHOOK: Output: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3@c=1/d=2
-PREHOOK: query: alter table ptestFilter add partition (c='2', d=1)
+PREHOOK: query: alter table ptestFilter_n3 add partition (c='2', d=1)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestFilter add partition (c='2', d=1)
+PREHOOK: Output: default@ptestfilter_n3
+POSTHOOK: query: alter table ptestFilter_n3 add partition (c='2', d=1)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=2/d=1
+POSTHOOK: Output: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3@c=2/d=1
-PREHOOK: query: alter table ptestfilter add partition (c='2', d=2)
+PREHOOK: query: alter table ptestfilter_n3 add partition (c='2', d=2)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c='2', d=2)
+PREHOOK: Output: default@ptestfilter_n3
+POSTHOOK: query: alter table ptestfilter_n3 add partition (c='2', d=2)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=2/d=2
+POSTHOOK: Output: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3@c=2/d=2
-PREHOOK: query: alter table ptestfilter add partition (c='3', d=1)
+PREHOOK: query: alter table ptestfilter_n3 add partition (c='3', d=1)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c='3', d=1)
+PREHOOK: Output: default@ptestfilter_n3
+POSTHOOK: query: alter table ptestfilter_n3 add partition (c='3', d=1)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=3/d=1
-PREHOOK: query: alter table ptestfilter add partition (c='3', d=2)
+POSTHOOK: Output: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3@c=3/d=1
+PREHOOK: query: alter table ptestfilter_n3 add partition (c='3', d=2)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c='3', d=2)
+PREHOOK: Output: default@ptestfilter_n3
+POSTHOOK: query: alter table ptestfilter_n3 add partition (c='3', d=2)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=3/d=2
+POSTHOOK: Output: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3@c=3/d=2
-PREHOOK: query: show partitions ptestfilter
+PREHOOK: query: show partitions ptestfilter_n3
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n3
+POSTHOOK: query: show partitions ptestfilter_n3
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n3
c=1/d=1
c=1/d=2
c=2/d=1
c=2/d=2
c=3/d=1
c=3/d=2
-PREHOOK: query: alter table ptestfilter drop partition (c='1', d=1)
+PREHOOK: query: alter table ptestfilter_n3 drop partition (c='1', d=1)
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=1/d=1
-POSTHOOK: query: alter table ptestfilter drop partition (c='1', d=1)
+PREHOOK: Input: default@ptestfilter_n3
+PREHOOK: Output: default@ptestfilter_n3@c=1/d=1
+POSTHOOK: query: alter table ptestfilter_n3 drop partition (c='1', d=1)
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=1/d=1
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Input: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3@c=1/d=1
+PREHOOK: query: show partitions ptestfilter_n3
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n3
+POSTHOOK: query: show partitions ptestfilter_n3
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n3
c=1/d=2
c=2/d=1
c=2/d=2
c=3/d=1
c=3/d=2
-PREHOOK: query: alter table ptestfilter drop partition (c='2')
+PREHOOK: query: alter table ptestfilter_n3 drop partition (c='2')
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=2/d=1
-PREHOOK: Output: default@ptestfilter@c=2/d=2
-POSTHOOK: query: alter table ptestfilter drop partition (c='2')
+PREHOOK: Input: default@ptestfilter_n3
+PREHOOK: Output: default@ptestfilter_n3@c=2/d=1
+PREHOOK: Output: default@ptestfilter_n3@c=2/d=2
+POSTHOOK: query: alter table ptestfilter_n3 drop partition (c='2')
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=2/d=1
-POSTHOOK: Output: default@ptestfilter@c=2/d=2
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Input: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3@c=2/d=1
+POSTHOOK: Output: default@ptestfilter_n3@c=2/d=2
+PREHOOK: query: show partitions ptestfilter_n3
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n3
+POSTHOOK: query: show partitions ptestfilter_n3
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n3
c=1/d=2
c=3/d=1
c=3/d=2
-PREHOOK: query: drop table ptestfilter
+PREHOOK: query: drop table ptestfilter_n3
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: drop table ptestfilter
+PREHOOK: Input: default@ptestfilter_n3
+PREHOOK: Output: default@ptestfilter_n3
+POSTHOOK: query: drop table ptestfilter_n3
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n3
+POSTHOOK: Output: default@ptestfilter_n3
diff --git a/ql/src/test/results/clientpositive/drop_partitions_filter4.q.out b/ql/src/test/results/clientpositive/drop_partitions_filter4.q.out
index ba69b6d03a..ddfb7bf423 100644
--- a/ql/src/test/results/clientpositive/drop_partitions_filter4.q.out
+++ b/ql/src/test/results/clientpositive/drop_partitions_filter4.q.out
@@ -1,238 +1,238 @@
-PREHOOK: query: create table ptestfilter (a string, b int) partitioned by (c double)
+PREHOOK: query: create table ptestfilter_n2 (a string, b int) partitioned by (c double)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: create table ptestfilter (a string, b int) partitioned by (c double)
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: create table ptestfilter_n2 (a string, b int) partitioned by (c double)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ptestfilter
-PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c) select 'Col1', 1, null
+POSTHOOK: Output: default@ptestfilter_n2
+PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c) select 'Col1', 1, null
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c) select 'Col1', 1, null
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c) select 'Col1', 1, null
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
-PREHOOK: query: alter table ptestfilter add partition (c=3.4)
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
+PREHOOK: query: alter table ptestfilter_n2 add partition (c=3.4)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c=3.4)
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: alter table ptestfilter_n2 add partition (c=3.4)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=3.4
+POSTHOOK: Output: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=3.4
-PREHOOK: query: alter table ptestfilter add partition (c=5.55)
+PREHOOK: query: alter table ptestfilter_n2 add partition (c=5.55)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c=5.55)
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: alter table ptestfilter_n2 add partition (c=5.55)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=5.55
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Output: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=5.55
+PREHOOK: query: show partitions ptestfilter_n2
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n2
+POSTHOOK: query: show partitions ptestfilter_n2
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
c=3.4
c=5.55
c=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: alter table ptestfilter drop partition(c = '__HIVE_DEFAULT_PARTITION__')
+PREHOOK: query: alter table ptestfilter_n2 drop partition(c = '__HIVE_DEFAULT_PARTITION__')
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: query: alter table ptestfilter drop partition(c = '__HIVE_DEFAULT_PARTITION__')
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: query: alter table ptestfilter_n2 drop partition(c = '__HIVE_DEFAULT_PARTITION__')
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: alter table ptestfilter drop partition(c = 3.40)
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__
+PREHOOK: query: alter table ptestfilter_n2 drop partition(c = 3.40)
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=3.4
-POSTHOOK: query: alter table ptestfilter drop partition(c = 3.40)
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2@c=3.4
+POSTHOOK: query: alter table ptestfilter_n2 drop partition(c = 3.40)
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=3.4
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=3.4
+PREHOOK: query: show partitions ptestfilter_n2
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n2
+POSTHOOK: query: show partitions ptestfilter_n2
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
c=5.55
-PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c) select 'Col1', 1, null
+PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c) select 'Col1', 1, null
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c) select 'Col1', 1, null
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c) select 'Col1', 1, null
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
-PREHOOK: query: alter table ptestfilter drop partition(c != '__HIVE_DEFAULT_PARTITION__')
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
+PREHOOK: query: alter table ptestfilter_n2 drop partition(c != '__HIVE_DEFAULT_PARTITION__')
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=5.55
-POSTHOOK: query: alter table ptestfilter drop partition(c != '__HIVE_DEFAULT_PARTITION__')
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2@c=5.55
+POSTHOOK: query: alter table ptestfilter_n2 drop partition(c != '__HIVE_DEFAULT_PARTITION__')
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=5.55
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=5.55
+PREHOOK: query: show partitions ptestfilter_n2
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n2
+POSTHOOK: query: show partitions ptestfilter_n2
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
c=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: drop table ptestfilter
+PREHOOK: query: drop table ptestfilter_n2
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: drop table ptestfilter
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: drop table ptestfilter_n2
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter
-PREHOOK: query: create table ptestfilter (a string, b int) partitioned by (c string, d int)
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2
+PREHOOK: query: create table ptestfilter_n2 (a string, b int) partitioned by (c string, d int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: create table ptestfilter (a string, b int) partitioned by (c string, d int)
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: create table ptestfilter_n2 (a string, b int) partitioned by (c string, d int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@ptestfilter
-PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col1', 1, null, null
+POSTHOOK: Output: default@ptestfilter_n2
+PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col1', 1, null, null
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col1', 1, null, null
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col1', 1, null, null
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
-PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 2
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
+PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 2
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 2
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 2
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=2
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=2).a SIMPLE []
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=2).b SIMPLE []
-PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col3', 3, 'Uganda', null
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=2
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=2).a SIMPLE []
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=2).b SIMPLE []
+PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col3', 3, 'Uganda', null
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col3', 3, 'Uganda', null
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col3', 3, 'Uganda', null
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@ptestfilter@c=Uganda/d=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: ptestfilter PARTITION(c=Uganda,d=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
-POSTHOOK: Lineage: ptestfilter PARTITION(c=Uganda,d=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
-PREHOOK: query: alter table ptestfilter add partition (c='Germany', d=2)
+POSTHOOK: Output: default@ptestfilter_n2@c=Uganda/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=Uganda,d=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=Uganda,d=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
+PREHOOK: query: alter table ptestfilter_n2 add partition (c='Germany', d=2)
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: alter table ptestfilter add partition (c='Germany', d=2)
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: alter table ptestfilter_n2 add partition (c='Germany', d=2)
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=Germany/d=2
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Output: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=Germany/d=2
+PREHOOK: query: show partitions ptestfilter_n2
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n2
+POSTHOOK: query: show partitions ptestfilter_n2
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
c=Germany/d=2
c=Uganda/d=__HIVE_DEFAULT_PARTITION__
c=__HIVE_DEFAULT_PARTITION__/d=2
c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: alter table ptestfilter drop partition (c='__HIVE_DEFAULT_PARTITION__')
+PREHOOK: query: alter table ptestfilter_n2 drop partition (c='__HIVE_DEFAULT_PARTITION__')
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=2
-PREHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: query: alter table ptestfilter drop partition (c='__HIVE_DEFAULT_PARTITION__')
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=2
+PREHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: query: alter table ptestfilter_n2 drop partition (c='__HIVE_DEFAULT_PARTITION__')
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=2
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: alter table ptestfilter drop partition (c='Uganda', d='__HIVE_DEFAULT_PARTITION__')
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=2
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: query: alter table ptestfilter_n2 drop partition (c='Uganda', d='__HIVE_DEFAULT_PARTITION__')
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=Uganda/d=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: query: alter table ptestfilter drop partition (c='Uganda', d='__HIVE_DEFAULT_PARTITION__')
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2@c=Uganda/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: query: alter table ptestfilter_n2 drop partition (c='Uganda', d='__HIVE_DEFAULT_PARTITION__')
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=Uganda/d=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: alter table ptestfilter drop partition (c='Germany', d=2)
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=Uganda/d=__HIVE_DEFAULT_PARTITION__
+PREHOOK: query: alter table ptestfilter_n2 drop partition (c='Germany', d=2)
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=Germany/d=2
-POSTHOOK: query: alter table ptestfilter drop partition (c='Germany', d=2)
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2@c=Germany/d=2
+POSTHOOK: query: alter table ptestfilter_n2 drop partition (c='Germany', d=2)
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=Germany/d=2
-PREHOOK: query: show partitions ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=Germany/d=2
+PREHOOK: query: show partitions ptestfilter_n2
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n2
+POSTHOOK: query: show partitions ptestfilter_n2
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
-PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 2
+POSTHOOK: Input: default@ptestfilter_n2
+PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 2
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 2
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 2
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=2
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=2).a SIMPLE []
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=2).b SIMPLE []
-PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 3
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=2
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=2).a SIMPLE []
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=2).b SIMPLE []
+PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 3
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 3
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 3
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=3
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=3).a SIMPLE []
-POSTHOOK: Lineage: ptestfilter PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=3).b SIMPLE []
-PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col3', 3, 'Uganda', null
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=3
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=3).a SIMPLE []
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=__HIVE_DEFAULT_PARTITION__,d=3).b SIMPLE []
+PREHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col3', 3, 'Uganda', null
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col3', 3, 'Uganda', null
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col3', 3, 'Uganda', null
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@ptestfilter@c=Uganda/d=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: ptestfilter PARTITION(c=Uganda,d=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
-POSTHOOK: Lineage: ptestfilter PARTITION(c=Uganda,d=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
-PREHOOK: query: alter table ptestfilter drop partition (d != 3)
+POSTHOOK: Output: default@ptestfilter_n2@c=Uganda/d=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=Uganda,d=__HIVE_DEFAULT_PARTITION__).a SIMPLE []
+POSTHOOK: Lineage: ptestfilter_n2 PARTITION(c=Uganda,d=__HIVE_DEFAULT_PARTITION__).b SIMPLE []
+PREHOOK: query: alter table ptestfilter_n2 drop partition (d != 3)
PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=2
-POSTHOOK: query: alter table ptestfilter drop partition (d != 3)
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=2
+POSTHOOK: query: alter table ptestfilter_n2 drop partition (d != 3)
POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter@c=__HIVE_DEFAULT_PARTITION__/d=2
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2@c=__HIVE_DEFAULT_PARTITION__/d=2
-PREHOOK: query: show partitions ptestfilter
+PREHOOK: query: show partitions ptestfilter_n2
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@ptestfilter
-POSTHOOK: query: show partitions ptestfilter
+PREHOOK: Input: default@ptestfilter_n2
+POSTHOOK: query: show partitions ptestfilter_n2
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
c=Uganda/d=__HIVE_DEFAULT_PARTITION__
c=__HIVE_DEFAULT_PARTITION__/d=3
-PREHOOK: query: drop table ptestfilter
+PREHOOK: query: drop table ptestfilter_n2
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@ptestfilter
-PREHOOK: Output: default@ptestfilter
-POSTHOOK: query: drop table ptestfilter
+PREHOOK: Input: default@ptestfilter_n2
+PREHOOK: Output: default@ptestfilter_n2
+POSTHOOK: query: drop table ptestfilter_n2
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@ptestfilter
-POSTHOOK: Output: default@ptestfilter
+POSTHOOK: Input: default@ptestfilter_n2
+POSTHOOK: Output: default@ptestfilter_n2
diff --git a/ql/src/test/results/clientpositive/drop_table2.q.out b/ql/src/test/results/clientpositive/drop_table2.q.out
index d6a8c3a5dc..957fcc495c 100644
--- a/ql/src/test/results/clientpositive/drop_table2.q.out
+++ b/ql/src/test/results/clientpositive/drop_table2.q.out
@@ -1,68 +1,68 @@
-PREHOOK: query: create table if not exists temp(col STRING) partitioned by (p STRING)
+PREHOOK: query: create table if not exists temp_n0(col STRING) partitioned by (p STRING)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@temp
-POSTHOOK: query: create table if not exists temp(col STRING) partitioned by (p STRING)
+PREHOOK: Output: default@temp_n0
+POSTHOOK: query: create table if not exists temp_n0(col STRING) partitioned by (p STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@temp
-PREHOOK: query: alter table temp add if not exists partition (p ='p1')
+POSTHOOK: Output: default@temp_n0
+PREHOOK: query: alter table temp_n0 add if not exists partition (p ='p1')
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@temp
-POSTHOOK: query: alter table temp add if not exists partition (p ='p1')
+PREHOOK: Output: default@temp_n0
+POSTHOOK: query: alter table temp_n0 add if not exists partition (p ='p1')
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@temp
-POSTHOOK: Output: default@temp@p=p1
+POSTHOOK: Output: default@temp_n0
+POSTHOOK: Output: default@temp_n0@p=p1
-PREHOOK: query: alter table temp add if not exists partition (p ='p2')
+PREHOOK: query: alter table temp_n0 add if not exists partition (p ='p2')
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@temp
-POSTHOOK: query: alter table temp add if not exists partition (p ='p2')
+PREHOOK: Output: default@temp_n0
+POSTHOOK: query: alter table temp_n0 add if not exists partition (p ='p2')
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@temp
-POSTHOOK: Output: default@temp@p=p2
+POSTHOOK: Output: default@temp_n0
+POSTHOOK: Output: default@temp_n0@p=p2
-PREHOOK: query: alter table temp add if not exists partition (p ='p3')
+PREHOOK: query: alter table temp_n0 add if not exists partition (p ='p3')
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@temp
-POSTHOOK: query: alter table temp add if not exists partition (p ='p3')
+PREHOOK: Output: default@temp_n0
+POSTHOOK: query: alter table temp_n0 add if not exists partition (p ='p3')
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@temp
-POSTHOOK: Output: default@temp@p=p3
-PREHOOK: query: show partitions temp
+POSTHOOK: Output: default@temp_n0
+POSTHOOK: Output: default@temp_n0@p=p3
+PREHOOK: query: show partitions temp_n0
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@temp
-POSTHOOK: query: show partitions temp
+PREHOOK: Input: default@temp_n0
+POSTHOOK: query: show partitions temp_n0
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@temp
+POSTHOOK: Input: default@temp_n0
p=p1
p=p2
p=p3
-PREHOOK: query: drop table temp
+PREHOOK: query: drop table temp_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: drop table temp
+PREHOOK: Input: default@temp_n0
+PREHOOK: Output: default@temp_n0
+POSTHOOK: query: drop table temp_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp
-PREHOOK: query: create table if not exists temp(col STRING) partitioned by (p STRING)
+POSTHOOK: Input: default@temp_n0
+POSTHOOK: Output: default@temp_n0
+PREHOOK: query: create table if not exists temp_n0(col STRING) partitioned by (p STRING)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@temp
-POSTHOOK: query: create table if not exists temp(col STRING) partitioned by (p STRING)
+PREHOOK: Output: default@temp_n0
+POSTHOOK: query: create table if not exists temp_n0(col STRING) partitioned by (p STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@temp
-PREHOOK: query: show partitions temp
+POSTHOOK: Output: default@temp_n0
+PREHOOK: query: show partitions temp_n0
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@temp
-POSTHOOK: query: show partitions temp
+PREHOOK: Input: default@temp_n0
+POSTHOOK: query: show partitions temp_n0
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@temp
-PREHOOK: query: drop table temp
+POSTHOOK: Input: default@temp_n0
+PREHOOK: query: drop table temp_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: drop table temp
+PREHOOK: Input: default@temp_n0
+PREHOOK: Output: default@temp_n0
+POSTHOOK: query: drop table temp_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp
+POSTHOOK: Input: default@temp_n0
+POSTHOOK: Output: default@temp_n0
diff --git a/ql/src/test/results/clientpositive/drop_table_purge.q.out b/ql/src/test/results/clientpositive/drop_table_purge.q.out
index 14f53b642b..a98ec79d65 100644
--- a/ql/src/test/results/clientpositive/drop_table_purge.q.out
+++ b/ql/src/test/results/clientpositive/drop_table_purge.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: CREATE TABLE IF NOT EXISTS temp(col STRING)
+PREHOOK: query: CREATE TABLE IF NOT EXISTS temp_n1(col STRING)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@temp
-POSTHOOK: query: CREATE TABLE IF NOT EXISTS temp(col STRING)
+PREHOOK: Output: default@temp_n1
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS temp_n1(col STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@temp
-PREHOOK: query: DROP TABLE temp PURGE
+POSTHOOK: Output: default@temp_n1
+PREHOOK: query: DROP TABLE temp_n1 PURGE
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: DROP TABLE temp PURGE
+PREHOOK: Input: default@temp_n1
+PREHOOK: Output: default@temp_n1
+POSTHOOK: query: DROP TABLE temp_n1 PURGE
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp
+POSTHOOK: Input: default@temp_n1
+POSTHOOK: Output: default@temp_n1
diff --git a/ql/src/test/results/clientpositive/drop_table_removes_partition_dirs.q.out b/ql/src/test/results/clientpositive/drop_table_removes_partition_dirs.q.out
index 2c34dd0428..b4863ce00b 100644
--- a/ql/src/test/results/clientpositive/drop_table_removes_partition_dirs.q.out
+++ b/ql/src/test/results/clientpositive/drop_table_removes_partition_dirs.q.out
@@ -1,50 +1,50 @@
-PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+PREHOOK: query: CREATE TABLE test_table_n3 (key STRING, value STRING)
PARTITIONED BY (part STRING)
STORED AS RCFILE
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
#### A masked pattern was here ####
PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table
-POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING)
+PREHOOK: Output: default@test_table_n3
+POSTHOOK: query: CREATE TABLE test_table_n3 (key STRING, value STRING)
PARTITIONED BY (part STRING)
STORED AS RCFILE
#### A masked pattern was here ####
POSTHOOK: type: CREATETABLE
#### A masked pattern was here ####
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table
-PREHOOK: query: ALTER TABLE test_table ADD PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n3
+PREHOOK: query: ALTER TABLE test_table_n3 ADD PARTITION (part = '1')
#### A masked pattern was here ####
PREHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-PREHOOK: Output: default@test_table
-POSTHOOK: query: ALTER TABLE test_table ADD PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n3
+POSTHOOK: query: ALTER TABLE test_table_n3 ADD PARTITION (part = '1')
#### A masked pattern was here ####
POSTHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-POSTHOOK: Output: default@test_table
-POSTHOOK: Output: default@test_table@part=1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n3
+POSTHOOK: Output: default@test_table_n3@part=1
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n3 PARTITION (part = '1')
SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n3@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n3 PARTITION (part = '1')
SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table_n3@part=1
+POSTHOOK: Lineage: test_table_n3 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n3 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
Found 1 items
#### A masked pattern was here ####
-PREHOOK: query: DROP TABLE test_table
+PREHOOK: query: DROP TABLE test_table_n3
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table
-PREHOOK: Output: default@test_table
-POSTHOOK: query: DROP TABLE test_table
+PREHOOK: Input: default@test_table_n3
+PREHOOK: Output: default@test_table_n3
+POSTHOOK: query: DROP TABLE test_table_n3
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table
-POSTHOOK: Output: default@test_table
+POSTHOOK: Input: default@test_table_n3
+POSTHOOK: Output: default@test_table_n3
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
index 0e5e957d29..8c22c94690 100644
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -1,21 +1,21 @@
-PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
TBLPROPERTIES ("druid.datasource" = "wikipedia")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@druid_table_1
-POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+PREHOOK: Output: default@druid_table_1_n2
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
TBLPROPERTIES ("druid.datasource" = "wikipedia")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@druid_table_1
-PREHOOK: query: DESCRIBE FORMATTED druid_table_1
+POSTHOOK: Output: default@druid_table_1_n2
+PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n2
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@druid_table_1
-POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
+PREHOOK: Input: default@druid_table_1_n2
+POSTHOOK: query: DESCRIBE FORMATTED druid_table_1_n2
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@druid_table_1
+POSTHOOK: Input: default@druid_table_1_n2
# col_name data_type comment
__time timestamp with local time zone from deserializer
robot string from deserializer
@@ -61,10 +61,10 @@ Sort Columns: []
Storage Desc Params:
serialization.format 1
PREHOOK: query: EXPLAIN EXTENDED
-SELECT robot FROM druid_table_1
+SELECT robot FROM druid_table_1_n2
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
-SELECT robot FROM druid_table_1
+SELECT robot FROM druid_table_1_n2
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -75,7 +75,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames robot
druid.fieldTypes string
@@ -90,10 +90,10 @@ STAGE PLANS:
ListSink
PREHOOK: query: EXPLAIN EXTENDED
-SELECT delta FROM druid_table_1
+SELECT delta FROM druid_table_1_n2
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
-SELECT delta FROM druid_table_1
+SELECT delta FROM druid_table_1_n2
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -104,7 +104,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames delta
druid.fieldTypes float
@@ -120,12 +120,12 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN EXTENDED
SELECT robot
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE language = 'en'
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT robot
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE language = 'en'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -137,7 +137,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames robot
druid.fieldTypes string
@@ -153,12 +153,12 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN EXTENDED
SELECT DISTINCT robot
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE language = 'en'
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT DISTINCT robot
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE language = 'en'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -170,7 +170,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames robot
druid.fieldTypes string
@@ -189,10 +189,10 @@ SELECT a.robot, b.language
FROM
(
(SELECT robot, language
- FROM druid_table_1) a
+ FROM druid_table_1_n2) a
JOIN
(SELECT language
- FROM druid_table_1) b
+ FROM druid_table_1_n2) b
ON a.language = b.language
)
PREHOOK: type: QUERY
@@ -201,10 +201,10 @@ SELECT a.robot, b.language
FROM
(
(SELECT robot, language
- FROM druid_table_1) a
+ FROM druid_table_1_n2) a
JOIN
(SELECT language
- FROM druid_table_1) b
+ FROM druid_table_1_n2) b
ON a.language = b.language
)
POSTHOOK: type: QUERY
@@ -217,7 +217,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames robot,language
druid.fieldTypes string,string
@@ -239,7 +239,7 @@ STAGE PLANS:
value expressions: _col0 (type: string)
auto parallelism: false
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames language
druid.fieldTypes string
@@ -260,7 +260,7 @@ STAGE PLANS:
Path -> Partition:
#### A masked pattern was here ####
Partition
- base file name: druid_table_1
+ base file name: druid_table_1_n2
input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
properties:
@@ -278,11 +278,11 @@ STAGE PLANS:
druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["robot","language"],"resultFormat":"compactedList"}
druid.query.type scan
#### A masked pattern was here ####
- name default.druid_table_1
+ name default.druid_table_1_n2
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -307,21 +307,21 @@ STAGE PLANS:
druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["language"],"resultFormat":"compactedList"}
druid.query.type scan
#### A masked pattern was here ####
- name default.druid_table_1
+ name default.druid_table_1_n2
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
totalSize 0
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
- name: default.druid_table_1
- name: default.druid_table_1
+ name: default.druid_table_1_n2
+ name: default.druid_table_1_n2
Truncated Path -> Alias:
- /druid_table_1 [$hdt$_0:druid_table_1, druid_table_1]
+ /druid_table_1_n2 [$hdt$_0:druid_table_1_n2, druid_table_1_n2]
Needs Tagging: true
Reduce Operator Tree:
Join Operator
@@ -365,17 +365,17 @@ STAGE PLANS:
Processor Tree:
ListSink
-Warning: Shuffle Join JOIN[5][tables = [$hdt$_0, druid_table_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[5][tables = [$hdt$_0, druid_table_1_n2]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: EXPLAIN EXTENDED
SELECT a.robot, b.language
FROM
(
(SELECT robot, language
- FROM druid_table_1
+ FROM druid_table_1_n2
WHERE language = 'en') a
JOIN
(SELECT language
- FROM druid_table_1) b
+ FROM druid_table_1_n2) b
ON a.language = b.language
)
PREHOOK: type: QUERY
@@ -384,11 +384,11 @@ SELECT a.robot, b.language
FROM
(
(SELECT robot, language
- FROM druid_table_1
+ FROM druid_table_1_n2
WHERE language = 'en') a
JOIN
(SELECT language
- FROM druid_table_1) b
+ FROM druid_table_1_n2) b
ON a.language = b.language
)
POSTHOOK: type: QUERY
@@ -401,7 +401,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames robot
druid.fieldTypes string
@@ -421,7 +421,7 @@ STAGE PLANS:
value expressions: _col0 (type: string)
auto parallelism: false
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames vc
druid.fieldTypes string
@@ -440,7 +440,7 @@ STAGE PLANS:
Path -> Partition:
#### A masked pattern was here ####
Partition
- base file name: druid_table_1
+ base file name: druid_table_1_n2
input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
properties:
@@ -458,11 +458,11 @@ STAGE PLANS:
druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"columns":["robot"],"resultFormat":"compactedList"}
druid.query.type scan
#### A masked pattern was here ####
- name default.druid_table_1
+ name default.druid_table_1_n2
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -487,21 +487,21 @@ STAGE PLANS:
druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"virtualColumns":[{"type":"expression","name":"vc","expression":"'en'","outputType":"STRING"}],"columns":["vc"],"resultFormat":"compactedList"}
druid.query.type scan
#### A masked pattern was here ####
- name default.druid_table_1
+ name default.druid_table_1_n2
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
totalSize 0
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
- name: default.druid_table_1
- name: default.druid_table_1
+ name: default.druid_table_1_n2
+ name: default.druid_table_1_n2
Truncated Path -> Alias:
- /druid_table_1 [$hdt$_0:druid_table_1, druid_table_1]
+ /druid_table_1_n2 [$hdt$_0:druid_table_1_n2, druid_table_1_n2]
Needs Tagging: true
Reduce Operator Tree:
Join Operator
@@ -547,14 +547,14 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN EXTENDED
SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
-FROM druid_table_1
+FROM druid_table_1_n2
GROUP BY robot, language, floor_day(`__time`)
ORDER BY CAST(robot AS INTEGER) ASC, m DESC
LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
-FROM druid_table_1
+FROM druid_table_1_n2
GROUP BY robot, language, floor_day(`__time`)
ORDER BY CAST(robot AS INTEGER) ASC, m DESC
LIMIT 10
@@ -568,7 +568,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames robot,floor_day,$f3,$f4,(tok_function tok_int (tok_table_or_col robot))
druid.fieldTypes string,timestamp with local time zone,float,double,int
@@ -584,11 +584,11 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT substring(namespace, CAST(deleted AS INT), 4)
-FROM druid_table_1
+FROM druid_table_1_n2
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT substring(namespace, CAST(deleted AS INT), 4)
-FROM druid_table_1
+FROM druid_table_1_n2
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -599,7 +599,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames vc
druid.fieldTypes string
@@ -614,7 +614,7 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT robot, floor_day(`__time`)
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
@@ -622,7 +622,7 @@ LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT robot, floor_day(`__time`)
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
@@ -637,7 +637,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames robot,floor_day
druid.fieldTypes string,timestamp with local time zone
@@ -652,7 +652,7 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT robot, `__time`
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, `__time`
ORDER BY robot
@@ -660,7 +660,7 @@ LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT robot, `__time`
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, `__time`
ORDER BY robot
@@ -675,7 +675,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames extract,robot
druid.fieldTypes timestamp with local time zone,string
@@ -690,7 +690,7 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT robot, floor_day(`__time`)
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE `__time` BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
@@ -698,7 +698,7 @@ LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT robot, floor_day(`__time`)
-FROM druid_table_1
+FROM druid_table_1_n2
WHERE `__time` BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
@@ -713,7 +713,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
properties:
druid.fieldNames robot,floor_day
druid.fieldTypes string,timestamp with local time zone
@@ -728,14 +728,14 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN EXTENDED
SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
-FROM druid_table_1
+FROM druid_table_1_n2
GROUP BY robot, language, floor_day(`__time`)
ORDER BY CAST(robot AS INTEGER) ASC, m DESC
LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
-FROM druid_table_1
+FROM druid_table_1_n2
GROUP BY robot, language, floor_day(`__time`)
ORDER BY CAST(robot AS INTEGER) ASC, m DESC
LIMIT 10
@@ -750,7 +750,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
Select Operator
@@ -777,7 +777,7 @@ STAGE PLANS:
Path -> Partition:
#### A masked pattern was here ####
Partition
- base file name: druid_table_1
+ base file name: druid_table_1_n2
input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
properties:
@@ -791,11 +791,11 @@ STAGE PLANS:
columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
#### A masked pattern was here ####
- name default.druid_table_1
+ name default.druid_table_1_n2
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -816,21 +816,21 @@ STAGE PLANS:
columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
#### A masked pattern was here ####
- name default.druid_table_1
+ name default.druid_table_1_n2
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1_n2 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
totalSize 0
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
- name: default.druid_table_1
- name: default.druid_table_1
+ name: default.druid_table_1_n2
+ name: default.druid_table_1_n2
Truncated Path -> Alias:
- /druid_table_1 [druid_table_1]
+ /druid_table_1_n2 [druid_table_1_n2]
Needs Tagging: false
Reduce Operator Tree:
Group By Operator
diff --git a/ql/src/test/results/clientpositive/druid_basic3.q.out b/ql/src/test/results/clientpositive/druid_basic3.q.out
index 96f6fe9f7e..54719f7517 100644
--- a/ql/src/test/results/clientpositive/druid_basic3.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic3.q.out
@@ -1,24 +1,24 @@
-PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n4
STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
TBLPROPERTIES ("druid.datasource" = "wikipedia")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@druid_table_1
-POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+PREHOOK: Output: default@druid_table_1_n4
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n4
STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
TBLPROPERTIES ("druid.datasource" = "wikipedia")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@druid_table_1
+POSTHOOK: Output: default@druid_table_1_n4
PREHOOK: query: EXPLAIN
SELECT sum(added) + sum(delta) as a, language
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT sum(added) + sum(delta) as a, language
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
POSTHOOK: type: QUERY
@@ -31,7 +31,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n4
properties:
druid.fieldNames a,language
druid.fieldTypes double,string
@@ -46,13 +46,13 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT sum(delta), sum(added) + sum(delta) AS a, language
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT sum(delta), sum(added) + sum(delta) AS a, language
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
POSTHOOK: type: QUERY
@@ -65,7 +65,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n4
properties:
druid.fieldNames $f1,a,language
druid.fieldTypes double,double,string
@@ -80,13 +80,13 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT language, sum(added) / sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT language, sum(added) / sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
POSTHOOK: type: QUERY
@@ -99,7 +99,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n4
properties:
druid.fieldNames language,a
druid.fieldTypes string,double
@@ -114,13 +114,13 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT language, sum(added) * sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT language, sum(added) * sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
POSTHOOK: type: QUERY
@@ -133,7 +133,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n4
properties:
druid.fieldNames language,a
druid.fieldTypes string,double
@@ -148,13 +148,13 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT language, sum(added) - sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT language, sum(added) - sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
POSTHOOK: type: QUERY
@@ -167,7 +167,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n4
properties:
druid.fieldNames language,a
druid.fieldTypes string,double
@@ -182,13 +182,13 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT language, sum(added) + 100 AS a
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT language, sum(added) + 100 AS a
-FROM druid_table_1
+FROM druid_table_1_n4
GROUP BY language
ORDER BY a DESC
POSTHOOK: type: QUERY
@@ -201,7 +201,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n4
properties:
druid.fieldNames language,a
druid.fieldTypes string,double
@@ -218,7 +218,7 @@ PREHOOK: query: EXPLAIN
SELECT language, -1 * (a + b) AS c
FROM (
SELECT (sum(added)-sum(delta)) / (count(*) * 3) AS a, sum(deleted) AS b, language
- FROM druid_table_1
+ FROM druid_table_1_n4
GROUP BY language) subq
ORDER BY c DESC
PREHOOK: type: QUERY
@@ -226,7 +226,7 @@ POSTHOOK: query: EXPLAIN
SELECT language, -1 * (a + b) AS c
FROM (
SELECT (sum(added)-sum(delta)) / (count(*) * 3) AS a, sum(deleted) AS b, language
- FROM druid_table_1
+ FROM druid_table_1_n4
GROUP BY language) subq
ORDER BY c DESC
POSTHOOK: type: QUERY
@@ -239,7 +239,7 @@ STAGE PLANS:
limit: -1
Processor Tree:
TableScan
- alias: druid_table_1
+ alias: druid_table_1_n4
properties:
druid.fieldNames language,c
druid.fieldTypes string,double
@@ -254,13 +254,13 @@ STAGE PLANS:
PREHOOK: query: EXPLAIN
SELECT
language, robot, sum(added) - sum(delta) AS a -FROM druid_table_1 +FROM druid_table_1_n4 WHERE extract (week from `__time`) IN (10,11) GROUP BY language, robot PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT language, robot, sum(added) - sum(delta) AS a -FROM druid_table_1 +FROM druid_table_1_n4 WHERE extract (week from `__time`) IN (10,11) GROUP BY language, robot POSTHOOK: type: QUERY @@ -273,7 +273,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n4 properties: druid.fieldNames language,robot,a druid.fieldTypes string,string,double @@ -288,13 +288,13 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT language, sum(delta) / count(*) AS a -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT language, sum(delta) / count(*) AS a -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC POSTHOOK: type: QUERY @@ -307,7 +307,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n4 properties: druid.fieldNames language,a druid.fieldTypes string,double @@ -323,14 +323,14 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT language, sum(added) / sum(delta) AS a, CASE WHEN sum(deleted)=0 THEN 1.0 ELSE sum(deleted) END AS b -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT language, sum(added) / sum(delta) AS a, CASE WHEN sum(deleted)=0 THEN 1.0 ELSE sum(deleted) END AS b -FROM druid_table_1 +FROM druid_table_1_n4 GROUP BY language ORDER BY a DESC POSTHOOK: type: QUERY @@ -343,7 +343,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n4 properties: druid.fieldNames language,a,b druid.fieldTypes string,double,double @@ -360,7 +360,7 @@ PREHOOK: query: EXPLAIN SELECT language, a, a - b as c FROM ( SELECT language, sum(added) + 100 AS a, sum(delta) AS b - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language) subq ORDER BY a DESC PREHOOK: type: QUERY @@ -368,7 +368,7 @@ POSTHOOK: query: EXPLAIN SELECT language, a, a - b as c FROM ( SELECT language, sum(added) + 100 AS a, sum(delta) AS b - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language) subq ORDER BY a DESC POSTHOOK: type: QUERY @@ -381,7 +381,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n4 properties: druid.fieldNames language,a,c druid.fieldTypes string,double,double @@ -398,7 +398,7 @@ PREHOOK: query: EXPLAIN SELECT language, robot, "A" FROM ( SELECT sum(added) - sum(delta) AS a, language, robot - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language, robot ) subq ORDER BY "A" LIMIT 5 @@ -407,7 +407,7 @@ POSTHOOK: query: EXPLAIN SELECT language, robot, "A" FROM ( SELECT sum(added) - sum(delta) AS a, language, robot - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language, robot ) subq ORDER BY "A" LIMIT 5 @@ -421,7 +421,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n4 properties: druid.fieldNames robot,language druid.fieldTypes string,string @@ -438,7 +438,7 @@ PREHOOK: query: EXPLAIN SELECT language, robot, "A" FROM ( SELECT language, sum(added) + sum(delta) AS a, robot - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language, robot) subq ORDER BY robot, language LIMIT 5 @@ -447,7 +447,7 @@ POSTHOOK: query: EXPLAIN SELECT language, robot, "A" FROM ( SELECT language, sum(added) + sum(delta) 
AS a, robot - FROM druid_table_1 + FROM druid_table_1_n4 GROUP BY language, robot) subq ORDER BY robot, language LIMIT 5 @@ -461,7 +461,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n4 properties: druid.fieldNames robot,language druid.fieldTypes string,string diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out index 11082edf78..fde446cecb 100644 --- a/ql/src/test/results/clientpositive/druid_intervals.q.out +++ b/ql/src/test/results/clientpositive/druid_intervals.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1 +PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n0 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_1 -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1 +PREHOOK: Output: default@druid_table_1_n0 +POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n0 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_1 -PREHOOK: query: DESCRIBE FORMATTED druid_table_1 +POSTHOOK: Output: default@druid_table_1_n0 +PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@druid_table_1 -POSTHOOK: query: DESCRIBE FORMATTED druid_table_1 +PREHOOK: Input: default@druid_table_1_n0 +POSTHOOK: query: DESCRIBE FORMATTED druid_table_1_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@druid_table_1 +POSTHOOK: Input: default@druid_table_1_n0 # col_name data_type comment __time timestamp with local time zone from deserializer robot string from deserializer @@ -62,11 +62,11 @@ Storage Desc Params: serialization.format 1 PREHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -77,7 +77,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc druid.fieldTypes timestamp with local time zone @@ -92,12 +92,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` < '2012-03-01 00:00:00' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` < '2012-03-01 00:00:00' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -109,7 +109,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc druid.fieldTypes timestamp with local time zone @@ -124,12 +124,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -141,7 +141,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc druid.fieldTypes timestamp 
with local time zone @@ -156,13 +156,13 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' AND `__time` < '2011-01-01 00:00:00' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' AND `__time` < '2011-01-01 00:00:00' POSTHOOK: type: QUERY @@ -175,7 +175,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc druid.fieldTypes timestamp with local time zone @@ -190,12 +190,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -207,7 +207,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc druid.fieldTypes timestamp with local time zone @@ -222,13 +222,13 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00') PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00') POSTHOOK: type: QUERY @@ -241,7 +241,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc druid.fieldTypes timestamp with local time zone @@ -256,13 +256,13 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00') PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00') POSTHOOK: type: QUERY @@ -275,7 +275,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc druid.fieldTypes timestamp with local time zone @@ -290,12 +290,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time` -FROM druid_table_1 +FROM druid_table_1_n0 WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -307,7 +307,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc druid.fieldTypes timestamp with local time zone @@ -322,12 +322,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time`, robot -FROM druid_table_1 +FROM druid_table_1_n0 WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 
00:00:00') PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time`, robot -FROM druid_table_1 +FROM druid_table_1_n0 WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -339,7 +339,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc,vc0 druid.fieldTypes timestamp with local time zone,string @@ -354,12 +354,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time`, robot -FROM druid_table_1 +FROM druid_table_1_n0 WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time`, robot -FROM druid_table_1 +FROM druid_table_1_n0 WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -371,7 +371,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n0 properties: druid.fieldNames vc,robot druid.fieldTypes timestamp with local time zone,string diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out index f39b762a8b..455bdd5421 100644 --- a/ql/src/test/results/clientpositive/druid_timeseries.q.out +++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1 +PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n3 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_1 -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1 +PREHOOK: Output: default@druid_table_1_n3 +POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n3 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_1 -PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` >= '2010-01-01 00:00:00 UTC' AND `__time` <= '2012-03-01 00:00:00 UTC' OR added <= 0 +POSTHOOK: Output: default@druid_table_1_n3 +PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00 UTC' AND `__time` <= '2012-03-01 00:00:00 UTC' OR added <= 0 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` >= '2010-01-01 00:00:00 UTC' AND `__time` <= '2012-03-01 00:00:00 UTC' OR added <= 0 +POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00 UTC' AND `__time` <= '2012-03-01 00:00:00 UTC' OR added <= 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -23,7 +23,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames $f0 druid.fieldTypes bigint @@ -36,9 +36,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00 UTC' +PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= 
'2010-01-01 00:00:00 UTC' +POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -49,7 +49,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames $f0 druid.fieldTypes bigint @@ -64,11 +64,11 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -79,7 +79,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames $f0,$f1 druid.fieldTypes float,double @@ -94,12 +94,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT `__time`, max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY `__time` PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT `__time`, max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY `__time` POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -111,7 +111,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames extract,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -126,12 +126,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_year(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_year(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_year(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_year(`__time`) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -143,7 +143,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -158,12 +158,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_quarter(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_quarter(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_quarter(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_quarter(`__time`) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -175,7 +175,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames vc,added,variation druid.fieldTypes timestamp with local time zone,float,float @@ -221,12 +221,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_month(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_month(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_month(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_month(`__time`) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -238,7 +238,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -253,12 +253,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_week(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY 
floor_week(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_week(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_week(`__time`) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -270,7 +270,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -285,12 +285,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_day(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_day(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_day(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_day(`__time`) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -302,7 +302,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -317,12 +317,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_hour(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_hour(`__time`) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -334,7 +334,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -349,12 +349,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_minute(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_minute(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_minute(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_minute(`__time`) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -366,7 +366,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -381,12 +381,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_second(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_second(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_second(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 GROUP BY floor_second(`__time`) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -398,7 +398,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -413,13 +413,13 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 WHERE robot='1' GROUP BY floor_hour(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 WHERE robot='1' GROUP BY floor_hour(`__time`) POSTHOOK: type: QUERY @@ -432,7 +432,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: 
druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -447,7 +447,7 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 WHERE floor_hour(`__time`) BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) @@ -455,7 +455,7 @@ GROUP BY floor_hour(`__time`) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) -FROM druid_table_1 +FROM druid_table_1_n3 WHERE floor_hour(`__time`) BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) @@ -470,7 +470,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -488,7 +488,7 @@ SELECT subq.h, subq.m, subq.s FROM ( SELECT floor_hour(`__time`) as h, max(added) as m, sum(variation) as s - FROM druid_table_1 + FROM druid_table_1_n3 GROUP BY floor_hour(`__time`) ) subq WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) @@ -499,7 +499,7 @@ SELECT subq.h, subq.m, subq.s FROM ( SELECT floor_hour(`__time`) as h, max(added) as m, sum(variation) as s - FROM druid_table_1 + FROM druid_table_1_n3 GROUP BY floor_hour(`__time`) ) subq WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) @@ -514,7 +514,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames timestamp,$f1,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -527,9 +527,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 +PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 +POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -540,7 +540,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames $f0 druid.fieldTypes bigint @@ -553,9 +553,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00 UTC' +PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00 UTC' +POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -566,7 +566,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames $f0 druid.fieldTypes bigint @@ -579,9 +579,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` >= '2010-01-01 00:00:00' +PREHOOK: query: EXPLAIN 
SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` >= '2010-01-01 00:00:00' +POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -592,7 +592,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames $f0 druid.fieldTypes bigint @@ -605,9 +605,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00' OR `__time` <= '2012-03-01 00:00:00' +PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00' OR `__time` <= '2012-03-01 00:00:00' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00' OR `__time` <= '2012-03-01 00:00:00' +POSTHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00' OR `__time` <= '2012-03-01 00:00:00' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -618,7 +618,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n3 properties: druid.fieldNames $f0 druid.fieldTypes bigint diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out index 0dc5e2a4b9..7bfd2ae93b 100644 --- a/ql/src/test/results/clientpositive/druid_topn.q.out +++ b/ql/src/test/results/clientpositive/druid_topn.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1 +PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n1 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@druid_table_1 -POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1 +PREHOOK: Output: default@druid_table_1_n1 +POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n1 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@druid_table_1 -PREHOOK: query: DESCRIBE FORMATTED druid_table_1 +POSTHOOK: Output: default@druid_table_1_n1 +PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@druid_table_1 -POSTHOOK: query: DESCRIBE FORMATTED druid_table_1 +PREHOOK: Input: default@druid_table_1_n1 +POSTHOOK: query: DESCRIBE FORMATTED druid_table_1_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@druid_table_1 +POSTHOOK: Input: default@druid_table_1_n1 # col_name data_type comment __time timestamp with local time zone from deserializer robot string from deserializer @@ -62,14 +62,14 @@ Storage Desc Params: serialization.format 1 PREHOOK: query: EXPLAIN SELECT robot, max(added) as m, sum(variation) -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot ORDER BY m DESC LIMIT 100 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT robot, max(added) as m, sum(variation) -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot ORDER BY m DESC LIMIT 100 @@ -83,7 +83,7 @@ STAGE PLANS: limit: -1 Processor Tree: 
TableScan - alias: druid_table_1 + alias: druid_table_1_n1 properties: druid.fieldNames robot,$f1,$f2 druid.fieldTypes string,float,double @@ -98,14 +98,14 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT robot, `__time`, max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, `__time` ORDER BY s DESC LIMIT 100 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT robot, `__time`, max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, `__time` ORDER BY s DESC LIMIT 100 @@ -119,7 +119,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n1 properties: druid.fieldNames extract,robot,$f2,$f3 druid.fieldTypes timestamp with local time zone,string,float,double @@ -134,14 +134,14 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, floor_year(`__time`) ORDER BY s DESC LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, floor_year(`__time`) ORDER BY s DESC LIMIT 10 @@ -155,7 +155,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n1 properties: druid.fieldNames robot,floor_year,$f2,$f3 druid.fieldTypes string,timestamp with local time zone,float,double @@ -170,14 +170,14 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, floor_month(`__time`) ORDER BY s LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, floor_month(`__time`) ORDER BY s LIMIT 10 @@ -191,7 +191,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n1 properties: druid.fieldNames robot,floor_month,$f2,$f3 druid.fieldTypes string,timestamp with local time zone,float,double @@ -206,14 +206,14 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, namespace, floor_month(`__time`) ORDER BY s DESC, m DESC LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, namespace, floor_month(`__time`) ORDER BY s DESC, m DESC LIMIT 10 @@ -227,7 +227,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n1 properties: druid.fieldNames robot,namespace,floor_month,$f3,$f4 druid.fieldTypes string,string,timestamp with local time zone,float,double @@ -242,14 +242,14 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, namespace, floor_month(`__time`) ORDER BY robot ASC, m DESC LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 GROUP BY robot, namespace, floor_month(`__time`) ORDER BY robot ASC, m DESC LIMIT 10 @@ -263,7 +263,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n1 
properties: druid.fieldNames robot,namespace,floor_month,$f3,$f4 druid.fieldTypes string,string,timestamp with local time zone,float,double @@ -278,7 +278,7 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 WHERE robot='1' GROUP BY robot, floor_year(`__time`) ORDER BY s @@ -286,7 +286,7 @@ LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s -FROM druid_table_1 +FROM druid_table_1_n1 WHERE robot='1' GROUP BY robot, floor_year(`__time`) ORDER BY s @@ -301,7 +301,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n1 properties: druid.fieldNames floor_year,$f1_0,$f2 druid.fieldTypes timestamp with local time zone,float,double @@ -316,7 +316,7 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation) -FROM druid_table_1 +FROM druid_table_1_n1 WHERE floor_hour(`__time`) BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) @@ -326,7 +326,7 @@ LIMIT 100 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation) -FROM druid_table_1 +FROM druid_table_1_n1 WHERE floor_hour(`__time`) BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE) @@ -343,7 +343,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: druid_table_1 + alias: druid_table_1_n1 properties: druid.fieldNames robot,floor_hour,$f2,$f3 druid.fieldTypes string,timestamp with local time zone,float,double diff --git a/ql/src/test/results/clientpositive/dynamic_partition_insert.q.out b/ql/src/test/results/clientpositive/dynamic_partition_insert.q.out index 295986e4d8..bd3f1893d0 100644 --- a/ql/src/test/results/clientpositive/dynamic_partition_insert.q.out +++ b/ql/src/test/results/clientpositive/dynamic_partition_insert.q.out @@ -1,100 +1,100 @@ -PREHOOK: query: CREATE TABLE t1 (c1 BIGINT, c2 STRING) +PREHOOK: query: CREATE TABLE t1_n45 (c1 BIGINT, c2 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: CREATE TABLE t1 (c1 BIGINT, c2 STRING) +PREHOOK: Output: default@t1_n45 +POSTHOOK: query: CREATE TABLE t1_n45 (c1 BIGINT, c2 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE t2 (c1 INT, c2 STRING) +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: CREATE TABLE t2_n24 (c1 INT, c2 STRING) PARTITIONED BY (p1 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: CREATE TABLE t2 (c1 INT, c2 STRING) +PREHOOK: Output: default@t2_n24 +POSTHOOK: query: CREATE TABLE t2_n24 (c1 INT, c2 STRING) PARTITIONED BY (p1 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +POSTHOOK: Output: default@t2_n24 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +PREHOOK: Output: 
default@t1_n45 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +PREHOOK: Output: default@t1_n45 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +PREHOOK: Output: default@t1_n45 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +PREHOOK: Output: default@t1_n45 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1 +PREHOOK: Output: default@t1_n45 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dynamic_partition_insert.txt' INTO TABLE t1_n45 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: INSERT OVERWRITE TABLE t2 partition(p1) SELECT *,c1 AS p1 FROM t1 DISTRIBUTE BY p1 +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: INSERT OVERWRITE TABLE t2_n24 partition(p1) SELECT *,c1 AS p1 FROM t1_n45 DISTRIBUTE BY p1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: INSERT OVERWRITE TABLE t2 partition(p1) SELECT *,c1 AS p1 FROM t1 DISTRIBUTE BY p1 +PREHOOK: Input: default@t1_n45 +PREHOOK: Output: default@t2_n24 +POSTHOOK: query: INSERT OVERWRITE TABLE t2_n24 partition(p1) SELECT *,c1 AS p1 FROM t1_n45 DISTRIBUTE BY p1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2@p1=1 -POSTHOOK: Output: default@t2@p1=2 
-POSTHOOK: Output: default@t2@p1=3 -POSTHOOK: Output: default@t2@p1=4 -POSTHOOK: Output: default@t2@p1=5 -POSTHOOK: Lineage: t2 PARTITION(p1=1).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=1).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=2).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=2).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=3).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=3).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=4).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=4).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=5).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=5).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM t2 +POSTHOOK: Input: default@t1_n45 +POSTHOOK: Output: default@t2_n24@p1=1 +POSTHOOK: Output: default@t2_n24@p1=2 +POSTHOOK: Output: default@t2_n24@p1=3 +POSTHOOK: Output: default@t2_n24@p1=4 +POSTHOOK: Output: default@t2_n24@p1=5 +POSTHOOK: Lineage: t2_n24 PARTITION(p1=1).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=1).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=2).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=2).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=3).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=3).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=4).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=4).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=5).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=5).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM t2_n24 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@p1=1 -PREHOOK: Input: default@t2@p1=2 -PREHOOK: Input: default@t2@p1=3 -PREHOOK: Input: default@t2@p1=4 -PREHOOK: Input: default@t2@p1=5 +PREHOOK: Input: default@t2_n24 +PREHOOK: Input: default@t2_n24@p1=1 +PREHOOK: Input: default@t2_n24@p1=2 +PREHOOK: Input: default@t2_n24@p1=3 +PREHOOK: Input: default@t2_n24@p1=4 +PREHOOK: Input: default@t2_n24@p1=5 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM t2 +POSTHOOK: query: SELECT * FROM t2_n24 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@p1=1 -POSTHOOK: Input: default@t2@p1=2 -POSTHOOK: Input: default@t2@p1=3 -POSTHOOK: Input: default@t2@p1=4 -POSTHOOK: Input: default@t2@p1=5 +POSTHOOK: Input: default@t2_n24 +POSTHOOK: Input: 
default@t2_n24@p1=1 +POSTHOOK: Input: default@t2_n24@p1=2 +POSTHOOK: Input: default@t2_n24@p1=3 +POSTHOOK: Input: default@t2_n24@p1=4 +POSTHOOK: Input: default@t2_n24@p1=5 #### A masked pattern was here #### 1 one 1 1 one 1 @@ -121,70 +121,70 @@ POSTHOOK: Input: default@t2@p1=5 5 five 5 5 five 5 5 five 5 -PREHOOK: query: TRUNCATE TABLE t2 +PREHOOK: query: TRUNCATE TABLE t2_n24 PREHOOK: type: TRUNCATETABLE -PREHOOK: Output: default@t2@p1=1 -PREHOOK: Output: default@t2@p1=2 -PREHOOK: Output: default@t2@p1=3 -PREHOOK: Output: default@t2@p1=4 -PREHOOK: Output: default@t2@p1=5 -POSTHOOK: query: TRUNCATE TABLE t2 +PREHOOK: Output: default@t2_n24@p1=1 +PREHOOK: Output: default@t2_n24@p1=2 +PREHOOK: Output: default@t2_n24@p1=3 +PREHOOK: Output: default@t2_n24@p1=4 +PREHOOK: Output: default@t2_n24@p1=5 +POSTHOOK: query: TRUNCATE TABLE t2_n24 POSTHOOK: type: TRUNCATETABLE -POSTHOOK: Output: default@t2@p1=1 -POSTHOOK: Output: default@t2@p1=2 -POSTHOOK: Output: default@t2@p1=3 -POSTHOOK: Output: default@t2@p1=4 -POSTHOOK: Output: default@t2@p1=5 -PREHOOK: query: INSERT OVERWRITE TABLE t2 SELECT *,c1 AS p1 FROM t1 DISTRIBUTE BY p1 +POSTHOOK: Output: default@t2_n24@p1=1 +POSTHOOK: Output: default@t2_n24@p1=2 +POSTHOOK: Output: default@t2_n24@p1=3 +POSTHOOK: Output: default@t2_n24@p1=4 +POSTHOOK: Output: default@t2_n24@p1=5 +PREHOOK: query: INSERT OVERWRITE TABLE t2_n24 SELECT *,c1 AS p1 FROM t1_n45 DISTRIBUTE BY p1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: INSERT OVERWRITE TABLE t2 SELECT *,c1 AS p1 FROM t1 DISTRIBUTE BY p1 +PREHOOK: Input: default@t1_n45 +PREHOOK: Output: default@t2_n24 +POSTHOOK: query: INSERT OVERWRITE TABLE t2_n24 SELECT *,c1 AS p1 FROM t1_n45 DISTRIBUTE BY p1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2@p1=1 -POSTHOOK: Output: default@t2@p1=2 -POSTHOOK: Output: default@t2@p1=3 -POSTHOOK: Output: default@t2@p1=4 -POSTHOOK: Output: default@t2@p1=5 -POSTHOOK: Lineage: t2 PARTITION(p1=1).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=1).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=2).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=2).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=3).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=3).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=4).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=4).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=5).c1 EXPRESSION [(t1)t1.FieldSchema(name:c1, type:bigint, comment:null), ] -POSTHOOK: Lineage: t2 PARTITION(p1=5).c2 SIMPLE [(t1)t1.FieldSchema(name:c2, type:string, comment:null), ] -PREHOOK: query: SHOW PARTITIONS t2 +POSTHOOK: Input: default@t1_n45 +POSTHOOK: Output: default@t2_n24@p1=1 +POSTHOOK: Output: default@t2_n24@p1=2 +POSTHOOK: Output: default@t2_n24@p1=3 +POSTHOOK: Output: default@t2_n24@p1=4 +POSTHOOK: Output: default@t2_n24@p1=5 +POSTHOOK: Lineage: t2_n24 PARTITION(p1=1).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=1).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, 
type:string, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=2).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=2).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=3).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=3).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=4).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=4).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=5).c1 EXPRESSION [(t1_n45)t1_n45.FieldSchema(name:c1, type:bigint, comment:null), ] +POSTHOOK: Lineage: t2_n24 PARTITION(p1=5).c2 SIMPLE [(t1_n45)t1_n45.FieldSchema(name:c2, type:string, comment:null), ] +PREHOOK: query: SHOW PARTITIONS t2_n24 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@t2 -POSTHOOK: query: SHOW PARTITIONS t2 +PREHOOK: Input: default@t2_n24 +POSTHOOK: query: SHOW PARTITIONS t2_n24 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2_n24 p1=1 p1=2 p1=3 p1=4 p1=5 -PREHOOK: query: SELECT * FROM t2 +PREHOOK: query: SELECT * FROM t2_n24 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t2@p1=1 -PREHOOK: Input: default@t2@p1=2 -PREHOOK: Input: default@t2@p1=3 -PREHOOK: Input: default@t2@p1=4 -PREHOOK: Input: default@t2@p1=5 +PREHOOK: Input: default@t2_n24 +PREHOOK: Input: default@t2_n24@p1=1 +PREHOOK: Input: default@t2_n24@p1=2 +PREHOOK: Input: default@t2_n24@p1=3 +PREHOOK: Input: default@t2_n24@p1=4 +PREHOOK: Input: default@t2_n24@p1=5 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM t2 +POSTHOOK: query: SELECT * FROM t2_n24 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t2@p1=1 -POSTHOOK: Input: default@t2@p1=2 -POSTHOOK: Input: default@t2@p1=3 -POSTHOOK: Input: default@t2@p1=4 -POSTHOOK: Input: default@t2@p1=5 +POSTHOOK: Input: default@t2_n24 +POSTHOOK: Input: default@t2_n24@p1=1 +POSTHOOK: Input: default@t2_n24@p1=2 +POSTHOOK: Input: default@t2_n24@p1=3 +POSTHOOK: Input: default@t2_n24@p1=4 +POSTHOOK: Input: default@t2_n24@p1=5 #### A masked pattern was here #### 1 one 1 1 one 1 @@ -211,451 +211,451 @@ POSTHOOK: Input: default@t2@p1=5 5 five 5 5 five 5 5 five 5 -PREHOOK: query: DROP TABLE t1 +PREHOOK: query: DROP TABLE t1_n45 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: DROP TABLE t1 +PREHOOK: Input: default@t1_n45 +PREHOOK: Output: default@t1_n45 +POSTHOOK: query: DROP TABLE t1_n45 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: DROP TABLE t2 +POSTHOOK: Input: default@t1_n45 +POSTHOOK: Output: default@t1_n45 +PREHOOK: query: DROP TABLE t2_n24 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: DROP TABLE t2 +PREHOOK: Input: default@t2_n24 +PREHOOK: Output: default@t2_n24 +POSTHOOK: query: DROP TABLE t2_n24 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: CREATE TABLE table1 (id int) partitioned by (key string) clustered by (id) into 2 buckets +POSTHOOK: Input: default@t2_n24 +POSTHOOK: Output: default@t2_n24 +PREHOOK: query: CREATE TABLE table1_n14 (id int) 
partitioned by (key string) clustered by (id) into 2 buckets
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: CREATE TABLE table1 (id int) partitioned by (key string) clustered by (id) into 2 buckets
+PREHOOK: Output: default@table1_n14
+POSTHOOK: query: CREATE TABLE table1_n14 (id int) partitioned by (key string) clustered by (id) into 2 buckets
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: INSERT INTO TABLE table1 VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
+POSTHOOK: Output: default@table1_n14
+PREHOOK: query: INSERT INTO TABLE table1_n14 VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@table1
-POSTHOOK: query: INSERT INTO TABLE table1 VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
+PREHOOK: Output: default@table1_n14
+POSTHOOK: query: INSERT INTO TABLE table1_n14 VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@table1@key=101
-POSTHOOK: Output: default@table1@key=202
-POSTHOOK: Output: default@table1@key=303
-POSTHOOK: Output: default@table1@key=404
-POSTHOOK: Output: default@table1@key=505
-POSTHOOK: Lineage: table1 PARTITION(key=101).id SCRIPT []
-POSTHOOK: Lineage: table1 PARTITION(key=202).id SCRIPT []
-POSTHOOK: Lineage: table1 PARTITION(key=303).id SCRIPT []
-POSTHOOK: Lineage: table1 PARTITION(key=404).id SCRIPT []
-POSTHOOK: Lineage: table1 PARTITION(key=505).id SCRIPT []
-PREHOOK: query: SHOW PARTITIONS table1
+POSTHOOK: Output: default@table1_n14@key=101
+POSTHOOK: Output: default@table1_n14@key=202
+POSTHOOK: Output: default@table1_n14@key=303
+POSTHOOK: Output: default@table1_n14@key=404
+POSTHOOK: Output: default@table1_n14@key=505
+POSTHOOK: Lineage: table1_n14 PARTITION(key=101).id SCRIPT []
+POSTHOOK: Lineage: table1_n14 PARTITION(key=202).id SCRIPT []
+POSTHOOK: Lineage: table1_n14 PARTITION(key=303).id SCRIPT []
+POSTHOOK: Lineage: table1_n14 PARTITION(key=404).id SCRIPT []
+POSTHOOK: Lineage: table1_n14 PARTITION(key=505).id SCRIPT []
+PREHOOK: query: SHOW PARTITIONS table1_n14
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@table1
-POSTHOOK: query: SHOW PARTITIONS table1
+PREHOOK: Input: default@table1_n14
+POSTHOOK: query: SHOW PARTITIONS table1_n14
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n14
key=101
key=202
key=303
key=404
key=505
-PREHOOK: query: SELECT * FROM table1
+PREHOOK: query: SELECT * FROM table1_n14
PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-PREHOOK: Input: default@table1@key=101
-PREHOOK: Input: default@table1@key=202
-PREHOOK: Input: default@table1@key=303
-PREHOOK: Input: default@table1@key=404
-PREHOOK: Input: default@table1@key=505
+PREHOOK: Input: default@table1_n14
+PREHOOK: Input: default@table1_n14@key=101
+PREHOOK: Input: default@table1_n14@key=202
+PREHOOK: Input: default@table1_n14@key=303
+PREHOOK: Input: default@table1_n14@key=404
+PREHOOK: Input: default@table1_n14@key=505
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM table1
+POSTHOOK: query: SELECT * FROM table1_n14
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-POSTHOOK: Input: default@table1@key=101
-POSTHOOK: Input: default@table1@key=202
-POSTHOOK: Input: default@table1@key=303
-POSTHOOK: Input: default@table1@key=404
-POSTHOOK: Input: default@table1@key=505
+POSTHOOK: Input: default@table1_n14
+POSTHOOK: Input: default@table1_n14@key=101
+POSTHOOK: Input: default@table1_n14@key=202
+POSTHOOK: Input: default@table1_n14@key=303
+POSTHOOK: Input: default@table1_n14@key=404
+POSTHOOK: Input: default@table1_n14@key=505
#### A masked pattern was here ####
1	101
2	202
3	303
4	404
5	505
-PREHOOK: query: DROP TABLE table1
+PREHOOK: query: DROP TABLE table1_n14
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: DROP TABLE table1
+PREHOOK: Input: default@table1_n14
+PREHOOK: Output: default@table1_n14
+POSTHOOK: query: DROP TABLE table1_n14
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: CREATE TABLE table1 (name string, age int) PARTITIONED BY (country string, state string)
+POSTHOOK: Input: default@table1_n14
+POSTHOOK: Output: default@table1_n14
+PREHOOK: query: CREATE TABLE table1_n14 (name string, age int) PARTITIONED BY (country string, state string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: CREATE TABLE table1 (name string, age int) PARTITIONED BY (country string, state string)
+PREHOOK: Output: default@table1_n14
+POSTHOOK: query: CREATE TABLE table1_n14 (name string, age int) PARTITIONED BY (country string, state string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: INSERT INTO table1 values ('John Doe', 23, 'USA', 'CA'), ('Jane Doe', 22, 'USA', 'TX')
+POSTHOOK: Output: default@table1_n14
+PREHOOK: query: INSERT INTO table1_n14 values ('John Doe', 23, 'USA', 'CA'), ('Jane Doe', 22, 'USA', 'TX')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@table1
-POSTHOOK: query: INSERT INTO table1 values ('John Doe', 23, 'USA', 'CA'), ('Jane Doe', 22, 'USA', 'TX')
+PREHOOK: Output: default@table1_n14
+POSTHOOK: query: INSERT INTO table1_n14 values ('John Doe', 23, 'USA', 'CA'), ('Jane Doe', 22, 'USA', 'TX')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@table1@country=USA/state=CA
-POSTHOOK: Output: default@table1@country=USA/state=TX
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age SCRIPT []
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SCRIPT []
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).age SCRIPT []
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).name SCRIPT []
-PREHOOK: query: SHOW PARTITIONS table1
+POSTHOOK: Output: default@table1_n14@country=USA/state=CA
+POSTHOOK: Output: default@table1_n14@country=USA/state=TX
+POSTHOOK: Lineage: table1_n14 PARTITION(country=USA,state=CA).age SCRIPT []
+POSTHOOK: Lineage: table1_n14 PARTITION(country=USA,state=CA).name SCRIPT []
+POSTHOOK: Lineage: table1_n14 PARTITION(country=USA,state=TX).age SCRIPT []
+POSTHOOK: Lineage: table1_n14 PARTITION(country=USA,state=TX).name SCRIPT []
+PREHOOK: query: SHOW PARTITIONS table1_n14
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@table1
-POSTHOOK: query: SHOW PARTITIONS table1
+PREHOOK: Input: default@table1_n14
+POSTHOOK: query: SHOW PARTITIONS table1_n14
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@table1
+POSTHOOK: Input: default@table1_n14
country=USA/state=CA
country=USA/state=TX
-PREHOOK: query: CREATE TABLE table2 (name string, age int) PARTITIONED BY (country string, state string)
+PREHOOK: query: CREATE TABLE table2_n10 (name string, age int) PARTITIONED BY (country string, state string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table2
-POSTHOOK: query: CREATE TABLE table2 (name string, age int) PARTITIONED BY (country string, state string)
+PREHOOK: Output: default@table2_n10
+POSTHOOK: query: CREATE TABLE table2_n10 (name string, age int) PARTITIONED BY (country string, state string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table2
-PREHOOK: query: INSERT INTO TABLE table2 SELECT * FROM table1
+POSTHOOK: Output: default@table2_n10
+PREHOOK: query: INSERT INTO TABLE table2_n10 SELECT * FROM table1_n14
PREHOOK: type: QUERY
-PREHOOK: Input: default@table1
-PREHOOK: Input: default@table1@country=USA/state=CA
-PREHOOK: Input: default@table1@country=USA/state=TX
-PREHOOK: Output: default@table2
-POSTHOOK: query: INSERT INTO TABLE table2 SELECT * FROM table1
+PREHOOK: Input: default@table1_n14
+PREHOOK: Input: default@table1_n14@country=USA/state=CA
+PREHOOK: Input: default@table1_n14@country=USA/state=TX
+PREHOOK: Output: default@table2_n10
+POSTHOOK: query: INSERT INTO TABLE table2_n10 SELECT * FROM table1_n14
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table1
-POSTHOOK: Input: default@table1@country=USA/state=CA
-POSTHOOK: Input: default@table1@country=USA/state=TX
-POSTHOOK: Output: default@table2@country=USA/state=CA
-POSTHOOK: Output: default@table2@country=USA/state=TX
-POSTHOOK: Lineage: table2 PARTITION(country=USA,state=CA).age SIMPLE [(table1)table1.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: table2 PARTITION(country=USA,state=CA).name SIMPLE [(table1)table1.FieldSchema(name:name, type:string, comment:null), ]
-POSTHOOK: Lineage: table2 PARTITION(country=USA,state=TX).age SIMPLE [(table1)table1.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: table2 PARTITION(country=USA,state=TX).name SIMPLE [(table1)table1.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: SHOW PARTITIONS table2
+POSTHOOK: Input: default@table1_n14
+POSTHOOK: Input: default@table1_n14@country=USA/state=CA
+POSTHOOK: Input: default@table1_n14@country=USA/state=TX
+POSTHOOK: Output: default@table2_n10@country=USA/state=CA
+POSTHOOK: Output: default@table2_n10@country=USA/state=TX
+POSTHOOK: Lineage: table2_n10 PARTITION(country=USA,state=CA).age SIMPLE [(table1_n14)table1_n14.FieldSchema(name:age, type:int, comment:null), ]
+POSTHOOK: Lineage: table2_n10 PARTITION(country=USA,state=CA).name SIMPLE [(table1_n14)table1_n14.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: table2_n10 PARTITION(country=USA,state=TX).age SIMPLE [(table1_n14)table1_n14.FieldSchema(name:age, type:int, comment:null), ]
+POSTHOOK: Lineage: table2_n10 PARTITION(country=USA,state=TX).name SIMPLE [(table1_n14)table1_n14.FieldSchema(name:name, type:string, comment:null), ]
+PREHOOK: query: SHOW PARTITIONS table2_n10
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@table2
-POSTHOOK: query: SHOW PARTITIONS table2
+PREHOOK: Input: default@table2_n10
+POSTHOOK: query: SHOW PARTITIONS table2_n10
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@table2
+POSTHOOK: Input: default@table2_n10
country=USA/state=CA
country=USA/state=TX
-PREHOOK: query: SELECT * FROM table2
+PREHOOK: query: SELECT * FROM table2_n10
PREHOOK: type: QUERY
-PREHOOK: Input: default@table2
-PREHOOK: Input: default@table2@country=USA/state=CA
-PREHOOK: Input: default@table2@country=USA/state=TX
+PREHOOK: Input: default@table2_n10
+PREHOOK: Input: default@table2_n10@country=USA/state=CA
+PREHOOK: Input: default@table2_n10@country=USA/state=TX
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM table2
+POSTHOOK: query: SELECT * FROM table2_n10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table2
-POSTHOOK: Input: default@table2@country=USA/state=CA
-POSTHOOK: Input: default@table2@country=USA/state=TX
+POSTHOOK: Input: default@table2_n10
+POSTHOOK: Input: default@table2_n10@country=USA/state=CA
+POSTHOOK: Input: default@table2_n10@country=USA/state=TX
#### A masked pattern was here ####
John Doe	23	USA	CA
Jane Doe	22	USA	TX
-PREHOOK: query: DROP TABLE table2
+PREHOOK: query: DROP TABLE table2_n10
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table2
-PREHOOK: Output: default@table2
-POSTHOOK: query: DROP TABLE table2
+PREHOOK: Input: default@table2_n10
+PREHOOK: Output: default@table2_n10
+POSTHOOK: query: DROP TABLE table2_n10
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table2
-POSTHOOK: Output: default@table2
-PREHOOK: query: DROP TABLE table1
+POSTHOOK: Input: default@table2_n10
+POSTHOOK: Output: default@table2_n10
+PREHOOK: query: DROP TABLE table1_n14
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: DROP TABLE table1
+PREHOOK: Input: default@table1_n14
+PREHOOK: Output: default@table1_n14
+POSTHOOK: query: DROP TABLE table1_n14
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
-PREHOOK: query: CREATE TABLE dest1(key string) partitioned by (value string)
+POSTHOOK: Input: default@table1_n14
+POSTHOOK: Output: default@table1_n14
+PREHOOK: query: CREATE TABLE dest1_n119(key string) partitioned by (value string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key string) partitioned by (value string)
+PREHOOK: Output: default@dest1_n119
+POSTHOOK: query: CREATE TABLE dest1_n119(key string) partitioned by (value string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: CREATE TABLE dest2(key string) partitioned by (value string)
+POSTHOOK: Output: default@dest1_n119
+PREHOOK: query: CREATE TABLE dest2_n16(key string) partitioned by (value string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest2
-POSTHOOK: query: CREATE TABLE dest2(key string) partitioned by (value string)
+PREHOOK: Output: default@dest2_n16
+POSTHOOK: query: CREATE TABLE dest2_n16(key string) partitioned by (value string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest2
+POSTHOOK: Output: default@dest2_n16
PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest1_n119 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n16 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n119
+PREHOOK: Output: default@dest2_n16
POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest1_n119 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n16 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1@value=val_0
-POSTHOOK: Output: default@dest1@value=val_10
-POSTHOOK: Output: default@dest1@value=val_11
-POSTHOOK: Output: default@dest1@value=val_12
-POSTHOOK: Output: default@dest1@value=val_15
-POSTHOOK: Output: default@dest1@value=val_17
-POSTHOOK: Output: default@dest1@value=val_18
-POSTHOOK: Output: default@dest1@value=val_19
-POSTHOOK: Output: default@dest1@value=val_2
-POSTHOOK: Output: default@dest1@value=val_20
-POSTHOOK: Output: default@dest1@value=val_24
-POSTHOOK: Output: default@dest1@value=val_26
-POSTHOOK: Output: default@dest1@value=val_27
-POSTHOOK: Output: default@dest1@value=val_28
-POSTHOOK: Output: default@dest1@value=val_30
-POSTHOOK: Output: default@dest1@value=val_33
-POSTHOOK: Output: default@dest1@value=val_34
-POSTHOOK: Output: default@dest1@value=val_35
-POSTHOOK: Output: default@dest1@value=val_37
-POSTHOOK: Output: default@dest1@value=val_4
-POSTHOOK: Output: default@dest1@value=val_41
-POSTHOOK: Output: default@dest1@value=val_42
-POSTHOOK: Output: default@dest1@value=val_43
-POSTHOOK: Output: default@dest1@value=val_44
-POSTHOOK: Output: default@dest1@value=val_47
-POSTHOOK: Output: default@dest1@value=val_5
-POSTHOOK: Output: default@dest1@value=val_51
-POSTHOOK: Output: default@dest1@value=val_53
-POSTHOOK: Output: default@dest1@value=val_54
-POSTHOOK: Output: default@dest1@value=val_57
-POSTHOOK: Output: default@dest1@value=val_58
-POSTHOOK: Output: default@dest1@value=val_64
-POSTHOOK: Output: default@dest1@value=val_65
-POSTHOOK: Output: default@dest1@value=val_66
-POSTHOOK: Output: default@dest1@value=val_67
-POSTHOOK: Output: default@dest1@value=val_69
-POSTHOOK: Output: default@dest1@value=val_70
-POSTHOOK: Output: default@dest1@value=val_72
-POSTHOOK: Output: default@dest1@value=val_74
-POSTHOOK: Output: default@dest1@value=val_76
-POSTHOOK: Output: default@dest1@value=val_77
-POSTHOOK: Output: default@dest1@value=val_78
-POSTHOOK: Output: default@dest1@value=val_8
-POSTHOOK: Output: default@dest1@value=val_80
-POSTHOOK: Output: default@dest1@value=val_82
-POSTHOOK: Output: default@dest1@value=val_83
-POSTHOOK: Output: default@dest1@value=val_84
-POSTHOOK: Output: default@dest1@value=val_85
-POSTHOOK: Output: default@dest1@value=val_86
-POSTHOOK: Output: default@dest1@value=val_87
-POSTHOOK: Output: default@dest1@value=val_9
-POSTHOOK: Output: default@dest1@value=val_90
-POSTHOOK: Output: default@dest1@value=val_92
-POSTHOOK: Output: default@dest1@value=val_95
-POSTHOOK: Output: default@dest1@value=val_96
-POSTHOOK: Output: default@dest1@value=val_97
-POSTHOOK: Output: default@dest1@value=val_98
-POSTHOOK: Output: default@dest2@value=val_100
-POSTHOOK: Output: default@dest2@value=val_103
-POSTHOOK: Output: default@dest2@value=val_104
-POSTHOOK: Output: default@dest2@value=val_105
-POSTHOOK: Output: default@dest2@value=val_111
-POSTHOOK: Output: default@dest2@value=val_113
-POSTHOOK: Output: default@dest2@value=val_114
-POSTHOOK: Output: default@dest2@value=val_116
-POSTHOOK: Output: default@dest2@value=val_118
-POSTHOOK: Output: default@dest2@value=val_119
-POSTHOOK: Output: default@dest2@value=val_120
-POSTHOOK: Output: default@dest2@value=val_125
-POSTHOOK: Output: default@dest2@value=val_126
-POSTHOOK: Output: default@dest2@value=val_128
-POSTHOOK: Output: default@dest2@value=val_129
-POSTHOOK: Output: default@dest2@value=val_131
-POSTHOOK: Output: default@dest2@value=val_133
-POSTHOOK: Output: default@dest2@value=val_134
-POSTHOOK: Output: default@dest2@value=val_136
-POSTHOOK: Output: default@dest2@value=val_137
-POSTHOOK: Output: default@dest2@value=val_138
-POSTHOOK: Output: default@dest2@value=val_143
-POSTHOOK: Output: default@dest2@value=val_145
-POSTHOOK: Output: default@dest2@value=val_146
-POSTHOOK: Output: default@dest2@value=val_149
-POSTHOOK: Output: default@dest2@value=val_150
-POSTHOOK: Output: default@dest2@value=val_152
-POSTHOOK: Output: default@dest2@value=val_153
-POSTHOOK: Output: default@dest2@value=val_155
-POSTHOOK: Output: default@dest2@value=val_156
-POSTHOOK: Output: default@dest2@value=val_157
-POSTHOOK: Output: default@dest2@value=val_158
-POSTHOOK: Output: default@dest2@value=val_160
-POSTHOOK: Output: default@dest2@value=val_162
-POSTHOOK: Output: default@dest2@value=val_163
-POSTHOOK: Output: default@dest2@value=val_164
-POSTHOOK: Output: default@dest2@value=val_165
-POSTHOOK: Output: default@dest2@value=val_166
-POSTHOOK: Output: default@dest2@value=val_167
-POSTHOOK: Output: default@dest2@value=val_168
-POSTHOOK: Output: default@dest2@value=val_169
-POSTHOOK: Output: default@dest2@value=val_170
-POSTHOOK: Output: default@dest2@value=val_172
-POSTHOOK: Output: default@dest2@value=val_174
-POSTHOOK: Output: default@dest2@value=val_175
-POSTHOOK: Output: default@dest2@value=val_176
-POSTHOOK: Output: default@dest2@value=val_177
-POSTHOOK: Output: default@dest2@value=val_178
-POSTHOOK: Output: default@dest2@value=val_179
-POSTHOOK: Output: default@dest2@value=val_180
-POSTHOOK: Output: default@dest2@value=val_181
-POSTHOOK: Output: default@dest2@value=val_183
-POSTHOOK: Output: default@dest2@value=val_186
-POSTHOOK: Output: default@dest2@value=val_187
-POSTHOOK: Output: default@dest2@value=val_189
-POSTHOOK: Output: default@dest2@value=val_190
-POSTHOOK: Output: default@dest2@value=val_191
-POSTHOOK: Output: default@dest2@value=val_192
-POSTHOOK: Output: default@dest2@value=val_193
-POSTHOOK: Output: default@dest2@value=val_194
-POSTHOOK: Output: default@dest2@value=val_195
-POSTHOOK: Output: default@dest2@value=val_196
-POSTHOOK: Output: default@dest2@value=val_197
-POSTHOOK: Output: default@dest2@value=val_199
-POSTHOOK: Lineage: dest1 PARTITION(value=val_0).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_10).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_15).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_17).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_18).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_19).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_20).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_24).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_26).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_27).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_28).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_30).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_33).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_34).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_35).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_37).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_41).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_42).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_43).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_44).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_47).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_4).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_51).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_53).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_54).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_57).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_58).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_5).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_64).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_65).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_66).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_67).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_69).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_70).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_72).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_74).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_76).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_77).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_78).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_80).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_82).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_83).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_84).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_85).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_86).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_87).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_8).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_90).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_92).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_95).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_96).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_97).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_98).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1 PARTITION(value=val_9).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_103).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_104).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_105).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_111).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_113).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_114).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_116).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_118).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_119).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_120).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_125).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_126).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_128).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_129).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_131).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_133).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_134).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_136).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_137).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_138).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_143).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_145).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_146).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_149).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_150).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_152).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_153).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_155).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_156).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_157).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_158).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_160).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_162).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_163).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_164).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_165).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_166).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_167).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_168).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_169).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_170).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_172).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_174).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_175).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_176).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_177).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_178).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_179).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_180).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_181).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_183).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_186).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_187).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_189).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_190).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_191).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_192).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_193).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_194).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_195).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_196).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_197).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2 PARTITION(value=val_199).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Output: default@dest1_n119@value=val_0
+POSTHOOK: Output: default@dest1_n119@value=val_10
+POSTHOOK: Output: default@dest1_n119@value=val_11
+POSTHOOK: Output: default@dest1_n119@value=val_12
+POSTHOOK: Output: default@dest1_n119@value=val_15
+POSTHOOK: Output: default@dest1_n119@value=val_17
+POSTHOOK: Output: default@dest1_n119@value=val_18
+POSTHOOK: Output: default@dest1_n119@value=val_19
+POSTHOOK: Output: default@dest1_n119@value=val_2
+POSTHOOK: Output: default@dest1_n119@value=val_20
+POSTHOOK: Output: default@dest1_n119@value=val_24
+POSTHOOK: Output: default@dest1_n119@value=val_26
+POSTHOOK: Output: default@dest1_n119@value=val_27
+POSTHOOK: Output: default@dest1_n119@value=val_28
+POSTHOOK: Output: default@dest1_n119@value=val_30
+POSTHOOK: Output: default@dest1_n119@value=val_33
+POSTHOOK: Output: default@dest1_n119@value=val_34
+POSTHOOK: Output: default@dest1_n119@value=val_35
+POSTHOOK: Output: default@dest1_n119@value=val_37
+POSTHOOK: Output: default@dest1_n119@value=val_4
+POSTHOOK: Output: default@dest1_n119@value=val_41
+POSTHOOK: Output: default@dest1_n119@value=val_42
+POSTHOOK: Output: default@dest1_n119@value=val_43
+POSTHOOK: Output: default@dest1_n119@value=val_44
+POSTHOOK: Output: default@dest1_n119@value=val_47
+POSTHOOK: Output: default@dest1_n119@value=val_5
+POSTHOOK: Output: default@dest1_n119@value=val_51
+POSTHOOK: Output: default@dest1_n119@value=val_53
+POSTHOOK: Output: default@dest1_n119@value=val_54
+POSTHOOK: Output: default@dest1_n119@value=val_57
+POSTHOOK: Output: default@dest1_n119@value=val_58
+POSTHOOK: Output: default@dest1_n119@value=val_64
+POSTHOOK: Output: default@dest1_n119@value=val_65
+POSTHOOK: Output: default@dest1_n119@value=val_66
+POSTHOOK: Output: default@dest1_n119@value=val_67
+POSTHOOK: Output: default@dest1_n119@value=val_69
+POSTHOOK: Output: default@dest1_n119@value=val_70
+POSTHOOK: Output: default@dest1_n119@value=val_72
+POSTHOOK: Output: default@dest1_n119@value=val_74
+POSTHOOK: Output: default@dest1_n119@value=val_76
+POSTHOOK: Output: default@dest1_n119@value=val_77
+POSTHOOK: Output: default@dest1_n119@value=val_78
+POSTHOOK: Output: default@dest1_n119@value=val_8
+POSTHOOK: Output: default@dest1_n119@value=val_80
+POSTHOOK: Output: default@dest1_n119@value=val_82
+POSTHOOK: Output: default@dest1_n119@value=val_83
+POSTHOOK: Output: default@dest1_n119@value=val_84
+POSTHOOK: Output: default@dest1_n119@value=val_85
+POSTHOOK: Output: default@dest1_n119@value=val_86
+POSTHOOK: Output: default@dest1_n119@value=val_87
+POSTHOOK: Output: default@dest1_n119@value=val_9
+POSTHOOK: Output: default@dest1_n119@value=val_90
+POSTHOOK: Output: default@dest1_n119@value=val_92
+POSTHOOK: Output: default@dest1_n119@value=val_95
+POSTHOOK: Output: default@dest1_n119@value=val_96
+POSTHOOK: Output: default@dest1_n119@value=val_97
+POSTHOOK: Output: default@dest1_n119@value=val_98
+POSTHOOK: Output: default@dest2_n16@value=val_100
+POSTHOOK: Output: default@dest2_n16@value=val_103
+POSTHOOK: Output: default@dest2_n16@value=val_104
+POSTHOOK: Output: default@dest2_n16@value=val_105
+POSTHOOK: Output: default@dest2_n16@value=val_111
+POSTHOOK: Output: default@dest2_n16@value=val_113
+POSTHOOK: Output: default@dest2_n16@value=val_114
+POSTHOOK: Output: default@dest2_n16@value=val_116
+POSTHOOK: Output: default@dest2_n16@value=val_118
+POSTHOOK: Output: default@dest2_n16@value=val_119
+POSTHOOK: Output: default@dest2_n16@value=val_120
+POSTHOOK: Output: default@dest2_n16@value=val_125
+POSTHOOK: Output: default@dest2_n16@value=val_126
+POSTHOOK: Output: default@dest2_n16@value=val_128
+POSTHOOK: Output: default@dest2_n16@value=val_129
+POSTHOOK: Output: default@dest2_n16@value=val_131
+POSTHOOK: Output: default@dest2_n16@value=val_133
+POSTHOOK: Output: default@dest2_n16@value=val_134
+POSTHOOK: Output: default@dest2_n16@value=val_136
+POSTHOOK: Output: default@dest2_n16@value=val_137
+POSTHOOK: Output: default@dest2_n16@value=val_138
+POSTHOOK: Output: default@dest2_n16@value=val_143
+POSTHOOK: Output: default@dest2_n16@value=val_145
+POSTHOOK: Output: default@dest2_n16@value=val_146
+POSTHOOK: Output: default@dest2_n16@value=val_149
+POSTHOOK: Output: default@dest2_n16@value=val_150
+POSTHOOK: Output: default@dest2_n16@value=val_152
+POSTHOOK: Output: default@dest2_n16@value=val_153
+POSTHOOK: Output: default@dest2_n16@value=val_155
+POSTHOOK: Output: default@dest2_n16@value=val_156
+POSTHOOK: Output: default@dest2_n16@value=val_157
+POSTHOOK: Output: default@dest2_n16@value=val_158
+POSTHOOK: Output: default@dest2_n16@value=val_160
+POSTHOOK: Output: default@dest2_n16@value=val_162
+POSTHOOK: Output: default@dest2_n16@value=val_163
+POSTHOOK: Output: default@dest2_n16@value=val_164
+POSTHOOK: Output: default@dest2_n16@value=val_165
+POSTHOOK: Output: default@dest2_n16@value=val_166
+POSTHOOK: Output: default@dest2_n16@value=val_167
+POSTHOOK: Output: default@dest2_n16@value=val_168
+POSTHOOK: Output: default@dest2_n16@value=val_169
+POSTHOOK: Output: default@dest2_n16@value=val_170
+POSTHOOK: Output: default@dest2_n16@value=val_172
+POSTHOOK: Output: default@dest2_n16@value=val_174
+POSTHOOK: Output: default@dest2_n16@value=val_175
+POSTHOOK: Output: default@dest2_n16@value=val_176
+POSTHOOK: Output: default@dest2_n16@value=val_177
+POSTHOOK: Output: default@dest2_n16@value=val_178
+POSTHOOK: Output: default@dest2_n16@value=val_179
+POSTHOOK: Output: default@dest2_n16@value=val_180
+POSTHOOK: Output: default@dest2_n16@value=val_181
+POSTHOOK: Output: default@dest2_n16@value=val_183
+POSTHOOK: Output: default@dest2_n16@value=val_186
+POSTHOOK: Output: default@dest2_n16@value=val_187
+POSTHOOK: Output: default@dest2_n16@value=val_189
+POSTHOOK: Output: default@dest2_n16@value=val_190
+POSTHOOK: Output: default@dest2_n16@value=val_191
+POSTHOOK: Output: default@dest2_n16@value=val_192
+POSTHOOK: Output: default@dest2_n16@value=val_193
+POSTHOOK: Output: default@dest2_n16@value=val_194
+POSTHOOK: Output: default@dest2_n16@value=val_195
+POSTHOOK: Output: default@dest2_n16@value=val_196
+POSTHOOK: Output: default@dest2_n16@value=val_197
+POSTHOOK: Output: default@dest2_n16@value=val_199
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_0).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_10).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_15).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_17).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_18).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_19).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_20).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_24).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_26).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_27).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_28).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_30).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_33).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_34).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_35).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_37).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_41).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_42).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_43).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_44).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_47).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_4).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_51).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_53).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_54).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_57).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_58).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_5).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_64).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_65).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_66).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_67).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_69).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_70).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_72).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_74).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_76).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_77).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_78).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_80).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_82).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_83).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_84).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_85).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_86).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_87).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_8).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_90).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_92).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_95).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_96).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_97).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_98).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n119 PARTITION(value=val_9).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_103).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_104).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_105).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_111).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_113).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_114).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_116).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_118).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_119).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_120).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_125).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_126).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_128).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_129).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_131).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_133).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_134).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_136).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_137).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_138).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_143).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_145).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_146).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_149).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_150).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_152).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_153).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_155).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_156).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_157).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_158).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_160).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_162).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_163).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_164).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_165).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_166).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_167).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_168).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_169).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_170).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_172).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_174).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_175).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_176).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_177).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_178).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_179).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_180).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_181).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_183).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_186).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_187).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_189).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_190).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_191).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_192).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_193).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_194).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_195).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_196).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_197).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n16 PARTITION(value=val_199).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
PREHOOK: query: SELECT distinct value FROM SRC WHERE src.key < 100
PREHOOK: type: QUERY
PREHOOK: Input: default@src
@@ -721,12 +721,12 @@ val_95
val_96
val_97
val_98
-PREHOOK: query: SHOW PARTITIONS dest1
+PREHOOK: query: SHOW PARTITIONS dest1_n119
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@dest1
-POSTHOOK: query: SHOW PARTITIONS dest1
+PREHOOK: Input: default@dest1_n119
+POSTHOOK: query: SHOW PARTITIONS dest1_n119
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n119
value=val_0
value=val_10
value=val_11
@@ -856,12 +856,12 @@ val_195
val_196
val_197
val_199
-PREHOOK: query: SHOW PARTITIONS dest2
+PREHOOK: query: SHOW PARTITIONS dest2_n16
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@dest2
-POSTHOOK: query: SHOW PARTITIONS dest2
+PREHOOK: Input: default@dest2_n16
+POSTHOOK: query: SHOW PARTITIONS dest2_n16
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n16
value=val_100
value=val_103
value=val_104
@@ -926,19 +926,19 @@ value=val_195
value=val_196
value=val_197
value=val_199
-PREHOOK: query: DROP TABLE dest1
+PREHOOK: query: DROP TABLE dest1_n119
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dest1
-PREHOOK: Output: default@dest1
-POSTHOOK: query: DROP TABLE dest1
+PREHOOK: Input: default@dest1_n119
+PREHOOK: Output: default@dest1_n119
+POSTHOOK: query: DROP TABLE dest1_n119
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dest1
-POSTHOOK: Output: default@dest1
-PREHOOK: query: DROP TABLE dest2
+POSTHOOK: Input: default@dest1_n119
+POSTHOOK: Output: default@dest1_n119
+PREHOOK: query: DROP TABLE dest2_n16
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dest2
-PREHOOK: Output: default@dest2
-POSTHOOK: query: DROP TABLE dest2
+PREHOOK: Input: default@dest2_n16
+PREHOOK: Output: default@dest2_n16
+POSTHOOK: query: DROP TABLE dest2_n16
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dest2
-POSTHOOK: Output: default@dest2
+POSTHOOK: Input: default@dest2_n16
+POSTHOOK: Output: default@dest2_n16
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_bucketing.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_bucketing.q.out
index d50d4ad53f..21e4891c9e 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_opt_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_bucketing.q.out
@@ -35,11 +35,11 @@ POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@t1_staging
POSTHOOK: Output: default@t1_staging@e=epart
-PREHOOK: query: drop table t1
+PREHOOK: query: drop table t1_n147
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table t1
+POSTHOOK: query: drop table t1_n147
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table t1(
+PREHOOK: query: create table t1_n147(
a string,
b int,
c int,
@@ -49,8 +49,8 @@ clustered by(a) sorted by(a desc) into 10 buckets
stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(
+PREHOOK: Output: default@t1_n147
+POSTHOOK: query: create table t1_n147(
a string,
b int,
c int,
@@ -60,21 +60,21 @@ clustered by(a) sorted by(a desc) into 10 buckets
stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: insert overwrite table t1 partition(e) select a,b,c,d,'epart' from t1_staging
+POSTHOOK: Output: default@t1_n147
+PREHOOK: query: insert overwrite table t1_n147 partition(e) select a,b,c,d,'epart' from t1_staging
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_staging
PREHOOK: Input: default@t1_staging@e=epart
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert overwrite table t1 partition(e) select a,b,c,d,'epart' from t1_staging
+PREHOOK: Output: default@t1_n147
+POSTHOOK: query: insert overwrite table t1_n147 partition(e) select a,b,c,d,'epart' from t1_staging
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_staging
POSTHOOK: Input: default@t1_staging@e=epart
-POSTHOOK: Output: default@t1@e=epart
-POSTHOOK: Lineage: t1 PARTITION(e=epart).a SIMPLE [(t1_staging)t1_staging.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: t1 PARTITION(e=epart).b SIMPLE [(t1_staging)t1_staging.FieldSchema(name:b, type:int, comment:null), ]
-POSTHOOK: Lineage: t1 PARTITION(e=epart).c SIMPLE [(t1_staging)t1_staging.FieldSchema(name:c, type:int, comment:null), ]
-POSTHOOK: Lineage: t1 PARTITION(e=epart).d SIMPLE [(t1_staging)t1_staging.FieldSchema(name:d, type:string, comment:null), ]
+POSTHOOK: Output: default@t1_n147@e=epart
+POSTHOOK: Lineage: t1_n147 PARTITION(e=epart).a SIMPLE [(t1_staging)t1_staging.FieldSchema(name:a, type:string, comment:null), ]
+POSTHOOK: Lineage: t1_n147 PARTITION(e=epart).b SIMPLE [(t1_staging)t1_staging.FieldSchema(name:b, type:int, comment:null), ]
+POSTHOOK: Lineage: t1_n147 PARTITION(e=epart).c SIMPLE [(t1_staging)t1_staging.FieldSchema(name:c, type:int, comment:null), ]
+POSTHOOK: Lineage: t1_n147 PARTITION(e=epart).d SIMPLE [(t1_staging)t1_staging.FieldSchema(name:d, type:string, comment:null), ]
PREHOOK: query: select 'bucket_0'
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
@@ -134,15 +134,15 @@ fffbe3c110c390ec20218e5ad4a026ff515668ed55488b717319b556daa962a1002015-01-21
fffb1b226efc3cfaac8d73647ce4fa4e82413d67265fb55366ac3a4996518738012015-01-21
fff56191e39b15f0e2f04984c70152fb1bde2ecba52ff5a73b4c28bf4d58c017002015-01-21
fff4166378aa9d94cd4f8a9cd543375890a61b4f09a57dbfb31a66b33b3e3fd9\N\N2015-01-21
-PREHOOK: query: drop table t1
+PREHOOK: query: drop table t1_n147
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: drop table t1
+PREHOOK: Input: default@t1_n147
+PREHOOK: Output: default@t1_n147
+POSTHOOK: query: drop table t1_n147
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table t1(
+POSTHOOK: Input: default@t1_n147
+POSTHOOK: Output: default@t1_n147
+PREHOOK: query: create table t1_n147(
a string,
b int,
c int,
@@ -152,8 +152,8 @@ clustered by(a) sorted by(a desc) into 10 buckets
stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(
+PREHOOK: Output: default@t1_n147
+POSTHOOK: query: create table t1_n147(
a string,
b int,
c int,
@@ -163,21 +163,21 @@ clustered by(a) sorted by(a desc) into 10 buckets
stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: insert overwrite table t1 partition(e) select a,b,c,d,'epart' from t1_staging
+POSTHOOK: Output: default@t1_n147
+PREHOOK: query: insert overwrite table t1_n147 partition(e) select a,b,c,d,'epart' from t1_staging
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_staging
PREHOOK: Input: default@t1_staging@e=epart
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert overwrite table t1 partition(e) select a,b,c,d,'epart' from t1_staging
+PREHOOK: Output: default@t1_n147
+POSTHOOK: query: insert overwrite table t1_n147 partition(e) select a,b,c,d,'epart' from t1_staging
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_staging
POSTHOOK: Input: default@t1_staging@e=epart
-POSTHOOK: Output: default@t1@e=epart
-POSTHOOK: Lineage: t1 PARTITION(e=epart).a SIMPLE [(t1_staging)t1_staging.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: t1 PARTITION(e=epart).b SIMPLE [(t1_staging)t1_staging.FieldSchema(name:b, type:int, comment:null), ]
-POSTHOOK: Lineage: t1 PARTITION(e=epart).c SIMPLE [(t1_staging)t1_staging.FieldSchema(name:c, type:int, comment:null), ]
-POSTHOOK: Lineage: t1 PARTITION(e=epart).d SIMPLE [(t1_staging)t1_staging.FieldSchema(name:d, type:string, comment:null), ]
+POSTHOOK: Output: default@t1_n147@e=epart
+POSTHOOK: Lineage: t1_n147 PARTITION(e=epart).a SIMPLE [(t1_staging)t1_staging.FieldSchema(name:a, type:string, comment:null), ]
+POSTHOOK: Lineage: t1_n147 PARTITION(e=epart).b SIMPLE [(t1_staging)t1_staging.FieldSchema(name:b, type:int, comment:null), ]
+POSTHOOK: Lineage: t1_n147 PARTITION(e=epart).c SIMPLE [(t1_staging)t1_staging.FieldSchema(name:c, type:int, comment:null), ]
+POSTHOOK: Lineage: t1_n147 PARTITION(e=epart).d SIMPLE [(t1_staging)t1_staging.FieldSchema(name:d, type:string, comment:null), ]
PREHOOK: query: select 'bucket_0'
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
diff --git a/ql/src/test/results/clientpositive/empty_join.q.out b/ql/src/test/results/clientpositive/empty_join.q.out
index 5bde18d4ff..dc473e1fcb 100644
--- a/ql/src/test/results/clientpositive/empty_join.q.out
+++ b/ql/src/test/results/clientpositive/empty_join.q.out
@@ -1,56 +1,56 @@
-PREHOOK: query: DROP TABLE IF EXISTS test_1
+PREHOOK: query: DROP TABLE IF EXISTS test_1_n2
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS test_1
+POSTHOOK: query: DROP TABLE IF EXISTS test_1_n2
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE test_1 AS SELECT 1 AS id
+PREHOOK: query: CREATE TABLE test_1_n2 AS SELECT 1 AS id
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: database:default
-PREHOOK: Output: default@test_1
-POSTHOOK: query: CREATE TABLE test_1 AS SELECT 1 AS id
+PREHOOK: Output: default@test_1_n2
+POSTHOOK: query: CREATE TABLE test_1_n2 AS SELECT 1 AS id
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_1
-POSTHOOK: Lineage: test_1.id SIMPLE []
-PREHOOK: query: DROP TABLE IF EXISTS test_2
+POSTHOOK: Output: default@test_1_n2
+POSTHOOK: Lineage: test_1_n2.id SIMPLE []
+PREHOOK: query: DROP TABLE IF EXISTS test_2_n2
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS test_2
+POSTHOOK: query: DROP TABLE IF EXISTS test_2_n2
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE test_2 (id INT)
+PREHOOK: query: CREATE TABLE test_2_n2 (id INT)
PREHOOK: type: CREATETABLE
PREHOOK: Output:
database:default -PREHOOK: Output: default@test_2 -POSTHOOK: query: CREATE TABLE test_2 (id INT) +PREHOOK: Output: default@test_2_n2 +POSTHOOK: query: CREATE TABLE test_2_n2 (id INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_2 -PREHOOK: query: DROP TABLE IF EXISTS test_3 +POSTHOOK: Output: default@test_2_n2 +PREHOOK: query: DROP TABLE IF EXISTS test_3_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS test_3 +POSTHOOK: query: DROP TABLE IF EXISTS test_3_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE test_3 AS SELECT 1 AS id +PREHOOK: query: CREATE TABLE test_3_n0 AS SELECT 1 AS id PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Output: database:default -PREHOOK: Output: default@test_3 -POSTHOOK: query: CREATE TABLE test_3 AS SELECT 1 AS id +PREHOOK: Output: default@test_3_n0 +POSTHOOK: query: CREATE TABLE test_3_n0 AS SELECT 1 AS id POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_3 -POSTHOOK: Lineage: test_3.id SIMPLE [] +POSTHOOK: Output: default@test_3_n0 +POSTHOOK: Lineage: test_3_n0.id SIMPLE [] PREHOOK: query: explain SELECT t1.id, t2.id, t3.id -FROM test_1 t1 -LEFT JOIN test_2 t2 ON t1.id = t2.id -INNER JOIN test_3 t3 ON t1.id = t3.id +FROM test_1_n2 t1 +LEFT JOIN test_2_n2 t2 ON t1.id = t2.id +INNER JOIN test_3_n0 t3 ON t1.id = t3.id PREHOOK: type: QUERY POSTHOOK: query: explain SELECT t1.id, t2.id, t3.id -FROM test_1 t1 -LEFT JOIN test_2 t2 ON t1.id = t2.id -INNER JOIN test_3 t3 ON t1.id = t3.id +FROM test_1_n2 t1 +LEFT JOIN test_2_n2 t2 ON t1.id = t2.id +INNER JOIN test_3_n0 t3 ON t1.id = t3.id POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage @@ -142,21 +142,21 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT t1.id, t2.id, t3.id -FROM test_1 t1 -LEFT JOIN test_2 t2 ON t1.id = t2.id -INNER JOIN test_3 t3 ON t1.id = t3.id +FROM test_1_n2 t1 +LEFT JOIN test_2_n2 t2 ON t1.id = t2.id +INNER JOIN test_3_n0 t3 ON t1.id = t3.id PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 -PREHOOK: Input: default@test_2 -PREHOOK: Input: default@test_3 +PREHOOK: Input: default@test_1_n2 +PREHOOK: Input: default@test_2_n2 +PREHOOK: Input: default@test_3_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.id, t2.id, t3.id -FROM test_1 t1 -LEFT JOIN test_2 t2 ON t1.id = t2.id -INNER JOIN test_3 t3 ON t1.id = t3.id +FROM test_1_n2 t1 +LEFT JOIN test_2_n2 t2 ON t1.id = t2.id +INNER JOIN test_3_n0 t3 ON t1.id = t3.id POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 -POSTHOOK: Input: default@test_2 -POSTHOOK: Input: default@test_3 +POSTHOOK: Input: default@test_1_n2 +POSTHOOK: Input: default@test_2_n2 +POSTHOOK: Input: default@test_3_n0 #### A masked pattern was here #### 1 NULL 1 diff --git a/ql/src/test/results/clientpositive/escape_crlf.q.out b/ql/src/test/results/clientpositive/escape_crlf.q.out index 8b5df8c28d..6060e4afa7 100644 --- a/ql/src/test/results/clientpositive/escape_crlf.q.out +++ b/ql/src/test/results/clientpositive/escape_crlf.q.out @@ -1,62 +1,62 @@ -PREHOOK: query: DROP TABLE IF EXISTS base_tab +PREHOOK: query: DROP TABLE IF EXISTS base_tab_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS base_tab +POSTHOOK: query: DROP TABLE IF EXISTS base_tab_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE base_tab(a STRING, b STRING) +PREHOOK: query: CREATE TABLE base_tab_n0(a STRING, b STRING) ROW FORMAT 
DELIMITED FIELDS TERMINATED BY '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@base_tab -POSTHOOK: query: CREATE TABLE base_tab(a STRING, b STRING) +PREHOOK: Output: default@base_tab_n0 +POSTHOOK: query: CREATE TABLE base_tab_n0(a STRING, b STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@base_tab -PREHOOK: query: DESCRIBE EXTENDED base_tab +POSTHOOK: Output: default@base_tab_n0 +PREHOOK: query: DESCRIBE EXTENDED base_tab_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@base_tab -POSTHOOK: query: DESCRIBE EXTENDED base_tab +PREHOOK: Input: default@base_tab_n0 +POSTHOOK: query: DESCRIBE EXTENDED base_tab_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@base_tab +POSTHOOK: Input: default@base_tab_n0 a string b string #### A masked pattern was here #### -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/escape_crlf.txt' OVERWRITE INTO TABLE base_tab +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/escape_crlf.txt' OVERWRITE INTO TABLE base_tab_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@base_tab -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/escape_crlf.txt' OVERWRITE INTO TABLE base_tab +PREHOOK: Output: default@base_tab_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/escape_crlf.txt' OVERWRITE INTO TABLE base_tab_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@base_tab -PREHOOK: query: SELECT * FROM base_tab +POSTHOOK: Output: default@base_tab_n0 +PREHOOK: query: SELECT * FROM base_tab_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@base_tab +PREHOOK: Input: default@base_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM base_tab +POSTHOOK: query: SELECT * FROM base_tab_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@base_tab +POSTHOOK: Input: default@base_tab_n0 #### A masked pattern was here #### This\nis\rthe first\r\nmulti-line field field1-2 This\nis\rthe second\r\nmulti-line field field2-2 -PREHOOK: query: ALTER TABLE base_tab SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true') +PREHOOK: query: ALTER TABLE base_tab_n0 SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES -PREHOOK: Input: default@base_tab -PREHOOK: Output: default@base_tab -POSTHOOK: query: ALTER TABLE base_tab SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true') +PREHOOK: Input: default@base_tab_n0 +PREHOOK: Output: default@base_tab_n0 +POSTHOOK: query: ALTER TABLE base_tab_n0 SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES -POSTHOOK: Input: default@base_tab -POSTHOOK: Output: default@base_tab -PREHOOK: query: SELECT * FROM base_tab +POSTHOOK: Input: default@base_tab_n0 +POSTHOOK: Output: default@base_tab_n0 +PREHOOK: query: SELECT * FROM base_tab_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@base_tab +PREHOOK: Input: default@base_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM base_tab +POSTHOOK: query: SELECT * FROM base_tab_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@base_tab +POSTHOOK: Input: default@base_tab_n0 #### A masked pattern was here #### This is @@ -66,13 +66,13 @@ This is the second multi-line field field2-2 -PREHOOK: query: SELECT * FROM base_tab +PREHOOK: query: SELECT * FROM 
base_tab_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@base_tab +PREHOOK: Input: default@base_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM base_tab +POSTHOOK: query: SELECT * FROM base_tab_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@base_tab +POSTHOOK: Input: default@base_tab_n0 #### A masked pattern was here #### This is @@ -82,11 +82,11 @@ This is the second multi-line field field2-2 -PREHOOK: query: DROP TABLE base_tab +PREHOOK: query: DROP TABLE base_tab_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@base_tab -PREHOOK: Output: default@base_tab -POSTHOOK: query: DROP TABLE base_tab +PREHOOK: Input: default@base_tab_n0 +PREHOOK: Output: default@base_tab_n0 +POSTHOOK: query: DROP TABLE base_tab_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@base_tab -POSTHOOK: Output: default@base_tab +POSTHOOK: Input: default@base_tab_n0 +POSTHOOK: Output: default@base_tab_n0 diff --git a/ql/src/test/results/clientpositive/except_all.q.out b/ql/src/test/results/clientpositive/except_all.q.out index dc1d078cdf..34f6ba3108 100644 --- a/ql/src/test/results/clientpositive/except_all.q.out +++ b/ql/src/test/results/clientpositive/except_all.q.out @@ -1,207 +1,207 @@ -PREHOOK: query: create table a(key int) +PREHOOK: query: create table a_n12(key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@a -POSTHOOK: query: create table a(key int) +PREHOOK: Output: default@a_n12 +POSTHOOK: query: create table a_n12(key int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@a -PREHOOK: query: insert into table a values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL) +POSTHOOK: Output: default@a_n12 +PREHOOK: query: insert into table a_n12 values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@a -POSTHOOK: query: insert into table a values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL) +PREHOOK: Output: default@a_n12 +POSTHOOK: query: insert into table a_n12 values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@a -POSTHOOK: Lineage: a.key SCRIPT [] -PREHOOK: query: create table b(key bigint) +POSTHOOK: Output: default@a_n12 +POSTHOOK: Lineage: a_n12.key SCRIPT [] +PREHOOK: query: create table b_n9(key bigint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@b -POSTHOOK: query: create table b(key bigint) +PREHOOK: Output: default@b_n9 +POSTHOOK: query: create table b_n9(key bigint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@b -PREHOOK: query: insert into table b values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL) +POSTHOOK: Output: default@b_n9 +PREHOOK: query: insert into table b_n9 values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@b -POSTHOOK: query: insert into table b values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL) +PREHOOK: Output: default@b_n9 +POSTHOOK: query: insert into table b_n9 values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@b -POSTHOOK: Lineage: b.key SCRIPT [] -PREHOOK: query: select * from a except all select * from b +POSTHOOK: Output: default@b_n9 +POSTHOOK: Lineage: b_n9.key SCRIPT [] +PREHOOK: query: select * from a_n12 except all 
select * from b_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from a except all select * from b +POSTHOOK: query: select * from a_n12 except all select * from b_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### 0 2 2 -PREHOOK: query: drop table a +PREHOOK: query: drop table a_n12 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@a -PREHOOK: Output: default@a -POSTHOOK: query: drop table a +PREHOOK: Input: default@a_n12 +PREHOOK: Output: default@a_n12 +POSTHOOK: query: drop table a_n12 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@a -POSTHOOK: Output: default@a -PREHOOK: query: drop table b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Output: default@a_n12 +PREHOOK: query: drop table b_n9 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@b -PREHOOK: Output: default@b -POSTHOOK: query: drop table b +PREHOOK: Input: default@b_n9 +PREHOOK: Output: default@b_n9 +POSTHOOK: query: drop table b_n9 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@b -POSTHOOK: Output: default@b -PREHOOK: query: create table a(key int, value int) +POSTHOOK: Input: default@b_n9 +POSTHOOK: Output: default@b_n9 +PREHOOK: query: create table a_n12(key int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@a -POSTHOOK: query: create table a(key int, value int) +PREHOOK: Output: default@a_n12 +POSTHOOK: query: create table a_n12(key int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@a -PREHOOK: query: insert into table a values (1,2),(1,2),(1,3),(2,3),(2,2) +POSTHOOK: Output: default@a_n12 +PREHOOK: query: insert into table a_n12 values (1,2),(1,2),(1,3),(2,3),(2,2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@a -POSTHOOK: query: insert into table a values (1,2),(1,2),(1,3),(2,3),(2,2) +PREHOOK: Output: default@a_n12 +POSTHOOK: query: insert into table a_n12 values (1,2),(1,2),(1,3),(2,3),(2,2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@a -POSTHOOK: Lineage: a.key SCRIPT [] -POSTHOOK: Lineage: a.value SCRIPT [] -PREHOOK: query: create table b(key int, value int) +POSTHOOK: Output: default@a_n12 +POSTHOOK: Lineage: a_n12.key SCRIPT [] +POSTHOOK: Lineage: a_n12.value SCRIPT [] +PREHOOK: query: create table b_n9(key int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@b -POSTHOOK: query: create table b(key int, value int) +PREHOOK: Output: default@b_n9 +POSTHOOK: query: create table b_n9(key int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@b -PREHOOK: query: insert into table b values (1,2),(2,3),(2,2),(2,2),(2,20) +POSTHOOK: Output: default@b_n9 +PREHOOK: query: insert into table b_n9 values (1,2),(2,3),(2,2),(2,2),(2,20) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@b -POSTHOOK: query: insert into table b values (1,2),(2,3),(2,2),(2,2),(2,20) +PREHOOK: Output: default@b_n9 +POSTHOOK: query: insert into table b_n9 values (1,2),(2,3),(2,2),(2,2),(2,20) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@b -POSTHOOK: Lineage: b.key SCRIPT [] 
-POSTHOOK: Lineage: b.value SCRIPT [] -PREHOOK: query: select * from a except all select * from b +POSTHOOK: Output: default@b_n9 +POSTHOOK: Lineage: b_n9.key SCRIPT [] +POSTHOOK: Lineage: b_n9.value SCRIPT [] +PREHOOK: query: select * from a_n12 except all select * from b_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from a except all select * from b +POSTHOOK: query: select * from a_n12 except all select * from b_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### 1 2 1 3 -PREHOOK: query: select * from b except all select * from a +PREHOOK: query: select * from b_n9 except all select * from a_n12 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from b except all select * from a +POSTHOOK: query: select * from b_n9 except all select * from a_n12 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### 2 2 2 20 -PREHOOK: query: select * from b except all select * from a intersect distinct select * from b +PREHOOK: query: select * from b_n9 except all select * from a_n12 intersect distinct select * from b_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from b except all select * from a intersect distinct select * from b +POSTHOOK: query: select * from b_n9 except all select * from a_n12 intersect distinct select * from b_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### 2 2 2 20 -PREHOOK: query: select * from b except all select * from a except distinct select * from b +PREHOOK: query: select * from b_n9 except all select * from a_n12 except distinct select * from b_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from b except all select * from a except distinct select * from b +POSTHOOK: query: select * from b_n9 except all select * from a_n12 except distinct select * from b_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### -PREHOOK: query: select * from a except all select * from b union all select * from a except distinct select * from b +PREHOOK: query: select * from a_n12 except all select * from b_n9 union all select * from a_n12 except distinct select * from b_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from a except all select * from b union all select * from a except distinct select * from b +POSTHOOK: query: select * from a_n12 except all select * from b_n9 union all select * from a_n12 except distinct select * from b_n9 
POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### 1 3 -PREHOOK: query: select * from a except all select * from b union select * from a except distinct select * from b +PREHOOK: query: select * from a_n12 except all select * from b_n9 union select * from a_n12 except distinct select * from b_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from a except all select * from b union select * from a except distinct select * from b +POSTHOOK: query: select * from a_n12 except all select * from b_n9 union select * from a_n12 except distinct select * from b_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### 1 3 -PREHOOK: query: select * from a except all select * from b except distinct select * from a except distinct select * from b +PREHOOK: query: select * from a_n12 except all select * from b_n9 except distinct select * from a_n12 except distinct select * from b_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from a except all select * from b except distinct select * from a except distinct select * from b +POSTHOOK: query: select * from a_n12 except all select * from b_n9 except distinct select * from a_n12 except distinct select * from b_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### -PREHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +PREHOOK: query: select * from (select a_n12.key, b_n9.value from a_n12 join b_n9 on a_n12.key=b_n9.key)sub1 except all -select * from (select a.key, b.value from a join b on a.key=b.key)sub2 +select * from (select a_n12.key, b_n9.value from a_n12 join b_n9 on a_n12.key=b_n9.key)sub2 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +POSTHOOK: query: select * from (select a_n12.key, b_n9.value from a_n12 join b_n9 on a_n12.key=b_n9.key)sub1 except all -select * from (select a.key, b.value from a join b on a.key=b.key)sub2 +select * from (select a_n12.key, b_n9.value from a_n12 join b_n9 on a_n12.key=b_n9.key)sub2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### -PREHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +PREHOOK: query: select * from (select a_n12.key, b_n9.value from a_n12 join b_n9 on a_n12.key=b_n9.key)sub1 except all -select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2 +select * from (select b_n9.value as key, a_n12.key as value from a_n12 join b_n9 on a_n12.key=b_n9.key)sub2 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern 
was here #### -POSTHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +POSTHOOK: query: select * from (select a_n12.key, b_n9.value from a_n12 join b_n9 on a_n12.key=b_n9.key)sub1 except all -select * from (select b.value as key, a.key as value from a join b on a.key=b.key)sub2 +select * from (select b_n9.value as key, a_n12.key as value from a_n12 join b_n9 on a_n12.key=b_n9.key)sub2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### 1 2 1 2 @@ -814,9 +814,9 @@ POSTHOOK: query: select * from src except all select * from src except distinct POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -PREHOOK: query: explain select value from a group by value except distinct select key from b group by key +PREHOOK: query: explain select value from a_n12 group by value except distinct select key from b_n9 group by key PREHOOK: type: QUERY -POSTHOOK: query: explain select value from a group by value except distinct select key from b group by key +POSTHOOK: query: explain select value from a_n12 group by value except distinct select key from b_n9 group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -829,7 +829,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n12 Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: int) @@ -936,7 +936,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: b + alias: b_n9 Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) @@ -982,14 +982,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select value from a group by value except distinct select key from b group by key +PREHOOK: query: select value from a_n12 group by value except distinct select key from b_n9 group by key PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n12 +PREHOOK: Input: default@b_n9 #### A masked pattern was here #### -POSTHOOK: query: select value from a group by value except distinct select key from b group by key +POSTHOOK: query: select value from a_n12 group by value except distinct select key from b_n9 group by key POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n12 +POSTHOOK: Input: default@b_n9 #### A masked pattern was here #### 3 diff --git a/ql/src/test/results/clientpositive/exchange_partition3.q.out b/ql/src/test/results/clientpositive/exchange_partition3.q.out index d351b09f2c..c4266a1912 100644 --- a/ql/src/test/results/clientpositive/exchange_partition3.q.out +++ b/ql/src/test/results/clientpositive/exchange_partition3.q.out @@ -1,93 +1,93 @@ -PREHOOK: query: CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING, hr STRING) +PREHOOK: query: CREATE TABLE exchange_part_test1_n0 (f1 string) PARTITIONED BY (ds STRING, hr STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exchange_part_test1 -POSTHOOK: query: CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING, hr STRING) +PREHOOK: Output: default@exchange_part_test1_n0 +POSTHOOK: query: CREATE TABLE exchange_part_test1_n0 (f1 string) PARTITIONED BY (ds STRING, hr STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
-POSTHOOK: Output: default@exchange_part_test1 -PREHOOK: query: CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRING) +POSTHOOK: Output: default@exchange_part_test1_n0 +PREHOOK: query: CREATE TABLE exchange_part_test2_n0 (f1 string) PARTITIONED BY (ds STRING, hr STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exchange_part_test2 -POSTHOOK: query: CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRING) +PREHOOK: Output: default@exchange_part_test2_n0 +POSTHOOK: query: CREATE TABLE exchange_part_test2_n0 (f1 string) PARTITIONED BY (ds STRING, hr STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exchange_part_test2 -PREHOOK: query: SHOW PARTITIONS exchange_part_test1 +POSTHOOK: Output: default@exchange_part_test2_n0 +PREHOOK: query: SHOW PARTITIONS exchange_part_test1_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test1 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test1 +PREHOOK: Input: default@exchange_part_test1_n0 +POSTHOOK: query: SHOW PARTITIONS exchange_part_test1_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test1 -PREHOOK: query: SHOW PARTITIONS exchange_part_test2 +POSTHOOK: Input: default@exchange_part_test1_n0 +PREHOOK: query: SHOW PARTITIONS exchange_part_test2_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test2 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: Input: default@exchange_part_test2_n0 +POSTHOOK: query: SHOW PARTITIONS exchange_part_test2_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test2 -PREHOOK: query: ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2014-01-03', hr='1') +POSTHOOK: Input: default@exchange_part_test2_n0 +PREHOOK: query: ALTER TABLE exchange_part_test1_n0 ADD PARTITION (ds='2014-01-03', hr='1') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@exchange_part_test1 -POSTHOOK: query: ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2014-01-03', hr='1') +PREHOOK: Output: default@exchange_part_test1_n0 +POSTHOOK: query: ALTER TABLE exchange_part_test1_n0 ADD PARTITION (ds='2014-01-03', hr='1') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@exchange_part_test1 -POSTHOOK: Output: default@exchange_part_test1@ds=2014-01-03/hr=1 -PREHOOK: query: ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='1') +POSTHOOK: Output: default@exchange_part_test1_n0 +POSTHOOK: Output: default@exchange_part_test1_n0@ds=2014-01-03/hr=1 +PREHOOK: query: ALTER TABLE exchange_part_test2_n0 ADD PARTITION (ds='2013-04-05', hr='1') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@exchange_part_test2 -POSTHOOK: query: ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='1') +PREHOOK: Output: default@exchange_part_test2_n0 +POSTHOOK: query: ALTER TABLE exchange_part_test2_n0 ADD PARTITION (ds='2013-04-05', hr='1') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@exchange_part_test2 -POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=1 -PREHOOK: query: ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='2') +POSTHOOK: Output: default@exchange_part_test2_n0 +POSTHOOK: Output: default@exchange_part_test2_n0@ds=2013-04-05/hr=1 +PREHOOK: query: ALTER TABLE exchange_part_test2_n0 ADD PARTITION (ds='2013-04-05', hr='2') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@exchange_part_test2 
-POSTHOOK: query: ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='2') +PREHOOK: Output: default@exchange_part_test2_n0 +POSTHOOK: query: ALTER TABLE exchange_part_test2_n0 ADD PARTITION (ds='2013-04-05', hr='2') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@exchange_part_test2 -POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=2 -PREHOOK: query: SHOW PARTITIONS exchange_part_test1 +POSTHOOK: Output: default@exchange_part_test2_n0 +POSTHOOK: Output: default@exchange_part_test2_n0@ds=2013-04-05/hr=2 +PREHOOK: query: SHOW PARTITIONS exchange_part_test1_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test1 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test1 +PREHOOK: Input: default@exchange_part_test1_n0 +POSTHOOK: query: SHOW PARTITIONS exchange_part_test1_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test1 +POSTHOOK: Input: default@exchange_part_test1_n0 ds=2014-01-03/hr=1 -PREHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: query: SHOW PARTITIONS exchange_part_test2_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test2 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: Input: default@exchange_part_test2_n0 +POSTHOOK: query: SHOW PARTITIONS exchange_part_test2_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test2 +POSTHOOK: Input: default@exchange_part_test2_n0 ds=2013-04-05/hr=1 ds=2013-04-05/hr=2 -PREHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2 +PREHOOK: query: ALTER TABLE exchange_part_test1_n0 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2_n0 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION -PREHOOK: Input: default@exchange_part_test2 -PREHOOK: Output: default@exchange_part_test1 -POSTHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2 +PREHOOK: Input: default@exchange_part_test2_n0 +PREHOOK: Output: default@exchange_part_test1_n0 +POSTHOOK: query: ALTER TABLE exchange_part_test1_n0 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2_n0 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION -POSTHOOK: Input: default@exchange_part_test2 -POSTHOOK: Input: default@exchange_part_test2@ds=2013-04-05/hr=1 -POSTHOOK: Input: default@exchange_part_test2@ds=2013-04-05/hr=2 -POSTHOOK: Output: default@exchange_part_test1 -POSTHOOK: Output: default@exchange_part_test1@ds=2013-04-05/hr=1 -POSTHOOK: Output: default@exchange_part_test1@ds=2013-04-05/hr=2 -POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=1 -POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05/hr=2 -PREHOOK: query: SHOW PARTITIONS exchange_part_test1 +POSTHOOK: Input: default@exchange_part_test2_n0 +POSTHOOK: Input: default@exchange_part_test2_n0@ds=2013-04-05/hr=1 +POSTHOOK: Input: default@exchange_part_test2_n0@ds=2013-04-05/hr=2 +POSTHOOK: Output: default@exchange_part_test1_n0 +POSTHOOK: Output: default@exchange_part_test1_n0@ds=2013-04-05/hr=1 +POSTHOOK: Output: default@exchange_part_test1_n0@ds=2013-04-05/hr=2 +POSTHOOK: Output: default@exchange_part_test2_n0@ds=2013-04-05/hr=1 +POSTHOOK: Output: default@exchange_part_test2_n0@ds=2013-04-05/hr=2 +PREHOOK: query: SHOW PARTITIONS exchange_part_test1_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test1 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test1 +PREHOOK: Input: default@exchange_part_test1_n0 +POSTHOOK: 
query: SHOW PARTITIONS exchange_part_test1_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test1 +POSTHOOK: Input: default@exchange_part_test1_n0 ds=2013-04-05/hr=1 ds=2013-04-05/hr=2 ds=2014-01-03/hr=1 -PREHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: query: SHOW PARTITIONS exchange_part_test2_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test2 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: Input: default@exchange_part_test2_n0 +POSTHOOK: query: SHOW PARTITIONS exchange_part_test2_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test2 +POSTHOOK: Input: default@exchange_part_test2_n0 diff --git a/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out b/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out index c9ebe27330..b178affef5 100644 --- a/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out +++ b/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: create table t as select * from src +PREHOOK: query: create table t_n25 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t as select * from src +PREHOOK: Output: default@t_n25 +POSTHOOK: query: create table t_n25 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain analyze table t compute statistics for columns +POSTHOOK: Output: default@t_n25 +POSTHOOK: Lineage: t_n25.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t_n25.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain analyze table t_n25 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -POSTHOOK: query: explain analyze table t compute statistics for columns +POSTHOOK: query: explain analyze table t_n25 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -23,7 +23,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t + alias: t_n25 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -58,15 +58,15 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.t + Table: default.t_n25 -PREHOOK: query: analyze table t compute statistics for columns +PREHOOK: query: analyze table t_n25 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@t -PREHOOK: Output: default@t +PREHOOK: Input: default@t_n25 +PREHOOK: Output: default@t_n25 #### A masked pattern was here #### -POSTHOOK: query: analyze table t compute statistics for columns +POSTHOOK: query: analyze table t_n25 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@t -POSTHOOK: Output: default@t +POSTHOOK: Input: default@t_n25 +POSTHOOK: Output: default@t_n25 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out 
b/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out index 62e5b276e6..b2dc527e67 100644 --- a/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out +++ b/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: query: create table exim_department_n0 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: Output: default@exim_department_n0 +POSTHOOK: query: create table exim_department_n0 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n0 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n0 to 'ql/test/data/exports/exim_department' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n0 #### A masked pattern was here #### -POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n0 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n0 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n0 +PREHOOK: Output: default@exim_department_n0 +POSTHOOK: query: drop table exim_department_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n0 +POSTHOOK: Output: default@exim_department_n0 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -47,21 +47,21 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_department' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_department -PREHOOK: query: describe extended exim_department +POSTHOOK: Output: importer@exim_department_n0 +PREHOOK: query: describe extended exim_department_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_department -POSTHOOK: query: describe extended exim_department +PREHOOK: Input: importer@exim_department_n0 +POSTHOOK: query: describe extended exim_department_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n0 dep_id int department id #### A masked pattern was here #### -PREHOOK: query: show table extended like exim_department +PREHOOK: query: show table extended like exim_department_n0 PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like exim_department +POSTHOOK: query: show table extended like exim_department_n0 POSTHOOK: type: SHOW_TABLESTATUS -tableName:exim_department +tableName:exim_department_n0 #### A masked pattern was here #### 
inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -75,22 +75,22 @@ minFileSize:0 #### A masked pattern was here #### #### A masked pattern was here #### -PREHOOK: query: select * from exim_department +PREHOOK: query: select * from exim_department_n0 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n0 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n0 +PREHOOK: Output: importer@exim_department_n0 +POSTHOOK: query: drop table exim_department_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n0 +POSTHOOK: Output: importer@exim_department_n0 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_01_nonpart.q.out b/ql/src/test/results/clientpositive/exim_01_nonpart.q.out index 604fe3197a..2deec7c63e 100644 --- a/ql/src/test/results/clientpositive/exim_01_nonpart.q.out +++ b/ql/src/test/results/clientpositive/exim_01_nonpart.q.out @@ -1,40 +1,40 @@ -PREHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: query: create table exim_department_n7 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: Output: default@exim_department_n7 +POSTHOOK: query: create table exim_department_n7 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +POSTHOOK: Output: default@exim_department_n7 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n7 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +PREHOOK: Output: default@exim_department_n7 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n7 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n7 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n7 to 'ql/test/data/exports/exim_department' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n7 #### A masked pattern was here #### -POSTHOOK: query: export table exim_department 
to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n7 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n7 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n7 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n7 +PREHOOK: Output: default@exim_department_n7 +POSTHOOK: query: drop table exim_department_n7 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n7 +POSTHOOK: Output: default@exim_department_n7 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -55,21 +55,21 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_department' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_department -PREHOOK: query: describe extended exim_department +POSTHOOK: Output: importer@exim_department_n7 +PREHOOK: query: describe extended exim_department_n7 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_department -POSTHOOK: query: describe extended exim_department +PREHOOK: Input: importer@exim_department_n7 +POSTHOOK: query: describe extended exim_department_n7 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n7 dep_id int department id #### A masked pattern was here #### -PREHOOK: query: show table extended like exim_department +PREHOOK: query: show table extended like exim_department_n7 PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like exim_department +POSTHOOK: query: show table extended like exim_department_n7 POSTHOOK: type: SHOW_TABLESTATUS -tableName:exim_department +tableName:exim_department_n7 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -83,13 +83,13 @@ minFileSize:11 #### A masked pattern was here #### #### A masked pattern was here #### -PREHOOK: query: select * from exim_department +PREHOOK: query: select * from exim_department_n7 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n7 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n7 #### A masked pattern was here #### 1 2 @@ -97,14 +97,14 @@ POSTHOOK: Input: importer@exim_department 4 5 6 -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n7 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n7 +PREHOOK: Output: importer@exim_department_n7 +POSTHOOK: query: drop table exim_department_n7 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n7 +POSTHOOK: Output: importer@exim_department_n7 PREHOOK: query: drop database 
importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out b/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out index fe0924139f..f34f81b5a0 100644 --- a/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out +++ b/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out @@ -1,34 +1,34 @@ -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table exim_employee_n9 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: default@exim_employee_n9 +POSTHOOK: query: create table exim_employee_n9 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_employee +POSTHOOK: Output: default@exim_employee_n9 #### A masked pattern was here #### -PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +PREHOOK: query: export table exim_employee_n9 to 'ql/test/data/exports/exim_employee' PREHOOK: type: EXPORT #### A masked pattern was here #### -POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +POSTHOOK: query: export table exim_employee_n9 to 'ql/test/data/exports/exim_employee' POSTHOOK: type: EXPORT #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n9 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_employee -PREHOOK: Output: default@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: default@exim_employee_n9 +PREHOOK: Output: default@exim_employee_n9 +POSTHOOK: query: drop table exim_employee_n9 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_employee -POSTHOOK: Output: default@exim_employee +POSTHOOK: Input: default@exim_employee_n9 +POSTHOOK: Output: default@exim_employee_n9 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -49,13 +49,13 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_employee' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_employee -PREHOOK: query: describe extended exim_employee +POSTHOOK: Output: importer@exim_employee_n9 +PREHOOK: query: describe extended exim_employee_n9 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_employee -POSTHOOK: query: describe extended exim_employee +PREHOOK: Input: importer@exim_employee_n9 +POSTHOOK: query: describe extended exim_employee_n9 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n9 emp_id int employee id emp_country string two char iso code emp_state string free text @@ -66,11 +66,11 @@ emp_country string two char iso code emp_state string free text #### A masked pattern was here #### -PREHOOK: query: show table extended like exim_employee +PREHOOK: query: show table extended like 
exim_employee_n9
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n9
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n9
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -79,22 +79,22 @@ partitioned:true
partitionColumns:struct partition_columns { string emp_country, string emp_state}
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n9
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
+PREHOOK: Input: importer@exim_employee_n9
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n9
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n9
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n9
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n9
+PREHOOK: Output: importer@exim_employee_n9
+POSTHOOK: query: drop table exim_employee_n9
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n9
+POSTHOOK: Output: importer@exim_employee_n9
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out b/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out
index 791a4cb8ad..5994d4fee3 100644
--- a/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out
+++ b/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out
@@ -1,40 +1,40 @@
-PREHOOK: query: create table exim_department ( dep_id int comment "department id")
+PREHOOK: query: create table exim_department_n5 ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_department
-POSTHOOK: query: create table exim_department ( dep_id int comment "department id")
+PREHOOK: Output: default@exim_department_n5
+POSTHOOK: query: create table exim_department_n5 ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_department
-PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department
+POSTHOOK: Output: default@exim_department_n5
+PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n5
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_department
-POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department
+PREHOOK: Output: default@exim_department_n5
+POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n5
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_department
+POSTHOOK: Output: default@exim_department_n5
#### A masked pattern was here ####
-PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department'
+PREHOOK: query: export table exim_department_n5 to 'ql/test/data/exports/exim_department'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_department
+PREHOOK: Input: default@exim_department_n5
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department'
+POSTHOOK: query: export table exim_department_n5 to 'ql/test/data/exports/exim_department'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_department
+POSTHOOK: Input: default@exim_department_n5
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_department
+PREHOOK: query: drop table exim_department_n5
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_department
-PREHOOK: Output: default@exim_department
-POSTHOOK: query: drop table exim_department
+PREHOOK: Input: default@exim_department_n5
+PREHOOK: Output: default@exim_department_n5
+POSTHOOK: query: drop table exim_department_n5
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_department
-POSTHOOK: Output: default@exim_department
+POSTHOOK: Input: default@exim_department_n5
+POSTHOOK: Output: default@exim_department_n5
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -47,42 +47,42 @@ PREHOOK: Input: database:importer
POSTHOOK: query: use importer
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
-PREHOOK: query: create table exim_department ( dep_id int comment "department identifier")
+PREHOOK: query: create table exim_department_n5 ( dep_id int comment "department identifier")
stored as textfile
tblproperties("maker"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:importer
-PREHOOK: Output: importer@exim_department
-POSTHOOK: query: create table exim_department ( dep_id int comment "department identifier")
+PREHOOK: Output: importer@exim_department_n5
+POSTHOOK: query: create table exim_department_n5 ( dep_id int comment "department identifier")
stored as textfile
tblproperties("maker"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_department
+POSTHOOK: Output: importer@exim_department_n5
PREHOOK: query: import from 'ql/test/data/exports/exim_department'
PREHOOK: type: IMPORT
#### A masked pattern was here ####
-PREHOOK: Output: importer@exim_department
+PREHOOK: Output: importer@exim_department_n5
POSTHOOK: query: import from 'ql/test/data/exports/exim_department'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
-POSTHOOK: Output: importer@exim_department
-PREHOOK: query: describe extended exim_department
+POSTHOOK: Output: importer@exim_department_n5
+PREHOOK: query: describe extended exim_department_n5
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_department
-POSTHOOK: query: describe extended exim_department
+PREHOOK: Input: importer@exim_department_n5
+POSTHOOK: query: describe extended exim_department_n5
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_department
+POSTHOOK: Input: importer@exim_department_n5
dep_id int department identifier
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_department
+PREHOOK: query: select * from exim_department_n5
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_department
+PREHOOK: Input: importer@exim_department_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_department
+POSTHOOK: query: select * from exim_department_n5
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_department
+POSTHOOK: Input: importer@exim_department_n5
#### A masked pattern was here ####
1
2
@@ -90,14 +90,14 @@ POSTHOOK: Input: importer@exim_department
4
5
6
-PREHOOK: query: drop table exim_department
+PREHOOK: query: drop table exim_department_n5
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_department
-PREHOOK: Output: importer@exim_department
-POSTHOOK: query: drop table exim_department
+PREHOOK: Input: importer@exim_department_n5
+PREHOOK: Output: importer@exim_department_n5
+POSTHOOK: query: drop table exim_department_n5
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_department
-POSTHOOK: Output: importer@exim_department
+POSTHOOK: Input: importer@exim_department_n5
+POSTHOOK: Output: importer@exim_department_n5
#### A masked pattern was here ####
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
diff --git a/ql/src/test/results/clientpositive/exim_04_all_part.q.out b/ql/src/test/results/clientpositive/exim_04_all_part.q.out
index 480f1cac62..96509d258b 100644
--- a/ql/src/test/results/clientpositive/exim_04_all_part.q.out
+++ b/ql/src/test/results/clientpositive/exim_04_all_part.q.out
@@ -1,86 +1,86 @@
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n5 ( emp_id int comment "employee id")
comment "employee table"
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: default@exim_employee_n5
+POSTHOOK: query: create table exim_employee_n5 ( emp_id int comment "employee id")
comment "employee table"
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Output: default@exim_employee_n5
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n5 partition (emp_country="in", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n5
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n5 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n5
+POSTHOOK: Output: default@exim_employee_n5@emp_country=in/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n5 partition (emp_country="in", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n5
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n5 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n5
+POSTHOOK: Output: default@exim_employee_n5@emp_country=in/emp_state=ka
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n5 partition (emp_country="us", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n5
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n5 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n5
+POSTHOOK: Output: default@exim_employee_n5@emp_country=us/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n5 partition (emp_country="us", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n5
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n5 partition (emp_country="us", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n5
+POSTHOOK: Output: default@exim_employee_n5@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n5 to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: default@exim_employee_n5@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n5@emp_country=in/emp_state=tn
+PREHOOK: Input: default@exim_employee_n5@emp_country=us/emp_state=ka
+PREHOOK: Input: default@exim_employee_n5@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n5 to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n5@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n5@emp_country=in/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n5@emp_country=us/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n5@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n5
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n5
+PREHOOK: Output: default@exim_employee_n5
+POSTHOOK: query: drop table exim_employee_n5
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n5
+POSTHOOK: Output: default@exim_employee_n5
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -101,17 +101,17 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn
-PREHOOK: query: describe extended exim_employee
+POSTHOOK: Output: importer@exim_employee_n5
+POSTHOOK: Output: importer@exim_employee_n5@emp_country=in/emp_state=ka
+POSTHOOK: Output: importer@exim_employee_n5@emp_country=in/emp_state=tn
+POSTHOOK: Output: importer@exim_employee_n5@emp_country=us/emp_state=ka
+POSTHOOK: Output: importer@exim_employee_n5@emp_country=us/emp_state=tn
+PREHOOK: query: describe extended exim_employee_n5
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee
+PREHOOK: Input: importer@exim_employee_n5
+POSTHOOK: query: describe extended exim_employee_n5
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n5
emp_id int employee id
emp_country string two char iso code
emp_state string free text
@@ -122,11 +122,11 @@ emp_country string two char iso code
emp_state string free text
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee
+PREHOOK: query: show table extended like exim_employee_n5
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n5
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n5
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -140,21 +140,21 @@ minFileSize:11
#### A masked pattern was here ####
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n5
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ka
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n5
+PREHOOK: Input: importer@exim_employee_n5@emp_country=in/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n5@emp_country=in/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n5@emp_country=us/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n5@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n5
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n5
+POSTHOOK: Input: importer@exim_employee_n5@emp_country=in/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n5@emp_country=in/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n5@emp_country=us/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n5@emp_country=us/emp_state=tn
#### A masked pattern was here ####
1 in ka
2 in ka
@@ -180,14 +180,14 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
4 us tn
5 us tn
6 us tn
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n5
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n5
+PREHOOK: Output: importer@exim_employee_n5
+POSTHOOK: query: drop table exim_employee_n5
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n5
+POSTHOOK: Output: importer@exim_employee_n5
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out b/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out
index 7cc8278b61..c8c5a78505 100644
--- a/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out
+++ b/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table exim_employee (emp_id int comment 'employee id', emp_name string, emp_dob string comment 'employee date of birth', emp_sex string comment 'M/F')
+PREHOOK: query: create table exim_employee_n12 (emp_id int comment 'employee id', emp_name string, emp_dob string comment 'employee date of birth', emp_sex string comment 'M/F')
comment 'employee table'
partitioned by (emp_country string comment '2-char code', emp_state string comment '2-char code')
clustered by (emp_sex) sorted by (emp_id ASC) into 10 buckets
@@ -6,8 +6,8 @@ PREHOOK: query: create table exim_employee (emp_id int comment 'employee id', em
stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee (emp_id int comment 'employee id', emp_name string, emp_dob string comment 'employee date of birth', emp_sex string comment 'M/F')
+PREHOOK: Output: default@exim_employee_n12
+POSTHOOK: query: create table exim_employee_n12 (emp_id int comment 'employee id', emp_name string, emp_dob string comment 'employee date of birth', emp_sex string comment 'M/F')
comment 'employee table'
partitioned by (emp_country string comment '2-char code', emp_state string comment '2-char code')
clustered by (emp_sex) sorted by (emp_id ASC) into 10 buckets
@@ -15,78 +15,78 @@ POSTHOOK: query: create table exim_employee (emp_id int comment 'employee id', e
stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: alter table exim_employee add columns (emp_dept int)
+POSTHOOK: Output: default@exim_employee_n12
+PREHOOK: query: alter table exim_employee_n12 add columns (emp_dept int)
PREHOOK: type: ALTERTABLE_ADDCOLS
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: alter table exim_employee add columns (emp_dept int)
+PREHOOK: Input: default@exim_employee_n12
+PREHOOK: Output: default@exim_employee_n12
+POSTHOOK: query: alter table exim_employee_n12 add columns (emp_dept int)
POSTHOOK: type: ALTERTABLE_ADDCOLS
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: alter table exim_employee clustered by (emp_sex, emp_dept) sorted by (emp_id desc) into 5 buckets
+POSTHOOK: Input: default@exim_employee_n12
+POSTHOOK: Output: default@exim_employee_n12
+PREHOOK: query: alter table exim_employee_n12 clustered by (emp_sex, emp_dept) sorted by (emp_id desc) into 5 buckets
PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: alter table exim_employee clustered by (emp_sex, emp_dept) sorted by (emp_id desc) into 5 buckets
+PREHOOK: Input: default@exim_employee_n12
+PREHOOK: Output: default@exim_employee_n12
+POSTHOOK: query: alter table exim_employee_n12 clustered by (emp_sex, emp_dept) sorted by (emp_id desc) into 5 buckets
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: alter table exim_employee add partition (emp_country='in', emp_state='tn')
+POSTHOOK: Input: default@exim_employee_n12
+POSTHOOK: Output: default@exim_employee_n12
+PREHOOK: query: alter table exim_employee_n12 add partition (emp_country='in', emp_state='tn')
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: alter table exim_employee add partition (emp_country='in', emp_state='tn')
+PREHOOK: Output: default@exim_employee_n12
+POSTHOOK: query: alter table exim_employee_n12 add partition (emp_country='in', emp_state='tn')
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: query: alter table exim_employee set fileformat
+POSTHOOK: Output: default@exim_employee_n12
+POSTHOOK: Output: default@exim_employee_n12@emp_country=in/emp_state=tn
+PREHOOK: query: alter table exim_employee_n12 set fileformat
inputformat "org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat"
outputformat "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"
serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"
PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: alter table exim_employee set fileformat
+PREHOOK: Input: default@exim_employee_n12
+PREHOOK: Output: default@exim_employee_n12
+POSTHOOK: query: alter table exim_employee_n12 set fileformat
inputformat "org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat"
outputformat "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"
serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"
POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: alter table exim_employee set serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe" with serdeproperties ('serialization.format'='2')
+POSTHOOK: Input: default@exim_employee_n12
+POSTHOOK: Output: default@exim_employee_n12
+PREHOOK: query: alter table exim_employee_n12 set serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe" with serdeproperties ('serialization.format'='2')
PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: alter table exim_employee set serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe" with serdeproperties ('serialization.format'='2')
+PREHOOK: Input: default@exim_employee_n12
+PREHOOK: Output: default@exim_employee_n12
+POSTHOOK: query: alter table exim_employee_n12 set serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe" with serdeproperties ('serialization.format'='2')
POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
-PREHOOK: query: alter table exim_employee add partition (emp_country='in', emp_state='ka')
+POSTHOOK: Input: default@exim_employee_n12
+POSTHOOK: Output: default@exim_employee_n12
+PREHOOK: query: alter table exim_employee_n12 add partition (emp_country='in', emp_state='ka')
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: alter table exim_employee add partition (emp_country='in', emp_state='ka')
+PREHOOK: Output: default@exim_employee_n12
+POSTHOOK: query: alter table exim_employee_n12 add partition (emp_country='in', emp_state='ka')
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n12
+POSTHOOK: Output: default@exim_employee_n12@emp_country=in/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n12 to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
+PREHOOK: Input: default@exim_employee_n12@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n12@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n12 to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n12@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n12@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n12
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n12
+PREHOOK: Output: default@exim_employee_n12
+POSTHOOK: query: drop table exim_employee_n12
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n12
+POSTHOOK: Output: default@exim_employee_n12
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -107,15 +107,15 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: query: describe extended exim_employee
+POSTHOOK: Output: importer@exim_employee_n12
+POSTHOOK: Output: importer@exim_employee_n12@emp_country=in/emp_state=ka
+POSTHOOK: Output: importer@exim_employee_n12@emp_country=in/emp_state=tn
+PREHOOK: query: describe extended exim_employee_n12
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee
+PREHOOK: Input: importer@exim_employee_n12
+POSTHOOK: query: describe extended exim_employee_n12
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n12
emp_id int employee id
emp_name string
emp_dob string employee date of birth
@@ -130,12 +130,12 @@ emp_country string 2-char code
emp_state string 2-char code
#### A masked pattern was here ####
-PREHOOK: query: describe extended exim_employee partition (emp_country='in', emp_state='tn')
+PREHOOK: query: describe extended exim_employee_n12 partition (emp_country='in', emp_state='tn')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee partition (emp_country='in', emp_state='tn')
+PREHOOK: Input: importer@exim_employee_n12
+POSTHOOK: query: describe extended exim_employee_n12 partition (emp_country='in', emp_state='tn')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n12
emp_id int employee id
emp_name string
emp_dob string employee date of birth
@@ -150,12 +150,12 @@ emp_country string 2-char code
emp_state string 2-char code
#### A masked pattern was here ####
-PREHOOK: query: describe extended exim_employee partition (emp_country='in', emp_state='ka')
+PREHOOK: query: describe extended exim_employee_n12 partition (emp_country='in', emp_state='ka')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee partition (emp_country='in', emp_state='ka')
+PREHOOK: Input: importer@exim_employee_n12
+POSTHOOK: query: describe extended exim_employee_n12 partition (emp_country='in', emp_state='ka')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n12
emp_id int employee id
emp_name string
emp_dob string employee date of birth
@@ -170,11 +170,11 @@ emp_country string 2-char code
emp_state string 2-char code
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee
+PREHOOK: query: show table extended like exim_employee_n12
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n12
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n12
#### A masked pattern was here ####
inputformat:org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -188,26 +188,26 @@ minFileSize:0
#### A masked pattern was here ####
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n12
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n12
+PREHOOK: Input: importer@exim_employee_n12@emp_country=in/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n12@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n12
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n12
+POSTHOOK: Input: importer@exim_employee_n12@emp_country=in/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n12@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n12
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n12
+PREHOOK: Output: importer@exim_employee_n12
+POSTHOOK: query: drop table exim_employee_n12
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n12
+POSTHOOK: Output: importer@exim_employee_n12
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_05_some_part.q.out b/ql/src/test/results/clientpositive/exim_05_some_part.q.out
index b56e3a6370..9909c412f4 100644
--- a/ql/src/test/results/clientpositive/exim_05_some_part.q.out
+++ b/ql/src/test/results/clientpositive/exim_05_some_part.q.out
@@ -1,82 +1,82 @@
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n15 ( emp_id int comment "employee id")
comment "employee table"
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: default@exim_employee_n15
+POSTHOOK: query: create table exim_employee_n15 ( emp_id int comment "employee id")
comment "employee table"
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Output: default@exim_employee_n15
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n15 partition (emp_country="in", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n15
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n15 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n15
+POSTHOOK: Output: default@exim_employee_n15@emp_country=in/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n15 partition (emp_country="in", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n15
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n15 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n15
+POSTHOOK: Output: default@exim_employee_n15@emp_country=in/emp_state=ka
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n15 partition (emp_country="us", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n15
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n15 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n15
+POSTHOOK: Output: default@exim_employee_n15@emp_country=us/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n15 partition (emp_country="us", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n15
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n15 partition (emp_country="us", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n15
+POSTHOOK: Output: default@exim_employee_n15@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee partition (emp_state="ka") to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n15 partition (emp_state="ka") to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
+PREHOOK: Input: default@exim_employee_n15@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n15@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee partition (emp_state="ka") to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n15 partition (emp_state="ka") to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n15@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n15@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n15
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n15
+PREHOOK: Output: default@exim_employee_n15
+POSTHOOK: query: drop table exim_employee_n15
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n15
+POSTHOOK: Output: default@exim_employee_n15
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -97,15 +97,15 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=ka
-PREHOOK: query: describe extended exim_employee
+POSTHOOK: Output: importer@exim_employee_n15
+POSTHOOK: Output: importer@exim_employee_n15@emp_country=in/emp_state=ka
+POSTHOOK: Output: importer@exim_employee_n15@emp_country=us/emp_state=ka
+PREHOOK: query: describe extended exim_employee_n15
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee
+PREHOOK: Input: importer@exim_employee_n15
+POSTHOOK: query: describe extended exim_employee_n15
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n15
emp_id int employee id
emp_country string two char iso code
emp_state string free text
@@ -116,11 +116,11 @@ emp_country string two char iso code
emp_state string free text
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee
+PREHOOK: query: show table extended like exim_employee_n15
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n15
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n15
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -134,17 +134,17 @@ minFileSize:11
#### A masked pattern was here ####
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n15
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n15
+PREHOOK: Input: importer@exim_employee_n15@emp_country=in/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n15@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n15
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n15
+POSTHOOK: Input: importer@exim_employee_n15@emp_country=in/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n15@emp_country=us/emp_state=ka
#### A masked pattern was here ####
1 in ka
2 in ka
@@ -158,14 +158,14 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ka
4 us ka
5 us ka
6 us ka
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n15
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n15
+PREHOOK: Output: importer@exim_employee_n15
+POSTHOOK: query: drop table exim_employee_n15
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n15
+POSTHOOK: Output: importer@exim_employee_n15
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_06_one_part.q.out b/ql/src/test/results/clientpositive/exim_06_one_part.q.out
index 84ed946a68..750868f7a2 100644
--- a/ql/src/test/results/clientpositive/exim_06_one_part.q.out
+++ b/ql/src/test/results/clientpositive/exim_06_one_part.q.out
@@ -1,80 +1,80 @@
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n3 ( emp_id int comment "employee id")
comment "employee table"
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: default@exim_employee_n3
+POSTHOOK: query: create table exim_employee_n3 ( emp_id int comment "employee id")
comment "employee table"
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Output: default@exim_employee_n3
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n3 partition (emp_country="in", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n3
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n3 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n3
+POSTHOOK: Output: default@exim_employee_n3@emp_country=in/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n3 partition (emp_country="in", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n3
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n3 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n3
+POSTHOOK: Output: default@exim_employee_n3@emp_country=in/emp_state=ka
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n3 partition (emp_country="us", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n3
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n3 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n3
+POSTHOOK: Output: default@exim_employee_n3@emp_country=us/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n3 partition (emp_country="us", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n3
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n3 partition (emp_country="us", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n3
+POSTHOOK: Output: default@exim_employee_n3@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee partition (emp_country="in",emp_state="ka") to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n3 partition (emp_country="in",emp_state="ka") to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n3@emp_country=in/emp_state=ka
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee partition (emp_country="in",emp_state="ka") to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n3 partition (emp_country="in",emp_state="ka") to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n3@emp_country=in/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n3
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n3
+PREHOOK: Output: default@exim_employee_n3
+POSTHOOK: query: drop table exim_employee_n3
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n3
+POSTHOOK: Output: default@exim_employee_n3
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -95,14 +95,14 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: query: describe extended exim_employee
+POSTHOOK: Output: importer@exim_employee_n3
+POSTHOOK: Output: importer@exim_employee_n3@emp_country=in/emp_state=ka
+PREHOOK: query: describe extended exim_employee_n3
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee
+PREHOOK: Input: importer@exim_employee_n3
+POSTHOOK: query: describe extended exim_employee_n3
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n3
emp_id int employee id
emp_country string two char iso code
emp_state string free text
@@ -113,11 +113,11 @@ emp_country string two char iso code
emp_state string free text
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee
+PREHOOK: query: show table extended like exim_employee_n3
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n3
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n3
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -131,15 +131,15 @@ minFileSize:11
#### A masked pattern was here ####
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n3
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n3
+PREHOOK: Input: importer@exim_employee_n3@emp_country=in/emp_state=ka
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n3
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n3
+POSTHOOK: Input: importer@exim_employee_n3@emp_country=in/emp_state=ka
#### A masked pattern was here ####
1 in ka
2 in ka
@@ -147,14 +147,14 @@ POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
4 in ka
5 in ka
6 in ka
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n3
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n3
+PREHOOK: Output: importer@exim_employee_n3
+POSTHOOK: query: drop table exim_employee_n3
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n3
+POSTHOOK: Output: importer@exim_employee_n3
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out b/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out
index 7450949408..e014bf8dcb 100644
--- a/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out
+++ b/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out
@@ -1,86 +1,86 @@
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n8 ( emp_id int comment "employee id")
comment "employee table"
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: default@exim_employee_n8
+POSTHOOK: query: create table exim_employee_n8 ( emp_id int comment "employee id")
comment "employee table"
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Output: default@exim_employee_n8
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n8 partition (emp_country="in", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n8
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n8 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n8
+POSTHOOK: Output: default@exim_employee_n8@emp_country=in/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n8 partition (emp_country="in", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n8
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n8 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n8
+POSTHOOK: Output: default@exim_employee_n8@emp_country=in/emp_state=ka
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n8 partition (emp_country="us", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n8
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n8 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n8
+POSTHOOK: Output: default@exim_employee_n8@emp_country=us/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n8 partition (emp_country="us", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n8
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n8 partition (emp_country="us", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n8
+POSTHOOK: Output: default@exim_employee_n8@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n8 to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: default@exim_employee_n8@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n8@emp_country=in/emp_state=tn
+PREHOOK: Input: default@exim_employee_n8@emp_country=us/emp_state=ka
+PREHOOK: Input: default@exim_employee_n8@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n8 to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n8@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n8@emp_country=in/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n8@emp_country=us/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n8@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n8
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n8
+PREHOOK: Output: default@exim_employee_n8
+POSTHOOK: query: drop table exim_employee_n8
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n8
+POSTHOOK: Output: default@exim_employee_n8
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -93,51 +93,51 @@ PREHOOK: Input: database:importer
POSTHOOK: query: use importer
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n8 ( emp_id int comment "employee id")
comment "table of employees"
partitioned by (emp_country string comment "iso code", emp_state string comment "free-form text")
stored as textfile
tblproperties("maker"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:importer
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: importer@exim_employee_n8
+POSTHOOK: query: create table exim_employee_n8 ( emp_id int comment "employee id")
comment "table of employees"
partitioned by (emp_country string comment "iso code", emp_state string comment "free-form text")
stored as textfile
tblproperties("maker"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Output: importer@exim_employee_n8
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="al")
+ into table exim_employee_n8 partition (emp_country="us", emp_state="al")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: importer@exim_employee
+PREHOOK: Output: importer@exim_employee_n8
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="al")
+ into table exim_employee_n8 partition (emp_country="us", emp_state="al")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=al
+POSTHOOK: Output: importer@exim_employee_n8
+POSTHOOK: Output: importer@exim_employee_n8@emp_country=us/emp_state=al
PREHOOK: query: import from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
#### A masked pattern was here ####
-PREHOOK: Output: importer@exim_employee
+PREHOOK: Output: importer@exim_employee_n8
POSTHOOK: query: import from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn
-PREHOOK: query: describe extended exim_employee
+POSTHOOK: Output: importer@exim_employee_n8
+POSTHOOK: Output: importer@exim_employee_n8@emp_country=in/emp_state=ka
+POSTHOOK: Output: importer@exim_employee_n8@emp_country=in/emp_state=tn
+POSTHOOK: Output: importer@exim_employee_n8@emp_country=us/emp_state=ka
+POSTHOOK: Output: importer@exim_employee_n8@emp_country=us/emp_state=tn
+PREHOOK: query: describe extended exim_employee_n8
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee
+PREHOOK: Input: importer@exim_employee_n8
+POSTHOOK: query: describe extended exim_employee_n8
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n8
emp_id int employee id
emp_country string iso code
emp_state string free-form text
@@ -148,23 +148,23 @@ emp_country string iso code
emp_state string free-form text
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n8
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=al
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ka
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
-#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+PREHOOK: Input: importer@exim_employee_n8
+PREHOOK: Input: importer@exim_employee_n8@emp_country=in/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n8@emp_country=in/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n8@emp_country=us/emp_state=al
+PREHOOK: Input: importer@exim_employee_n8@emp_country=us/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n8@emp_country=us/emp_state=tn
+#### A masked pattern was here ####
+POSTHOOK: query: select * from exim_employee_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=al
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n8
+POSTHOOK: Input: importer@exim_employee_n8@emp_country=in/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n8@emp_country=in/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n8@emp_country=us/emp_state=al
+POSTHOOK: Input: importer@exim_employee_n8@emp_country=us/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n8@emp_country=us/emp_state=tn
#### A masked pattern was here ####
1 in ka
2 in ka
@@ -196,14 +196,14 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
4 us tn
5 us tn
6 us tn
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n8
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n8
+PREHOOK: Output: importer@exim_employee_n8
+POSTHOOK: query: drop table exim_employee_n8
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n8
+POSTHOOK: Output: importer@exim_employee_n8
#### A masked pattern was here ####
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
diff --git a/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out b/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out
index 8cb2c75ce2..59d3d51982 100644
--- a/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out
+++ b/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out
@@ -1,40 +1,40 @@
-PREHOOK: query: create table exim_department ( dep_id int comment "department id")
+PREHOOK: query: create table exim_department_n9 ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_department
-POSTHOOK: query: create table exim_department ( dep_id int comment "department id")
+PREHOOK: Output: default@exim_department_n9
+POSTHOOK: query: create table exim_department_n9 ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_department
-PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department
+POSTHOOK: Output: default@exim_department_n9
+PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n9
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_department
-POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department
+PREHOOK: Output: default@exim_department_n9
+POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n9
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_department
+POSTHOOK: Output: default@exim_department_n9
#### A masked pattern was here ####
-PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department'
+PREHOOK: query: export table exim_department_n9 to 'ql/test/data/exports/exim_department'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_department
+PREHOOK: Input: default@exim_department_n9
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department'
+POSTHOOK: query: export table exim_department_n9 to 'ql/test/data/exports/exim_department'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_department
+POSTHOOK: Input: default@exim_department_n9
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_department
+PREHOOK: query: drop table exim_department_n9
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_department
-PREHOOK: Output: default@exim_department
-POSTHOOK: query: drop table exim_department
+PREHOOK: Input: default@exim_department_n9
+PREHOOK: Output: default@exim_department_n9
+POSTHOOK: query: drop table exim_department_n9
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_department
-POSTHOOK: Output: default@exim_department
+POSTHOOK: Input: default@exim_department_n9
+POSTHOOK: Output: default@exim_department_n9
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -47,29 +47,29 @@ PREHOOK: Input: database:importer
POSTHOOK: query: use importer
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
-PREHOOK: query: create table exim_department ( dep_id int comment "department id")
+PREHOOK: query: create table exim_department_n9 ( dep_id int comment "department id")
partitioned by (emp_org string)
stored as textfile
tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:importer
-PREHOOK: Output: importer@exim_department
-POSTHOOK: query: create table exim_department ( dep_id int comment "department id")
+PREHOOK: Output: importer@exim_department_n9
+POSTHOOK: query: create table exim_department_n9 ( dep_id int comment "department id")
partitioned by (emp_org string)
stored as textfile
tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_department
-PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department partition (emp_org="hr")
+POSTHOOK: Output: importer@exim_department_n9
+PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n9 partition (emp_org="hr")
PREHOOK: type: LOAD
#### A masked pattern was here #### -PREHOOK: Output: importer@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department partition (emp_org="hr") +PREHOOK: Output: importer@exim_department_n9 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n9 partition (emp_org="hr") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_department -POSTHOOK: Output: importer@exim_department@emp_org=hr +POSTHOOK: Output: importer@exim_department_n9 +POSTHOOK: Output: importer@exim_department_n9@emp_org=hr PREHOOK: query: import table exim_imported_dept from 'ql/test/data/exports/exim_department' PREHOOK: type: IMPORT #### A masked pattern was here #### @@ -110,14 +110,14 @@ POSTHOOK: query: drop table exim_imported_dept POSTHOOK: type: DROPTABLE POSTHOOK: Input: importer@exim_imported_dept POSTHOOK: Output: importer@exim_imported_dept -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n9 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n9 +PREHOOK: Output: importer@exim_department_n9 +POSTHOOK: query: drop table exim_department_n9 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n9 +POSTHOOK: Output: importer@exim_department_n9 #### A masked pattern was here #### PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE diff --git a/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out b/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out index f4cf862614..6a762ac9a9 100644 --- a/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out +++ b/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out @@ -1,86 +1,86 @@ -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table exim_employee_n10 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: default@exim_employee_n10 +POSTHOOK: query: create table exim_employee_n10 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_employee +POSTHOOK: Output: default@exim_employee_n10 PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n10 partition (emp_country="in", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n10 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n10 partition 
(emp_country="in", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Output: default@exim_employee_n10 +POSTHOOK: Output: default@exim_employee_n10@emp_country=in/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n10 partition (emp_country="in", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n10 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n10 partition (emp_country="in", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka +POSTHOOK: Output: default@exim_employee_n10 +POSTHOOK: Output: default@exim_employee_n10@emp_country=in/emp_state=ka PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn") + into table exim_employee_n10 partition (emp_country="us", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n10 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn") + into table exim_employee_n10 partition (emp_country="us", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Output: default@exim_employee_n10 +POSTHOOK: Output: default@exim_employee_n10@emp_country=us/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka") + into table exim_employee_n10 partition (emp_country="us", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n10 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka") + into table exim_employee_n10 partition (emp_country="us", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka +POSTHOOK: Output: default@exim_employee_n10 +POSTHOOK: Output: default@exim_employee_n10@emp_country=us/emp_state=ka #### A masked pattern was here #### -PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +PREHOOK: query: export table exim_employee_n10 to 'ql/test/data/exports/exim_employee' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn -PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka -PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: default@exim_employee_n10@emp_country=in/emp_state=ka +PREHOOK: Input: default@exim_employee_n10@emp_country=in/emp_state=tn +PREHOOK: Input: 
default@exim_employee_n10@emp_country=us/emp_state=ka +PREHOOK: Input: default@exim_employee_n10@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +POSTHOOK: query: export table exim_employee_n10 to 'ql/test/data/exports/exim_employee' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn -POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka -POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: default@exim_employee_n10@emp_country=in/emp_state=ka +POSTHOOK: Input: default@exim_employee_n10@emp_country=in/emp_state=tn +POSTHOOK: Input: default@exim_employee_n10@emp_country=us/emp_state=ka +POSTHOOK: Input: default@exim_employee_n10@emp_country=us/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n10 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_employee -PREHOOK: Output: default@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: default@exim_employee_n10 +PREHOOK: Output: default@exim_employee_n10 +POSTHOOK: query: drop table exim_employee_n10 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_employee -POSTHOOK: Output: default@exim_employee +POSTHOOK: Input: default@exim_employee_n10 +POSTHOOK: Output: default@exim_employee_n10 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -93,59 +93,59 @@ PREHOOK: Input: database:importer POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table exim_employee_n10 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:importer -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: importer@exim_employee_n10 +POSTHOOK: query: create table exim_employee_n10 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Output: importer@exim_employee_n10 PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n10 partition (emp_country="in", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: importer@exim_employee +PREHOOK: Output: importer@exim_employee_n10 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n10 partition (emp_country="in", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Output: importer@exim_employee_n10 
+POSTHOOK: Output: importer@exim_employee_n10@emp_country=in/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n10 partition (emp_country="in", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: importer@exim_employee +PREHOOK: Output: importer@exim_employee_n10 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n10 partition (emp_country="in", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka -PREHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' +POSTHOOK: Output: importer@exim_employee_n10 +POSTHOOK: Output: importer@exim_employee_n10@emp_country=in/emp_state=ka +PREHOOK: query: import table exim_employee_n10 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' PREHOOK: type: IMPORT #### A masked pattern was here #### -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' +PREHOOK: Output: importer@exim_employee_n10 +POSTHOOK: query: import table exim_employee_n10 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' POSTHOOK: type: IMPORT #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn -PREHOOK: query: describe extended exim_employee +POSTHOOK: Output: importer@exim_employee_n10 +POSTHOOK: Output: importer@exim_employee_n10@emp_country=us/emp_state=tn +PREHOOK: query: describe extended exim_employee_n10 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_employee -POSTHOOK: query: describe extended exim_employee +PREHOOK: Input: importer@exim_employee_n10 +POSTHOOK: query: describe extended exim_employee_n10 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n10 emp_id int employee id emp_country string two char iso code emp_state string free text @@ -156,19 +156,19 @@ emp_country string two char iso code emp_state string free text #### A masked pattern was here #### -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n10 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn -PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: importer@exim_employee_n10 +PREHOOK: Input: importer@exim_employee_n10@emp_country=in/emp_state=ka +PREHOOK: Input: importer@exim_employee_n10@emp_country=in/emp_state=tn +PREHOOK: Input: importer@exim_employee_n10@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n10 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn -POSTHOOK: Input: 
importer@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n10 +POSTHOOK: Input: importer@exim_employee_n10@emp_country=in/emp_state=ka +POSTHOOK: Input: importer@exim_employee_n10@emp_country=in/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n10@emp_country=us/emp_state=tn #### A masked pattern was here #### 1 in ka 2 in ka @@ -188,14 +188,14 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn 4 us tn 5 us tn 6 us tn -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n10 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_employee -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: importer@exim_employee_n10 +PREHOOK: Output: importer@exim_employee_n10 +POSTHOOK: query: drop table exim_employee_n10 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n10 +POSTHOOK: Output: importer@exim_employee_n10 #### A masked pattern was here #### PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE diff --git a/ql/src/test/results/clientpositive/exim_10_external_managed.q.out b/ql/src/test/results/clientpositive/exim_10_external_managed.q.out index 5672db9182..1a691f060e 100644 --- a/ql/src/test/results/clientpositive/exim_10_external_managed.q.out +++ b/ql/src/test/results/clientpositive/exim_10_external_managed.q.out @@ -1,45 +1,45 @@ #### A masked pattern was here #### -PREHOOK: query: create external table exim_department ( dep_id int comment "department id") +PREHOOK: query: create external table exim_department_n4 ( dep_id int comment "department id") stored as textfile location 'ql/test/data/tablestore/exim_department' tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create external table exim_department ( dep_id int comment "department id") +PREHOOK: Output: default@exim_department_n4 +POSTHOOK: query: create external table exim_department_n4 ( dep_id int comment "department id") stored as textfile location 'ql/test/data/tablestore/exim_department' tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +POSTHOOK: Output: default@exim_department_n4 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +PREHOOK: Output: default@exim_department_n4 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n4 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n4 to 'ql/test/data/exports/exim_department' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n4 #### A masked pattern was here #### -POSTHOOK: query: 
export table exim_department to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n4 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n4 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n4 +PREHOOK: Output: default@exim_department_n4 +POSTHOOK: query: drop table exim_department_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n4 +POSTHOOK: Output: default@exim_department_n4 #### A masked pattern was here #### PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE @@ -61,23 +61,23 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_department' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_department -PREHOOK: query: describe extended exim_department +POSTHOOK: Output: importer@exim_department_n4 +PREHOOK: query: describe extended exim_department_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_department -POSTHOOK: query: describe extended exim_department +PREHOOK: Input: importer@exim_department_n4 +POSTHOOK: query: describe extended exim_department_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n4 dep_id int department id #### A masked pattern was here #### -PREHOOK: query: select * from exim_department +PREHOOK: query: select * from exim_department_n4 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n4 #### A masked pattern was here #### 1 2 @@ -85,14 +85,14 @@ POSTHOOK: Input: importer@exim_department 4 5 6 -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n4 +PREHOOK: Output: importer@exim_department_n4 +POSTHOOK: query: drop table exim_department_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n4 +POSTHOOK: Output: importer@exim_department_n4 #### A masked pattern was here #### PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE diff --git a/ql/src/test/results/clientpositive/exim_12_external_location.q.out b/ql/src/test/results/clientpositive/exim_12_external_location.q.out index 70b9239432..7bbc4135e2 100644 --- a/ql/src/test/results/clientpositive/exim_12_external_location.q.out +++ b/ql/src/test/results/clientpositive/exim_12_external_location.q.out @@ -1,40 +1,40 @@ -PREHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: query: create table exim_department_n8 ( dep_id int comment "department id") stored 
as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: Output: default@exim_department_n8 +POSTHOOK: query: create table exim_department_n8 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +POSTHOOK: Output: default@exim_department_n8 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n8 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +PREHOOK: Output: default@exim_department_n8 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n8 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n8 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n8 to 'ql/test/data/exports/exim_department' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n8 #### A masked pattern was here #### -POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n8 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n8 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n8 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n8 +PREHOOK: Output: default@exim_department_n8 +POSTHOOK: query: drop table exim_department_n8 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n8 +POSTHOOK: Output: default@exim_department_n8 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -48,33 +48,33 @@ POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer #### A masked pattern was here #### -PREHOOK: query: import external table exim_department from 'ql/test/data/exports/exim_department' +PREHOOK: query: import external table exim_department_n8 from 'ql/test/data/exports/exim_department' location 'ql/test/data/tablestore/exim_department' PREHOOK: type: IMPORT #### A masked pattern was here #### PREHOOK: Output: database:importer -POSTHOOK: query: import external table exim_department from 'ql/test/data/exports/exim_department' +POSTHOOK: query: import external table exim_department_n8 from 'ql/test/data/exports/exim_department' location 'ql/test/data/tablestore/exim_department' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_department -PREHOOK: query: describe extended 
exim_department +POSTHOOK: Output: importer@exim_department_n8 +PREHOOK: query: describe extended exim_department_n8 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_department -POSTHOOK: query: describe extended exim_department +PREHOOK: Input: importer@exim_department_n8 +POSTHOOK: query: describe extended exim_department_n8 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n8 dep_id int department id #### A masked pattern was here #### -PREHOOK: query: select * from exim_department +PREHOOK: query: select * from exim_department_n8 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n8 #### A masked pattern was here #### 1 2 @@ -83,22 +83,22 @@ POSTHOOK: Input: importer@exim_department 5 6 #### A masked pattern was here #### -PREHOOK: query: select * from exim_department +PREHOOK: query: select * from exim_department_n8 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n8 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n8 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n8 +PREHOOK: Output: importer@exim_department_n8 +POSTHOOK: query: drop table exim_department_n8 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n8 +POSTHOOK: Output: importer@exim_department_n8 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_13_managed_location.q.out b/ql/src/test/results/clientpositive/exim_13_managed_location.q.out index 63322980d5..46fcc3bdff 100644 --- a/ql/src/test/results/clientpositive/exim_13_managed_location.q.out +++ b/ql/src/test/results/clientpositive/exim_13_managed_location.q.out @@ -1,40 +1,40 @@ -PREHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: query: create table exim_department_n2 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: Output: default@exim_department_n2 +POSTHOOK: query: create table exim_department_n2 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +POSTHOOK: Output: default@exim_department_n2 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table 
exim_department_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +PREHOOK: Output: default@exim_department_n2 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n2 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n2 to 'ql/test/data/exports/exim_department' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n2 #### A masked pattern was here #### -POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n2 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n2 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n2 +PREHOOK: Output: default@exim_department_n2 +POSTHOOK: query: drop table exim_department_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n2 +POSTHOOK: Output: default@exim_department_n2 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -48,33 +48,33 @@ POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer #### A masked pattern was here #### -PREHOOK: query: import table exim_department from 'ql/test/data/exports/exim_department' +PREHOOK: query: import table exim_department_n2 from 'ql/test/data/exports/exim_department' location 'ql/test/data/tablestore/exim_department' PREHOOK: type: IMPORT #### A masked pattern was here #### PREHOOK: Output: database:importer -POSTHOOK: query: import table exim_department from 'ql/test/data/exports/exim_department' +POSTHOOK: query: import table exim_department_n2 from 'ql/test/data/exports/exim_department' location 'ql/test/data/tablestore/exim_department' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_department -PREHOOK: query: describe extended exim_department +POSTHOOK: Output: importer@exim_department_n2 +PREHOOK: query: describe extended exim_department_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_department -POSTHOOK: query: describe extended exim_department +PREHOOK: Input: importer@exim_department_n2 +POSTHOOK: query: describe extended exim_department_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n2 dep_id int department id #### A masked pattern was here #### -PREHOOK: query: select * from exim_department +PREHOOK: query: select * from exim_department_n2 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from 
exim_department +POSTHOOK: query: select * from exim_department_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n2 #### A masked pattern was here #### 1 2 @@ -83,22 +83,22 @@ POSTHOOK: Input: importer@exim_department 5 6 #### A masked pattern was here #### -PREHOOK: query: select * from exim_department +PREHOOK: query: select * from exim_department_n2 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n2 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n2 +PREHOOK: Output: importer@exim_department_n2 +POSTHOOK: query: drop table exim_department_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n2 +POSTHOOK: Output: importer@exim_department_n2 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_14_managed_location_over_existing.q.out b/ql/src/test/results/clientpositive/exim_14_managed_location_over_existing.q.out index 8e7f50401c..8a80e8a1ea 100644 --- a/ql/src/test/results/clientpositive/exim_14_managed_location_over_existing.q.out +++ b/ql/src/test/results/clientpositive/exim_14_managed_location_over_existing.q.out @@ -1,40 +1,40 @@ -PREHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: query: create table exim_department_n10 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: Output: default@exim_department_n10 +POSTHOOK: query: create table exim_department_n10 ( dep_id int comment "department id") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +POSTHOOK: Output: default@exim_department_n10 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n10 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +PREHOOK: Output: default@exim_department_n10 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n10 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n10 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n10 to 'ql/test/data/exports/exim_department' 
PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n10 #### A masked pattern was here #### -POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n10 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n10 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n10 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n10 +PREHOOK: Output: default@exim_department_n10 +POSTHOOK: query: drop table exim_department_n10 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n10 +POSTHOOK: Output: default@exim_department_n10 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -48,48 +48,48 @@ POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer #### A masked pattern was here #### -PREHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: query: create table exim_department_n10 ( dep_id int comment "department id") stored as textfile location 'ql/test/data/tablestore/exim_department' tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:importer -PREHOOK: Output: importer@exim_department -POSTHOOK: query: create table exim_department ( dep_id int comment "department id") +PREHOOK: Output: importer@exim_department_n10 +POSTHOOK: query: create table exim_department_n10 ( dep_id int comment "department id") stored as textfile location 'ql/test/data/tablestore/exim_department' tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_department -PREHOOK: query: import table exim_department from 'ql/test/data/exports/exim_department' +POSTHOOK: Output: importer@exim_department_n10 +PREHOOK: query: import table exim_department_n10 from 'ql/test/data/exports/exim_department' location 'ql/test/data/tablestore/exim_department' PREHOOK: type: IMPORT #### A masked pattern was here #### -PREHOOK: Output: importer@exim_department -POSTHOOK: query: import table exim_department from 'ql/test/data/exports/exim_department' +PREHOOK: Output: importer@exim_department_n10 +POSTHOOK: query: import table exim_department_n10 from 'ql/test/data/exports/exim_department' location 'ql/test/data/tablestore/exim_department' POSTHOOK: type: IMPORT #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_department -PREHOOK: query: describe extended exim_department +POSTHOOK: Output: importer@exim_department_n10 +PREHOOK: query: describe extended exim_department_n10 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_department -POSTHOOK: query: describe extended exim_department +PREHOOK: Input: importer@exim_department_n10 +POSTHOOK: query: describe extended exim_department_n10 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n10 dep_id int department id #### A masked pattern was here #### -PREHOOK: query: select 
* from exim_department +PREHOOK: query: select * from exim_department_n10 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n10 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n10 #### A masked pattern was here #### 1 2 @@ -98,22 +98,22 @@ POSTHOOK: Input: importer@exim_department 5 6 #### A masked pattern was here #### -PREHOOK: query: select * from exim_department +PREHOOK: query: select * from exim_department_n10 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n10 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n10 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n10 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n10 +PREHOOK: Output: importer@exim_department_n10 +POSTHOOK: query: drop table exim_department_n10 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n10 +POSTHOOK: Output: importer@exim_department_n10 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_15_external_part.q.out b/ql/src/test/results/clientpositive/exim_15_external_part.q.out index bd7bb6f9a3..d9b9213009 100644 --- a/ql/src/test/results/clientpositive/exim_15_external_part.q.out +++ b/ql/src/test/results/clientpositive/exim_15_external_part.q.out @@ -1,86 +1,86 @@ -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table exim_employee_n0 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: default@exim_employee_n0 +POSTHOOK: query: create table exim_employee_n0 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_employee +POSTHOOK: Output: default@exim_employee_n0 PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n0 partition (emp_country="in", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n0 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", 
emp_state="tn") + into table exim_employee_n0 partition (emp_country="in", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Output: default@exim_employee_n0 +POSTHOOK: Output: default@exim_employee_n0@emp_country=in/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n0 partition (emp_country="in", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n0 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n0 partition (emp_country="in", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka +POSTHOOK: Output: default@exim_employee_n0 +POSTHOOK: Output: default@exim_employee_n0@emp_country=in/emp_state=ka PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn") + into table exim_employee_n0 partition (emp_country="us", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n0 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn") + into table exim_employee_n0 partition (emp_country="us", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Output: default@exim_employee_n0 +POSTHOOK: Output: default@exim_employee_n0@emp_country=us/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka") + into table exim_employee_n0 partition (emp_country="us", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n0 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka") + into table exim_employee_n0 partition (emp_country="us", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka +POSTHOOK: Output: default@exim_employee_n0 +POSTHOOK: Output: default@exim_employee_n0@emp_country=us/emp_state=ka #### A masked pattern was here #### -PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +PREHOOK: query: export table exim_employee_n0 to 'ql/test/data/exports/exim_employee' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn -PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka -PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: default@exim_employee_n0@emp_country=in/emp_state=ka +PREHOOK: Input: 
default@exim_employee_n0@emp_country=in/emp_state=tn +PREHOOK: Input: default@exim_employee_n0@emp_country=us/emp_state=ka +PREHOOK: Input: default@exim_employee_n0@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +POSTHOOK: query: export table exim_employee_n0 to 'ql/test/data/exports/exim_employee' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn -POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka -POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: default@exim_employee_n0@emp_country=in/emp_state=ka +POSTHOOK: Input: default@exim_employee_n0@emp_country=in/emp_state=tn +POSTHOOK: Input: default@exim_employee_n0@emp_country=us/emp_state=ka +POSTHOOK: Input: default@exim_employee_n0@emp_country=us/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_employee -PREHOOK: Output: default@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: default@exim_employee_n0 +PREHOOK: Output: default@exim_employee_n0 +POSTHOOK: query: drop table exim_employee_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_employee -POSTHOOK: Output: default@exim_employee +POSTHOOK: Input: default@exim_employee_n0 +POSTHOOK: Output: default@exim_employee_n0 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -94,7 +94,7 @@ POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer #### A masked pattern was here #### -PREHOOK: query: create external table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create external table exim_employee_n0 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile @@ -103,8 +103,8 @@ PREHOOK: query: create external table exim_employee ( emp_id int comment "employ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:importer -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: create external table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: importer@exim_employee_n0 +POSTHOOK: query: create external table exim_employee_n0 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile @@ -113,46 +113,46 @@ POSTHOOK: query: create external table exim_employee ( emp_id int comment "emplo POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Output: importer@exim_employee_n0 PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n0 partition (emp_country="in", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: importer@exim_employee +PREHOOK: Output: importer@exim_employee_n0 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", 
emp_state="tn") + into table exim_employee_n0 partition (emp_country="in", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Output: importer@exim_employee_n0 +POSTHOOK: Output: importer@exim_employee_n0@emp_country=in/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n0 partition (emp_country="in", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: importer@exim_employee +PREHOOK: Output: importer@exim_employee_n0 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n0 partition (emp_country="in", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka -PREHOOK: query: import external table exim_employee partition (emp_country="us", emp_state="tn") +POSTHOOK: Output: importer@exim_employee_n0 +POSTHOOK: Output: importer@exim_employee_n0@emp_country=in/emp_state=ka +PREHOOK: query: import external table exim_employee_n0 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' PREHOOK: type: IMPORT #### A masked pattern was here #### -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: import external table exim_employee partition (emp_country="us", emp_state="tn") +PREHOOK: Output: importer@exim_employee_n0 +POSTHOOK: query: import external table exim_employee_n0 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' POSTHOOK: type: IMPORT #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn -PREHOOK: query: describe extended exim_employee +POSTHOOK: Output: importer@exim_employee_n0 +POSTHOOK: Output: importer@exim_employee_n0@emp_country=us/emp_state=tn +PREHOOK: query: describe extended exim_employee_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_employee -POSTHOOK: query: describe extended exim_employee +PREHOOK: Input: importer@exim_employee_n0 +POSTHOOK: query: describe extended exim_employee_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n0 emp_id int employee id emp_country string two char iso code emp_state string free text @@ -163,19 +163,19 @@ emp_country string two char iso code emp_state string free text #### A masked pattern was here #### -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n0 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn -PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: importer@exim_employee_n0 +PREHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=ka +PREHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=tn +PREHOOK: Input: importer@exim_employee_n0@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n0 POSTHOOK: type: QUERY 
-POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn -POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n0 +POSTHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=ka +POSTHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n0@emp_country=us/emp_state=tn #### A masked pattern was here #### 1 in ka 2 in ka @@ -196,19 +196,19 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn 5 us tn 6 us tn #### A masked pattern was here #### -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n0 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn -PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: importer@exim_employee_n0 +PREHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=ka +PREHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=tn +PREHOOK: Input: importer@exim_employee_n0@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn -POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n0 +POSTHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=ka +POSTHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n0@emp_country=us/emp_state=tn #### A masked pattern was here #### 1 in ka 2 in ka @@ -223,28 +223,28 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn 5 in tn 6 in tn #### A masked pattern was here #### -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n0 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn -PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: importer@exim_employee_n0 +PREHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=ka +PREHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=tn +PREHOOK: Input: importer@exim_employee_n0@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn -POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n0 +POSTHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=ka +POSTHOOK: Input: importer@exim_employee_n0@emp_country=in/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n0@emp_country=us/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table 
+PREHOOK: query: drop table exim_employee_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n0
+PREHOOK: Output: importer@exim_employee_n0
+POSTHOOK: query: drop table exim_employee_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n0
+POSTHOOK: Output: importer@exim_employee_n0
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_16_part_external.q.out b/ql/src/test/results/clientpositive/exim_16_part_external.q.out
index af748c9273..f7208912db 100644
--- a/ql/src/test/results/clientpositive/exim_16_part_external.q.out
+++ b/ql/src/test/results/clientpositive/exim_16_part_external.q.out
@@ -1,86 +1,86 @@
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n11 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: default@exim_employee_n11
+POSTHOOK: query: create table exim_employee_n11 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Output: default@exim_employee_n11
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n11 partition (emp_country="in", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n11
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n11 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n11
+POSTHOOK: Output: default@exim_employee_n11@emp_country=in/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n11 partition (emp_country="in", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n11
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n11 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n11
+POSTHOOK: Output: default@exim_employee_n11@emp_country=in/emp_state=ka
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n11 partition (emp_country="us", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n11
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n11 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n11
+POSTHOOK: Output: default@exim_employee_n11@emp_country=us/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n11 partition (emp_country="us", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n11
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n11 partition (emp_country="us", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n11
+POSTHOOK: Output: default@exim_employee_n11@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n11 to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: default@exim_employee_n11@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n11@emp_country=in/emp_state=tn
+PREHOOK: Input: default@exim_employee_n11@emp_country=us/emp_state=ka
+PREHOOK: Input: default@exim_employee_n11@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n11 to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n11@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n11@emp_country=in/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n11@emp_country=us/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n11@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n11
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n11
+PREHOOK: Output: default@exim_employee_n11
+POSTHOOK: query: drop table exim_employee_n11
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n11
+POSTHOOK: Output: default@exim_employee_n11
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -94,7 +94,7 @@ POSTHOOK: query: use importer
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
#### A masked pattern was here ####
-PREHOOK: query: create external table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create external table exim_employee_n11 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
@@ -103,8 +103,8 @@ PREHOOK: query: create external table exim_employee ( emp_id int comment "employ
PREHOOK: type: CREATETABLE
#### A masked pattern was here ####
PREHOOK: Output: database:importer
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: create external table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: importer@exim_employee_n11
+POSTHOOK: query: create external table exim_employee_n11 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
@@ -113,25 +113,25 @@ POSTHOOK: query: create external table exim_employee ( emp_id int comment "emplo
POSTHOOK: type: CREATETABLE
#### A masked pattern was here ####
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
-PREHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn")
+POSTHOOK: Output: importer@exim_employee_n11
+PREHOOK: query: import table exim_employee_n11 partition (emp_country="us", emp_state="tn")
 from 'ql/test/data/exports/exim_employee'
 location 'ql/test/data/tablestore/exim_employee'
PREHOOK: type: IMPORT
#### A masked pattern was here ####
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn")
+PREHOOK: Output: importer@exim_employee_n11
+POSTHOOK: query: import table exim_employee_n11 partition (emp_country="us", emp_state="tn")
 from 'ql/test/data/exports/exim_employee'
 location 'ql/test/data/tablestore/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn
-PREHOOK: query: show table extended like exim_employee
+POSTHOOK: Output: importer@exim_employee_n11
+POSTHOOK: Output: importer@exim_employee_n11@emp_country=us/emp_state=tn
+PREHOOK: query: show table extended like exim_employee_n11
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n11
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n11
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -144,11 +144,11 @@ maxFileSize:11
minFileSize:11
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn")
+PREHOOK: query: show table extended like exim_employee_n11 partition (emp_country="us", emp_state="tn")
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn")
+POSTHOOK: query: show table extended like exim_employee_n11 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n11
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -162,15 +162,15 @@ minFileSize:11
#### A masked pattern was here ####
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n11
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n11
+PREHOOK: Input: importer@exim_employee_n11@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n11
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n11
+POSTHOOK: Input: importer@exim_employee_n11@emp_country=us/emp_state=tn
#### A masked pattern was here ####
1 us tn
2 us tn
@@ -179,24 +179,24 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
5 us tn
6 us tn
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n11
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n11
+PREHOOK: Input: importer@exim_employee_n11@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n11
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n11
+POSTHOOK: Input: importer@exim_employee_n11@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n11
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n11
+PREHOOK: Output: importer@exim_employee_n11
+POSTHOOK: query: drop table exim_employee_n11
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n11
+POSTHOOK: Output: importer@exim_employee_n11
#### A masked pattern was here ####
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_17_part_managed.q.out b/ql/src/test/results/clientpositive/exim_17_part_managed.q.out
index a92f95a7ff..5233fd728c 100644
--- a/ql/src/test/results/clientpositive/exim_17_part_managed.q.out
+++ b/ql/src/test/results/clientpositive/exim_17_part_managed.q.out
@@ -1,86 +1,86 @@
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n4 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: default@exim_employee_n4
+POSTHOOK: query: create table exim_employee_n4 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Output: default@exim_employee_n4
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n4 partition (emp_country="in", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n4
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n4 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n4
+POSTHOOK: Output: default@exim_employee_n4@emp_country=in/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n4 partition (emp_country="in", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n4
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n4 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n4
+POSTHOOK: Output: default@exim_employee_n4@emp_country=in/emp_state=ka
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n4 partition (emp_country="us", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n4
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n4 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n4
+POSTHOOK: Output: default@exim_employee_n4@emp_country=us/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
emp_state="ka") + into table exim_employee_n4 partition (emp_country="us", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n4 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka") + into table exim_employee_n4 partition (emp_country="us", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka +POSTHOOK: Output: default@exim_employee_n4 +POSTHOOK: Output: default@exim_employee_n4@emp_country=us/emp_state=ka #### A masked pattern was here #### -PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +PREHOOK: query: export table exim_employee_n4 to 'ql/test/data/exports/exim_employee' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn -PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka -PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: default@exim_employee_n4@emp_country=in/emp_state=ka +PREHOOK: Input: default@exim_employee_n4@emp_country=in/emp_state=tn +PREHOOK: Input: default@exim_employee_n4@emp_country=us/emp_state=ka +PREHOOK: Input: default@exim_employee_n4@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +POSTHOOK: query: export table exim_employee_n4 to 'ql/test/data/exports/exim_employee' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn -POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka -POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: default@exim_employee_n4@emp_country=in/emp_state=ka +POSTHOOK: Input: default@exim_employee_n4@emp_country=in/emp_state=tn +POSTHOOK: Input: default@exim_employee_n4@emp_country=us/emp_state=ka +POSTHOOK: Input: default@exim_employee_n4@emp_country=us/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_employee -PREHOOK: Output: default@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: default@exim_employee_n4 +PREHOOK: Output: default@exim_employee_n4 +POSTHOOK: query: drop table exim_employee_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_employee -POSTHOOK: Output: default@exim_employee +POSTHOOK: Input: default@exim_employee_n4 +POSTHOOK: Output: default@exim_employee_n4 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -94,51 +94,51 @@ POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer #### A masked pattern was here #### -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table exim_employee_n4 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:importer 
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: importer@exim_employee_n4
+POSTHOOK: query: create table exim_employee_n4 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
-PREHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn")
+POSTHOOK: Output: importer@exim_employee_n4
+PREHOOK: query: import table exim_employee_n4 partition (emp_country="us", emp_state="tn")
 from 'ql/test/data/exports/exim_employee'
 location 'ql/test/data/tablestore/exim_employee'
PREHOOK: type: IMPORT
#### A masked pattern was here ####
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn")
+PREHOOK: Output: importer@exim_employee_n4
+POSTHOOK: query: import table exim_employee_n4 partition (emp_country="us", emp_state="tn")
 from 'ql/test/data/exports/exim_employee'
 location 'ql/test/data/tablestore/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn
-PREHOOK: query: alter table exim_employee add partition (emp_country="us", emp_state="ap")
+POSTHOOK: Output: importer@exim_employee_n4
+POSTHOOK: Output: importer@exim_employee_n4@emp_country=us/emp_state=tn
+PREHOOK: query: alter table exim_employee_n4 add partition (emp_country="us", emp_state="ap")
 location 'ql/test/data/tablestore2/exim_employee'
PREHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: alter table exim_employee add partition (emp_country="us", emp_state="ap")
+PREHOOK: Output: importer@exim_employee_n4
+POSTHOOK: query: alter table exim_employee_n4 add partition (emp_country="us", emp_state="ap")
 location 'ql/test/data/tablestore2/exim_employee'
POSTHOOK: type: ALTERTABLE_ADDPARTS
#### A masked pattern was here ####
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=ap
-PREHOOK: query: show table extended like exim_employee
+POSTHOOK: Output: importer@exim_employee_n4
+POSTHOOK: Output: importer@exim_employee_n4@emp_country=us/emp_state=ap
+PREHOOK: query: show table extended like exim_employee_n4
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n4
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n4
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -151,11 +151,11 @@ maxFileSize:11
minFileSize:11
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn")
+PREHOOK: query: show table extended like exim_employee_n4 partition (emp_country="us", emp_state="tn")
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn")
+POSTHOOK: query: show table extended like exim_employee_n4 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n4
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -168,11 +168,11 @@ maxFileSize:11
minFileSize:11
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="ap")
+PREHOOK: query: show table extended like exim_employee_n4 partition (emp_country="us", emp_state="ap")
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="ap")
+POSTHOOK: query: show table extended like exim_employee_n4 partition (emp_country="us", emp_state="ap")
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n4
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -186,17 +186,17 @@ minFileSize:0
#### A masked pattern was here ####
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n4
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ap
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n4
+PREHOOK: Input: importer@exim_employee_n4@emp_country=us/emp_state=ap
+PREHOOK: Input: importer@exim_employee_n4@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n4
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ap
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n4
+POSTHOOK: Input: importer@exim_employee_n4@emp_country=us/emp_state=ap
+POSTHOOK: Input: importer@exim_employee_n4@emp_country=us/emp_state=tn
#### A masked pattern was here ####
1 us tn
2 us tn
@@ -205,26 +205,26 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
5 us tn
6 us tn
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n4
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ap
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n4
+PREHOOK: Input: importer@exim_employee_n4@emp_country=us/emp_state=ap
+PREHOOK: Input: importer@exim_employee_n4@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n4
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=ap
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n4
+POSTHOOK: Input: importer@exim_employee_n4@emp_country=us/emp_state=ap
+POSTHOOK: Input: importer@exim_employee_n4@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n4
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
table exim_employee +PREHOOK: Input: importer@exim_employee_n4 +PREHOOK: Output: importer@exim_employee_n4 +POSTHOOK: query: drop table exim_employee_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n4 +POSTHOOK: Output: importer@exim_employee_n4 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_18_part_external.q.out b/ql/src/test/results/clientpositive/exim_18_part_external.q.out index c7d11a0af8..9d65a1d45a 100644 --- a/ql/src/test/results/clientpositive/exim_18_part_external.q.out +++ b/ql/src/test/results/clientpositive/exim_18_part_external.q.out @@ -1,86 +1,86 @@ -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table exim_employee_n14 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: default@exim_employee_n14 +POSTHOOK: query: create table exim_employee_n14 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_employee +POSTHOOK: Output: default@exim_employee_n14 PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n14 partition (emp_country="in", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n14 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n14 partition (emp_country="in", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Output: default@exim_employee_n14 +POSTHOOK: Output: default@exim_employee_n14@emp_country=in/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n14 partition (emp_country="in", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n14 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n14 partition (emp_country="in", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka +POSTHOOK: Output: default@exim_employee_n14 +POSTHOOK: Output: default@exim_employee_n14@emp_country=in/emp_state=ka PREHOOK: query: load data local inpath "../../data/files/test.dat" - 
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n14 partition (emp_country="us", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n14
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n14 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n14
+POSTHOOK: Output: default@exim_employee_n14@emp_country=us/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n14 partition (emp_country="us", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n14
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n14 partition (emp_country="us", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n14
+POSTHOOK: Output: default@exim_employee_n14@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n14 to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: default@exim_employee_n14@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n14@emp_country=in/emp_state=tn
+PREHOOK: Input: default@exim_employee_n14@emp_country=us/emp_state=ka
+PREHOOK: Input: default@exim_employee_n14@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n14 to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n14@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n14@emp_country=in/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n14@emp_country=us/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n14@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n14
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n14
+PREHOOK: Output: default@exim_employee_n14
+POSTHOOK: query: drop table exim_employee_n14
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n14
+POSTHOOK: Output: default@exim_employee_n14
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -93,24 +93,24 @@ PREHOOK: Input: database:importer
POSTHOOK: query: use importer
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
-PREHOOK: query: import external table exim_employee partition (emp_country="us", emp_state="tn")
+PREHOOK: query: import external table exim_employee_n14 partition (emp_country="us", emp_state="tn")
 from 'ql/test/data/exports/exim_employee'
PREHOOK: type: IMPORT
#### A masked pattern was here ####
PREHOOK: Output: database:importer
-POSTHOOK: query: import external table exim_employee partition (emp_country="us", emp_state="tn")
+POSTHOOK: query: import external table exim_employee_n14 partition (emp_country="us", emp_state="tn")
 from 'ql/test/data/exports/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn
-PREHOOK: query: describe extended exim_employee
+POSTHOOK: Output: importer@exim_employee_n14
+POSTHOOK: Output: importer@exim_employee_n14@emp_country=us/emp_state=tn
+PREHOOK: query: describe extended exim_employee_n14
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee
+PREHOOK: Input: importer@exim_employee_n14
+POSTHOOK: query: describe extended exim_employee_n14
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n14
emp_id int employee id
emp_country string two char iso code
emp_state string free text
@@ -121,11 +121,11 @@ emp_country string two char iso code
 emp_state string free text
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee
+PREHOOK: query: show table extended like exim_employee_n14
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n14
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n14
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -138,11 +138,11 @@ maxFileSize:11
minFileSize:11
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn")
+PREHOOK: query: show table extended like exim_employee_n14 partition (emp_country="us", emp_state="tn")
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn")
+POSTHOOK: query: show table extended like exim_employee_n14 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n14
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -155,15 +155,15 @@ maxFileSize:11
minFileSize:11
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n14
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n14
+PREHOOK: Input: importer@exim_employee_n14@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n14
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n14
+POSTHOOK: Input: importer@exim_employee_n14@emp_country=us/emp_state=tn
#### A masked pattern was here ####
1 us tn
2 us tn
@@ -172,24 +172,24 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
5 us tn
6 us tn
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n14
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n14
+PREHOOK: Input: importer@exim_employee_n14@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n14
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n14
+POSTHOOK: Input: importer@exim_employee_n14@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n14
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n14
+PREHOOK: Output: importer@exim_employee_n14
+POSTHOOK: query: drop table exim_employee_n14
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n14
+POSTHOOK: Output: importer@exim_employee_n14
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_19_00_part_external_location.q.out b/ql/src/test/results/clientpositive/exim_19_00_part_external_location.q.out
index d617a1d9de..3a296a91c2 100644
--- a/ql/src/test/results/clientpositive/exim_19_00_part_external_location.q.out
+++ b/ql/src/test/results/clientpositive/exim_19_00_part_external_location.q.out
@@ -1,60 +1,60 @@
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n2 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: default@exim_employee_n2
+POSTHOOK: query: create table exim_employee_n2 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Output: default@exim_employee_n2
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n2 partition (emp_country="in", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n2
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n2 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n2
+POSTHOOK: Output: default@exim_employee_n2@emp_country=in/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test2.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n2 partition (emp_country="in", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n2
POSTHOOK: query: load data local inpath "../../data/files/test2.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n2 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n2
+POSTHOOK: Output: default@exim_employee_n2@emp_country=in/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n2 to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
+PREHOOK: Input: default@exim_employee_n2@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n2@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n2 to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n2@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n2@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n2
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n2
+PREHOOK: Output: default@exim_employee_n2
+POSTHOOK: query: drop table exim_employee_n2
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n2
+POSTHOOK: Output: default@exim_employee_n2
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -68,27 +68,27 @@ POSTHOOK: query: use importer
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
#### A masked pattern was here ####
-PREHOOK: query: import external table exim_employee
+PREHOOK: query: import external table exim_employee_n2
 from 'ql/test/data/exports/exim_employee'
 location 'ql/test/data/tablestore/exim_employee'
PREHOOK: type: IMPORT
#### A masked pattern was here ####
PREHOOK: Output: database:importer
-POSTHOOK: query: import external table exim_employee
+POSTHOOK: query: import external table exim_employee_n2
 from 'ql/test/data/exports/exim_employee'
 location 'ql/test/data/tablestore/exim_employee'
POSTHOOK: type: IMPORT
#### A masked pattern was here ####
POSTHOOK: Output: database:importer
-POSTHOOK: Output: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: query: describe extended exim_employee
+POSTHOOK: Output: importer@exim_employee_n2
+POSTHOOK: Output: importer@exim_employee_n2@emp_country=in/emp_state=ka
+POSTHOOK: Output: importer@exim_employee_n2@emp_country=in/emp_state=tn
+PREHOOK: query: describe extended exim_employee_n2
PREHOOK: type: DESCTABLE
-PREHOOK: Input: importer@exim_employee
-POSTHOOK: query: describe extended exim_employee
+PREHOOK: Input: importer@exim_employee_n2
+POSTHOOK: query: describe extended exim_employee_n2
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n2
emp_id int employee id
emp_country string two char iso code
emp_state string free text
@@ -99,11 +99,11 @@ emp_country string two char iso code
 emp_state string free text
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee
+PREHOOK: query: show table extended like exim_employee_n2
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee
+POSTHOOK: query: show table extended like exim_employee_n2
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n2
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -116,11 +116,11 @@ maxFileSize:23
minFileSize:11
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee partition (emp_country="in", emp_state="tn")
+PREHOOK: query: show table extended like exim_employee_n2 partition (emp_country="in", emp_state="tn")
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee partition (emp_country="in", emp_state="tn")
+POSTHOOK: query: show table extended like exim_employee_n2 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n2
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -133,11 +133,11 @@ maxFileSize:11
minFileSize:11
#### A masked pattern was here ####
-PREHOOK: query: show table extended like exim_employee partition (emp_country="in", emp_state="ka")
+PREHOOK: query: show table extended like exim_employee_n2 partition (emp_country="in", emp_state="ka")
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like exim_employee partition (emp_country="in", emp_state="ka")
+POSTHOOK: query: show table extended like exim_employee_n2 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:exim_employee
+tableName:exim_employee_n2
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -151,17 +151,17 @@ minFileSize:23
#### A masked pattern was here ####
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n2
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n2
+PREHOOK: Input: importer@exim_employee_n2@emp_country=in/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n2@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n2
+POSTHOOK: Input: importer@exim_employee_n2@emp_country=in/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n2@emp_country=in/emp_state=tn
#### A masked pattern was here ####
101 in ka
202 in ka
@@ -176,26 +176,26 @@ POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
5 in tn
6 in tn
#### A masked pattern was here ####
-PREHOOK: query: select * from exim_employee
+PREHOOK: query: select * from exim_employee_n2
PREHOOK: type: QUERY
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
+PREHOOK: Input: importer@exim_employee_n2
+PREHOOK: Input: importer@exim_employee_n2@emp_country=in/emp_state=ka
+PREHOOK: Input: importer@exim_employee_n2@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: select * from exim_employee
+POSTHOOK: query: select * from exim_employee_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Input: importer@exim_employee_n2
+POSTHOOK: Input: importer@exim_employee_n2@emp_country=in/emp_state=ka
+POSTHOOK: Input: importer@exim_employee_n2@emp_country=in/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n2
PREHOOK: type: DROPTABLE
-PREHOOK: Input: importer@exim_employee
-PREHOOK: Output: importer@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: importer@exim_employee_n2
+PREHOOK: Output: importer@exim_employee_n2
+POSTHOOK: query: drop table exim_employee_n2
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: importer@exim_employee
-POSTHOOK: Output: importer@exim_employee
+POSTHOOK: Input: importer@exim_employee_n2
+POSTHOOK: Output: importer@exim_employee_n2
PREHOOK: query: drop database importer
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:importer
diff --git a/ql/src/test/results/clientpositive/exim_19_part_external_location.q.out b/ql/src/test/results/clientpositive/exim_19_part_external_location.q.out
index e722113cc1..268e96907d 100644
--- a/ql/src/test/results/clientpositive/exim_19_part_external_location.q.out
+++ b/ql/src/test/results/clientpositive/exim_19_part_external_location.q.out
@@ -1,86 +1,86 @@
-PREHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: query: create table exim_employee_n13 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id")
+PREHOOK: Output: default@exim_employee_n13
+POSTHOOK: query: create table exim_employee_n13 ( emp_id int comment "employee id")
 comment "employee table"
 partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
 stored as textfile
 tblproperties("creator"="krishna")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Output: default@exim_employee_n13
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n13 partition (emp_country="in", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n13
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="tn")
+ into table exim_employee_n13 partition (emp_country="in", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n13
+POSTHOOK: Output: default@exim_employee_n13@emp_country=in/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n13 partition (emp_country="in", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n13
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="in", emp_state="ka")
+ into table exim_employee_n13 partition (emp_country="in", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n13
+POSTHOOK: Output: default@exim_employee_n13@emp_country=in/emp_state=ka
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n13 partition (emp_country="us", emp_state="tn")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n13
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="tn")
+ into table exim_employee_n13 partition (emp_country="us", emp_state="tn")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Output: default@exim_employee_n13
+POSTHOOK: Output: default@exim_employee_n13@emp_country=us/emp_state=tn
PREHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n13 partition (emp_country="us", emp_state="ka")
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@exim_employee
+PREHOOK: Output: default@exim_employee_n13
POSTHOOK: query: load data local inpath "../../data/files/test.dat"
- into table exim_employee partition (emp_country="us", emp_state="ka")
+ into table exim_employee_n13 partition (emp_country="us", emp_state="ka")
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@exim_employee
-POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka
+POSTHOOK: Output: default@exim_employee_n13
+POSTHOOK: Output: default@exim_employee_n13@emp_country=us/emp_state=ka
#### A masked pattern was here ####
-PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+PREHOOK: query: export table exim_employee_n13 to 'ql/test/data/exports/exim_employee'
PREHOOK: type: EXPORT
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+PREHOOK: Input: default@exim_employee_n13@emp_country=in/emp_state=ka
+PREHOOK: Input: default@exim_employee_n13@emp_country=in/emp_state=tn
+PREHOOK: Input: default@exim_employee_n13@emp_country=us/emp_state=ka
+PREHOOK: Input: default@exim_employee_n13@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee'
+POSTHOOK: query: export table exim_employee_n13 to 'ql/test/data/exports/exim_employee'
POSTHOOK: type: EXPORT
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka
-POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n13@emp_country=in/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n13@emp_country=in/emp_state=tn
+POSTHOOK: Input: default@exim_employee_n13@emp_country=us/emp_state=ka
+POSTHOOK: Input: default@exim_employee_n13@emp_country=us/emp_state=tn
#### A masked pattern was here ####
-PREHOOK: query: drop table exim_employee
+PREHOOK: query: drop table exim_employee_n13
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@exim_employee
-PREHOOK: Output: default@exim_employee
-POSTHOOK: query: drop table exim_employee
+PREHOOK: Input: default@exim_employee_n13
+PREHOOK: Output: default@exim_employee_n13
+POSTHOOK: query: drop table exim_employee_n13
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@exim_employee
-POSTHOOK: Output: default@exim_employee
+POSTHOOK: Input: default@exim_employee_n13
+POSTHOOK: Output: default@exim_employee_n13
PREHOOK: query: create database importer
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:importer
@@ -94,26 +94,26 @@ POSTHOOK: query: use importer
POSTHOOK: type: SWITCHDATABASE
POSTHOOK: Input: database:importer
#### A masked pattern was here ####
-PREHOOK: query: import external table exim_employee partition (emp_country="us", emp_state="tn")
(emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee' PREHOOK: type: IMPORT #### A masked pattern was here #### PREHOOK: Output: database:importer -POSTHOOK: query: import external table exim_employee partition (emp_country="us", emp_state="tn") +POSTHOOK: query: import external table exim_employee_n13 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn -PREHOOK: query: describe extended exim_employee +POSTHOOK: Output: importer@exim_employee_n13 +POSTHOOK: Output: importer@exim_employee_n13@emp_country=us/emp_state=tn +PREHOOK: query: describe extended exim_employee_n13 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_employee -POSTHOOK: query: describe extended exim_employee +PREHOOK: Input: importer@exim_employee_n13 +POSTHOOK: query: describe extended exim_employee_n13 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n13 emp_id int employee id emp_country string two char iso code emp_state string free text @@ -124,11 +124,11 @@ emp_country string two char iso code emp_state string free text #### A masked pattern was here #### -PREHOOK: query: show table extended like exim_employee +PREHOOK: query: show table extended like exim_employee_n13 PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like exim_employee +POSTHOOK: query: show table extended like exim_employee_n13 POSTHOOK: type: SHOW_TABLESTATUS -tableName:exim_employee +tableName:exim_employee_n13 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -141,11 +141,11 @@ maxFileSize:11 minFileSize:11 #### A masked pattern was here #### -PREHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn") +PREHOOK: query: show table extended like exim_employee_n13 partition (emp_country="us", emp_state="tn") PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn") +POSTHOOK: query: show table extended like exim_employee_n13 partition (emp_country="us", emp_state="tn") POSTHOOK: type: SHOW_TABLESTATUS -tableName:exim_employee +tableName:exim_employee_n13 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -159,15 +159,15 @@ minFileSize:11 #### A masked pattern was here #### #### A masked pattern was here #### -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n13 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: importer@exim_employee_n13 +PREHOOK: Input: importer@exim_employee_n13@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n13 +POSTHOOK: Input: 
importer@exim_employee_n13@emp_country=us/emp_state=tn #### A masked pattern was here #### 1 us tn 2 us tn @@ -176,24 +176,24 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn 5 us tn 6 us tn #### A masked pattern was here #### -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n13 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: importer@exim_employee_n13 +PREHOOK: Input: importer@exim_employee_n13@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n13 +POSTHOOK: Input: importer@exim_employee_n13@emp_country=us/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n13 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_employee -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: importer@exim_employee_n13 +PREHOOK: Output: importer@exim_employee_n13 +POSTHOOK: query: drop table exim_employee_n13 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n13 +POSTHOOK: Output: importer@exim_employee_n13 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_20_part_managed_location.q.out b/ql/src/test/results/clientpositive/exim_20_part_managed_location.q.out index 887e51c171..bdebabda3c 100644 --- a/ql/src/test/results/clientpositive/exim_20_part_managed_location.q.out +++ b/ql/src/test/results/clientpositive/exim_20_part_managed_location.q.out @@ -1,86 +1,86 @@ -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table exim_employee_n1 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: default@exim_employee_n1 +POSTHOOK: query: create table exim_employee_n1 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_employee +POSTHOOK: Output: default@exim_employee_n1 PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n1 partition (emp_country="in", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n1 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n1 
partition (emp_country="in", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Output: default@exim_employee_n1 +POSTHOOK: Output: default@exim_employee_n1@emp_country=in/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n1 partition (emp_country="in", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n1 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="ka") + into table exim_employee_n1 partition (emp_country="in", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=ka +POSTHOOK: Output: default@exim_employee_n1 +POSTHOOK: Output: default@exim_employee_n1@emp_country=in/emp_state=ka PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn") + into table exim_employee_n1 partition (emp_country="us", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n1 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="tn") + into table exim_employee_n1 partition (emp_country="us", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Output: default@exim_employee_n1 +POSTHOOK: Output: default@exim_employee_n1@emp_country=us/emp_state=tn PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka") + into table exim_employee_n1 partition (emp_country="us", emp_state="ka") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n1 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="us", emp_state="ka") + into table exim_employee_n1 partition (emp_country="us", emp_state="ka") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=us/emp_state=ka +POSTHOOK: Output: default@exim_employee_n1 +POSTHOOK: Output: default@exim_employee_n1@emp_country=us/emp_state=ka #### A masked pattern was here #### -PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +PREHOOK: query: export table exim_employee_n1 to 'ql/test/data/exports/exim_employee' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn -PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka -PREHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: default@exim_employee_n1@emp_country=in/emp_state=ka +PREHOOK: Input: default@exim_employee_n1@emp_country=in/emp_state=tn +PREHOOK: Input: 
default@exim_employee_n1@emp_country=us/emp_state=ka +PREHOOK: Input: default@exim_employee_n1@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +POSTHOOK: query: export table exim_employee_n1 to 'ql/test/data/exports/exim_employee' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=ka -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn -POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=ka -POSTHOOK: Input: default@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: default@exim_employee_n1@emp_country=in/emp_state=ka +POSTHOOK: Input: default@exim_employee_n1@emp_country=in/emp_state=tn +POSTHOOK: Input: default@exim_employee_n1@emp_country=us/emp_state=ka +POSTHOOK: Input: default@exim_employee_n1@emp_country=us/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_employee -PREHOOK: Output: default@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: default@exim_employee_n1 +PREHOOK: Output: default@exim_employee_n1 +POSTHOOK: query: drop table exim_employee_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_employee -POSTHOOK: Output: default@exim_employee +POSTHOOK: Input: default@exim_employee_n1 +POSTHOOK: Output: default@exim_employee_n1 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -94,26 +94,26 @@ POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer #### A masked pattern was here #### -PREHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn") +PREHOOK: query: import table exim_employee_n1 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee' PREHOOK: type: IMPORT #### A masked pattern was here #### PREHOOK: Output: database:importer -POSTHOOK: query: import table exim_employee partition (emp_country="us", emp_state="tn") +POSTHOOK: query: import table exim_employee_n1 partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee' location 'ql/test/data/tablestore/exim_employee' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=us/emp_state=tn -PREHOOK: query: describe extended exim_employee +POSTHOOK: Output: importer@exim_employee_n1 +POSTHOOK: Output: importer@exim_employee_n1@emp_country=us/emp_state=tn +PREHOOK: query: describe extended exim_employee_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_employee -POSTHOOK: query: describe extended exim_employee +PREHOOK: Input: importer@exim_employee_n1 +POSTHOOK: query: describe extended exim_employee_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n1 emp_id int employee id emp_country string two char iso code emp_state string free text @@ -124,11 +124,11 @@ emp_country string two char iso code emp_state string free text #### A masked pattern was here #### -PREHOOK: query: show table extended like exim_employee +PREHOOK: query: show table extended like exim_employee_n1 PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like 
exim_employee +POSTHOOK: query: show table extended like exim_employee_n1 POSTHOOK: type: SHOW_TABLESTATUS -tableName:exim_employee +tableName:exim_employee_n1 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -141,11 +141,11 @@ maxFileSize:11 minFileSize:11 #### A masked pattern was here #### -PREHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn") +PREHOOK: query: show table extended like exim_employee_n1 partition (emp_country="us", emp_state="tn") PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like exim_employee partition (emp_country="us", emp_state="tn") +POSTHOOK: query: show table extended like exim_employee_n1 partition (emp_country="us", emp_state="tn") POSTHOOK: type: SHOW_TABLESTATUS -tableName:exim_employee +tableName:exim_employee_n1 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -159,15 +159,15 @@ minFileSize:11 #### A masked pattern was here #### #### A masked pattern was here #### -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n1 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: importer@exim_employee_n1 +PREHOOK: Input: importer@exim_employee_n1@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n1 +POSTHOOK: Input: importer@exim_employee_n1@emp_country=us/emp_state=tn #### A masked pattern was here #### 1 us tn 2 us tn @@ -176,24 +176,24 @@ POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn 5 us tn 6 us tn #### A masked pattern was here #### -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n1 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +PREHOOK: Input: importer@exim_employee_n1 +PREHOOK: Input: importer@exim_employee_n1@emp_country=us/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=us/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n1 +POSTHOOK: Input: importer@exim_employee_n1@emp_country=us/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_employee -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: importer@exim_employee_n1 +PREHOOK: Output: importer@exim_employee_n1 +POSTHOOK: query: drop table exim_employee_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n1 +POSTHOOK: Output: importer@exim_employee_n1 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git 
a/ql/src/test/results/clientpositive/exim_21_export_authsuccess.q.out b/ql/src/test/results/clientpositive/exim_21_export_authsuccess.q.out index 56addeeebb..67cf53cffc 100644 --- a/ql/src/test/results/clientpositive/exim_21_export_authsuccess.q.out +++ b/ql/src/test/results/clientpositive/exim_21_export_authsuccess.q.out @@ -1,39 +1,39 @@ -PREHOOK: query: create table exim_department ( dep_id int) stored as textfile +PREHOOK: query: create table exim_department_n3 ( dep_id int) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create table exim_department ( dep_id int) stored as textfile +PREHOOK: Output: default@exim_department_n3 +POSTHOOK: query: create table exim_department_n3 ( dep_id int) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +POSTHOOK: Output: default@exim_department_n3 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n3 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +PREHOOK: Output: default@exim_department_n3 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n3 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_department -PREHOOK: query: grant Select on table exim_department to user hive_test_user +POSTHOOK: Output: default@exim_department_n3 +PREHOOK: query: grant Select on table exim_department_n3 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@exim_department -POSTHOOK: query: grant Select on table exim_department to user hive_test_user +PREHOOK: Output: default@exim_department_n3 +POSTHOOK: query: grant Select on table exim_department_n3 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n3 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n3 to 'ql/test/data/exports/exim_department' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n3 #### A masked pattern was here #### -POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n3 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n3 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n3 +PREHOOK: Output: default@exim_department_n3 +POSTHOOK: query: drop table exim_department_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n3 +POSTHOOK: Output: default@exim_department_n3 diff --git 
a/ql/src/test/results/clientpositive/exim_22_import_exist_authsuccess.q.out b/ql/src/test/results/clientpositive/exim_22_import_exist_authsuccess.q.out index 97c3b28187..543a3c9bd8 100644 --- a/ql/src/test/results/clientpositive/exim_22_import_exist_authsuccess.q.out +++ b/ql/src/test/results/clientpositive/exim_22_import_exist_authsuccess.q.out @@ -1,36 +1,36 @@ -PREHOOK: query: create table exim_department ( dep_id int) stored as textfile +PREHOOK: query: create table exim_department_n1 ( dep_id int) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create table exim_department ( dep_id int) stored as textfile +PREHOOK: Output: default@exim_department_n1 +POSTHOOK: query: create table exim_department_n1 ( dep_id int) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +POSTHOOK: Output: default@exim_department_n1 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +PREHOOK: Output: default@exim_department_n1 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n1 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n1 to 'ql/test/data/exports/exim_department' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n1 #### A masked pattern was here #### -POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n1 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n1 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n1 +PREHOOK: Output: default@exim_department_n1 +POSTHOOK: query: drop table exim_department_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n1 +POSTHOOK: Output: default@exim_department_n1 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -43,41 +43,41 @@ PREHOOK: Input: database:importer POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer -PREHOOK: query: create table exim_department ( dep_id int) stored as textfile +PREHOOK: query: create table exim_department_n1 ( dep_id int) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:importer -PREHOOK: Output: importer@exim_department -POSTHOOK: query: create table exim_department ( dep_id int) stored as textfile +PREHOOK: Output: 
importer@exim_department_n1 +POSTHOOK: query: create table exim_department_n1 ( dep_id int) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_department -PREHOOK: query: grant Alter on table exim_department to user hive_test_user +POSTHOOK: Output: importer@exim_department_n1 +PREHOOK: query: grant Alter on table exim_department_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: importer@exim_department -POSTHOOK: query: grant Alter on table exim_department to user hive_test_user +PREHOOK: Output: importer@exim_department_n1 +POSTHOOK: query: grant Alter on table exim_department_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: importer@exim_department -PREHOOK: query: grant Update on table exim_department to user hive_test_user +POSTHOOK: Output: importer@exim_department_n1 +PREHOOK: query: grant Update on table exim_department_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: importer@exim_department -POSTHOOK: query: grant Update on table exim_department to user hive_test_user +PREHOOK: Output: importer@exim_department_n1 +POSTHOOK: query: grant Update on table exim_department_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: importer@exim_department +POSTHOOK: Output: importer@exim_department_n1 PREHOOK: query: import from 'ql/test/data/exports/exim_department' PREHOOK: type: IMPORT #### A masked pattern was here #### -PREHOOK: Output: importer@exim_department +PREHOOK: Output: importer@exim_department_n1 POSTHOOK: query: import from 'ql/test/data/exports/exim_department' POSTHOOK: type: IMPORT #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_department -PREHOOK: query: select * from exim_department +POSTHOOK: Output: importer@exim_department_n1 +PREHOOK: query: select * from exim_department_n1 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n1 #### A masked pattern was here #### 1 2 @@ -85,14 +85,14 @@ POSTHOOK: Input: importer@exim_department 4 5 6 -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n1 +PREHOOK: Output: importer@exim_department_n1 +POSTHOOK: query: drop table exim_department_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n1 +POSTHOOK: Output: importer@exim_department_n1 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_23_import_part_authsuccess.q.out b/ql/src/test/results/clientpositive/exim_23_import_part_authsuccess.q.out index 5f78a76209..b3d14340ac 100644 --- a/ql/src/test/results/clientpositive/exim_23_import_part_authsuccess.q.out +++ b/ql/src/test/results/clientpositive/exim_23_import_part_authsuccess.q.out @@ -1,47 +1,47 @@ -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table 
exim_employee_n7 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: default@exim_employee_n7 +POSTHOOK: query: create table exim_employee_n7 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_employee +POSTHOOK: Output: default@exim_employee_n7 PREHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n7 partition (emp_country="in", emp_state="tn") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee +PREHOOK: Output: default@exim_employee_n7 POSTHOOK: query: load data local inpath "../../data/files/test.dat" - into table exim_employee partition (emp_country="in", emp_state="tn") + into table exim_employee_n7 partition (emp_country="in", emp_state="tn") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Output: default@exim_employee_n7 +POSTHOOK: Output: default@exim_employee_n7@emp_country=in/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +PREHOOK: query: export table exim_employee_n7 to 'ql/test/data/exports/exim_employee' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn +PREHOOK: Input: default@exim_employee_n7@emp_country=in/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +POSTHOOK: query: export table exim_employee_n7 to 'ql/test/data/exports/exim_employee' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Input: default@exim_employee_n7@emp_country=in/emp_state=tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n7 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_employee -PREHOOK: Output: default@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: default@exim_employee_n7 +PREHOOK: Output: default@exim_employee_n7 +POSTHOOK: query: drop table exim_employee_n7 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_employee -POSTHOOK: Output: default@exim_employee +POSTHOOK: Input: default@exim_employee_n7 +POSTHOOK: Output: default@exim_employee_n7 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -54,52 +54,52 @@ PREHOOK: Input: database:importer POSTHOOK: query: use importer POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:importer -PREHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: query: create table exim_employee_n7 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char 
iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") PREHOOK: type: CREATETABLE PREHOOK: Output: database:importer -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int comment "employee id") +PREHOOK: Output: importer@exim_employee_n7 +POSTHOOK: query: create table exim_employee_n7 ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile tblproperties("creator"="krishna") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_employee -PREHOOK: query: grant Alter on table exim_employee to user hive_test_user +POSTHOOK: Output: importer@exim_employee_n7 +PREHOOK: query: grant Alter on table exim_employee_n7 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: grant Alter on table exim_employee to user hive_test_user +PREHOOK: Output: importer@exim_employee_n7 +POSTHOOK: query: grant Alter on table exim_employee_n7 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: importer@exim_employee -PREHOOK: query: grant Update on table exim_employee to user hive_test_user +POSTHOOK: Output: importer@exim_employee_n7 +PREHOOK: query: grant Update on table exim_employee_n7 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: grant Update on table exim_employee to user hive_test_user +PREHOOK: Output: importer@exim_employee_n7 +POSTHOOK: query: grant Update on table exim_employee_n7 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Output: importer@exim_employee_n7 PREHOOK: query: import from 'ql/test/data/exports/exim_employee' PREHOOK: type: IMPORT #### A masked pattern was here #### -PREHOOK: Output: importer@exim_employee +PREHOOK: Output: importer@exim_employee_n7 POSTHOOK: query: import from 'ql/test/data/exports/exim_employee' POSTHOOK: type: IMPORT #### A masked pattern was here #### -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=in/emp_state=tn -PREHOOK: query: select * from exim_employee +POSTHOOK: Output: importer@exim_employee_n7 +POSTHOOK: Output: importer@exim_employee_n7@emp_country=in/emp_state=tn +PREHOOK: query: select * from exim_employee_n7 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn +PREHOOK: Input: importer@exim_employee_n7 +PREHOOK: Input: importer@exim_employee_n7@emp_country=in/emp_state=tn #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn +POSTHOOK: Input: importer@exim_employee_n7 +POSTHOOK: Input: importer@exim_employee_n7@emp_country=in/emp_state=tn #### A masked pattern was here #### 1 in tn 2 in tn @@ -108,14 +108,14 @@ POSTHOOK: Input: importer@exim_employee@emp_country=in/emp_state=tn 5 in tn 6 in tn #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n7 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_employee -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: 
Input: importer@exim_employee_n7 +PREHOOK: Output: importer@exim_employee_n7 +POSTHOOK: query: drop table exim_employee_n7 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n7 +POSTHOOK: Output: importer@exim_employee_n7 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_24_import_nonexist_authsuccess.q.out b/ql/src/test/results/clientpositive/exim_24_import_nonexist_authsuccess.q.out index 53ea960306..1d36f1971b 100644 --- a/ql/src/test/results/clientpositive/exim_24_import_nonexist_authsuccess.q.out +++ b/ql/src/test/results/clientpositive/exim_24_import_nonexist_authsuccess.q.out @@ -1,36 +1,36 @@ -PREHOOK: query: create table exim_department ( dep_id int) stored as textfile +PREHOOK: query: create table exim_department_n6 ( dep_id int) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_department -POSTHOOK: query: create table exim_department ( dep_id int) stored as textfile +PREHOOK: Output: default@exim_department_n6 +POSTHOOK: query: create table exim_department_n6 ( dep_id int) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_department -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +POSTHOOK: Output: default@exim_department_n6 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n6 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_department -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department +PREHOOK: Output: default@exim_department_n6 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_department_n6 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_department +POSTHOOK: Output: default@exim_department_n6 #### A masked pattern was here #### -PREHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +PREHOOK: query: export table exim_department_n6 to 'ql/test/data/exports/exim_department' PREHOOK: type: EXPORT -PREHOOK: Input: default@exim_department +PREHOOK: Input: default@exim_department_n6 #### A masked pattern was here #### -POSTHOOK: query: export table exim_department to 'ql/test/data/exports/exim_department' +POSTHOOK: query: export table exim_department_n6 to 'ql/test/data/exports/exim_department' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_department +POSTHOOK: Input: default@exim_department_n6 #### A masked pattern was here #### -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n6 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_department -PREHOOK: Output: default@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: default@exim_department_n6 +PREHOOK: Output: default@exim_department_n6 +POSTHOOK: query: drop table exim_department_n6 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_department -POSTHOOK: Output: default@exim_department +POSTHOOK: Input: default@exim_department_n6 +POSTHOOK: Output: default@exim_department_n6 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -55,14 +55,14 @@ POSTHOOK: query: import from 
'ql/test/data/exports/exim_department' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_department -PREHOOK: query: select * from exim_department +POSTHOOK: Output: importer@exim_department_n6 +PREHOOK: query: select * from exim_department_n6 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_department +PREHOOK: Input: importer@exim_department_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from exim_department +POSTHOOK: query: select * from exim_department_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_department +POSTHOOK: Input: importer@exim_department_n6 #### A masked pattern was here #### 1 2 @@ -70,14 +70,14 @@ POSTHOOK: Input: importer@exim_department 4 5 6 -PREHOOK: query: drop table exim_department +PREHOOK: query: drop table exim_department_n6 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_department -PREHOOK: Output: importer@exim_department -POSTHOOK: query: drop table exim_department +PREHOOK: Input: importer@exim_department_n6 +PREHOOK: Output: importer@exim_department_n6 +POSTHOOK: query: drop table exim_department_n6 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_department -POSTHOOK: Output: importer@exim_department +POSTHOOK: Input: importer@exim_department_n6 +POSTHOOK: Output: importer@exim_department_n6 PREHOOK: query: drop database importer PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/exim_hidden_files.q.out b/ql/src/test/results/clientpositive/exim_hidden_files.q.out index 9d92cc7ff2..659b36d6de 100644 --- a/ql/src/test/results/clientpositive/exim_hidden_files.q.out +++ b/ql/src/test/results/clientpositive/exim_hidden_files.q.out @@ -1,36 +1,36 @@ -PREHOOK: query: create table exim_employee ( emp_id int) partitioned by (emp_country string) +PREHOOK: query: create table exim_employee_n6 ( emp_id int) partitioned by (emp_country string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@exim_employee -POSTHOOK: query: create table exim_employee ( emp_id int) partitioned by (emp_country string) +PREHOOK: Output: default@exim_employee_n6 +POSTHOOK: query: create table exim_employee_n6 ( emp_id int) partitioned by (emp_country string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@exim_employee -PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_employee partition (emp_country="in") +POSTHOOK: Output: default@exim_employee_n6 +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_employee_n6 partition (emp_country="in") PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@exim_employee -POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_employee partition (emp_country="in") +PREHOOK: Output: default@exim_employee_n6 +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table exim_employee_n6 partition (emp_country="in") POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@exim_employee -POSTHOOK: Output: default@exim_employee@emp_country=in -PREHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +POSTHOOK: Output: default@exim_employee_n6 +POSTHOOK: Output: default@exim_employee_n6@emp_country=in +PREHOOK: query: export table exim_employee_n6 to 'ql/test/data/exports/exim_employee' PREHOOK: type: EXPORT 
-PREHOOK: Input: default@exim_employee@emp_country=in +PREHOOK: Input: default@exim_employee_n6@emp_country=in #### A masked pattern was here #### -POSTHOOK: query: export table exim_employee to 'ql/test/data/exports/exim_employee' +POSTHOOK: query: export table exim_employee_n6 to 'ql/test/data/exports/exim_employee' POSTHOOK: type: EXPORT -POSTHOOK: Input: default@exim_employee@emp_country=in +POSTHOOK: Input: default@exim_employee_n6@emp_country=in #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n6 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@exim_employee -PREHOOK: Output: default@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: default@exim_employee_n6 +PREHOOK: Output: default@exim_employee_n6 +POSTHOOK: query: drop table exim_employee_n6 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@exim_employee -POSTHOOK: Output: default@exim_employee +POSTHOOK: Input: default@exim_employee_n6 +POSTHOOK: Output: default@exim_employee_n6 PREHOOK: query: create database importer PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:importer @@ -51,14 +51,14 @@ POSTHOOK: query: import from 'ql/test/data/exports/exim_employee' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: database:importer -POSTHOOK: Output: importer@exim_employee -POSTHOOK: Output: importer@exim_employee@emp_country=in -PREHOOK: query: describe formatted exim_employee +POSTHOOK: Output: importer@exim_employee_n6 +POSTHOOK: Output: importer@exim_employee_n6@emp_country=in +PREHOOK: query: describe formatted exim_employee_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: importer@exim_employee -POSTHOOK: query: describe formatted exim_employee +PREHOOK: Input: importer@exim_employee_n6 +POSTHOOK: query: describe formatted exim_employee_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n6 # col_name data_type comment emp_id int @@ -91,15 +91,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from exim_employee +PREHOOK: query: select * from exim_employee_n6 PREHOOK: type: QUERY -PREHOOK: Input: importer@exim_employee -PREHOOK: Input: importer@exim_employee@emp_country=in +PREHOOK: Input: importer@exim_employee_n6 +PREHOOK: Input: importer@exim_employee_n6@emp_country=in #### A masked pattern was here #### -POSTHOOK: query: select * from exim_employee +POSTHOOK: query: select * from exim_employee_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Input: importer@exim_employee@emp_country=in +POSTHOOK: Input: importer@exim_employee_n6 +POSTHOOK: Input: importer@exim_employee_n6@emp_country=in #### A masked pattern was here #### 1 in 2 in @@ -108,14 +108,14 @@ POSTHOOK: Input: importer@exim_employee@emp_country=in 5 in 6 in #### A masked pattern was here #### -PREHOOK: query: drop table exim_employee +PREHOOK: query: drop table exim_employee_n6 PREHOOK: type: DROPTABLE -PREHOOK: Input: importer@exim_employee -PREHOOK: Output: importer@exim_employee -POSTHOOK: query: drop table exim_employee +PREHOOK: Input: importer@exim_employee_n6 +PREHOOK: Output: importer@exim_employee_n6 +POSTHOOK: query: drop table exim_employee_n6 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: importer@exim_employee -POSTHOOK: Output: importer@exim_employee +POSTHOOK: Input: importer@exim_employee_n6 +POSTHOOK: Output: importer@exim_employee_n6 PREHOOK: query: drop database importer 
PREHOOK: type: DROPDATABASE PREHOOK: Input: database:importer diff --git a/ql/src/test/results/clientpositive/explain_dependency.q.out b/ql/src/test/results/clientpositive/explain_dependency.q.out index 070da56b67..4d6b2cc338 100644 --- a/ql/src/test/results/clientpositive/explain_dependency.q.out +++ b/ql/src/test/results/clientpositive/explain_dependency.q.out @@ -1,15 +1,15 @@ -PREHOOK: query: CREATE VIEW V1 AS SELECT key, value from src +PREHOOK: query: CREATE VIEW V1_n2 AS SELECT key, value from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@V1 -POSTHOOK: query: CREATE VIEW V1 AS SELECT key, value from src +PREHOOK: Output: default@V1_n2 +POSTHOOK: query: CREATE VIEW V1_n2 AS SELECT key, value from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@V1 -POSTHOOK: Lineage: V1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: V1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@V1_n2 +POSTHOOK: Lineage: V1_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: V1_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart @@ -45,21 +45,21 @@ POSTHOOK: Lineage: V3.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:st POSTHOOK: Lineage: V3.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: CREATE VIEW V4 AS SELECT src1.key, src2.value as value1, src3.value as value2 - FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key + FROM V1_n2 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Input: default@srcpart -PREHOOK: Input: default@v1 +PREHOOK: Input: default@v1_n2 PREHOOK: Input: default@v2 PREHOOK: Output: database:default PREHOOK: Output: default@V4 POSTHOOK: query: CREATE VIEW V4 AS SELECT src1.key, src2.value as value1, src3.value as value2 - FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key + FROM V1_n2 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v1_n2 POSTHOOK: Input: default@v2 POSTHOOK: Output: database:default POSTHOOK: Output: default@V4 @@ -98,11 +98,11 @@ POSTHOOK: query: EXPLAIN DEPENDENCY SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]} -PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V1 +PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V1_n2 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V1 +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V1_n2 POSTHOOK: type: QUERY 
-{"input_tables":[{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v1]"}],"input_partitions":[]} +{"input_tables":[{"tablename":"default@v1_n2","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v1_n2]"}],"input_partitions":[]} PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V2 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V2 @@ -117,7 +117,7 @@ PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V4 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V4 POSTHOOK: type: QUERY -{"input_tables":[{"tablename":"default@v4","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v4, default@v1]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]} +{"input_tables":[{"tablename":"default@v4","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v1_n2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v4, default@v1_n2]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]} PREHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out index 4024f200af..244c31ae02 100644 --- a/ql/src/test/results/clientpositive/explain_logical.q.out +++ b/ql/src/test/results/clientpositive/explain_logical.q.out @@ -1,71 +1,71 @@ -PREHOOK: query: CREATE VIEW V1 AS SELECT key, value from src +PREHOOK: query: CREATE VIEW V1_n3 AS SELECT key, value from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@V1 -POSTHOOK: query: CREATE VIEW V1 AS SELECT key, value from src +PREHOOK: Output: default@V1_n3 +POSTHOOK: query: CREATE VIEW V1_n3 AS SELECT key, value from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@V1 -POSTHOOK: Lineage: V1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: V1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL +POSTHOOK: Output: default@V1_n3 +POSTHOOK: Lineage: V1_n3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: V1_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE VIEW V2_n0 
AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:default -PREHOOK: Output: default@V2 -POSTHOOK: query: CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL +PREHOOK: Output: default@V2_n0 +POSTHOOK: query: CREATE VIEW V2_n0 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: database:default -POSTHOOK: Output: default@V2 -POSTHOOK: Lineage: V2.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: V2.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: V2.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE VIEW V3 AS - SELECT src1.key, src2.value FROM V2 src1 +POSTHOOK: Output: default@V2_n0 +POSTHOOK: Lineage: V2_n0.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: V2_n0.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: V2_n0.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE VIEW V3_n0 AS + SELECT src1.key, src2.value FROM V2_n0 src1 JOIN src src2 ON src1.key = src2.key WHERE src1.ds IS NOT NULL PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Input: default@srcpart -PREHOOK: Input: default@v2 +PREHOOK: Input: default@v2_n0 PREHOOK: Output: database:default -PREHOOK: Output: default@V3 -POSTHOOK: query: CREATE VIEW V3 AS - SELECT src1.key, src2.value FROM V2 src1 +PREHOOK: Output: default@V3_n0 +POSTHOOK: query: CREATE VIEW V3_n0 AS + SELECT src1.key, src2.value FROM V2_n0 src1 JOIN src src2 ON src1.key = src2.key WHERE src1.ds IS NOT NULL POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@v2 +POSTHOOK: Input: default@v2_n0 POSTHOOK: Output: database:default -POSTHOOK: Output: default@V3 -POSTHOOK: Lineage: V3.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: V3.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE VIEW V4 AS +POSTHOOK: Output: default@V3_n0 +POSTHOOK: Lineage: V3_n0.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: V3_n0.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE VIEW V4_n0 AS SELECT src1.key, src2.value as value1, src3.value as value2 - FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key + FROM V1_n3 src1 JOIN V2_n0 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Input: default@srcpart -PREHOOK: Input: default@v1 -PREHOOK: Input: default@v2 +PREHOOK: Input: default@v1_n3 +PREHOOK: Input: default@v2_n0 PREHOOK: Output: database:default -PREHOOK: Output: default@V4 -POSTHOOK: query: CREATE VIEW V4 AS +PREHOOK: Output: default@V4_n0 +POSTHOOK: query: CREATE VIEW V4_n0 AS SELECT src1.key, src2.value as value1, src3.value as value2 - FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key + FROM V1_n3 src1 JOIN V2_n0 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key POSTHOOK: type: CREATEVIEW POSTHOOK: Input: 
default@src POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@v1 -POSTHOOK: Input: default@v2 +POSTHOOK: Input: default@v1_n3 +POSTHOOK: Input: default@v2_n0 POSTHOOK: Output: database:default -POSTHOOK: Output: default@V4 -POSTHOOK: Lineage: V4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: V4.value1 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: V4.value2 SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@V4_n0 +POSTHOOK: Lineage: V4_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: V4_n0.value1 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: V4_n0.value2 SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: EXPLAIN LOGICAL SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key PREHOOK: type: QUERY @@ -255,9 +255,9 @@ $hdt$_1:s2 outputColumnNames: _col0, _col2 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE -PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V1 +PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V1_n3 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V1 +POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V1_n3 POSTHOOK: type: QUERY LOGICAL PLAN: src @@ -272,9 +272,9 @@ src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE ListSink (LIST_SINK_3) -PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V2 +PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V2_n0 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V2 +POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V2_n0 POSTHOOK: type: QUERY LOGICAL PLAN: srcpart @@ -289,9 +289,9 @@ srcpart Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE ListSink (LIST_SINK_5) -PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V3 +PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V3_n0 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V3 +POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V3_n0 POSTHOOK: type: QUERY LOGICAL PLAN: $hdt$_0:srcpart @@ -359,9 +359,9 @@ $hdt$_1:src2 outputColumnNames: _col0, _col2 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE -PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V4 +PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V4_n0 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V4 +POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V4_n0 POSTHOOK: type: QUERY LOGICAL PLAN: $hdt$_0:srcpart @@ -462,23 +462,23 @@ $hdt$_2:src3 outputColumnNames: _col1, _col2, _col4 Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE -PREHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' +PREHOOK: query: CREATE VIEW V5_n0 as SELECT * FROM srcpart where ds = '10' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:default -PREHOOK: Output: default@V5 -POSTHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' +PREHOOK: Output: default@V5_n0 +POSTHOOK: query: CREATE VIEW V5_n0 as SELECT * FROM srcpart where ds = '10' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: database:default -POSTHOOK: Output: default@V5 -POSTHOOK: Lineage: V5.ds SIMPLE [] -POSTHOOK: Lineage: V5.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, 
comment:null), ] -POSTHOOK: Lineage: V5.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: V5.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V5 +POSTHOOK: Output: default@V5_n0 +POSTHOOK: Lineage: V5_n0.ds SIMPLE [] +POSTHOOK: Lineage: V5_n0.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: V5_n0.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: V5_n0.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V5_n0 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V5 +POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V5_n0 POSTHOOK: type: QUERY LOGICAL PLAN: srcpart diff --git a/ql/src/test/results/clientpositive/explain_outputs.q.out b/ql/src/test/results/clientpositive/explain_outputs.q.out index 35248d24f0..22ad737416 100644 --- a/ql/src/test/results/clientpositive/explain_outputs.q.out +++ b/ql/src/test/results/clientpositive/explain_outputs.q.out @@ -1,42 +1,42 @@ -PREHOOK: query: create table t1 (id int) +PREHOOK: query: create table t1_n8 (id int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (id int) +PREHOOK: Output: default@t1_n8 +POSTHOOK: query: create table t1_n8 (id int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2 (id int) +POSTHOOK: Output: default@t1_n8 +PREHOOK: query: create table t2_n4 (id int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2 (id int) +PREHOOK: Output: default@t2_n4 +POSTHOOK: query: create table t2_n4 (id int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: insert into t1 values (1),(10) +POSTHOOK: Output: default@t2_n4 +PREHOOK: query: insert into t1_n8 values (1),(10) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t1 -POSTHOOK: query: insert into t1 values (1),(10) +PREHOOK: Output: default@t1_n8 +POSTHOOK: query: insert into t1_n8 values (1),(10) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.id SCRIPT [] -PREHOOK: query: insert into t2 values (1),(2),(3),(4),(5) +POSTHOOK: Output: default@t1_n8 +POSTHOOK: Lineage: t1_n8.id SCRIPT [] +PREHOOK: query: insert into t2_n4 values (1),(2),(3),(4),(5) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t2 -POSTHOOK: query: insert into t2 values (1),(2),(3),(4),(5) +PREHOOK: Output: default@t2_n4 +POSTHOOK: query: insert into t2_n4 values (1),(2),(3),(4),(5) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.id SCRIPT [] +POSTHOOK: Output: default@t2_n4 +POSTHOOK: Lineage: t2_n4.id SCRIPT [] PREHOOK: query: explain -select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) PREHOOK: type: QUERY POSTHOOK: query: explain -select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ 
-48,7 +48,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n8 Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: id is not null (type: boolean) @@ -63,7 +63,7 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE TableScan - alias: t2 + alias: t2_n4 Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: id is not null (type: boolean) @@ -127,21 +127,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +PREHOOK: query: select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n8 +PREHOOK: Input: default@t2_n4 #### A masked pattern was here #### -POSTHOOK: query: select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +POSTHOOK: query: select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n8 +POSTHOOK: Input: default@t2_n4 #### A masked pattern was here #### PREHOOK: query: explain analyze -select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) PREHOOK: type: QUERY POSTHOOK: query: explain analyze -select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -153,7 +153,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n8 Statistics: Num rows: 2/2 Data size: 3 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: id is not null (type: boolean) @@ -168,7 +168,7 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2/2 Data size: 3 Basic stats: COMPLETE Column stats: NONE TableScan - alias: t2 + alias: t2_n4 Statistics: Num rows: 5/5 Data size: 5 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: id is not null (type: boolean) @@ -233,22 +233,22 @@ STAGE PLANS: ListSink PREHOOK: query: explain reoptimization -select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n8 +PREHOOK: Input: default@t2_n4 #### A masked pattern was here #### POSTHOOK: query: explain reoptimization -select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n8 +POSTHOOK: Input: default@t2_n4 #### A masked pattern was here #### PREHOOK: query: explain reoptimization -select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) PREHOOK: type: QUERY POSTHOOK: query: explain reoptimization -select sum(t1.id) from t1 join t2 on (t1.id=t2.id) +select sum(t1_n8.id) from t1_n8 join t2_n4 on (t1_n8.id=t2_n4.id) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -260,7 +260,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n8 Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE Filter 
Operator predicate: id is not null (type: boolean) @@ -275,7 +275,7 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE TableScan - alias: t2 + alias: t2_n4 Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: id is not null (type: boolean) diff --git a/ql/src/test/results/clientpositive/explain_rearrange.q.out b/ql/src/test/results/clientpositive/explain_rearrange.q.out index 7d4d0b8b9c..be1b8ed0c6 100644 --- a/ql/src/test/results/clientpositive/explain_rearrange.q.out +++ b/ql/src/test/results/clientpositive/explain_rearrange.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1_n9(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tbl1 -POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: Output: default@tbl1_n9 +POSTHOOK: query: CREATE TABLE tbl1_n9(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tbl1 -PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: Output: default@tbl1_n9 +PREHOOK: query: CREATE TABLE tbl2_n8(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tbl2 -POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: Output: default@tbl2_n8 +POSTHOOK: query: CREATE TABLE tbl2_n8(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tbl2 +POSTHOOK: Output: default@tbl2_n8 PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -37,14 +37,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -291,14 +291,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select 
a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -309,14 +309,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -563,14 +563,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -581,14 +581,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -835,14 +835,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key @@ -853,14 +853,14 @@ select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq1 group by key ) src1 join ( select key, count(*) as cnt1 from ( - select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key ) subq2 group by key ) src2 on src1.key = src2.key diff --git 
a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out index c8d994e741..f1cd05cb9b 100644 --- a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out +++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out @@ -1,63 +1,63 @@ -PREHOOK: query: create table date_dim (d_date date) partitioned by (d_date_sk bigint) stored as orc +PREHOOK: query: create table date_dim_n1 (d_date date) partitioned by (d_date_sk bigint) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@date_dim -POSTHOOK: query: create table date_dim (d_date date) partitioned by (d_date_sk bigint) stored as orc +PREHOOK: Output: default@date_dim_n1 +POSTHOOK: query: create table date_dim_n1 (d_date date) partitioned by (d_date_sk bigint) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@date_dim -PREHOOK: query: insert into date_dim partition(d_date_sk=2416945) values('1905-04-09') +POSTHOOK: Output: default@date_dim_n1 +PREHOOK: query: insert into date_dim_n1 partition(d_date_sk=2416945) values('1905-04-09') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@date_dim@d_date_sk=2416945 -POSTHOOK: query: insert into date_dim partition(d_date_sk=2416945) values('1905-04-09') +PREHOOK: Output: default@date_dim_n1@d_date_sk=2416945 +POSTHOOK: query: insert into date_dim_n1 partition(d_date_sk=2416945) values('1905-04-09') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@date_dim@d_date_sk=2416945 -POSTHOOK: Lineage: date_dim PARTITION(d_date_sk=2416945).d_date SCRIPT [] -PREHOOK: query: insert into date_dim partition(d_date_sk=2416946) values('1905-04-10') +POSTHOOK: Output: default@date_dim_n1@d_date_sk=2416945 +POSTHOOK: Lineage: date_dim_n1 PARTITION(d_date_sk=2416945).d_date SCRIPT [] +PREHOOK: query: insert into date_dim_n1 partition(d_date_sk=2416946) values('1905-04-10') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@date_dim@d_date_sk=2416946 -POSTHOOK: query: insert into date_dim partition(d_date_sk=2416946) values('1905-04-10') +PREHOOK: Output: default@date_dim_n1@d_date_sk=2416946 +POSTHOOK: query: insert into date_dim_n1 partition(d_date_sk=2416946) values('1905-04-10') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@date_dim@d_date_sk=2416946 -POSTHOOK: Lineage: date_dim PARTITION(d_date_sk=2416946).d_date SCRIPT [] -PREHOOK: query: insert into date_dim partition(d_date_sk=2416947) values('1905-04-11') +POSTHOOK: Output: default@date_dim_n1@d_date_sk=2416946 +POSTHOOK: Lineage: date_dim_n1 PARTITION(d_date_sk=2416946).d_date SCRIPT [] +PREHOOK: query: insert into date_dim_n1 partition(d_date_sk=2416947) values('1905-04-11') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@date_dim@d_date_sk=2416947 -POSTHOOK: query: insert into date_dim partition(d_date_sk=2416947) values('1905-04-11') +PREHOOK: Output: default@date_dim_n1@d_date_sk=2416947 +POSTHOOK: query: insert into date_dim_n1 partition(d_date_sk=2416947) values('1905-04-11') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@date_dim@d_date_sk=2416947 -POSTHOOK: Lineage: date_dim PARTITION(d_date_sk=2416947).d_date SCRIPT [] -PREHOOK: query: analyze table date_dim 
partition(d_date_sk) compute statistics for columns +POSTHOOK: Output: default@date_dim_n1@d_date_sk=2416947 +POSTHOOK: Lineage: date_dim_n1 PARTITION(d_date_sk=2416947).d_date SCRIPT [] +PREHOOK: query: analyze table date_dim_n1 partition(d_date_sk) compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@date_dim -PREHOOK: Input: default@date_dim@d_date_sk=2416945 -PREHOOK: Input: default@date_dim@d_date_sk=2416946 -PREHOOK: Input: default@date_dim@d_date_sk=2416947 -PREHOOK: Output: default@date_dim -PREHOOK: Output: default@date_dim@d_date_sk=2416945 -PREHOOK: Output: default@date_dim@d_date_sk=2416946 -PREHOOK: Output: default@date_dim@d_date_sk=2416947 +PREHOOK: Input: default@date_dim_n1 +PREHOOK: Input: default@date_dim_n1@d_date_sk=2416945 +PREHOOK: Input: default@date_dim_n1@d_date_sk=2416946 +PREHOOK: Input: default@date_dim_n1@d_date_sk=2416947 +PREHOOK: Output: default@date_dim_n1 +PREHOOK: Output: default@date_dim_n1@d_date_sk=2416945 +PREHOOK: Output: default@date_dim_n1@d_date_sk=2416946 +PREHOOK: Output: default@date_dim_n1@d_date_sk=2416947 #### A masked pattern was here #### -POSTHOOK: query: analyze table date_dim partition(d_date_sk) compute statistics for columns +POSTHOOK: query: analyze table date_dim_n1 partition(d_date_sk) compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@date_dim -POSTHOOK: Input: default@date_dim@d_date_sk=2416945 -POSTHOOK: Input: default@date_dim@d_date_sk=2416946 -POSTHOOK: Input: default@date_dim@d_date_sk=2416947 -POSTHOOK: Output: default@date_dim -POSTHOOK: Output: default@date_dim@d_date_sk=2416945 -POSTHOOK: Output: default@date_dim@d_date_sk=2416946 -POSTHOOK: Output: default@date_dim@d_date_sk=2416947 +POSTHOOK: Input: default@date_dim_n1 +POSTHOOK: Input: default@date_dim_n1@d_date_sk=2416945 +POSTHOOK: Input: default@date_dim_n1@d_date_sk=2416946 +POSTHOOK: Input: default@date_dim_n1@d_date_sk=2416947 +POSTHOOK: Output: default@date_dim_n1 +POSTHOOK: Output: default@date_dim_n1@d_date_sk=2416945 +POSTHOOK: Output: default@date_dim_n1@d_date_sk=2416946 +POSTHOOK: Output: default@date_dim_n1@d_date_sk=2416947 #### A masked pattern was here #### -PREHOOK: query: explain select count(*) from date_dim where d_date > date "1900-01-02" and d_date_sk= 2416945 +PREHOOK: query: explain select count(*) from date_dim_n1 where d_date > date "1900-01-02" and d_date_sk= 2416945 PREHOOK: type: QUERY -POSTHOOK: query: explain select count(*) from date_dim where d_date > date "1900-01-02" and d_date_sk= 2416945 +POSTHOOK: query: explain select count(*) from date_dim_n1 where d_date > date "1900-01-02" and d_date_sk= 2416945 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -68,7 +68,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: date_dim + alias: date_dim_n1 Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (d_date > DATE'1900-01-02') (type: boolean) @@ -105,18 +105,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: insert into date_dim partition(d_date_sk=2416948) values('1905-04-12') +PREHOOK: query: insert into date_dim_n1 partition(d_date_sk=2416948) values('1905-04-12') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@date_dim@d_date_sk=2416948 -POSTHOOK: query: insert into date_dim partition(d_date_sk=2416948) values('1905-04-12') +PREHOOK: Output: default@date_dim_n1@d_date_sk=2416948 +POSTHOOK: query: insert into date_dim_n1 
partition(d_date_sk=2416948) values('1905-04-12') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@date_dim@d_date_sk=2416948 -POSTHOOK: Lineage: date_dim PARTITION(d_date_sk=2416948).d_date SCRIPT [] -PREHOOK: query: explain extended select d_date from date_dim +POSTHOOK: Output: default@date_dim_n1@d_date_sk=2416948 +POSTHOOK: Lineage: date_dim_n1 PARTITION(d_date_sk=2416948).d_date SCRIPT [] +PREHOOK: query: explain extended select d_date from date_dim_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain extended select d_date from date_dim +POSTHOOK: query: explain extended select d_date from date_dim_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -139,13 +139,13 @@ STAGE PLANS: columns.comments columns.types date #### A masked pattern was here #### - name default.date_dim + name default.date_dim_n1 numFiles 1 numRows 1 partition_columns d_date_sk partition_columns.types bigint rawDataSize 56 - serialization.ddl struct date_dim { date d_date} + serialization.ddl struct date_dim_n1 { date d_date} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 193 @@ -162,16 +162,16 @@ STAGE PLANS: columns.comments columns.types date #### A masked pattern was here #### - name default.date_dim + name default.date_dim_n1 partition_columns d_date_sk partition_columns.types bigint - serialization.ddl struct date_dim { date d_date} + serialization.ddl struct date_dim_n1 { date d_date} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.date_dim - name: default.date_dim + name: default.date_dim_n1 + name: default.date_dim_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -185,13 +185,13 @@ STAGE PLANS: columns.comments columns.types date #### A masked pattern was here #### - name default.date_dim + name default.date_dim_n1 numFiles 1 numRows 1 partition_columns d_date_sk partition_columns.types bigint rawDataSize 56 - serialization.ddl struct date_dim { date d_date} + serialization.ddl struct date_dim_n1 { date d_date} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 193 @@ -208,16 +208,16 @@ STAGE PLANS: columns.comments columns.types date #### A masked pattern was here #### - name default.date_dim + name default.date_dim_n1 partition_columns d_date_sk partition_columns.types bigint - serialization.ddl struct date_dim { date d_date} + serialization.ddl struct date_dim_n1 { date d_date} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.date_dim - name: default.date_dim + name: default.date_dim_n1 + name: default.date_dim_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -231,13 +231,13 @@ STAGE PLANS: columns.comments columns.types date #### A masked pattern was here #### - name default.date_dim + name default.date_dim_n1 numFiles 1 numRows 1 partition_columns d_date_sk partition_columns.types bigint rawDataSize 56 - serialization.ddl struct date_dim { date d_date} + serialization.ddl struct date_dim_n1 { date d_date} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 193 
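-- A minimal sketch of the statement sequence this golden file records, using the
-- renamed table date_dim_n1; every statement below is taken from the PREHOOK lines in
-- the hunks above. Three partitions are loaded and analyzed, a fourth is then added
-- without column stats, and the final plan reports Column stats: PARTIAL, i.e. stats
-- for the unanalyzed partition are extrapolated from the analyzed ones.
create table date_dim_n1 (d_date date) partitioned by (d_date_sk bigint) stored as orc;
insert into date_dim_n1 partition(d_date_sk=2416945) values('1905-04-09');
insert into date_dim_n1 partition(d_date_sk=2416946) values('1905-04-10');
insert into date_dim_n1 partition(d_date_sk=2416947) values('1905-04-11');
-- gather column stats for all three existing partitions
analyze table date_dim_n1 partition(d_date_sk) compute statistics for columns;
-- add a fourth partition with no column stats
insert into date_dim_n1 partition(d_date_sk=2416948) values('1905-04-12');
-- the TableScan now shows: Num rows: 4, Basic stats: COMPLETE, Column stats: PARTIAL
explain extended select d_date from date_dim_n1;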
@@ -254,16 +254,16 @@ STAGE PLANS: columns.comments columns.types date #### A masked pattern was here #### - name default.date_dim + name default.date_dim_n1 partition_columns d_date_sk partition_columns.types bigint - serialization.ddl struct date_dim { date d_date} + serialization.ddl struct date_dim_n1 { date d_date} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.date_dim - name: default.date_dim + name: default.date_dim_n1 + name: default.date_dim_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -277,13 +277,13 @@ STAGE PLANS: columns.comments columns.types date #### A masked pattern was here #### - name default.date_dim + name default.date_dim_n1 numFiles 1 numRows 1 partition_columns d_date_sk partition_columns.types bigint rawDataSize 56 - serialization.ddl struct date_dim { date d_date} + serialization.ddl struct date_dim_n1 { date d_date} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 193 @@ -300,19 +300,19 @@ STAGE PLANS: columns.comments columns.types date #### A masked pattern was here #### - name default.date_dim + name default.date_dim_n1 partition_columns d_date_sk partition_columns.types bigint - serialization.ddl struct date_dim { date d_date} + serialization.ddl struct date_dim_n1 { date d_date} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.date_dim - name: default.date_dim + name: default.date_dim_n1 + name: default.date_dim_n1 Processor Tree: TableScan - alias: date_dim + alias: date_dim_n1 Statistics: Num rows: 4 Data size: 224 Basic stats: COMPLETE Column stats: PARTIAL GatherStats: false Select Operator diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out index d6e1a7ed24..1a13b21e4e 100644 --- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out +++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table if not exists ext_loc ( +PREHOOK: query: create table if not exists ext_loc_n1 ( state string, locid int, zip int, @@ -6,8 +6,8 @@ PREHOOK: query: create table if not exists ext_loc ( ) row format delimited fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@ext_loc -POSTHOOK: query: create table if not exists ext_loc ( +PREHOOK: Output: default@ext_loc_n1 +POSTHOOK: query: create table if not exists ext_loc_n1 ( state string, locid int, zip int, @@ -15,88 +15,88 @@ POSTHOOK: query: create table if not exists ext_loc ( ) row format delimited fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@ext_loc -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc +POSTHOOK: Output: default@ext_loc_n1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@ext_loc -POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc +PREHOOK: Output: default@ext_loc_n1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@ext_loc -PREHOOK: query: create table if not exists loc_orc_1d ( +POSTHOOK: Output: default@ext_loc_n1 +PREHOOK: query: create table if not exists loc_orc_1d_n1 ( state string, locid int, zip int ) partitioned by(year string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@loc_orc_1d -POSTHOOK: query: create table if not exists loc_orc_1d ( +PREHOOK: Output: default@loc_orc_1d_n1 +POSTHOOK: query: create table if not exists loc_orc_1d_n1 ( state string, locid int, zip int ) partitioned by(year string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@loc_orc_1d -PREHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc +POSTHOOK: Output: default@loc_orc_1d_n1 +PREHOOK: query: insert overwrite table loc_orc_1d_n1 partition(year) select * from ext_loc_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@ext_loc -PREHOOK: Output: default@loc_orc_1d -POSTHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc +PREHOOK: Input: default@ext_loc_n1 +PREHOOK: Output: default@loc_orc_1d_n1 +POSTHOOK: query: insert overwrite table loc_orc_1d_n1 partition(year) select * from ext_loc_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@ext_loc -POSTHOOK: Output: default@loc_orc_1d@year=2000 -POSTHOOK: Output: default@loc_orc_1d@year=2001 -POSTHOOK: Output: default@loc_orc_1d@year=2002 -POSTHOOK: Output: default@loc_orc_1d@year=2003 -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] -PREHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid +POSTHOOK: Input: 
default@ext_loc_n1 +POSTHOOK: Output: default@loc_orc_1d_n1@year=2000 +POSTHOOK: Output: default@loc_orc_1d_n1@year=2001 +POSTHOOK: Output: default@loc_orc_1d_n1@year=2002 +POSTHOOK: Output: default@loc_orc_1d_n1@year=2003 +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2000).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2000).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2000).zip SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:zip, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2001).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2001).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2001).zip SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:zip, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2002).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2002).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2002).zip SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:zip, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2003).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2003).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n1 PARTITION(year=2003).zip SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:zip, type:int, comment:null), ] +PREHOOK: query: analyze table loc_orc_1d_n1 partition(year='2001') compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_1d -PREHOOK: Input: default@loc_orc_1d@year=2001 -PREHOOK: Output: default@loc_orc_1d -PREHOOK: Output: default@loc_orc_1d@year=2001 +PREHOOK: Input: default@loc_orc_1d_n1 +PREHOOK: Input: default@loc_orc_1d_n1@year=2001 +PREHOOK: Output: default@loc_orc_1d_n1 +PREHOOK: Output: default@loc_orc_1d_n1@year=2001 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_1d_n1 partition(year='2001') compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_1d -POSTHOOK: Input: default@loc_orc_1d@year=2001 -POSTHOOK: Output: default@loc_orc_1d -POSTHOOK: Output: default@loc_orc_1d@year=2001 +POSTHOOK: Input: default@loc_orc_1d_n1 +POSTHOOK: Input: default@loc_orc_1d_n1@year=2001 +POSTHOOK: Output: default@loc_orc_1d_n1 +POSTHOOK: Output: default@loc_orc_1d_n1@year=2001 #### A masked pattern was here #### -PREHOOK: query: analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid +PREHOOK: query: analyze table loc_orc_1d_n1 partition(year='2002') compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_1d -PREHOOK: Input: default@loc_orc_1d@year=2002 -PREHOOK: Output: default@loc_orc_1d -PREHOOK: Output: default@loc_orc_1d@year=2002 +PREHOOK: Input: default@loc_orc_1d_n1 
+PREHOOK: Input: default@loc_orc_1d_n1@year=2002 +PREHOOK: Output: default@loc_orc_1d_n1 +PREHOOK: Output: default@loc_orc_1d_n1@year=2002 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_1d_n1 partition(year='2002') compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_1d -POSTHOOK: Input: default@loc_orc_1d@year=2002 -POSTHOOK: Output: default@loc_orc_1d -POSTHOOK: Output: default@loc_orc_1d@year=2002 +POSTHOOK: Input: default@loc_orc_1d_n1 +POSTHOOK: Input: default@loc_orc_1d_n1@year=2002 +POSTHOOK: Output: default@loc_orc_1d_n1 +POSTHOOK: Output: default@loc_orc_1d_n1@year=2002 #### A masked pattern was here #### -PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') state +PREHOOK: query: describe formatted loc_orc_1d_n1 PARTITION(year='2001') state PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_1d -POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') state +PREHOOK: Input: default@loc_orc_1d_n1 +POSTHOOK: query: describe formatted loc_orc_1d_n1 PARTITION(year='2001') state POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d_n1 col_name state data_type string min @@ -109,12 +109,12 @@ num_trues num_falses bitVector HL comment from deserializer -PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') state +PREHOOK: query: describe formatted loc_orc_1d_n1 PARTITION(year='2002') state PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_1d -POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') state +PREHOOK: Input: default@loc_orc_1d_n1 +POSTHOOK: query: describe formatted loc_orc_1d_n1 PARTITION(year='2002') state POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d_n1 col_name state data_type string min @@ -127,9 +127,9 @@ num_trues num_falses bitVector HL comment from deserializer -PREHOOK: query: explain extended select state from loc_orc_1d +PREHOOK: query: explain extended select state from loc_orc_1d_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain extended select state from loc_orc_1d +POSTHOOK: query: explain extended select state from loc_orc_1d_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -152,13 +152,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 2 partition_columns year partition_columns.types string rawDataSize 184 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 369 @@ -175,16 +175,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: 
default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -198,13 +198,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 4 partition_columns year partition_columns.types string rawDataSize 368 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 390 @@ -221,16 +221,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -244,13 +244,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 6 partition_columns year partition_columns.types string rawDataSize 570 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 410 @@ -267,16 +267,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -290,13 +290,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 8 partition_columns year partition_columns.types string rawDataSize 744 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 419 @@ -313,19 +313,19 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - 
serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Processor Tree: TableScan - alias: loc_orc_1d + alias: loc_orc_1d_n1 Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Select Operator @@ -334,9 +334,9 @@ STAGE PLANS: Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: explain extended select state,locid from loc_orc_1d +PREHOOK: query: explain extended select state,locid from loc_orc_1d_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain extended select state,locid from loc_orc_1d +POSTHOOK: query: explain extended select state,locid from loc_orc_1d_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -359,13 +359,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 2 partition_columns year partition_columns.types string rawDataSize 184 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 369 @@ -382,16 +382,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -405,13 +405,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 4 partition_columns year partition_columns.types string rawDataSize 368 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 390 @@ -428,16 +428,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: 
default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -451,13 +451,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 6 partition_columns year partition_columns.types string rawDataSize 570 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 410 @@ -474,16 +474,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -497,13 +497,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 8 partition_columns year partition_columns.types string rawDataSize 744 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 419 @@ -520,19 +520,19 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Processor Tree: TableScan - alias: loc_orc_1d + alias: loc_orc_1d_n1 Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Select Operator @@ -541,37 +541,37 @@ STAGE PLANS: Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state +PREHOOK: query: analyze table loc_orc_1d_n1 partition(year='2000') compute statistics for columns state PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_1d -PREHOOK: Input: default@loc_orc_1d@year=2000 -PREHOOK: Output: default@loc_orc_1d -PREHOOK: Output: default@loc_orc_1d@year=2000 +PREHOOK: Input: default@loc_orc_1d_n1 +PREHOOK: Input: default@loc_orc_1d_n1@year=2000 +PREHOOK: Output: default@loc_orc_1d_n1 +PREHOOK: Output: 
default@loc_orc_1d_n1@year=2000 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state +POSTHOOK: query: analyze table loc_orc_1d_n1 partition(year='2000') compute statistics for columns state POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_1d -POSTHOOK: Input: default@loc_orc_1d@year=2000 -POSTHOOK: Output: default@loc_orc_1d -POSTHOOK: Output: default@loc_orc_1d@year=2000 +POSTHOOK: Input: default@loc_orc_1d_n1 +POSTHOOK: Input: default@loc_orc_1d_n1@year=2000 +POSTHOOK: Output: default@loc_orc_1d_n1 +POSTHOOK: Output: default@loc_orc_1d_n1@year=2000 #### A masked pattern was here #### -PREHOOK: query: analyze table loc_orc_1d partition(year='2003') compute statistics for columns state +PREHOOK: query: analyze table loc_orc_1d_n1 partition(year='2003') compute statistics for columns state PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_1d -PREHOOK: Input: default@loc_orc_1d@year=2003 -PREHOOK: Output: default@loc_orc_1d -PREHOOK: Output: default@loc_orc_1d@year=2003 +PREHOOK: Input: default@loc_orc_1d_n1 +PREHOOK: Input: default@loc_orc_1d_n1@year=2003 +PREHOOK: Output: default@loc_orc_1d_n1 +PREHOOK: Output: default@loc_orc_1d_n1@year=2003 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_1d partition(year='2003') compute statistics for columns state +POSTHOOK: query: analyze table loc_orc_1d_n1 partition(year='2003') compute statistics for columns state POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_1d -POSTHOOK: Input: default@loc_orc_1d@year=2003 -POSTHOOK: Output: default@loc_orc_1d -POSTHOOK: Output: default@loc_orc_1d@year=2003 +POSTHOOK: Input: default@loc_orc_1d_n1 +POSTHOOK: Input: default@loc_orc_1d_n1@year=2003 +POSTHOOK: Output: default@loc_orc_1d_n1 +POSTHOOK: Output: default@loc_orc_1d_n1@year=2003 #### A masked pattern was here #### -PREHOOK: query: explain extended select state from loc_orc_1d +PREHOOK: query: explain extended select state from loc_orc_1d_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain extended select state from loc_orc_1d +POSTHOOK: query: explain extended select state from loc_orc_1d_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -594,13 +594,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 2 partition_columns year partition_columns.types string rawDataSize 184 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 369 @@ -617,16 +617,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -640,13 +640,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 4 partition_columns year partition_columns.types string rawDataSize 368 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 390 @@ -663,16 +663,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -686,13 +686,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 6 partition_columns year partition_columns.types string rawDataSize 570 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 410 @@ -709,16 +709,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -732,13 +732,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 8 partition_columns year partition_columns.types string rawDataSize 744 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 419 @@ -755,19 +755,19 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} 
serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Processor Tree: TableScan - alias: loc_orc_1d + alias: loc_orc_1d_n1 Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Select Operator @@ -776,9 +776,9 @@ STAGE PLANS: Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: explain extended select state,locid from loc_orc_1d +PREHOOK: query: explain extended select state,locid from loc_orc_1d_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain extended select state,locid from loc_orc_1d +POSTHOOK: query: explain extended select state,locid from loc_orc_1d_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -801,13 +801,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 2 partition_columns year partition_columns.types string rawDataSize 184 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 369 @@ -824,16 +824,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -847,13 +847,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 4 partition_columns year partition_columns.types string rawDataSize 368 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 390 @@ -870,16 +870,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -893,13 +893,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 6 partition_columns year partition_columns.types string rawDataSize 570 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 410 @@ -916,16 +916,16 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -939,13 +939,13 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 numFiles 1 numRows 8 partition_columns year partition_columns.types string rawDataSize 744 - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 419 @@ -962,19 +962,19 @@ STAGE PLANS: columns.comments columns.types string:int:int #### A masked pattern was here #### - name default.loc_orc_1d + name default.loc_orc_1d_n1 partition_columns year partition_columns.types string - serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.ddl struct loc_orc_1d_n1 { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_1d - name: default.loc_orc_1d + name: default.loc_orc_1d_n1 + name: default.loc_orc_1d_n1 Processor Tree: TableScan - alias: loc_orc_1d + alias: loc_orc_1d_n1 Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Select Operator @@ -983,91 +983,91 @@ STAGE PLANS: Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: create table if not exists loc_orc_2d ( +PREHOOK: query: create table if not exists loc_orc_2d_n1 ( state string, locid int ) partitioned by(zip int, year string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@loc_orc_2d -POSTHOOK: query: create table if not exists loc_orc_2d ( +PREHOOK: Output: default@loc_orc_2d_n1 +POSTHOOK: query: create table if not exists loc_orc_2d_n1 ( state string, locid int ) partitioned by(zip int, year string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@loc_orc_2d -PREHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * 
from ext_loc +POSTHOOK: Output: default@loc_orc_2d_n1 +PREHOOK: query: insert overwrite table loc_orc_2d_n1 partition(zip, year) select * from ext_loc_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@ext_loc -PREHOOK: Output: default@loc_orc_2d -POSTHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc +PREHOOK: Input: default@ext_loc_n1 +PREHOOK: Output: default@loc_orc_2d_n1 +POSTHOOK: query: insert overwrite table loc_orc_2d_n1 partition(zip, year) select * from ext_loc_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@ext_loc -POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2001 -POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2002 -POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2003 -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2000 -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001 -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2002 -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2003 -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2000 -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2001 -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2002 -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2003 -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: 
Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid +POSTHOOK: Input: default@ext_loc_n1 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=43201/year=2001 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=43201/year=2002 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=43201/year=2003 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94086/year=2000 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94086/year=2001 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94086/year=2002 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94086/year=2003 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94087/year=2000 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94087/year=2001 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94087/year=2002 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94087/year=2003 +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=43201,year=2001).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=43201,year=2001).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=43201,year=2002).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=43201,year=2002).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=43201,year=2003).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=43201,year=2003).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94086,year=2000).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94086,year=2000).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94086,year=2001).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94086,year=2001).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94086,year=2002).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94086,year=2002).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: 
Lineage: loc_orc_2d_n1 PARTITION(zip=94086,year=2003).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94086,year=2003).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94087,year=2000).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94087,year=2000).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94087,year=2001).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94087,year=2001).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94087,year=2002).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94087,year=2002).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94087,year=2003).locid SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n1 PARTITION(zip=94087,year=2003).state SIMPLE [(ext_loc_n1)ext_loc_n1.FieldSchema(name:state, type:string, comment:null), ] +PREHOOK: query: analyze table loc_orc_2d_n1 partition(zip=94086, year='2001') compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_2d -PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2001 -PREHOOK: Output: default@loc_orc_2d -PREHOOK: Output: default@loc_orc_2d@zip=94086/year=2001 +PREHOOK: Input: default@loc_orc_2d_n1 +PREHOOK: Input: default@loc_orc_2d_n1@zip=94086/year=2001 +PREHOOK: Output: default@loc_orc_2d_n1 +PREHOOK: Output: default@loc_orc_2d_n1@zip=94086/year=2001 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_2d_n1 partition(zip=94086, year='2001') compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_2d -POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2001 -POSTHOOK: Output: default@loc_orc_2d -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001 +POSTHOOK: Input: default@loc_orc_2d_n1 +POSTHOOK: Input: default@loc_orc_2d_n1@zip=94086/year=2001 +POSTHOOK: Output: default@loc_orc_2d_n1 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94086/year=2001 #### A masked pattern was here #### -PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid +PREHOOK: query: analyze table loc_orc_2d_n1 partition(zip=94087, year='2002') compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_2d -PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2002 -PREHOOK: Output: default@loc_orc_2d -PREHOOK: Output: default@loc_orc_2d@zip=94087/year=2002 +PREHOOK: Input: default@loc_orc_2d_n1 +PREHOOK: Input: default@loc_orc_2d_n1@zip=94087/year=2002 +PREHOOK: Output: default@loc_orc_2d_n1 +PREHOOK: Output: default@loc_orc_2d_n1@zip=94087/year=2002 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_2d 
partition(zip=94087, year='2002') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_2d_n1 partition(zip=94087, year='2002') compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_2d -POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2002 -POSTHOOK: Output: default@loc_orc_2d -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2002 +POSTHOOK: Input: default@loc_orc_2d_n1 +POSTHOOK: Input: default@loc_orc_2d_n1@zip=94087/year=2002 +POSTHOOK: Output: default@loc_orc_2d_n1 +POSTHOOK: Output: default@loc_orc_2d_n1@zip=94087/year=2002 #### A masked pattern was here #### -PREHOOK: query: explain extended select state from loc_orc_2d +PREHOOK: query: explain extended select state from loc_orc_2d_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain extended select state from loc_orc_2d +POSTHOOK: query: explain extended select state from loc_orc_2d_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1091,13 +1091,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 90 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 286 @@ -1114,16 +1114,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1138,13 +1138,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 2 partition_columns zip/year partition_columns.types int:string rawDataSize 182 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 295 @@ -1161,16 +1161,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1185,13 +1185,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 3 partition_columns zip/year partition_columns.types int:string rawDataSize 267 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 302 @@ -1208,16 +1208,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1232,13 +1232,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 89 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 281 @@ -1255,16 +1255,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1279,13 +1279,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 2 partition_columns zip/year partition_columns.types int:string rawDataSize 176 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 276 @@ -1302,16 +1302,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1326,13 +1326,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 91 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 288 @@ -1349,16 +1349,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1373,13 +1373,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 2 partition_columns zip/year partition_columns.types int:string rawDataSize 180 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 297 @@ -1396,16 +1396,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1420,13 +1420,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 88 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 265 @@ -1443,16 +1443,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - 
name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1467,13 +1467,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 88 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 265 @@ -1490,16 +1490,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1514,13 +1514,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 3 partition_columns zip/year partition_columns.types int:string rawDataSize 273 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 298 @@ -1537,16 +1537,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1561,13 +1561,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 3 partition_columns zip/year partition_columns.types int:string rawDataSize 264 - serialization.ddl struct loc_orc_2d { string state, 
i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 295 @@ -1584,19 +1584,19 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Processor Tree: TableScan - alias: loc_orc_2d + alias: loc_orc_2d_n1 Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Select Operator @@ -1605,9 +1605,9 @@ STAGE PLANS: Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: explain extended select state,locid from loc_orc_2d +PREHOOK: query: explain extended select state,locid from loc_orc_2d_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain extended select state,locid from loc_orc_2d +POSTHOOK: query: explain extended select state,locid from loc_orc_2d_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1631,13 +1631,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 90 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 286 @@ -1654,16 +1654,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1678,13 +1678,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 2 partition_columns zip/year partition_columns.types int:string rawDataSize 182 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 295 @@ -1701,16 +1701,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct 
loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1725,13 +1725,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 3 partition_columns zip/year partition_columns.types int:string rawDataSize 267 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 302 @@ -1748,16 +1748,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1772,13 +1772,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 89 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 281 @@ -1795,16 +1795,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1819,13 +1819,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 2 partition_columns zip/year partition_columns.types int:string rawDataSize 176 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 276 @@ -1842,16 +1842,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1866,13 +1866,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 91 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 288 @@ -1889,16 +1889,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1913,13 +1913,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 2 partition_columns zip/year partition_columns.types int:string rawDataSize 180 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 297 @@ -1936,16 +1936,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1960,13 +1960,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name 
default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 88 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 265 @@ -1983,16 +1983,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -2007,13 +2007,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 1 partition_columns zip/year partition_columns.types int:string rawDataSize 88 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 265 @@ -2030,16 +2030,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -2054,13 +2054,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 3 partition_columns zip/year partition_columns.types int:string rawDataSize 273 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 298 @@ -2077,16 +2077,16 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: 
default.loc_orc_2d_n1 Partition input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -2101,13 +2101,13 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 numFiles 1 numRows 3 partition_columns zip/year partition_columns.types int:string rawDataSize 264 - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde totalSize 295 @@ -2124,19 +2124,19 @@ STAGE PLANS: columns.comments columns.types string:int #### A masked pattern was here #### - name default.loc_orc_2d + name default.loc_orc_2d_n1 partition_columns zip/year partition_columns.types int:string - serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.ddl struct loc_orc_2d_n1 { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.loc_orc_2d - name: default.loc_orc_2d + name: default.loc_orc_2d_n1 + name: default.loc_orc_2d_n1 Processor Tree: TableScan - alias: loc_orc_2d + alias: loc_orc_2d_n1 Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Select Operator diff --git a/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out b/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out index 44c1030827..2d0a370dfc 100644 --- a/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out +++ b/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out @@ -1,10 +1,10 @@ PREHOOK: query: EXPLAIN -CREATE TABLE dest1(key INT, value STRING) STORED AS +CREATE TABLE dest1_n72(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat' PREHOOK: type: CREATETABLE POSTHOOK: query: EXPLAIN -CREATE TABLE dest1(key INT, value STRING) STORED AS +CREATE TABLE dest1_n72(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat' POSTHOOK: type: CREATETABLE @@ -18,49 +18,49 @@ STAGE PLANS: columns: key int, value string input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.mapred.SequenceFileOutputFormat - name: default.dest1 + name: default.dest1_n72 -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS +PREHOOK: query: CREATE TABLE dest1_n72(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS +PREHOOK: Output: default@dest1_n72 +POSTHOOK: query: CREATE TABLE dest1_n72(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: DESCRIBE EXTENDED dest1 +POSTHOOK: Output: default@dest1_n72 +PREHOOK: query: DESCRIBE EXTENDED dest1_n72 PREHOOK: 
type: DESCTABLE -PREHOOK: Input: default@dest1 -POSTHOOK: query: DESCRIBE EXTENDED dest1 +PREHOOK: Input: default@dest1_n72 +POSTHOOK: query: DESCRIBE EXTENDED dest1_n72 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n72 key int value string #### A masked pattern was here #### PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10 +INSERT OVERWRITE TABLE dest1_n72 SELECT src.key, src.value WHERE src.key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n72 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10 +INSERT OVERWRITE TABLE dest1_n72 SELECT src.key, src.value WHERE src.key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n72 +POSTHOOK: Lineage: dest1_n72.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n72.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n72.* FROM dest1_n72 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n72 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n72.* FROM dest1_n72 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n72 #### A masked pattern was here #### 0 val_0 4 val_4 diff --git a/ql/src/test/results/clientpositive/fileformat_text.q.out b/ql/src/test/results/clientpositive/fileformat_text.q.out index a6f8889294..8717e8efee 100644 --- a/ql/src/test/results/clientpositive/fileformat_text.q.out +++ b/ql/src/test/results/clientpositive/fileformat_text.q.out @@ -1,10 +1,10 @@ PREHOOK: query: EXPLAIN -CREATE TABLE dest1(key INT, value STRING) STORED AS +CREATE TABLE dest1_n89(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat' PREHOOK: type: CREATETABLE POSTHOOK: query: EXPLAIN -CREATE TABLE dest1(key INT, value STRING) STORED AS +CREATE TABLE dest1_n89(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat' POSTHOOK: type: CREATETABLE @@ -18,49 +18,49 @@ STAGE PLANS: columns: key int, value string input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - name: default.dest1 + name: default.dest1_n89 -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS +PREHOOK: query: CREATE TABLE dest1_n89(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS +PREHOOK: Output: default@dest1_n89 +POSTHOOK: query: CREATE TABLE dest1_n89(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' OUTPUTFORMAT 
'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: DESCRIBE EXTENDED dest1 +POSTHOOK: Output: default@dest1_n89 +PREHOOK: query: DESCRIBE EXTENDED dest1_n89 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@dest1 -POSTHOOK: query: DESCRIBE EXTENDED dest1 +PREHOOK: Input: default@dest1_n89 +POSTHOOK: query: DESCRIBE EXTENDED dest1_n89 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n89 key int value string #### A masked pattern was here #### PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10 +INSERT OVERWRITE TABLE dest1_n89 SELECT src.key, src.value WHERE src.key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n89 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10 +INSERT OVERWRITE TABLE dest1_n89 SELECT src.key, src.value WHERE src.key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n89 +POSTHOOK: Lineage: dest1_n89.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n89.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n89.* FROM dest1_n89 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n89 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n89.* FROM dest1_n89 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n89 #### A masked pattern was here #### 0 val_0 4 val_4 diff --git a/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out b/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out index 26ce0ed549..81ad1a66ff 100644 --- a/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out +++ b/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out @@ -1,268 +1,268 @@ -PREHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +PREHOOK: query: create table T1_n53(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +PREHOOK: Output: default@T1_n53 +POSTHOOK: query: create table T1_n53(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string) +POSTHOOK: Output: default@T1_n53 +PREHOOK: 
query: create table T2_n34(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string)
+PREHOOK: Output: default@T2_n34
+POSTHOOK: query: create table T2_n34(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: create table T3 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string)
+POSTHOOK: Output: default@T2_n34
+PREHOOK: query: create table T3_n12 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: create table T3 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string)
+PREHOOK: Output: default@T3_n12
+POSTHOOK: query: create table T3_n12 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string)
+POSTHOOK: Output: default@T3_n12
+PREHOOK: query: create table T4_n2 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@T4
-POSTHOOK: query: create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string)
+PREHOOK: Output: default@T4_n2
+POSTHOOK: query: create table T4_n2 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T4
-PREHOOK: query: insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows)
+POSTHOOK: Output: default@T4_n2
+PREHOOK: query: insert overwrite table T1_n53 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@t1@ds=2010-04-17
-POSTHOOK: query: insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows)
+PREHOOK: Output: default@t1_n53@ds=2010-04-17
+POSTHOOK: query: insert overwrite table T1_n53 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1@ds=2010-04-17
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 EXPRESSION []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 EXPRESSION []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 EXPRESSION []
-PREHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
+POSTHOOK: Output: default@t1_n53@ds=2010-04-17
+POSTHOOK: Lineage: t1_n53 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t1_n53 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t1_n53 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t1_n53 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t1_n53 PARTITION(ds=2010-04-17).c5 EXPRESSION []
+POSTHOOK: Lineage: t1_n53 PARTITION(ds=2010-04-17).c6 EXPRESSION []
+POSTHOOK: Lineage: t1_n53 PARTITION(ds=2010-04-17).c7 EXPRESSION []
+PREHOOK: query: insert overwrite table T2_n34 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@ds=2010-04-17
-POSTHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
+PREHOOK: Output: default@t2_n34@ds=2010-04-17
+POSTHOOK: query: insert overwrite table T2_n34 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@ds=2010-04-17
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 EXPRESSION []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 EXPRESSION []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 EXPRESSION []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 EXPRESSION []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE []
-PREHOOK: query: insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows)
+POSTHOOK: Output: default@t2_n34@ds=2010-04-17
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c0 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c10 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c11 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c12 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c13 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c14 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c15 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c16 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c17 EXPRESSION []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c18 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c19 EXPRESSION []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c20 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c21 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c22 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c23 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c24 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c25 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c3 EXPRESSION []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c5 EXPRESSION []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c6 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c7 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c8 SIMPLE []
+POSTHOOK: Lineage: t2_n34 PARTITION(ds=2010-04-17).c9 SIMPLE []
+PREHOOK: query: insert overwrite table T3_n12 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@t3@ds=2010-04-17
-POSTHOOK: query: insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows)
+PREHOOK: Output: default@t3_n12@ds=2010-04-17
+POSTHOOK: query: insert overwrite table T3_n12 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t3@ds=2010-04-17
-POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION []
-POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION []
-POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE []
-PREHOOK: query: insert overwrite table T4 partition(ds='2010-04-17')
+POSTHOOK: Output: default@t3_n12@ds=2010-04-17
+POSTHOOK: Lineage: t3_n12 PARTITION(ds=2010-04-17).c0 EXPRESSION []
+POSTHOOK: Lineage: t3_n12 PARTITION(ds=2010-04-17).c1 EXPRESSION []
+POSTHOOK: Lineage: t3_n12 PARTITION(ds=2010-04-17).c2 SIMPLE []
+PREHOOK: query: insert overwrite table T4_n2 partition(ds='2010-04-17')
select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S',
'6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@t4@ds=2010-04-17
-POSTHOOK: query: insert overwrite table T4 partition(ds='2010-04-17')
+PREHOOK: Output: default@t4_n2@ds=2010-04-17
+POSTHOOK: query: insert overwrite table T4_n2 partition(ds='2010-04-17')
select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S',
'6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t4@ds=2010-04-17
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c0 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c16 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c46 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE []
-PREHOOK: query: select * from T2
+POSTHOOK: Output: default@t4_n2@ds=2010-04-17
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c0 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c10 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c11 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c12 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c13 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c14 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c15 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c16 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c17 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c18 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c19 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c20 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c21 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c22 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c23 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c24 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c25 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c26 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c27 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c28 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c29 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c30 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c31 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c32 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c33 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c34 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c35 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c36 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c37 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c38 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c39 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c40 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c41 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c42 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c43 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c44 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c45 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c46 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c47 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c48 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c49 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c5 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c50 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c51 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c52 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c53 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c54 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c55 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c56 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c57 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c58 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c59 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c6 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c60 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c61 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c62 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c63 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c64 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c65 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c66 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c67 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c68 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c69 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c7 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c70 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c71 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c72 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c73 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c74 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c75 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c76 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c77 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c78 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c79 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c8 SIMPLE []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c80 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c81 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c82 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c83 EXPRESSION []
+POSTHOOK: Lineage: t4_n2 PARTITION(ds=2010-04-17).c9 SIMPLE []
+PREHOOK: query: select * from T2_n34
PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@ds=2010-04-17
+PREHOOK: Input: default@t2_n34
+PREHOOK: Input: default@t2_n34@ds=2010-04-17
#### A masked pattern was here ####
-POSTHOOK: query: select * from T2
+POSTHOOK: query: select * from T2_n34
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@ds=2010-04-17
+POSTHOOK: Input: default@t2_n34
+POSTHOOK: Input: default@t2_n34@ds=2010-04-17
#### A masked pattern was here ####
5	name	NULL	2	kavin	NULL	9	c	8	0	0	7	1	2	0	3	2	NULL	1	NULL	3	2	0	0	5	10	2010-04-17
-PREHOOK: query: select * from T1
+PREHOOK: query: select * from T1_n53
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=2010-04-17
+PREHOOK: Input: default@t1_n53
+PREHOOK: Input: default@t1_n53@ds=2010-04-17
#### A masked pattern was here ####
-POSTHOOK: query: select * from T1
+POSTHOOK: query: select * from T1_n53
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=2010-04-17
+POSTHOOK: Input: default@t1_n53
+POSTHOOK: Input: default@t1_n53@ds=2010-04-17
#### A masked pattern was here ####
5	1	1	1	0	0	4	2010-04-17
-PREHOOK: query: select * from T3
+PREHOOK: query: select * from T3_n12
PREHOOK: type: QUERY
-PREHOOK: Input: default@t3
-PREHOOK: Input: default@t3@ds=2010-04-17
+PREHOOK: Input: default@t3_n12
+PREHOOK: Input: default@t3_n12@ds=2010-04-17
#### A masked pattern was here ####
-POSTHOOK: query: select * from T3
+POSTHOOK: query: select * from T3_n12
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t3
-POSTHOOK: Input: default@t3@ds=2010-04-17
+POSTHOOK: Input: default@t3_n12
+POSTHOOK: Input: default@t3_n12@ds=2010-04-17
#### A masked pattern was here ####
4	5	0	2010-04-17
-PREHOOK: query: select * from T4
+PREHOOK: query: select * from T4_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@t4
-PREHOOK: Input: default@t4@ds=2010-04-17
+PREHOOK: Input: default@t4_n2
+PREHOOK: Input: default@t4_n2@ds=2010-04-17
#### A masked pattern was here ####
-POSTHOOK: query: select * from T4
+POSTHOOK: query: select * from T4_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t4
-POSTHOOK: Input: default@t4@ds=2010-04-17
+POSTHOOK: Input: default@t4_n2
+POSTHOOK: Input: default@t4_n2@ds=2010-04-17
#### A masked pattern was here ####
4	1	1	8	4	5	1	0	9	U	2	2	0	2	1	1	J	C	A	U	2	s	2	NULL	NULL	NULL	NULL	NULL	NULL	1	j	S	6	NULL	1	2	J	g	1	e	2	1	2	U	P	p	3	0	0	0	1	1	1	0	0	0	6	2	j	NULL	NULL	NULL	NULL	NULL	NULL	5	NULL	NULL	j	2	2	1	2	2	1	1	1	1	1	1	1	1	32	NULL	2010-04-17
WARNING: Comparing a bigint and a string may result in a loss of precision.
PREHOOK: query: SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0
-FROM T1 a JOIN T2 b
+FROM T1_n53 a JOIN T2_n34 b
ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17')
- JOIN T3 c
+ JOIN T3_n12 c
ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17')
- JOIN T4 d
+ JOIN T4_n2 d
ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17')
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=2010-04-17
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@ds=2010-04-17
-PREHOOK: Input: default@t3
-PREHOOK: Input: default@t3@ds=2010-04-17
-PREHOOK: Input: default@t4
-PREHOOK: Input: default@t4@ds=2010-04-17
+PREHOOK: Input: default@t1_n53
+PREHOOK: Input: default@t1_n53@ds=2010-04-17
+PREHOOK: Input: default@t2_n34
+PREHOOK: Input: default@t2_n34@ds=2010-04-17
+PREHOOK: Input: default@t3_n12
+PREHOOK: Input: default@t3_n12@ds=2010-04-17
+PREHOOK: Input: default@t4_n2
+PREHOOK: Input: default@t4_n2@ds=2010-04-17
#### A masked pattern was here ####
POSTHOOK: query: SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0
-FROM T1 a JOIN T2 b
+FROM T1_n53 a JOIN T2_n34 b
ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17')
- JOIN T3 c
+ JOIN T3_n12 c
ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17')
- JOIN T4 d
+ JOIN T4_n2 d
ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17')
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=2010-04-17
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@ds=2010-04-17
-POSTHOOK: Input: default@t3
-POSTHOOK: Input: default@t3@ds=2010-04-17
-POSTHOOK: Input: default@t4
-POSTHOOK: Input: default@t4@ds=2010-04-17
+POSTHOOK: Input: default@t1_n53
+POSTHOOK: Input: default@t1_n53@ds=2010-04-17
+POSTHOOK: Input: default@t2_n34
+POSTHOOK: Input: default@t2_n34@ds=2010-04-17
+POSTHOOK: Input: default@t3_n12
+POSTHOOK: Input: default@t3_n12@ds=2010-04-17
+POSTHOOK: Input: default@t4_n2
+POSTHOOK: Input: default@t4_n2@ds=2010-04-17
#### A masked pattern was here ####
5	5	4
diff --git a/ql/src/test/results/clientpositive/floor_time.q.out b/ql/src/test/results/clientpositive/floor_time.q.out
index eaf80c0978..e4ae6664cd 100644
--- a/ql/src/test/results/clientpositive/floor_time.q.out
+++ b/ql/src/test/results/clientpositive/floor_time.q.out
@@ -1,46 +1,46 @@
-PREHOOK: query: drop table extract_udf
+PREHOOK: query: drop table extract_udf_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table extract_udf
+POSTHOOK: query: drop table extract_udf_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table extract_udf (t timestamp)
+PREHOOK: query: create table extract_udf_n0 (t timestamp)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@extract_udf
-POSTHOOK: query: create table extract_udf (t timestamp)
+PREHOOK: Output: default@extract_udf_n0
+POSTHOOK: query: create table extract_udf_n0 (t timestamp)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@extract_udf
+POSTHOOK: Output: default@extract_udf_n0
PREHOOK: query: from (select * from src tablesample (1 rows)) s
- insert overwrite table extract_udf
+ insert overwrite table extract_udf_n0
select '2011-05-06 07:08:09.1234567'
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@extract_udf
+PREHOOK: Output: default@extract_udf_n0
POSTHOOK: query: from (select * from src tablesample (1 rows)) s
- insert overwrite table extract_udf
+ insert overwrite table extract_udf_n0
select '2011-05-06 07:08:09.1234567'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@extract_udf
-POSTHOOK: Lineage: extract_udf.t EXPRESSION []
+POSTHOOK: Output: default@extract_udf_n0
+POSTHOOK: Lineage: extract_udf_n0.t EXPRESSION []
PREHOOK: query: select t
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select t
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-05-06 07:08:09.1234567
PREHOOK: query: explain
select floor_day(t)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
POSTHOOK: query: explain
select floor_day(t)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -51,7 +51,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: extract_udf
+            alias: extract_udf_n0
            Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: floor_day(t) (type: timestamp)
@@ -73,23 +73,23 @@ STAGE PLANS:
        ListSink

PREHOOK: query: select floor_day(t)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor_day(t)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-05-06 00:00:00
PREHOOK: query: explain
select floor(t to day)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
POSTHOOK: query: explain
select floor(t to day)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -100,7 +100,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: extract_udf
+            alias: extract_udf_n0
            Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: floor_day(t) (type: timestamp)
@@ -122,90 +122,90 @@ STAGE PLANS:
        ListSink

PREHOOK: query: select floor(t to day)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor(t to day)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-05-06 00:00:00
PREHOOK: query: select floor(t to second)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor(t to second)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-05-06 07:08:09
PREHOOK: query: select floor(t to minute)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor(t to minute)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-05-06 07:08:00
PREHOOK: query: select floor(t to hour)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor(t to hour)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-05-06 07:00:00
PREHOOK: query: select floor(t to week)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor(t to week)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-05-02 00:00:00
PREHOOK: query: select floor(t to month)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor(t to month)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-05-01 00:00:00
PREHOOK: query: select floor(t to quarter)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor(t to quarter)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-04-01 00:00:00
PREHOOK: query: select floor(t to year)
-from extract_udf
+from extract_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@extract_udf
+PREHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select floor(t to year)
-from extract_udf
+from extract_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@extract_udf
+POSTHOOK: Input: default@extract_udf_n0
#### A masked pattern was here ####
2011-01-01 00:00:00
diff --git a/ql/src/test/results/clientpositive/fm-sketch.q.out b/ql/src/test/results/clientpositive/fm-sketch.q.out
index 2ef59c41d6..f6f248d96c 100644
--- a/ql/src/test/results/clientpositive/fm-sketch.q.out
+++ b/ql/src/test/results/clientpositive/fm-sketch.q.out
@@ -1,23 +1,23 @@
-PREHOOK: query: create table n(key int)
+PREHOOK: query: create table n_n0(key int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@n
-POSTHOOK: query: create table n(key int)
+PREHOOK: Output: default@n_n0
+POSTHOOK: query: create table n_n0(key int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@n
-PREHOOK: query: insert overwrite table n select null from src
+POSTHOOK: Output: default@n_n0
+PREHOOK: query: insert overwrite table n_n0 select null from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@n
-POSTHOOK: query: insert overwrite table n select null from src
+PREHOOK: Output: default@n_n0
+POSTHOOK: query: insert overwrite table n_n0 select null from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@n
-POSTHOOK: Lineage: n.key EXPRESSION []
-PREHOOK: query: explain analyze table n compute statistics for columns
+POSTHOOK: Output: default@n_n0
+POSTHOOK: Lineage: n_n0.key EXPRESSION []
+PREHOOK: query: explain analyze table n_n0 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-POSTHOOK: query: explain analyze table n compute statistics for columns
+POSTHOOK: query: explain analyze table n_n0 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
STAGE DEPENDENCIES:
  Stage-0 is a root stage
@@ -28,7 +28,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: n
+            alias: n_n0
            Statistics: Num rows: 500 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: int)
@@ -63,24 +63,24 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key
          Column Types: int
-          Table: default.n
+          Table: default.n_n0

-PREHOOK: query: analyze table n compute statistics for columns
+PREHOOK: query: analyze table n_n0 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@n
-PREHOOK: Output: default@n
+PREHOOK: Input: default@n_n0
+PREHOOK: Output: default@n_n0
#### A masked pattern was here ####
-POSTHOOK: query: analyze table n compute statistics for columns
+POSTHOOK: query: analyze table n_n0 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@n
-POSTHOOK: Output: default@n
+POSTHOOK: Input: default@n_n0
+POSTHOOK: Output: default@n_n0
#### A masked pattern was here ####
-PREHOOK: query: desc formatted n key
+PREHOOK: query: desc formatted n_n0 key
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@n
-POSTHOOK: query: desc formatted n key
+PREHOOK: Input: default@n_n0
+POSTHOOK: query: desc formatted n_n0 key
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@n
+POSTHOOK: Input: default@n_n0
col_name	key
data_type	int
min	0
@@ -94,26 +94,26 @@ num_falses
bitVector	FM
comment	from deserializer
COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
-PREHOOK: query: create table i(key int)
+PREHOOK: query: create table i_n1(key int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@i
-POSTHOOK: query: create table i(key int)
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: create table i_n1(key int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@i
-PREHOOK: query: insert overwrite table i select key from src
+POSTHOOK: Output: default@i_n1
+PREHOOK: query: insert overwrite table i_n1 select key from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@i
-POSTHOOK: query: insert overwrite table i select key from src
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: insert overwrite table i_n1 select key from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@i
-POSTHOOK: Lineage: i.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: explain analyze table i compute statistics for columns
+POSTHOOK: Output: default@i_n1
+POSTHOOK: Lineage: i_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: explain analyze table i_n1 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-POSTHOOK: query: explain analyze table i compute statistics for columns
+POSTHOOK: query: explain analyze table i_n1 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
STAGE DEPENDENCIES:
  Stage-0 is a root stage
@@ -124,7 +124,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: i
+            alias: i_n1
            Statistics: Num rows: 500 Data size: 1406 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: int)
@@ -159,24 +159,24 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key
          Column Types: int
-          Table: default.i
+          Table: default.i_n1

-PREHOOK: query: analyze table i compute statistics for columns
+PREHOOK: query: analyze table i_n1 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
+PREHOOK: Input: default@i_n1
+PREHOOK: Output: default@i_n1
#### A masked pattern was here ####
-POSTHOOK: query: analyze table i compute statistics for columns
+POSTHOOK: query: analyze table i_n1 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
+POSTHOOK: Input: default@i_n1
+POSTHOOK: Output: default@i_n1
#### A masked pattern was here ####
-PREHOOK: query: desc formatted i key
+PREHOOK: query: desc formatted i_n1 key
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@i
-POSTHOOK: query: desc formatted i key
+PREHOOK: Input: default@i_n1
+POSTHOOK: query: desc formatted i_n1 key
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@i
+POSTHOOK: Input: default@i_n1
col_name	key
data_type	int
min	0
@@ -190,47 +190,47 @@ num_falses
bitVector	FM
comment	from deserializer
COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
-PREHOOK: query: drop table i
+PREHOOK: query: drop table i_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
-POSTHOOK: query: drop table i
+PREHOOK: Input: default@i_n1
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: drop table i_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
-PREHOOK: query: create table i(key double)
+POSTHOOK: Input: default@i_n1
+POSTHOOK: Output: default@i_n1
+PREHOOK: query: create table i_n1(key double)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@i
-POSTHOOK: query: create table i(key double)
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: create table i_n1(key double)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@i
-PREHOOK: query: insert overwrite table i select key from src
+POSTHOOK: Output: default@i_n1
+PREHOOK: query: insert overwrite table i_n1 select key from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@i
-POSTHOOK: query: insert overwrite table i select key from src
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: insert overwrite table i_n1 select key from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@i
-POSTHOOK: Lineage: i.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: analyze table i compute statistics for columns
+POSTHOOK: Output: default@i_n1
+POSTHOOK: Lineage: i_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: analyze table i_n1 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
+PREHOOK: Input: default@i_n1
+PREHOOK: Output: default@i_n1
#### A masked pattern was here ####
-POSTHOOK: query: analyze table i compute statistics for columns
+POSTHOOK: query: analyze table i_n1 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
+POSTHOOK: Input: default@i_n1
+POSTHOOK: Output: default@i_n1
#### A masked pattern was here ####
-PREHOOK: query: desc formatted i key
+PREHOOK: query: desc formatted i_n1 key
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@i
-POSTHOOK: query: desc formatted i key
+PREHOOK: Input: default@i_n1
+POSTHOOK: query: desc formatted i_n1 key
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@i
+POSTHOOK: Input: default@i_n1
col_name	key
data_type	double
min	0.0
@@ -244,47 +244,47 @@ num_falses
bitVector	FM
comment	from deserializer
COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
-PREHOOK: query: drop table i
+PREHOOK: query: drop table i_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
-POSTHOOK: query: drop table i
+PREHOOK: Input: default@i_n1
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: drop table i_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
-PREHOOK: query: create table i(key decimal)
+POSTHOOK: Input: default@i_n1
+POSTHOOK: Output: default@i_n1
+PREHOOK: query: create table i_n1(key decimal)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@i
-POSTHOOK: query: create table i(key decimal)
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: create table i_n1(key decimal)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@i
-PREHOOK: query: insert overwrite table i select key from src
+POSTHOOK: Output: default@i_n1
+PREHOOK: query: insert overwrite table i_n1 select key from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@i
-POSTHOOK: query: insert overwrite table i select key from src
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: insert overwrite table i_n1 select key from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@i
-POSTHOOK: Lineage: i.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: analyze table i compute statistics for columns
+POSTHOOK: Output: default@i_n1
+POSTHOOK: Lineage: i_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: analyze table i_n1 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
+PREHOOK: Input: default@i_n1
+PREHOOK: Output: default@i_n1
#### A masked pattern was here ####
-POSTHOOK: query: analyze table i compute statistics for columns
+POSTHOOK: query: analyze table i_n1 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
+POSTHOOK: Input: default@i_n1
+POSTHOOK: Output: default@i_n1
#### A masked pattern was here ####
-PREHOOK: query: desc formatted i key
+PREHOOK: query: desc formatted i_n1 key
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@i
-POSTHOOK: query: desc formatted i key
+PREHOOK: Input: default@i_n1
+POSTHOOK: query: desc formatted i_n1 key
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@i
+POSTHOOK: Input: default@i_n1
col_name	key
data_type	decimal(10,0)
min	0
@@ -298,83 +298,83 @@ num_falses
bitVector	FM
comment	from deserializer
COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
-PREHOOK: query: drop table i
+PREHOOK: query: drop table i_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
-POSTHOOK: query: drop table i
+PREHOOK: Input: default@i_n1
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: drop table i_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
-PREHOOK: query: create table i(key date)
+POSTHOOK: Input: default@i_n1
+POSTHOOK: Output: default@i_n1
+PREHOOK: query: create table i_n1(key date)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@i
-POSTHOOK: query: create table i(key date)
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: create table i_n1(key date)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@i
-PREHOOK: query: insert into i values ('2012-08-17')
+POSTHOOK: Output: default@i_n1
+PREHOOK: query: insert into i_n1 values ('2012-08-17')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@i
-POSTHOOK: query: insert into i values ('2012-08-17')
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: insert into i_n1 values ('2012-08-17')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@i
-POSTHOOK: Lineage: i.key SCRIPT []
-PREHOOK: query: insert into i values ('2012-08-17')
+POSTHOOK: Output: default@i_n1
+POSTHOOK: Lineage: i_n1.key SCRIPT []
+PREHOOK: query: insert into i_n1 values ('2012-08-17')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@i
-POSTHOOK: query: insert into i values ('2012-08-17')
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: insert into i_n1 values ('2012-08-17')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@i
-POSTHOOK: Lineage: i.key SCRIPT []
-PREHOOK: query: insert into i values ('2013-08-17')
+POSTHOOK: Output: default@i_n1
+POSTHOOK: Lineage: i_n1.key SCRIPT []
+PREHOOK: query: insert into i_n1 values ('2013-08-17')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@i
-POSTHOOK: query: insert into i values ('2013-08-17')
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: insert into i_n1 values ('2013-08-17')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@i
-POSTHOOK: Lineage: i.key SCRIPT []
-PREHOOK: query: insert into i values ('2012-03-17')
+POSTHOOK: Output: default@i_n1
+POSTHOOK: Lineage: i_n1.key SCRIPT []
+PREHOOK: query: insert into i_n1 values ('2012-03-17')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@i
-POSTHOOK: query: insert into i values ('2012-03-17')
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: insert into i_n1 values ('2012-03-17')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@i
-POSTHOOK: Lineage: i.key SCRIPT []
-PREHOOK: query: insert into i values ('2012-05-17')
+POSTHOOK: Output: default@i_n1
+POSTHOOK: Lineage: i_n1.key SCRIPT []
+PREHOOK: query: insert into i_n1 values ('2012-05-17')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@i
-POSTHOOK: query: insert into i values ('2012-05-17')
+PREHOOK: Output: default@i_n1
+POSTHOOK: query: insert into i_n1 values ('2012-05-17')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@i
-POSTHOOK: Lineage: i.key SCRIPT []
-PREHOOK: query: analyze table i compute statistics for columns
+POSTHOOK: Output: default@i_n1
+POSTHOOK: Lineage: i_n1.key SCRIPT []
+PREHOOK: query: analyze table i_n1 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@i
-PREHOOK: Output: default@i
+PREHOOK: Input: default@i_n1
+PREHOOK: Output: default@i_n1
#### A masked pattern was here ####
-POSTHOOK: query: analyze table i compute statistics for columns
+POSTHOOK: query: analyze table i_n1 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@i
-POSTHOOK: Output: default@i
+POSTHOOK: Input: default@i_n1
+POSTHOOK: Output: default@i_n1
#### A masked pattern was here ####
-PREHOOK: query: desc formatted i key
+PREHOOK: query: desc formatted i_n1 key
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@i
-POSTHOOK: query: desc formatted i key
+PREHOOK: Input: default@i_n1
+POSTHOOK: query: desc formatted i_n1 key
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@i
+POSTHOOK: Input: default@i_n1
col_name	key
data_type	date
min	2012-03-17
diff --git a/ql/src/test/results/clientpositive/gen_udf_example_add10.q.out b/ql/src/test/results/clientpositive/gen_udf_example_add10.q.out
index 63f0eb185b..7487c587b8 100644
--- a/ql/src/test/results/clientpositive/gen_udf_example_add10.q.out
+++ b/ql/src/test/results/clientpositive/gen_udf_example_add10.q.out
@@ -4,25 +4,25 @@ PREHOOK: Output: example_add10
POSTHOOK: query: create temporary function example_add10 as 'org.apache.hadoop.hive.contrib.genericudf.example.GenericUDFAdd10'
POSTHOOK: type: CREATEFUNCTION
POSTHOOK: Output: example_add10
-PREHOOK: query: create table t1(x int,y double)
+PREHOOK: query: create table t1_n36(x int,y double)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(x int,y double)
+PREHOOK: Output: default@t1_n36
+POSTHOOK: query: create table t1_n36(x int,y double)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table t1
+POSTHOOK: Output: default@t1_n36
+PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table t1_n36
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table t1
+PREHOOK: Output: default@t1_n36
+POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table t1_n36
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: explain select example_add10(x) as a,example_add10(y) as b from t1 order by a desc,b limit 10
+POSTHOOK: Output: default@t1_n36
+PREHOOK: query: explain select example_add10(x) as a,example_add10(y) as b from t1_n36 order by a desc,b limit 10
PREHOOK: type: QUERY
-POSTHOOK: query: explain select example_add10(x) as a,example_add10(y) as b from t1 order by a desc,b limit 10
+POSTHOOK: query: explain select example_add10(x) as a,example_add10(y) as b from t1_n36 order by a desc,b limit 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -33,7 +33,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n36
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: add10(x) (type: int), add10(y) (type: double)
@@ -67,13 +67,13 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: select example_add10(x) as a,example_add10(y) as b from t1 order by a desc,b limit 10
+PREHOOK: query: select example_add10(x) as a,example_add10(y) as b from t1_n36 order by a desc,b limit 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n36
#### A masked pattern was here ####
-POSTHOOK: query: select example_add10(x) as a,example_add10(y) as b from t1 order by a desc,b limit 10
+POSTHOOK: query: select example_add10(x) as a,example_add10(y) as b from t1_n36 order by a desc,b limit 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n36
#### A masked pattern was here ####
18	28.0
18	38.0
@@ -81,14 +81,14 @@ POSTHOOK: Input: default@t1
13	23.0
12	22.0
11	21.0
-PREHOOK: query: drop table t1
+PREHOOK: query: drop table t1_n36
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: drop table t1
+PREHOOK: Input: default@t1_n36
+PREHOOK: Output: default@t1_n36
+POSTHOOK: query: drop table t1_n36
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
+POSTHOOK: Input: default@t1_n36
+POSTHOOK: Output: default@t1_n36
PREHOOK: query: drop temporary function example_add10
PREHOOK: type: DROPFUNCTION
PREHOOK: Output: example_add10
diff --git a/ql/src/test/results/clientpositive/groupby10.q.out b/ql/src/test/results/clientpositive/groupby10.q.out
index 17dc322d4d..665bf929b5 100644
--- a/ql/src/test/results/clientpositive/groupby10.q.out
+++ b/ql/src/test/results/clientpositive/groupby10.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, val1 INT, val2 INT)
+PREHOOK: query: CREATE TABLE dest1_n0(key INT, val1 INT, val2 INT)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, val1 INT, val2 INT)
+PREHOOK: Output: default@dest1_n0
+POSTHOOK: query: CREATE TABLE dest1_n0(key INT, val1 INT, val2 INT)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n0
PREHOOK: query: CREATE TABLE dest2(key INT, val1 INT, val2 INT)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -32,12 +32,12 @@ POSTHOOK: type: LOAD
POSTHOOK: Output: default@input
PREHOOK: query: EXPLAIN
FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -124,7 +124,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n0
        Select Operator
          expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
          outputColumnNames: key, val1, val2
@@ -144,7 +144,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n0

  Stage: Stage-4
    Stats Work
@@ -152,7 +152,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, val1, val2
          Column Types: int, int, int
-          Table: default.dest1
+          Table: default.dest1_n0

  Stage: Stage-5
    Map Reduce
@@ -330,32 +330,32 @@ STAGE PLANS:
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

PREHOOK: query: FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
PREHOOK: type: QUERY
PREHOOK: Input: default@input
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n0
PREHOOK: Output: default@dest2
POSTHOOK: query: FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@input
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n0
POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dest1.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.val2 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n0.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1_n0.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n0.val2 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
POSTHOOK: Lineage: dest2.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
POSTHOOK: Lineage: dest2.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
POSTHOOK: Lineage: dest2.val2 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: SELECT * from dest1
+PREHOOK: query: SELECT * from dest1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * from dest1
+POSTHOOK: query: SELECT * from dest1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n0
#### A masked pattern was here ####
128	1	1
150	1	1
@@ -407,12 +407,12 @@ POSTHOOK: Input: default@dest2
98	98	98
PREHOOK: query: EXPLAIN
FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -499,7 +499,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n0
        Select Operator
          expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
          outputColumnNames: key, val1, val2
@@ -519,7 +519,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n0

  Stage: Stage-4
    Stats Work
@@ -527,7 +527,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, val1, val2
          Column Types: int, int, int
-          Table: default.dest1
+          Table: default.dest1_n0

  Stage: Stage-5
    Map Reduce
@@ -705,32 +705,32 @@ STAGE PLANS:
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

PREHOOK: query: FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
PREHOOK: type: QUERY
PREHOOK: Input: default@input
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n0
PREHOOK: Output: default@dest2
POSTHOOK: query: FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@input
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n0
POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dest1.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.val2 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n0.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1_n0.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n0.val2 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
POSTHOOK: Lineage: dest2.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
POSTHOOK: Lineage: dest2.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
POSTHOOK: Lineage: dest2.val2 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: SELECT * from dest1
+PREHOOK: query: SELECT * from dest1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * from dest1
+POSTHOOK: query: SELECT * from dest1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n0
#### A masked pattern was here ####
128	1	1
150	1	1
@@ -782,12 +782,12 @@ POSTHOOK: Input: default@dest2
98	98	98
PREHOOK: query: EXPLAIN
FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -836,7 +836,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n0
        Select Operator
          expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
          outputColumnNames: key, val1, val2
@@ -884,7 +884,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n0

  Stage: Stage-3
    Stats Work
@@ -892,7 +892,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, val1, val2
          Column Types: int, int, int
-          Table: default.dest1
+          Table: default.dest1_n0

  Stage: Stage-4
    Map Reduce
@@ -959,32 +959,32 @@ STAGE PLANS:
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

PREHOOK: query: FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
PREHOOK: type: QUERY
PREHOOK: Input: default@input
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n0
PREHOOK: Output: default@dest2
POSTHOOK: query: FROM INPUT
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
+INSERT OVERWRITE TABLE dest1_n0 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@input
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n0
POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dest1.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.val2 EXPRESSION [(input)input.null, ]
+POSTHOOK: Lineage: dest1_n0.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1_n0.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n0.val2 EXPRESSION [(input)input.null, ]
POSTHOOK: Lineage: dest2.key SIMPLE [(input)input.FieldSchema(name:key, type:int, comment:null), ]
POSTHOOK: Lineage: dest2.val1 EXPRESSION [(input)input.FieldSchema(name:value, type:string, comment:null), ]
POSTHOOK: Lineage: dest2.val2 EXPRESSION [(input)input.null, ]
-PREHOOK: query: SELECT * from dest1
+PREHOOK: query: SELECT * from dest1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * from dest1
+POSTHOOK: query: SELECT * from dest1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n0
#### A masked pattern was here ####
128	128	1
150	150	1
diff --git a/ql/src/test/results/clientpositive/groupby11.q.out b/ql/src/test/results/clientpositive/groupby11.q.out
index 2dcf8cf256..2d90998d33 100644
--- a/ql/src/test/results/clientpositive/groupby11.q.out
+++ b/ql/src/test/results/clientpositive/groupby11.q.out
@@ -1,31 +1,31 @@
-PREHOOK: query: CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string)
+PREHOOK: query: CREATE TABLE dest1_n113(key STRING, val1 INT, val2 INT) partitioned by (ds string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string)
+PREHOOK: Output: default@dest1_n113
+POSTHOOK: query: CREATE TABLE dest1_n113(key STRING, val1 INT, val2 INT) partitioned by (ds string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: CREATE TABLE dest2(key STRING, val1 INT, val2 INT) partitioned by (ds string)
+POSTHOOK: Output: default@dest1_n113
+PREHOOK: query: CREATE TABLE dest2_n15(key STRING, val1 INT, val2 INT) partitioned by (ds string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest2
-POSTHOOK: query: CREATE TABLE dest2(key STRING, val1 INT, val2 INT) partitioned by (ds string)
+PREHOOK: Output: default@dest2_n15
+POSTHOOK: query: CREATE TABLE dest2_n15(key STRING, val1 INT, val2 INT) partitioned by (ds string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest2
+POSTHOOK: Output: default@dest2_n15
PREHOOK: query: EXPLAIN
FROM src
-INSERT OVERWRITE TABLE dest1 partition(ds='111')
+INSERT OVERWRITE TABLE dest1_n113 partition(ds='111')
SELECT src.value, count(src.key), count(distinct src.key) GROUP BY src.value
-INSERT OVERWRITE TABLE dest2 partition(ds='111')
+INSERT OVERWRITE TABLE dest2_n15 partition(ds='111')
SELECT substr(src.value, 5), count(src.key), count(distinct src.key) GROUP BY substr(src.value, 5)
PREHOOK: type: QUERY
POSTHOOK:
query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 partition(ds='111') +INSERT OVERWRITE TABLE dest1_n113 partition(ds='111') SELECT src.value, count(src.key), count(distinct src.key) GROUP BY src.value -INSERT OVERWRITE TABLE dest2 partition(ds='111') +INSERT OVERWRITE TABLE dest2_n15 partition(ds='111') SELECT substr(src.value, 5), count(src.key), count(distinct src.key) GROUP BY substr(src.value, 5) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -112,7 +112,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n113 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int) outputColumnNames: key, val1, val2 @@ -134,7 +134,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n113 Stage: Stage-4 Stats Work @@ -142,7 +142,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: string, int, int - Table: default.dest1 + Table: default.dest1_n113 Stage: Stage-5 Map Reduce @@ -205,7 +205,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: string, int, int - Table: default.dest2 + Table: default.dest2_n15 Stage: Stage-7 Map Reduce @@ -260,7 +260,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int) outputColumnNames: key, val1, val2 @@ -282,7 +282,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Stage: Stage-10 Map Reduce @@ -340,38 +340,38 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 partition(ds='111') +INSERT OVERWRITE TABLE dest1_n113 partition(ds='111') SELECT src.value, count(src.key), count(distinct src.key) GROUP BY src.value -INSERT OVERWRITE TABLE dest2 partition(ds='111') +INSERT OVERWRITE TABLE dest2_n15 partition(ds='111') SELECT substr(src.value, 5), count(src.key), count(distinct src.key) GROUP BY substr(src.value, 5) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1@ds=111 -PREHOOK: Output: default@dest2@ds=111 +PREHOOK: Output: default@dest1_n113@ds=111 +PREHOOK: Output: default@dest2_n15@ds=111 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 partition(ds='111') +INSERT OVERWRITE TABLE dest1_n113 partition(ds='111') SELECT src.value, count(src.key), count(distinct src.key) GROUP BY src.value -INSERT OVERWRITE TABLE dest2 partition(ds='111') +INSERT OVERWRITE TABLE dest2_n15 partition(ds='111') SELECT substr(src.value, 5), count(src.key), count(distinct src.key) GROUP BY substr(src.value, 5) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1@ds=111 -POSTHOOK: Output: default@dest2@ds=111 -POSTHOOK: Lineage: dest1 PARTITION(ds=111).key SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] -POSTHOOK: Lineage: dest1 PARTITION(ds=111).val1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1 PARTITION(ds=111).val2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2 PARTITION(ds=111).key EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2 PARTITION(ds=111).val1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2 PARTITION(ds=111).val2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT * from dest1 +POSTHOOK: Output: default@dest1_n113@ds=111 +POSTHOOK: Output: default@dest2_n15@ds=111 +POSTHOOK: Lineage: dest1_n113 PARTITION(ds=111).key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n113 PARTITION(ds=111).val1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n113 PARTITION(ds=111).val2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n15 PARTITION(ds=111).key EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n15 PARTITION(ds=111).val1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n15 PARTITION(ds=111).val2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT * from dest1_n113 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -PREHOOK: Input: default@dest1@ds=111 +PREHOOK: Input: default@dest1_n113 +PREHOOK: Input: default@dest1_n113@ds=111 #### A masked pattern was here #### -POSTHOOK: query: SELECT * from dest1 +POSTHOOK: query: SELECT * from dest1_n113 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -POSTHOOK: Input: default@dest1@ds=111 +POSTHOOK: Input: default@dest1_n113 +POSTHOOK: Input: default@dest1_n113@ds=111 #### A masked pattern was here #### val_0 3 1 111 val_10 1 1 111 @@ -682,15 +682,15 @@ val_95 2 1 111 val_96 1 1 111 val_97 2 1 111 val_98 2 1 111 -PREHOOK: query: SELECT * from dest2 +PREHOOK: query: SELECT * from dest2_n15 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 -PREHOOK: Input: default@dest2@ds=111 +PREHOOK: Input: default@dest2_n15 +PREHOOK: Input: default@dest2_n15@ds=111 #### A masked pattern was here #### -POSTHOOK: query: SELECT * from dest2 +POSTHOOK: query: SELECT * from dest2_n15 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 -POSTHOOK: Input: default@dest2@ds=111 +POSTHOOK: Input: default@dest2_n15 +POSTHOOK: Input: default@dest2_n15@ds=111 #### A masked pattern was here #### 0 3 1 111 10 1 1 111 diff --git a/ql/src/test/results/clientpositive/groupby12.q.out b/ql/src/test/results/clientpositive/groupby12.q.out index 5b00b2ff88..5d5725d6cc 100644 --- a/ql/src/test/results/clientpositive/groupby12.q.out +++ b/ql/src/test/results/clientpositive/groupby12.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n88(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n88 +POSTHOOK: query: CREATE TABLE dest1_n88(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE 
POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n88 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key +INSERT OVERWRITE TABLE dest1_n88 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key +INSERT OVERWRITE TABLE dest1_n88 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -55,7 +55,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n88 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -75,7 +75,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n88 Stage: Stage-2 Stats Work @@ -83,7 +83,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n88 Stage: Stage-3 Map Reduce @@ -109,24 +109,24 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key +INSERT OVERWRITE TABLE dest1_n88 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n88 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key +INSERT OVERWRITE TABLE dest1_n88 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n88 +POSTHOOK: Lineage: dest1_n88.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n88.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n88.* FROM dest1_n88 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n88 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n88.* FROM dest1_n88 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n88 #### A masked pattern was here #### 3 1 1 1 diff --git a/ql/src/test/results/clientpositive/groupby1_limit.q.out b/ql/src/test/results/clientpositive/groupby1_limit.q.out index 4928cb7e99..ba06db7328 100644 --- a/ql/src/test/results/clientpositive/groupby1_limit.q.out +++ b/ql/src/test/results/clientpositive/groupby1_limit.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n105(key INT, value 
DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n105 +POSTHOOK: query: CREATE TABLE dest1_n105(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n105 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5 +FROM src INSERT OVERWRITE TABLE dest1_n105 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5 +FROM src INSERT OVERWRITE TABLE dest1_n105 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -88,7 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n105 Stage: Stage-0 Move Operator @@ -98,29 +98,29 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n105 Stage: Stage-3 Stats Work Basic Stats Work: -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key ORDER BY src.key LIMIT 5 +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n105 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key ORDER BY src.key LIMIT 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key ORDER BY src.key LIMIT 5 +PREHOOK: Output: default@dest1_n105 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n105 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key ORDER BY src.key LIMIT 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n105 +POSTHOOK: Lineage: dest1_n105.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n105.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n105.* FROM dest1_n105 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n105 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n105.* FROM dest1_n105 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n105 #### A masked pattern was here #### 0 0.0 10 10.0 diff --git a/ql/src/test/results/clientpositive/groupby1_map.q.out b/ql/src/test/results/clientpositive/groupby1_map.q.out index 59394b80cd..00c3fa87cc 100644 --- a/ql/src/test/results/clientpositive/groupby1_map.q.out 
+++ b/ql/src/test/results/clientpositive/groupby1_map.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n52(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n52 +POSTHOOK: query: CREATE TABLE dest1_n52(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n52 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +FROM src INSERT OVERWRITE TABLE dest1_n52 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +FROM src INSERT OVERWRITE TABLE dest1_n52 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -59,7 +59,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n52 Select Operator expressions: _col0 (type: int), _col1 (type: double) outputColumnNames: key, value @@ -84,7 +84,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n52 Stage: Stage-2 Stats Work @@ -92,7 +92,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, double - Table: default.dest1 + Table: default.dest1_n52 Stage: Stage-3 Map Reduce @@ -117,23 +117,23 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n52 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: Output: default@dest1_n52 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n52 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n52 +POSTHOOK: Lineage: dest1_n52.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n52.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n52.* FROM dest1_n52 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n52 #### A masked pattern was here #### 
-POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n52.* FROM dest1_n52 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n52 #### A masked pattern was here #### 0 0.0 10 10.0 diff --git a/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out b/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out index 59394b80cd..a2f6f48c1e 100644 --- a/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out +++ b/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n58(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n58 +POSTHOOK: query: CREATE TABLE dest1_n58(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n58 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +FROM src INSERT OVERWRITE TABLE dest1_n58 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +FROM src INSERT OVERWRITE TABLE dest1_n58 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -59,7 +59,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n58 Select Operator expressions: _col0 (type: int), _col1 (type: double) outputColumnNames: key, value @@ -84,7 +84,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n58 Stage: Stage-2 Stats Work @@ -92,7 +92,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, double - Table: default.dest1 + Table: default.dest1_n58 Stage: Stage-3 Map Reduce @@ -117,23 +117,23 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n58 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: Output: default@dest1_n58 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n58 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] 
-PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n58 +POSTHOOK: Lineage: dest1_n58.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n58.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n58.* FROM dest1_n58 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n58 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n58.* FROM dest1_n58 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n58 #### A masked pattern was here #### 0 0.0 10 10.0 diff --git a/ql/src/test/results/clientpositive/groupby1_map_skew.q.out b/ql/src/test/results/clientpositive/groupby1_map_skew.q.out index 399db174b6..a5dabdf4e5 100644 --- a/ql/src/test/results/clientpositive/groupby1_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby1_map_skew.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n147(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n147 +POSTHOOK: query: CREATE TABLE dest1_n147(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n147 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +FROM src INSERT OVERWRITE TABLE dest1_n147 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +FROM src INSERT OVERWRITE TABLE dest1_n147 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -85,7 +85,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n147 Select Operator expressions: _col0 (type: int), _col1 (type: double) outputColumnNames: key, value @@ -110,7 +110,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n147 Stage: Stage-3 Stats Work @@ -118,7 +118,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, double - Table: default.dest1 + Table: default.dest1_n147 Stage: Stage-4 Map Reduce @@ -143,23 +143,23 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n147 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, 
sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: Output: default@dest1_n147 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n147 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n147 +POSTHOOK: Lineage: dest1_n147.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n147.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n147.* FROM dest1_n147 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n147 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n147.* FROM dest1_n147 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n147 #### A masked pattern was here #### 0 0.0 10 10.0 diff --git a/ql/src/test/results/clientpositive/groupby1_noskew.q.out b/ql/src/test/results/clientpositive/groupby1_noskew.q.out index da67ad00ad..d1f78682d1 100644 --- a/ql/src/test/results/clientpositive/groupby1_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby1_noskew.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g1_n0(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_g1 -POSTHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest_g1_n0 +POSTHOOK: query: CREATE TABLE dest_g1_n0(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_g1 +POSTHOOK: Output: default@dest_g1_n0 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +FROM src INSERT OVERWRITE TABLE dest_g1_n0 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +FROM src INSERT OVERWRITE TABLE dest_g1_n0 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -54,7 +54,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_g1 + name: default.dest_g1_n0 Select Operator expressions: _col0 (type: int), _col1 (type: double) outputColumnNames: key, value @@ -74,7 +74,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_g1 + name: default.dest_g1_n0 Stage: Stage-2 Stats Work @@ -82,7 +82,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, double - Table: default.dest_g1 + Table: default.dest_g1_n0 Stage: Stage-3 Map Reduce @@ -107,23 +107,23 @@ STAGE PLANS: output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1_n0 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_g1 -POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key +PREHOOK: Output: default@dest_g1_n0 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1_n0 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_g1 -POSTHOOK: Lineage: dest_g1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_g1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest_g1.* FROM dest_g1 +POSTHOOK: Output: default@dest_g1_n0 +POSTHOOK: Lineage: dest_g1_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g1_n0.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest_g1_n0.* FROM dest_g1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_g1 +PREHOOK: Input: default@dest_g1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest_g1.* FROM dest_g1 +POSTHOOK: query: SELECT dest_g1_n0.* FROM dest_g1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_g1 +POSTHOOK: Input: default@dest_g1_n0 #### A masked pattern was here #### 0 0.0 10 10.0 diff --git a/ql/src/test/results/clientpositive/groupby2_map.q.out b/ql/src/test/results/clientpositive/groupby2_map.q.out index bc9c81885f..f86f13c1ee 100644 --- a/ql/src/test/results/clientpositive/groupby2_map.q.out +++ b/ql/src/test/results/clientpositive/groupby2_map.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n14(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n14 +POSTHOOK: query: CREATE TABLE dest1_n14(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n14 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n14 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n14 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -61,7 +61,7 @@ STAGE 
PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n14 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) outputColumnNames: key, c1, c2 @@ -86,7 +86,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n14 Stage: Stage-2 Stats Work @@ -94,7 +94,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2 Column Types: string, int, string - Table: default.dest1 + Table: default.dest1_n14 Stage: Stage-3 Map Reduce @@ -120,25 +120,25 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n14 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n14 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n14 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n14 +POSTHOOK: Lineage: dest1_n14.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n14.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n14.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n14.* FROM dest1_n14 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n14 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n14.* FROM dest1_n14 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n14 #### A masked pattern was here #### 0 1 00.0 1 71 116414.0 diff --git a/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out index bdc817c49d..7d2d095b59 100644 --- a/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE 
TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n34(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n34 +POSTHOOK: query: CREATE TABLE dest1_n34(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n34 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n34 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n34 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -61,7 +61,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n34 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int) outputColumnNames: key, c1, c2, c3, c4 @@ -86,7 +86,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n34 Stage: Stage-2 Stats Work @@ -94,7 +94,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2, c3, c4 Column Types: string, int, string, int, int - Table: default.dest1 + Table: default.dest1_n34 Stage: Stage-3 Map Reduce @@ -120,27 +120,27 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n34 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n34 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), 
sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n34 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n34 +POSTHOOK: Lineage: dest1_n34.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n34.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n34.c3 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n34.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n34.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n34.* FROM dest1_n34 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n34 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n34.* FROM dest1_n34 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n34 #### A masked pattern was here #### 0 1 00.0 0 3 1 71 116414.0 10044 115 @@ -154,11 +154,11 @@ POSTHOOK: Input: default@dest1 9 7 91047.0 577 12 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n34 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n34 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -207,7 +207,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n34 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 
(type: int), _col4 (type: int) outputColumnNames: key, c1, c2, c3, c4 @@ -232,7 +232,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n34 Stage: Stage-2 Stats Work @@ -240,7 +240,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2, c3, c4 Column Types: string, int, string, int, int - Table: default.dest1 + Table: default.dest1_n34 Stage: Stage-3 Map Reduce @@ -266,27 +266,27 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n34 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n34 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n34 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n34 +POSTHOOK: Lineage: dest1_n34.c1 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n34.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n34.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n34.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n34.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n34.* FROM dest1_n34 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n34 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n34.* FROM dest1_n34 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n34 #### A masked pattern was here #### 0 1 00.0 0 3 1 1 116414.0 10044 115 diff --git a/ql/src/test/results/clientpositive/groupby2_map_skew.q.out 
b/ql/src/test/results/clientpositive/groupby2_map_skew.q.out index 1eefbd7d1a..bb156c419e 100644 --- a/ql/src/test/results/clientpositive/groupby2_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby2_map_skew.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n9(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n9 +POSTHOOK: query: CREATE TABLE dest1_n9(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n9 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n9 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n9 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -87,7 +87,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n9 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) outputColumnNames: key, c1, c2 @@ -112,7 +112,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n9 Stage: Stage-3 Stats Work @@ -120,7 +120,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2 Column Types: string, int, string - Table: default.dest1 + Table: default.dest1_n9 Stage: Stage-4 Map Reduce @@ -146,25 +146,25 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n9 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n9 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n9 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY 
substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 order by key +POSTHOOK: Output: default@dest1_n9 +POSTHOOK: Lineage: dest1_n9.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n9.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n9.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n9.* FROM dest1_n9 order by key PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n9 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 order by key +POSTHOOK: query: SELECT dest1_n9.* FROM dest1_n9 order by key POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n9 #### A masked pattern was here #### 0 1 00.0 1 71 116414.0 diff --git a/ql/src/test/results/clientpositive/groupby2_noskew.q.out b/ql/src/test/results/clientpositive/groupby2_noskew.q.out index 8f2492fd7b..583885c186 100644 --- a/ql/src/test/results/clientpositive/groupby2_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby2_noskew.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2_n1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_g2 -POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_g2_n1 +POSTHOOK: query: CREATE TABLE dest_g2_n1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_g2 +POSTHOOK: Output: default@dest_g2_n1 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -55,7 +55,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_g2 + name: default.dest_g2_n1 Select Operator expressions: _col0 (type: string), 
_col1 (type: int), _col2 (type: string) outputColumnNames: key, c1, c2 @@ -75,7 +75,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_g2 + name: default.dest_g2_n1 Stage: Stage-2 Stats Work @@ -83,7 +83,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2 Column Types: string, int, string - Table: default.dest_g2 + Table: default.dest_g2_n1 Stage: Stage-3 Map Reduce @@ -109,25 +109,25 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_g2 +PREHOOK: Output: default@dest_g2_n1 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_g2 -POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: Output: default@dest_g2_n1 +POSTHOOK: Lineage: dest_g2_n1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2_n1.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest_g2_n1.* FROM dest_g2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_g2 +PREHOOK: Input: default@dest_g2_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: query: SELECT dest_g2_n1.* FROM dest_g2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_g2 +POSTHOOK: Input: default@dest_g2_n1 #### A masked pattern was here #### 0 1 00.0 1 71 116414.0 diff --git a/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out index 617faeb805..6998b8311b 100644 --- a/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2_n3(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default -PREHOOK: Output: default@dest_g2 -POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: Output: default@dest_g2_n3 +POSTHOOK: query: CREATE TABLE dest_g2_n3(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_g2 +POSTHOOK: Output: default@dest_g2_n3 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -56,7 +56,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_g2 + name: default.dest_g2_n3 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int) outputColumnNames: key, c1, c2, c3, c4 @@ -76,7 +76,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_g2 + name: default.dest_g2_n3 Stage: Stage-2 Stats Work @@ -84,7 +84,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2, c3, c4 Column Types: string, int, string, int, int - Table: default.dest_g2 + Table: default.dest_g2_n3 Stage: Stage-3 Map Reduce @@ -110,27 +110,27 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_g2 +PREHOOK: Output: default@dest_g2_n3 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest_g2_n3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), 
concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_g2 -POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_g2.c3 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest_g2.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: Output: default@dest_g2_n3 +POSTHOOK: Lineage: dest_g2_n3.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2_n3.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2_n3.c3 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest_g2_n3.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_g2_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest_g2_n3.* FROM dest_g2_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_g2 +PREHOOK: Input: default@dest_g2_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: query: SELECT dest_g2_n3.* FROM dest_g2_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_g2 +POSTHOOK: Input: default@dest_g2_n3 #### A masked pattern was here #### 0 1 00.0 0 3 1 71 116414.0 10044 115 diff --git a/ql/src/test/results/clientpositive/groupby3.q.out b/ql/src/test/results/clientpositive/groupby3.q.out index 1425c18c16..502e17f200 100644 --- a/ql/src/test/results/clientpositive/groupby3.q.out +++ b/ql/src/test/results/clientpositive/groupby3.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n99(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n99 +POSTHOOK: query: CREATE TABLE dest1_n99(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n99 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n99 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -21,7 +21,7 @@ INSERT OVERWRITE TABLE dest1 SELECT PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n99 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -96,7 +96,7 @@ STAGE PLANS: 
input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n99 Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double) outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9 @@ -121,7 +121,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n99 Stage: Stage-3 Stats Work @@ -129,7 +129,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9 Column Types: double, double, double, double, double, double, double, double, double - Table: default.dest1 + Table: default.dest1_n99 Stage: Stage-4 Map Reduce @@ -155,7 +155,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n99 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -167,9 +167,9 @@ INSERT OVERWRITE TABLE dest1 SELECT var_samp(substr(src.value,5)) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n99 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n99 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -181,22 +181,22 @@ INSERT OVERWRITE TABLE dest1 SELECT var_samp(substr(src.value,5)) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n99 +POSTHOOK: Lineage: dest1_n99.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n99.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n99.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n99.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n99.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n99.c6 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n99.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n99.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n99.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n99.* FROM dest1_n99 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n99 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n99.* FROM dest1_n99 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n99 #### A masked pattern was here #### 130091.0 260.182 256.10355987055016 98.0 0.0 142.92680950752379 143.06995106518903 20428.07287599999 20469.010897795582 diff --git a/ql/src/test/results/clientpositive/groupby3_map.q.out b/ql/src/test/results/clientpositive/groupby3_map.q.out index 06c476b145..e11bd479f9 100644 --- a/ql/src/test/results/clientpositive/groupby3_map.q.out +++ b/ql/src/test/results/clientpositive/groupby3_map.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n46(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n46 +POSTHOOK: query: CREATE TABLE dest1_n46(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n46 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n46 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -21,7 +21,7 @@ INSERT OVERWRITE TABLE dest1 SELECT PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n46 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -76,7 +76,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n46 Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double) outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9 @@ -106,7 +106,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n46 Stage: Stage-2 Stats Work @@ -114,10 +114,10 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9 Column Types: double, double, double, 
double, double, double, double, double, double - Table: default.dest1 + Table: default.dest1_n46 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n46 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -129,9 +129,9 @@ INSERT OVERWRITE TABLE dest1 SELECT var_samp(substr(src.value,5)) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n46 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n46 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -143,16 +143,16 @@ INSERT OVERWRITE TABLE dest1 SELECT var_samp(substr(src.value,5)) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@dest1_n46 +POSTHOOK: Lineage: dest1_n46.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n46.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n46.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] +POSTHOOK: Lineage: dest1_n46.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n46.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n46.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n46.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n46.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n46.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: SELECT c1, c2, @@ -163,9 +163,9 @@ round(c6, 11) c6, round(c7, 11) c7, round(c8, 5) c8, round(c9, 9) c9 -FROM dest1 +FROM dest1_n46 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n46 #### A masked pattern was here #### POSTHOOK: query: SELECT c1, @@ -177,8 +177,8 @@ round(c6, 11) c6, round(c7, 11) c7, round(c8, 5) c8, round(c9, 9) c9 -FROM dest1 +FROM dest1_n46 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n46 #### A masked pattern was here #### 130091.0 260.182 256.10355987055 98.0 0.0 142.92680950752 143.06995106519 20428.07288 20469.010897796 diff --git 
a/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out index f94ef49c27..d576dd164a 100644 --- a/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n59(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n59 +POSTHOOK: query: CREATE TABLE dest1_n59(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n59 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n59 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -23,7 +23,7 @@ INSERT OVERWRITE TABLE dest1 SELECT PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n59 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -80,7 +80,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n59 Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double) outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 @@ -110,7 +110,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n59 Stage: Stage-2 Stats Work @@ -118,10 +118,10 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 Column Types: double, double, double, double, double, double, double, double, double, double, double - Table: default.dest1 + Table: default.dest1_n59 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n59 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -135,9 +135,9 @@ INSERT OVERWRITE TABLE dest1 SELECT count(DISTINCT substr(src.value, 5)) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n59 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n59 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ 
-151,24 +151,24 @@ INSERT OVERWRITE TABLE dest1 SELECT count(DISTINCT substr(src.value, 5)) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c10 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c11 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n59 +POSTHOOK: Lineage: dest1_n59.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n59.c10 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n59.c11 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n59.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n59.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] +POSTHOOK: Lineage: dest1_n59.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n59.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n59.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n59.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n59.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n59.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n59.* FROM dest1_n59 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n59 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n59.* FROM dest1_n59 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n59 #### A masked pattern was here #### 130091.0 260.182 256.10355987055016 98.0 0.0 142.9268095075238 143.06995106518906 20428.072876000002 20469.010897795593 79136.0 309.0 diff --git a/ql/src/test/results/clientpositive/groupby3_map_skew.q.out b/ql/src/test/results/clientpositive/groupby3_map_skew.q.out index 744c8110e4..ffa785d875 100644 --- a/ql/src/test/results/clientpositive/groupby3_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby3_map_skew.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 
DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n109(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n109 +POSTHOOK: query: CREATE TABLE dest1_n109(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n109 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n109 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -21,7 +21,7 @@ INSERT OVERWRITE TABLE dest1 SELECT PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n109 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -100,7 +100,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n109 Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double) outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9 @@ -130,7 +130,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n109 Stage: Stage-3 Stats Work @@ -138,10 +138,10 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9 Column Types: double, double, double, double, double, double, double, double, double - Table: default.dest1 + Table: default.dest1_n109 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n109 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -153,9 +153,9 @@ INSERT OVERWRITE TABLE dest1 SELECT var_samp(substr(src.value,5)) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n109 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n109 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -167,24 +167,24 @@ INSERT OVERWRITE TABLE dest1 SELECT var_samp(substr(src.value,5)) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c5 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@dest1_n109 +POSTHOOK: Lineage: dest1_n109.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n109.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n109.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n109.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n109.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n109.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n109.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n109.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n109.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: SELECT ROUND(c1, 1), ROUND(c2, 3), ROUND(c3, 5), ROUND(c4, 1), ROUND(c5, 1), ROUND(c6, 5), -ROUND(c7,5), ROUND(c8, 5), ROUND(c9, 5) FROM dest1 +ROUND(c7,5), ROUND(c8, 5), ROUND(c9, 5) FROM dest1_n109 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n109 #### A masked pattern was here #### POSTHOOK: query: SELECT ROUND(c1, 1), ROUND(c2, 3), ROUND(c3, 5), ROUND(c4, 1), ROUND(c5, 1), ROUND(c6, 5), -ROUND(c7,5), ROUND(c8, 5), ROUND(c9, 5) FROM dest1 +ROUND(c7,5), ROUND(c8, 5), ROUND(c9, 5) FROM dest1_n109 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n109 #### A masked pattern was here #### 130091.0 260.182 256.10356 98.0 0.0 142.92681 143.06995 20428.07288 20469.0109 diff --git a/ql/src/test/results/clientpositive/groupby3_noskew.q.out b/ql/src/test/results/clientpositive/groupby3_noskew.q.out index a639b2decc..948afcbdcf 100644 --- a/ql/src/test/results/clientpositive/groupby3_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby3_noskew.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n54(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n54 +POSTHOOK: query: CREATE TABLE dest1_n54(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n54 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE 
TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n54 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -21,7 +21,7 @@ INSERT OVERWRITE TABLE dest1 SELECT PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n54 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -71,7 +71,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n54 Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double) outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9 @@ -97,7 +97,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n54 Stage: Stage-2 Stats Work @@ -105,10 +105,10 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9 Column Types: double, double, double, double, double, double, double, double, double - Table: default.dest1 + Table: default.dest1_n54 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n54 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -120,9 +120,9 @@ INSERT OVERWRITE TABLE dest1 SELECT var_samp(substr(src.value,5)) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n54 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n54 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -134,22 +134,22 @@ INSERT OVERWRITE TABLE dest1 SELECT var_samp(substr(src.value,5)) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n54 +POSTHOOK: Lineage: dest1_n54.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n54.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: 
dest1_n54.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] +POSTHOOK: Lineage: dest1_n54.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n54.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n54.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n54.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n54.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n54.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n54.* FROM dest1_n54 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n54 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n54.* FROM dest1_n54 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n54 #### A masked pattern was here #### 130091.0 260.182 256.10355987055016 98.0 0.0 142.9268095075238 143.06995106518906 20428.072876000002 20469.010897795593 diff --git a/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out index 37b908e8f0..9cfe514f20 100644 --- a/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n21(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n21 +POSTHOOK: query: CREATE TABLE dest1_n21(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n21 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n21 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -23,7 +23,7 @@ INSERT OVERWRITE TABLE dest1 SELECT PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n21 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -75,7 +75,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n21 Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: 
double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double) outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 @@ -101,7 +101,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n21 Stage: Stage-2 Stats Work @@ -109,10 +109,10 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 Column Types: double, double, double, double, double, double, double, double, double, double, double - Table: default.dest1 + Table: default.dest1_n21 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n21 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -126,9 +126,9 @@ INSERT OVERWRITE TABLE dest1 SELECT count(DISTINCT substr(src.value, 5)) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n21 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT +INSERT OVERWRITE TABLE dest1_n21 SELECT sum(substr(src.value,5)), avg(substr(src.value,5)), avg(DISTINCT substr(src.value,5)), @@ -142,24 +142,24 @@ INSERT OVERWRITE TABLE dest1 SELECT count(DISTINCT substr(src.value, 5)) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c10 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c11 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] -POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n21 +POSTHOOK: Lineage: dest1_n21.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n21.c10 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n21.c11 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest1_n21.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n21.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] +POSTHOOK: Lineage: dest1_n21.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n21.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n21.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] +POSTHOOK: Lineage: dest1_n21.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n21.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n21.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n21.* FROM dest1_n21 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n21 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n21.* FROM dest1_n21 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n21 #### A masked pattern was here #### 130091.0 260.182 256.10355987055016 98.0 0.0 142.9268095075238 143.06995106518906 20428.072876000002 20469.010897795593 79136.0 309.0 diff --git a/ql/src/test/results/clientpositive/groupby4.q.out b/ql/src/test/results/clientpositive/groupby4.q.out index 40b478bb24..5f2dc8e5ff 100644 --- a/ql/src/test/results/clientpositive/groupby4.q.out +++ b/ql/src/test/results/clientpositive/groupby4.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n142(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n142 +POSTHOOK: query: CREATE TABLE dest1_n142(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n142 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n142 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n142 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -75,7 +75,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n142 Select Operator expressions: _col0 (type: string) outputColumnNames: c1 @@ -95,7 +95,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n142 Stage: Stage-3 Stats Work @@ -103,7 +103,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1 Column Types: string - Table: default.dest1 + Table: default.dest1_n142 Stage: Stage-4 Map Reduce @@ -152,23 +152,23 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n142 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n142 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE 
dest1_n142 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n142 +POSTHOOK: Lineage: dest1_n142.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n142.* FROM dest1_n142 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n142 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n142.* FROM dest1_n142 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n142 #### A masked pattern was here #### 0 1 diff --git a/ql/src/test/results/clientpositive/groupby4_map.q.out b/ql/src/test/results/clientpositive/groupby4_map.q.out index c2f20c9ca0..e0675fe6d5 100644 --- a/ql/src/test/results/clientpositive/groupby4_map.q.out +++ b/ql/src/test/results/clientpositive/groupby4_map.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: CREATE TABLE dest1(key INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n36(key INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n36 +POSTHOOK: query: CREATE TABLE dest1_n36(key INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n36 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1) +FROM src INSERT OVERWRITE TABLE dest1_n36 SELECT count(1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1) +FROM src INSERT OVERWRITE TABLE dest1_n36 SELECT count(1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -53,7 +53,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n36 Select Operator expressions: _col0 (type: int) outputColumnNames: key @@ -83,7 +83,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n36 Stage: Stage-2 Stats Work @@ -91,23 +91,23 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: int - Table: default.dest1 + Table: default.dest1_n36 -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1) +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n36 SELECT count(1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1) +PREHOOK: Output: default@dest1_n36 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n36 SELECT count(1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.null, ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n36 +POSTHOOK: Lineage: dest1_n36.key EXPRESSION [(src)src.null, ] +PREHOOK: query: SELECT dest1_n36.* FROM dest1_n36 
PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n36 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n36.* FROM dest1_n36 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n36 #### A masked pattern was here #### 500 diff --git a/ql/src/test/results/clientpositive/groupby4_map_skew.q.out b/ql/src/test/results/clientpositive/groupby4_map_skew.q.out index 2582728580..795fb57efc 100644 --- a/ql/src/test/results/clientpositive/groupby4_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby4_map_skew.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: CREATE TABLE dest1(key INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n117(key INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n117 +POSTHOOK: query: CREATE TABLE dest1_n117(key INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n117 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1) +FROM src INSERT OVERWRITE TABLE dest1_n117 SELECT count(1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1) +FROM src INSERT OVERWRITE TABLE dest1_n117 SELECT count(1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -53,7 +53,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n117 Select Operator expressions: _col0 (type: int) outputColumnNames: key @@ -83,7 +83,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n117 Stage: Stage-2 Stats Work @@ -91,23 +91,23 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: int - Table: default.dest1 + Table: default.dest1_n117 -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1) +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n117 SELECT count(1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1) +PREHOOK: Output: default@dest1_n117 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n117 SELECT count(1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.null, ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n117 +POSTHOOK: Lineage: dest1_n117.key EXPRESSION [(src)src.null, ] +PREHOOK: query: SELECT dest1_n117.* FROM dest1_n117 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n117 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n117.* FROM dest1_n117 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n117 #### A masked pattern was here #### 500 diff --git a/ql/src/test/results/clientpositive/groupby4_noskew.q.out 
b/ql/src/test/results/clientpositive/groupby4_noskew.q.out index 46153129a6..0b0f92419a 100644 --- a/ql/src/test/results/clientpositive/groupby4_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby4_noskew.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n29(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n29 +POSTHOOK: query: CREATE TABLE dest1_n29(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n29 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n29 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n29 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -50,7 +50,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n29 Select Operator expressions: _col0 (type: string) outputColumnNames: c1 @@ -70,7 +70,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n29 Stage: Stage-2 Stats Work @@ -78,7 +78,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1 Column Types: string - Table: default.dest1 + Table: default.dest1_n29 Stage: Stage-3 Map Reduce @@ -104,23 +104,23 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n29 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n29 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) +INSERT OVERWRITE TABLE dest1_n29 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n29 +POSTHOOK: Lineage: dest1_n29.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n29.* FROM dest1_n29 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n29 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n29.* FROM dest1_n29 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n29 #### A masked pattern was here #### 0 1 diff --git a/ql/src/test/results/clientpositive/groupby5.q.out 
diff --git a/ql/src/test/results/clientpositive/groupby5.q.out b/ql/src/test/results/clientpositive/groupby5.q.out
index e7780b0a8c..2997b438a1 100644
--- a/ql/src/test/results/clientpositive/groupby5.q.out
+++ b/ql/src/test/results/clientpositive/groupby5.q.out
@@ -1,19 +1,19 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n32(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n32
+POSTHOOK: query: CREATE TABLE dest1_n32(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n32
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n32
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n32
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key
@@ -87,7 +87,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n32
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -107,7 +107,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n32

   Stage: Stage-3
     Stats Work
@@ -115,7 +115,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n32

   Stage: Stage-4
     Map Reduce
@@ -163,29 +163,29 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

-PREHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: query: INSERT OVERWRITE TABLE dest1_n32
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: Output: default@dest1_n32
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n32
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n32
+POSTHOOK: Lineage: dest1_n32.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n32.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n32.* FROM dest1_n32
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n32
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n32.* FROM dest1_n32
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n32
 #### A masked pattern was here ####
 0	0.0
 10	10.0
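Before the map/skew variants of the same test below, a sketch of the aggregate groupby5 verifies, with the renamed target (query text as it appears in the golden file; the trailing comment is an assumption about the masked output shown above):

    INSERT OVERWRITE TABLE dest1_n32
    SELECT src.key, sum(substr(src.value,5))
    FROM src
    GROUP BY src.key;
    SELECT dest1_n32.* FROM dest1_n32;  -- leading golden rows: 0 0.0, 10 10.0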
diff --git a/ql/src/test/results/clientpositive/groupby5_map.q.out b/ql/src/test/results/clientpositive/groupby5_map.q.out
index d04eb44b3c..d50ed5e1c7 100644
--- a/ql/src/test/results/clientpositive/groupby5_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_map.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: CREATE TABLE dest1(key INT) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n65(key INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n65
+POSTHOOK: query: CREATE TABLE dest1_n65(key INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n65
 PREHOOK: query: EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
+FROM src INSERT OVERWRITE TABLE dest1_n65 SELECT sum(src.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
+FROM src INSERT OVERWRITE TABLE dest1_n65 SELECT sum(src.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -54,7 +54,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n65
           Select Operator
             expressions: _col0 (type: int)
             outputColumnNames: key
@@ -84,7 +84,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n65

   Stage: Stage-2
     Stats Work
@@ -92,23 +92,23 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key
          Column Types: int
-          Table: default.dest1
+          Table: default.dest1_n65

-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n65 SELECT sum(src.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
+PREHOOK: Output: default@dest1_n65
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n65 SELECT sum(src.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n65
+POSTHOOK: Lineage: dest1_n65.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n65.* FROM dest1_n65
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n65
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n65.* FROM dest1_n65
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n65
 #### A masked pattern was here ####
 130091
diff --git a/ql/src/test/results/clientpositive/groupby5_map_skew.q.out b/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
index 6fa3fc1188..b1ebddff07 100644
--- a/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: CREATE TABLE dest1(key INT) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n66(key INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n66
+POSTHOOK: query: CREATE TABLE dest1_n66(key INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n66
 PREHOOK: query: EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
+FROM src INSERT OVERWRITE TABLE dest1_n66 SELECT sum(src.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
+FROM src INSERT OVERWRITE TABLE dest1_n66 SELECT sum(src.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -54,7 +54,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n66
           Select Operator
             expressions: _col0 (type: int)
             outputColumnNames: key
@@ -84,7 +84,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n66

   Stage: Stage-2
     Stats Work
@@ -92,23 +92,23 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key
          Column Types: int
-          Table: default.dest1
+          Table: default.dest1_n66

-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n66 SELECT sum(src.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
+PREHOOK: Output: default@dest1_n66
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n66 SELECT sum(src.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n66
+POSTHOOK: Lineage: dest1_n66.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n66.* FROM dest1_n66
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n66
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n66.* FROM dest1_n66
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n66
 #### A masked pattern was here ####
 130091
diff --git a/ql/src/test/results/clientpositive/groupby5_noskew.q.out b/ql/src/test/results/clientpositive/groupby5_noskew.q.out
index 647cfbddc0..1794b1e0ab 100644
--- a/ql/src/test/results/clientpositive/groupby5_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_noskew.q.out
@@ -1,19 +1,19 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n27(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n27
+POSTHOOK: query: CREATE TABLE dest1_n27(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n27
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n27
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n27
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key
@@ -60,7 +60,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n27
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -80,7 +80,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n27

   Stage: Stage-2
     Stats Work
@@ -88,7 +88,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n27

   Stage: Stage-3
     Map Reduce
@@ -113,29 +113,29 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

-PREHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: query: INSERT OVERWRITE TABLE dest1_n27
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: Output: default@dest1_n27
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n27
 SELECT src.key, sum(substr(src.value,5))
 FROM src
 GROUP BY src.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n27
+POSTHOOK: Lineage: dest1_n27.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n27.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n27.* FROM dest1_n27
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n27
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n27.* FROM dest1_n27
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n27
 #### A masked pattern was here ####
 0	0.0
 10	10.0
diff --git a/ql/src/test/results/clientpositive/groupby6.q.out b/ql/src/test/results/clientpositive/groupby6.q.out
index b3ffd87456..cc2048ae30 100644
--- a/ql/src/test/results/clientpositive/groupby6.q.out
+++ b/ql/src/test/results/clientpositive/groupby6.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n77(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n77
+POSTHOOK: query: CREATE TABLE dest1_n77(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n77
 PREHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n77 SELECT DISTINCT substr(src.value,5,1)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n77 SELECT DISTINCT substr(src.value,5,1)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -75,7 +75,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n77
           Select Operator
             expressions: _col0 (type: string)
             outputColumnNames: c1
@@ -95,7 +95,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n77

   Stage: Stage-3
     Stats Work
@@ -103,7 +103,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: c1
          Column Types: string
-          Table: default.dest1
+          Table: default.dest1_n77

   Stage: Stage-4
     Map Reduce
@@ -152,23 +152,23 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n77 SELECT DISTINCT substr(src.value,5,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n77
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n77 SELECT DISTINCT substr(src.value,5,1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n77
+POSTHOOK: Lineage: dest1_n77.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n77.* FROM dest1_n77
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n77
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n77.* FROM dest1_n77
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n77
 #### A masked pattern was here ####
 0
 1
diff --git a/ql/src/test/results/clientpositive/groupby6_map.q.out b/ql/src/test/results/clientpositive/groupby6_map.q.out
index 073a94ce44..685318678a 100644
--- a/ql/src/test/results/clientpositive/groupby6_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby6_map.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n17(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n17
+POSTHOOK: query: CREATE TABLE dest1_n17(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n17
 PREHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n17 SELECT DISTINCT substr(src.value,5,1)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n17 SELECT DISTINCT substr(src.value,5,1)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -55,7 +55,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n17
           Select Operator
             expressions: _col0 (type: string)
             outputColumnNames: c1
@@ -80,7 +80,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n17

   Stage: Stage-2
     Stats Work
@@ -88,7 +88,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: c1
          Column Types: string
-          Table: default.dest1
+          Table: default.dest1_n17

   Stage: Stage-3
     Map Reduce
@@ -114,23 +114,23 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n17 SELECT DISTINCT substr(src.value,5,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n17
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n17 SELECT DISTINCT substr(src.value,5,1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n17
+POSTHOOK: Lineage: dest1_n17.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n17.* FROM dest1_n17
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n17
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n17.* FROM dest1_n17
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n17
 #### A masked pattern was here ####
 0
 1
diff --git a/ql/src/test/results/clientpositive/groupby6_map_skew.q.out b/ql/src/test/results/clientpositive/groupby6_map_skew.q.out
index af088b1503..e2b9e395b2 100644
--- a/ql/src/test/results/clientpositive/groupby6_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby6_map_skew.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n82(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n82
+POSTHOOK: query: CREATE TABLE dest1_n82(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n82
 PREHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n82 SELECT DISTINCT substr(src.value,5,1)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n82 SELECT DISTINCT substr(src.value,5,1)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -79,7 +79,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n82
           Select Operator
             expressions: _col0 (type: string)
             outputColumnNames: c1
@@ -104,7 +104,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n82

   Stage: Stage-3
     Stats Work
@@ -112,7 +112,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: c1
          Column Types: string
-          Table: default.dest1
+          Table: default.dest1_n82

   Stage: Stage-4
     Map Reduce
@@ -138,23 +138,23 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n82 SELECT DISTINCT substr(src.value,5,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n82
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n82 SELECT DISTINCT substr(src.value,5,1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n82
+POSTHOOK: Lineage: dest1_n82.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n82.* FROM dest1_n82
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n82
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n82.* FROM dest1_n82
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n82
 #### A masked pattern was here ####
 0
 1
diff --git a/ql/src/test/results/clientpositive/groupby6_noskew.q.out b/ql/src/test/results/clientpositive/groupby6_noskew.q.out
index 49545d2cc4..8193f2dfba 100644
--- a/ql/src/test/results/clientpositive/groupby6_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby6_noskew.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n84(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n84
+POSTHOOK: query: CREATE TABLE dest1_n84(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n84
 PREHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n84 SELECT DISTINCT substr(src.value,5,1)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n84 SELECT DISTINCT substr(src.value,5,1)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -50,7 +50,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n84
           Select Operator
             expressions: _col0 (type: string)
             outputColumnNames: c1
@@ -70,7 +70,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n84

   Stage: Stage-2
     Stats Work
@@ -78,7 +78,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: c1
          Column Types: string
-          Table: default.dest1
+          Table: default.dest1_n84

   Stage: Stage-3
     Map Reduce
@@ -104,23 +104,23 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n84 SELECT DISTINCT substr(src.value,5,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n84
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)
+INSERT OVERWRITE TABLE dest1_n84 SELECT DISTINCT substr(src.value,5,1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n84
+POSTHOOK: Lineage: dest1_n84.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n84.* FROM dest1_n84
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n84
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n84.* FROM dest1_n84
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n84
 #### A masked pattern was here ####
 0
 1
diff --git a/ql/src/test/results/clientpositive/groupby7.q.out b/ql/src/test/results/clientpositive/groupby7.q.out
index ee0153a97b..02f5305466 100644
--- a/ql/src/test/results/clientpositive/groupby7.q.out
+++ b/ql/src/test/results/clientpositive/groupby7.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE DEST1_n21(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1
-POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST1_n21
+POSTHOOK: query: CREATE TABLE DEST1_n21(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1
-PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@DEST1_n21
+PREHOOK: query: CREATE TABLE DEST2_n18(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST2
-POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST2_n18
+POSTHOOK: query: CREATE TABLE DEST2_n18(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST2
+POSTHOOK: Output: default@DEST2_n18
 PREHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n21 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n18 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n21
+PREHOOK: Output: default@dest2_n18
 POSTHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n21 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n18 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: Output: default@dest1_n21
+POSTHOOK: Output: default@dest2_n18
+POSTHOOK: Lineage: dest1_n21.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n21.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n18.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n18.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT DEST1_n21.* FROM DEST1_n21
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n21
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: query: SELECT DEST1_n21.* FROM DEST1_n21
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n21
 #### A masked pattern was here ####
 0	0.0
 10	10.0
@@ -349,13 +349,13 @@ POSTHOOK: Input: default@dest1
 96	96.0
 97	194.0
 98	196.0
-PREHOOK: query: SELECT DEST2.* FROM DEST2
+PREHOOK: query: SELECT DEST2_n18.* FROM DEST2_n18
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n18
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST2.* FROM DEST2
+POSTHOOK: query: SELECT DEST2_n18.* FROM DEST2_n18
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n18
 #### A masked pattern was here ####
 0	0.0
 10	10.0
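The groupby7* family exercises Hive's multi-insert: one scan of SRC feeding two INSERT OVERWRITE targets. A sketch with the renamed tables (the two set commands are an assumption about how the _map/_skew/_noskew variants are toggled in their .q files, which this result-only patch does not show):

    set hive.map.aggr=true;            -- assumed for the *_map* variants
    set hive.groupby.skewindata=true;  -- assumed for the *_skew variants
    FROM SRC
    INSERT OVERWRITE TABLE DEST1_n21 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
    INSERT OVERWRITE TABLE DEST2_n18 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;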
diff --git a/ql/src/test/results/clientpositive/groupby7_map.q.out b/ql/src/test/results/clientpositive/groupby7_map.q.out
index 642cbb85d4..71ae0e321c 100644
--- a/ql/src/test/results/clientpositive/groupby7_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE DEST1_n11(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1
-POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST1_n11
+POSTHOOK: query: CREATE TABLE DEST1_n11(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1
-PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@DEST1_n11
+PREHOOK: query: CREATE TABLE DEST2_n9(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST2
-POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST2_n9
+POSTHOOK: query: CREATE TABLE DEST2_n9(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST2
+POSTHOOK: Output: default@DEST2_n9
 PREHOOK: query: EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n11 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n9 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n11 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n9 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -91,7 +91,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n11
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -116,7 +116,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n11

   Stage: Stage-3
     Stats Work
@@ -124,7 +124,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n11

   Stage: Stage-4
     Map Reduce
@@ -155,7 +155,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest2
+          Table: default.dest2_n9

   Stage: Stage-5
     Map Reduce
@@ -186,7 +186,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n9
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -211,7 +211,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n9

   Stage: Stage-7
     Map Reduce
@@ -237,30 +237,30 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

 PREHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n11 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n9 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n11
+PREHOOK: Output: default@dest2_n9
 POSTHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n11 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n9 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: Output: default@dest1_n11
+POSTHOOK: Output: default@dest2_n9
+POSTHOOK: Lineage: dest1_n11.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n11.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n9.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n9.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT DEST1_n11.* FROM DEST1_n11
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n11
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: query: SELECT DEST1_n11.* FROM DEST1_n11
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n11
 #### A masked pattern was here ####
 0	0.0
 10	10.0
@@ -571,13 +571,13 @@ POSTHOOK: Input: default@dest1
 96	96.0
 97	194.0
 98	196.0
-PREHOOK: query: SELECT DEST2.* FROM DEST2
+PREHOOK: query: SELECT DEST2_n9.* FROM DEST2_n9
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST2.* FROM DEST2
+POSTHOOK: query: SELECT DEST2_n9.* FROM DEST2_n9
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n9
 #### A masked pattern was here ####
 0	0.0
 10	10.0
diff --git a/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
index abd2f587e1..bf197e1d50 100644
--- a/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE DEST1_n0(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1
-POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST1_n0
+POSTHOOK: query: CREATE TABLE DEST1_n0(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1
-PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@DEST1_n0
+PREHOOK: query: CREATE TABLE DEST2_n0(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST2
-POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST2_n0
+POSTHOOK: query: CREATE TABLE DEST2_n0(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST2
+POSTHOOK: Output: default@DEST2_n0
 PREHOOK: query: EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n0 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n0 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n0 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n0 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -71,7 +71,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n0
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -104,7 +104,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n0
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -129,7 +129,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n0

   Stage: Stage-3
     Stats Work
@@ -137,7 +137,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n0

   Stage: Stage-4
     Map Reduce
@@ -168,7 +168,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest2
+          Table: default.dest2_n0

   Stage: Stage-1
     Move Operator
@@ -178,7 +178,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n0

   Stage: Stage-6
     Map Reduce
@@ -204,30 +204,30 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

 PREHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n0 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n0 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n0
+PREHOOK: Output: default@dest2_n0
 POSTHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n0 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n0 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: Output: default@dest1_n0
+POSTHOOK: Output: default@dest2_n0
+POSTHOOK: Lineage: dest1_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n0.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n0.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT DEST1_n0.* FROM DEST1_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: query: SELECT DEST1_n0.* FROM DEST1_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n0
 #### A masked pattern was here ####
 0	0.0
 10	10.0
@@ -538,13 +538,13 @@ POSTHOOK: Input: default@dest1
 96	96.0
 97	194.0
 98	196.0
-PREHOOK: query: SELECT DEST2.* FROM DEST2
+PREHOOK: query: SELECT DEST2_n0.* FROM DEST2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST2.* FROM DEST2
+POSTHOOK: query: SELECT DEST2_n0.* FROM DEST2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n0
 #### A masked pattern was here ####
 0	0.0
 10	10.0
diff --git a/ql/src/test/results/clientpositive/groupby7_map_skew.q.out b/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
index b6b727de08..e1c6af2910 100644
--- a/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE DEST1_n1(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1
-POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST1_n1
+POSTHOOK: query: CREATE TABLE DEST1_n1(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1
-PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@DEST1_n1
+PREHOOK: query: CREATE TABLE DEST2_n1(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST2
-POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST2_n1
+POSTHOOK: query: CREATE TABLE DEST2_n1(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST2
+POSTHOOK: Output: default@DEST2_n1
 PREHOOK: query: EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -118,7 +118,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n1
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -143,7 +143,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n1

   Stage: Stage-4
     Stats Work
@@ -151,7 +151,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n1

   Stage: Stage-5
     Map Reduce
@@ -182,7 +182,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest2
+          Table: default.dest2_n1

   Stage: Stage-6
     Map Reduce
@@ -238,7 +238,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n1
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -263,7 +263,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n1

   Stage: Stage-9
     Map Reduce
@@ -289,30 +289,30 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

 PREHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n1
+PREHOOK: Output: default@dest2_n1
 POSTHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: Output: default@dest1_n1
+POSTHOOK: Output: default@dest2_n1
+POSTHOOK: Lineage: dest1_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT DEST1_n1.* FROM DEST1_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: query: SELECT DEST1_n1.* FROM DEST1_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n1
 #### A masked pattern was here ####
 0	0.0
 10	10.0
@@ -623,13 +623,13 @@ POSTHOOK: Input: default@dest1
 96	96.0
 97	194.0
 98	196.0
-PREHOOK: query: SELECT DEST2.* FROM DEST2
+PREHOOK: query: SELECT DEST2_n1.* FROM DEST2_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST2.* FROM DEST2
+POSTHOOK: query: SELECT DEST2_n1.* FROM DEST2_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n1
 #### A masked pattern was here ####
 0	0.0
 10	10.0
diff --git a/ql/src/test/results/clientpositive/groupby7_noskew.q.out b/ql/src/test/results/clientpositive/groupby7_noskew.q.out
index 029e749648..a8b663034b 100644
--- a/ql/src/test/results/clientpositive/groupby7_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_noskew.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE DEST1_n15(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1
-POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST1_n15
+POSTHOOK: query: CREATE TABLE DEST1_n15(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1
-PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@DEST1_n15
+PREHOOK: query: CREATE TABLE DEST2_n13(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST2
-POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@DEST2_n13
+POSTHOOK: query: CREATE TABLE DEST2_n13(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST2
+POSTHOOK: Output: default@DEST2_n13
 PREHOOK: query: EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n15 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n13 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n15 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n13 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -80,7 +80,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n15
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -100,7 +100,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n15

   Stage: Stage-3
     Stats Work
@@ -108,7 +108,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n15

   Stage: Stage-4
     Map Reduce
@@ -139,7 +139,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest2
+          Table: default.dest2_n13

   Stage: Stage-5
     Map Reduce
@@ -170,7 +170,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n13
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string)
             outputColumnNames: key, value
@@ -190,7 +190,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n13

   Stage: Stage-7
     Map Reduce
@@ -216,30 +216,30 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

 PREHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n15 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n13 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n15
+PREHOOK: Output: default@dest2_n13
 POSTHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST1_n15 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n13 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: Output: default@dest1_n15
+POSTHOOK: Output: default@dest2_n13
+POSTHOOK: Lineage: dest1_n15.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n15.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n13.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n13.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT DEST1_n15.* FROM DEST1_n15
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n15
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST1.* FROM DEST1
+POSTHOOK: query: SELECT DEST1_n15.* FROM DEST1_n15
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n15
 #### A masked pattern was here ####
 0	0.0
 10	10.0
@@ -550,13 +550,13 @@ POSTHOOK: Input: default@dest1
 96	96.0
 97	194.0
 98	196.0
-PREHOOK: query: SELECT DEST2.* FROM DEST2
+PREHOOK: query: SELECT DEST2_n13.* FROM DEST2_n13
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n13
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST2.* FROM DEST2
+POSTHOOK: query: SELECT DEST2_n13.* FROM DEST2_n13
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n13
 #### A masked pattern was here ####
 0	0.0
 10	10.0
b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out index 1237688387..dfee473ef2 100644 --- a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n25(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n25 +POSTHOOK: query: CREATE TABLE DEST1_n25(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n25 +PREHOOK: query: CREATE TABLE DEST2_n22(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n22 +POSTHOOK: query: CREATE TABLE DEST2_n22(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n22 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 +INSERT OVERWRITE TABLE DEST1_n25 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 +INSERT OVERWRITE TABLE DEST2_n22 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 +INSERT OVERWRITE TABLE DEST1_n25 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 +INSERT OVERWRITE TABLE DEST2_n22 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -111,7 +111,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n25 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -131,7 +131,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n25 Stage: Stage-4 Stats Work @@ -139,7 +139,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n25 Stage: Stage-5 Map Reduce @@ -170,7 +170,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest2 + Table: default.dest2_n22 Stage: Stage-6 Map 
Reduce @@ -202,7 +202,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n22 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -222,7 +222,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n22 Stage: Stage-8 Map Reduce @@ -248,30 +248,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 +INSERT OVERWRITE TABLE DEST1_n25 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 +INSERT OVERWRITE TABLE DEST2_n22 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n25 +PREHOOK: Output: default@dest2_n22 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 +INSERT OVERWRITE TABLE DEST1_n25 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 +INSERT OVERWRITE TABLE DEST2_n22 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n25 +POSTHOOK: Output: default@dest2_n22 +POSTHOOK: Lineage: dest1_n25.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n25.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n22.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n22.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n25.* FROM DEST1_n25 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n25 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n25.* FROM DEST1_n25 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n25 #### A masked pattern was here #### 0 0.0 10 10.0 @@ -283,13 +283,13 @@ POSTHOOK: Input: default@dest1 111 111.0 113 226.0 114 114.0 -PREHOOK: query: 
SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n22.* FROM DEST2_n22 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n22 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n22.* FROM DEST2_n22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n22 #### A masked pattern was here #### 0 0.0 10 10.0 diff --git a/ql/src/test/results/clientpositive/groupby8.q.out b/ql/src/test/results/clientpositive/groupby8.q.out index eb5c5ac30d..decf27282b 100644 --- a/ql/src/test/results/clientpositive/groupby8.q.out +++ b/ql/src/test/results/clientpositive/groupby8.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n8(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n8 +POSTHOOK: query: CREATE TABLE DEST1_n8(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n8 +PREHOOK: query: CREATE TABLE DEST2_n6(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n6 +POSTHOOK: query: CREATE TABLE DEST2_n6(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n6 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n8 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n6 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n8 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n6 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -108,7 +108,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n8 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -128,7 +128,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n8 Stage: Stage-4 Stats Work @@ -136,7 +136,7 @@ STAGE PLANS: Column Stats Desc: Columns: 
key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n8 Stage: Stage-5 Map Reduce @@ -190,7 +190,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest2 + Table: default.dest2_n6 Stage: Stage-7 Map Reduce @@ -245,7 +245,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n6 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -265,7 +265,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n6 Stage: Stage-10 Map Reduce @@ -314,30 +314,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n8 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n6 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n8 +PREHOOK: Output: default@dest2_n6 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n8 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n6 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n8 +POSTHOOK: Output: default@dest2_n6 +POSTHOOK: Lineage: dest1_n8.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n8.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n6.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n6.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n8.* FROM DEST1_n8 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n8 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n8.* FROM DEST1_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n8 #### A masked 
pattern was here #### 0 1 10 1 @@ -648,13 +648,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n6.* FROM DEST2_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n6 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n6.* FROM DEST2_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n6 #### A masked pattern was here #### 0 1 10 1 @@ -967,13 +967,13 @@ POSTHOOK: Input: default@dest2 98 1 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n8 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n6 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n8 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n6 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -1059,7 +1059,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n8 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -1079,7 +1079,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n8 Stage: Stage-4 Stats Work @@ -1087,7 +1087,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n8 Stage: Stage-5 Map Reduce @@ -1141,7 +1141,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest2 + Table: default.dest2_n6 Stage: Stage-7 Map Reduce @@ -1196,7 +1196,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n6 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -1216,7 +1216,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n6 Stage: Stage-10 Map Reduce @@ -1265,30 +1265,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
+INSERT OVERWRITE TABLE DEST1_n8 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n6 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n8 +PREHOOK: Output: default@dest2_n6 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n8 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n6 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n8 +POSTHOOK: Output: default@dest2_n6 +POSTHOOK: Lineage: dest1_n8.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n8.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n6.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n6.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n8.* FROM DEST1_n8 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n8 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n8.* FROM DEST1_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n8 #### A masked pattern was here #### 0 1 10 1 @@ -1599,13 +1599,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n6.* FROM DEST2_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n6 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n6.* FROM DEST2_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n6 #### A masked pattern was here #### 0 1 10 1 diff --git a/ql/src/test/results/clientpositive/groupby8_map.q.out b/ql/src/test/results/clientpositive/groupby8_map.q.out index eb0dbe87b5..9af1537ad2 100644 --- a/ql/src/test/results/clientpositive/groupby8_map.q.out +++ b/ql/src/test/results/clientpositive/groupby8_map.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n22(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n22 +POSTHOOK: query: CREATE TABLE DEST1_n22(key INT, value 
STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n22 +PREHOOK: query: CREATE TABLE DEST2_n19(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n19 +POSTHOOK: query: CREATE TABLE DEST2_n19(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n19 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n22 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n19 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n22 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n19 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -70,7 +70,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n22 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -103,7 +103,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n19 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -128,7 +128,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n22 Stage: Stage-3 Stats Work @@ -136,7 +136,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n22 Stage: Stage-4 Map Reduce @@ -167,7 +167,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest2 + Table: default.dest2_n19 Stage: Stage-1 Move Operator @@ -177,7 +177,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n19 Stage: Stage-6 Map Reduce @@ -203,30 +203,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, 
COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n22 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n19 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n22 +PREHOOK: Output: default@dest2_n19 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n22 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n19 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n22 +POSTHOOK: Output: default@dest2_n19 +POSTHOOK: Lineage: dest1_n22.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n22.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n19.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n19.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n22.* FROM DEST1_n22 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n22 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n22.* FROM DEST1_n22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n22 #### A masked pattern was here #### 0 1 10 1 @@ -537,13 +537,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n19.* FROM DEST2_n19 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n19 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n19.* FROM DEST2_n19 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n19 #### A masked pattern was here #### 0 1 10 1 diff --git a/ql/src/test/results/clientpositive/groupby8_map_skew.q.out b/ql/src/test/results/clientpositive/groupby8_map_skew.q.out index fc63383c7d..19a1a59427 100644 --- a/ql/src/test/results/clientpositive/groupby8_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby8_map_skew.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n13(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default 
-PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n13 +POSTHOOK: query: CREATE TABLE DEST1_n13(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n13 +PREHOOK: query: CREATE TABLE DEST2_n11(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n11 +POSTHOOK: query: CREATE TABLE DEST2_n11(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n11 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n13 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n11 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n13 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n11 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -117,7 +117,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n13 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -142,7 +142,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n13 Stage: Stage-4 Stats Work @@ -150,7 +150,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n13 Stage: Stage-5 Map Reduce @@ -181,7 +181,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest2 + Table: default.dest2_n11 Stage: Stage-6 Map Reduce @@ -236,7 +236,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n11 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -261,7 +261,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n11 
Stage: Stage-9 Map Reduce @@ -287,30 +287,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n13 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n11 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n13 +PREHOOK: Output: default@dest2_n11 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n13 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n11 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n13 +POSTHOOK: Output: default@dest2_n11 +POSTHOOK: Lineage: dest1_n13.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n13.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n11.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n11.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n13.* FROM DEST1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n13.* FROM DEST1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n13 #### A masked pattern was here #### 0 1 10 1 @@ -621,13 +621,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n11.* FROM DEST2_n11 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n11 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n11.* FROM DEST2_n11 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n11 #### A masked pattern was here #### 0 1 10 1 diff --git a/ql/src/test/results/clientpositive/groupby8_noskew.q.out b/ql/src/test/results/clientpositive/groupby8_noskew.q.out index cfa49ef783..77c328ec75 100644 --- a/ql/src/test/results/clientpositive/groupby8_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby8_noskew.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE DEST1(key INT, 
value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n4(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n4 +POSTHOOK: query: CREATE TABLE DEST1_n4(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n4 +PREHOOK: query: CREATE TABLE DEST2_n4(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n4 +POSTHOOK: query: CREATE TABLE DEST2_n4(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n4 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n4 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n4 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n4 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n4 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -70,7 +70,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n4 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -98,7 +98,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n4 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -118,7 +118,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n4 Stage: Stage-3 Stats Work @@ -126,7 +126,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n4 Stage: Stage-4 Map Reduce @@ -157,7 +157,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest2 + Table: default.dest2_n4 Stage: Stage-1 Move Operator @@ -167,7 +167,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n4 Stage: Stage-6 Map Reduce @@ -193,30 +193,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n4 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n4 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n4 +PREHOOK: Output: default@dest2_n4 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST1_n4 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n4 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n4 +POSTHOOK: Output: default@dest2_n4 +POSTHOOK: Lineage: dest1_n4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n4.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n4.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n4.* FROM DEST1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n4.* FROM DEST1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n4 #### A masked pattern was here #### 0 1 10 1 @@ -527,13 +527,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n4.* FROM DEST2_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n4.* FROM DEST2_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n4 #### A masked pattern was here #### 0 1 10 1 diff --git a/ql/src/test/results/clientpositive/groupby9.q.out b/ql/src/test/results/clientpositive/groupby9.q.out index da5b749472..bbb87c28eb 100644 --- a/ql/src/test/results/clientpositive/groupby9.q.out +++ 
b/ql/src/test/results/clientpositive/groupby9.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n18(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n18 +POSTHOOK: query: CREATE TABLE DEST1_n18(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, val1 STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n18 +PREHOOK: query: CREATE TABLE DEST2_n16(key INT, val1 STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key INT, val1 STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n16 +POSTHOOK: query: CREATE TABLE DEST2_n16(key INT, val1 STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n16 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -90,7 +90,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -115,7 +115,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Stage: Stage-3 Stats Work @@ -123,7 +123,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n18 Stage: Stage-4 Map Reduce @@ -154,7 +154,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: int, string, string - Table: default.dest2 + Table: default.dest2_n16 Stage: Stage-5 Map Reduce @@ -184,7 +184,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -209,7 +209,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Stage: Stage-7 Map Reduce @@ -235,31 +235,31 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n18 +PREHOOK: Output: default@dest2_n16 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n18 +POSTHOOK: Output: default@dest2_n16 +POSTHOOK: Lineage: dest1_n18.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n18.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n18 #### A masked pattern was here #### 0 1 10 1 @@ -570,13 +570,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: 
query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n16 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n16 #### A masked pattern was here #### 0 val_0 1 10 val_10 1 @@ -889,13 +889,13 @@ POSTHOOK: Input: default@dest2 98 val_98 1 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -963,7 +963,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -988,7 +988,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Stage: Stage-3 Stats Work @@ -996,7 +996,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n18 Stage: Stage-4 Map Reduce @@ -1027,7 +1027,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: int, string, string - Table: default.dest2 + Table: default.dest2_n16 Stage: Stage-5 Map Reduce @@ -1057,7 +1057,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -1082,7 +1082,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Stage: Stage-7 Map Reduce @@ -1108,31 +1108,31 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, 
SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n18 +PREHOOK: Output: default@dest2_n16 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n18 +POSTHOOK: Output: default@dest2_n16 +POSTHOOK: Lineage: dest1_n18.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n18.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n18 #### A masked pattern was here #### 0 1 10 1 @@ -1443,13 +1443,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n16 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n16 #### A masked pattern was here #### 0 val_0 1 10 val_10 1 @@ -1762,13 +1762,13 @@ POSTHOOK: Input: default@dest2 98 val_98 1 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT 
OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -1836,7 +1836,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -1861,7 +1861,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Stage: Stage-3 Stats Work @@ -1869,7 +1869,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n18 Stage: Stage-4 Map Reduce @@ -1900,7 +1900,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: int, string, string - Table: default.dest2 + Table: default.dest2_n16 Stage: Stage-5 Map Reduce @@ -1930,7 +1930,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -1955,7 +1955,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Stage: Stage-7 Map Reduce @@ -1981,31 +1981,31 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n18 +PREHOOK: Output: default@dest2_n16 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT 
SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n18 +POSTHOOK: Output: default@dest2_n16 +POSTHOOK: Lineage: dest1_n18.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n18.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n18 #### A masked pattern was here #### 0 1 10 1 @@ -2316,13 +2316,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n16 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n16 #### A masked pattern was here #### 0 val_0 1 10 val_10 1 @@ -2635,13 +2635,13 @@ POSTHOOK: Input: default@dest2 98 val_98 1 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -2711,7 +2711,7 @@ STAGE PLANS: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -2736,7 +2736,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Stage: Stage-3 Stats Work @@ -2744,7 +2744,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n18 Stage: Stage-4 Map Reduce @@ -2775,7 +2775,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: int, string, string - Table: default.dest2 + Table: default.dest2_n16 Stage: Stage-5 Map Reduce @@ -2806,7 +2806,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -2831,7 +2831,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Stage: Stage-7 Map Reduce @@ -2857,31 +2857,31 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n18 +PREHOOK: Output: default@dest2_n16 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: 
default@dest1_n18 +POSTHOOK: Output: default@dest2_n16 +POSTHOOK: Lineage: dest1_n18.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n18.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n18 #### A masked pattern was here #### 0 3 10 1 @@ -3192,13 +3192,13 @@ POSTHOOK: Input: default@dest1 96 1 97 2 98 2 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n16 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n16 #### A masked pattern was here #### 0 val_0 3 10 val_10 1 @@ -3511,13 +3511,13 @@ POSTHOOK: Input: default@dest2 98 val_98 2 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -3585,7 +3585,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -3610,7 +3610,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n18 Stage: Stage-3 Stats Work @@ -3618,7 +3618,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n18 Stage: Stage-4 Map Reduce @@ -3649,7 +3649,7 @@ STAGE PLANS: Column Stats 
Desc: Columns: key, val1, val2 Column Types: int, string, string - Table: default.dest2 + Table: default.dest2_n16 Stage: Stage-5 Map Reduce @@ -3679,7 +3679,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -3704,7 +3704,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n16 Stage: Stage-7 Map Reduce @@ -3730,31 +3730,31 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n18 +PREHOOK: Output: default@dest2_n16 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key -INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key +INSERT OVERWRITE TABLE DEST1_n18 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2_n16 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n18 +POSTHOOK: Output: default@dest2_n16 +POSTHOOK: Lineage: dest1_n18.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n18.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n16.val2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n18 #### A masked pattern was here 
#### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n18.* FROM DEST1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n18 #### A masked pattern was here #### 0 1 10 1 @@ -4065,13 +4065,13 @@ POSTHOOK: Input: default@dest1 96 1 97 1 98 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n16 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n16.* FROM DEST2_n16 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n16 #### A masked pattern was here #### 0 val_0 1 10 val_10 1 diff --git a/ql/src/test/results/clientpositive/groupby_complex_types.q.out b/ql/src/test/results/clientpositive/groupby_complex_types.q.out index be3cb05a70..c04895a850 100644 --- a/ql/src/test/results/clientpositive/groupby_complex_types.q.out +++ b/ql/src/test/results/clientpositive/groupby_complex_types.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: CREATE TABLE DEST1(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n24(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n24 +POSTHOOK: query: CREATE TABLE DEST1_n24(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n24 +PREHOOK: query: CREATE TABLE DEST2_n21(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n21 +POSTHOOK: query: CREATE TABLE DEST2_n21(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 -PREHOOK: query: CREATE TABLE DEST3(key STRUCT<col1:STRING, col2:STRING>, value BIGINT) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST2_n21 +PREHOOK: query: CREATE TABLE DEST3_n0(key STRUCT<col1:STRING, col2:STRING>, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST3 -POSTHOOK: query: CREATE TABLE DEST3(key STRUCT<col1:STRING, col2:STRING>, value BIGINT) STORED AS TEXTFILE +PREHOOK: Output: default@DEST3_n0 +POSTHOOK: query: CREATE TABLE DEST3_n0(key STRUCT<col1:STRING, col2:STRING>, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST3 +POSTHOOK: Output: default@DEST3_n0 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key) -INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value) -INSERT OVERWRITE TABLE DEST3 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value) +INSERT OVERWRITE TABLE DEST1_n24 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key) +INSERT OVERWRITE TABLE DEST2_n21 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value) +INSERT OVERWRITE TABLE DEST3_n0 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value) PREHOOK: type: QUERY POSTHOOK: query:
EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key) -INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value) -INSERT OVERWRITE TABLE DEST3 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value) +INSERT OVERWRITE TABLE DEST1_n24 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key) +INSERT OVERWRITE TABLE DEST2_n21 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value) +INSERT OVERWRITE TABLE DEST3_n0 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -114,7 +114,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n24 Stage: Stage-0 Move Operator @@ -124,7 +124,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n24 Stage: Stage-4 Stats Work @@ -155,7 +155,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n21 Stage: Stage-1 Move Operator @@ -165,7 +165,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n21 Stage: Stage-6 Stats Work @@ -196,7 +196,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 + name: default.dest3_n0 Stage: Stage-2 Move Operator @@ -206,43 +206,43 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 + name: default.dest3_n0 Stage: Stage-8 Stats Work Basic Stats Work: PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key) -INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value) -INSERT OVERWRITE TABLE DEST3 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value) +INSERT OVERWRITE TABLE DEST1_n24 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key) +INSERT OVERWRITE TABLE DEST2_n21 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value) +INSERT OVERWRITE TABLE DEST3_n0 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -PREHOOK: Output: default@dest3 +PREHOOK: Output: default@dest1_n24 +PREHOOK: Output: default@dest2_n21 +PREHOOK: Output: default@dest3_n0 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key) -INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value), 
COUNT(1) GROUP BY MAP(SRC.key, SRC.value) -INSERT OVERWRITE TABLE DEST3 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value) +INSERT OVERWRITE TABLE DEST1_n24 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key) +INSERT OVERWRITE TABLE DEST2_n21 SELECT MAP(SRC.key, SRC.value), COUNT(1) GROUP BY MAP(SRC.key, SRC.value) +INSERT OVERWRITE TABLE DEST3_n0 SELECT STRUCT(SRC.key, SRC.value), COUNT(1) GROUP BY STRUCT(SRC.key, SRC.value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Output: default@dest3 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest3.value EXPRESSION [(src)src.null, ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n24 +POSTHOOK: Output: default@dest2_n21 +POSTHOOK: Output: default@dest3_n0 +POSTHOOK: Lineage: dest1_n24.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n24.value EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest2_n21.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n21.value EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest3_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest3_n0.value EXPRESSION [(src)src.null, ] +PREHOOK: query: SELECT DEST1_n24.* FROM DEST1_n24 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n24 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n24.* FROM DEST1_n24 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n24 #### A masked pattern was here #### ["0"] 3 ["10"] 1 @@ -553,13 +553,13 @@ POSTHOOK: Input: default@dest1 ["96"] 1 ["97"] 2 ["98"] 2 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n21.* FROM DEST2_n21 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n21 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n21.* FROM DEST2_n21 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n21 #### A masked pattern was here #### {"0":"val_0"} 3 {"10":"val_10"} 1 @@ -870,13 +870,13 @@ POSTHOOK: Input: default@dest2 {"96":"val_96"} 1 {"97":"val_97"} 2 {"98":"val_98"} 2 -PREHOOK: query: SELECT DEST3.* FROM DEST3 +PREHOOK: query: SELECT DEST3_n0.* FROM DEST3_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@dest3 +PREHOOK: Input: default@dest3_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST3.* FROM DEST3 +POSTHOOK: query: SELECT DEST3_n0.* FROM DEST3_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest3 +POSTHOOK: Input: default@dest3_n0 #### A masked pattern was here #### 
{"col1":"0","col2":"val_0"} 3 {"col1":"10","col2":"val_10"} 1 diff --git a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out index 2eb0c2853b..1cb5331ff8 100644 --- a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n3(key ARRAY, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n3 +POSTHOOK: query: CREATE TABLE DEST1_n3(key ARRAY, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key MAP, value BIGINT) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n3 +PREHOOK: query: CREATE TABLE DEST2_n3(key MAP, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key MAP, value BIGINT) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n3 +POSTHOOK: query: CREATE TABLE DEST2_n3(key MAP, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n3 PREHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10 -INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10 +INSERT OVERWRITE TABLE DEST1_n3 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10 +INSERT OVERWRITE TABLE DEST2_n3 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10 -INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10 +INSERT OVERWRITE TABLE DEST1_n3 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10 +INSERT OVERWRITE TABLE DEST2_n3 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -114,7 +114,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n3 Stage: Stage-0 Move Operator @@ -124,7 +124,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n3 Stage: Stage-4 Stats Work @@ -182,7 +182,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n3 Stage: Stage-1 Move Operator @@ -192,37 +192,37 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n3 Stage: Stage-7 Stats Work Basic Stats Work: PREHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10 -INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10 +INSERT OVERWRITE TABLE DEST1_n3 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10 +INSERT OVERWRITE TABLE DEST2_n3 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n3 +PREHOOK: Output: default@dest2_n3 POSTHOOK: query: FROM SRC -INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10 -INSERT OVERWRITE TABLE DEST2 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10 +INSERT OVERWRITE TABLE DEST1_n3 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10 +INSERT OVERWRITE TABLE DEST2_n3 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value EXPRESSION [(src)src.null, ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n3 +POSTHOOK: Output: default@dest2_n3 +POSTHOOK: Lineage: dest1_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n3.value EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: dest2_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n3.value EXPRESSION [(src)src.null, ] +PREHOOK: query: SELECT DEST1_n3.* FROM DEST1_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n3.* FROM DEST1_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n3 #### A masked pattern was here #### ["0"] 3 ["10"] 1 @@ -234,13 +234,13 @@ POSTHOOK: Input: default@dest1 ["111"] 1 ["113"] 2 ["114"] 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n3.* FROM DEST2_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n3 #### A masked pattern was here #### -POSTHOOK: 
query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n3.* FROM DEST2_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n3 #### A masked pattern was here #### {"0":"val_0"} 3 {"10":"val_10"} 1 diff --git a/ql/src/test/results/clientpositive/groupby_cube1.q.out b/ql/src/test/results/clientpositive/groupby_cube1.q.out index 0ea39f637f..a2f97ca7b0 100644 --- a/ql/src/test/results/clientpositive/groupby_cube1.q.out +++ b/ql/src/test/results/clientpositive/groupby_cube1.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n50(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n50 +POSTHOOK: query: CREATE TABLE T1_n50(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n50 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n50 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n50 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n50 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n50 PREHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube +SELECT key, val, count(1) FROM T1_n50 GROUP BY key, val with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube +SELECT key, val, count(1) FROM T1_n50 GROUP BY key, val with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -29,7 +29,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n50 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -74,10 +74,10 @@ STAGE PLANS: ListSink PREHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY CUBE(key, val) +SELECT key, val, count(1) FROM T1_n50 GROUP BY CUBE(key, val) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY CUBE(key, val) +SELECT key, val, count(1) FROM T1_n50 GROUP BY CUBE(key, val) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -88,7 +88,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n50 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -132,13 +132,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube +PREHOOK: query: SELECT key, val, count(1) FROM T1_n50 GROUP BY key, val with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n50 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube +POSTHOOK: query: SELECT key, val, count(1) FROM T1_n50 GROUP BY key, val with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: 
Input: default@t1_n50 #### A masked pattern was here #### 1 11 1 1 NULL 1 @@ -159,10 +159,10 @@ NULL 18 1 NULL 28 1 NULL NULL 6 PREHOOK: query: EXPLAIN -SELECT key, val, GROUPING__ID, count(1) FROM T1 GROUP BY key, val with cube +SELECT key, val, GROUPING__ID, count(1) FROM T1_n50 GROUP BY key, val with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, val, GROUPING__ID, count(1) FROM T1 GROUP BY key, val with cube +SELECT key, val, GROUPING__ID, count(1) FROM T1_n50 GROUP BY key, val with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -173,7 +173,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n50 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -216,13 +216,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1 GROUP BY key, val with cube +PREHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1_n50 GROUP BY key, val with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n50 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1 GROUP BY key, val with cube +POSTHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1_n50 GROUP BY key, val with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n50 #### A masked pattern was here #### 1 11 0 1 1 NULL 1 1 @@ -243,10 +243,10 @@ NULL 18 2 1 NULL 28 2 1 NULL NULL 3 6 PREHOOK: query: EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with cube +SELECT key, count(distinct val) FROM T1_n50 GROUP BY key with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with cube +SELECT key, count(distinct val) FROM T1_n50 GROUP BY key with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -257,7 +257,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n50 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -300,13 +300,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, count(distinct val) FROM T1 GROUP BY key with cube +PREHOOK: query: SELECT key, count(distinct val) FROM T1_n50 GROUP BY key with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n50 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, count(distinct val) FROM T1 GROUP BY key with cube +POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n50 GROUP BY key with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n50 #### A masked pattern was here #### 1 1 2 1 @@ -315,10 +315,10 @@ POSTHOOK: Input: default@t1 8 2 NULL 6 PREHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube +SELECT key, val, count(1) FROM T1_n50 GROUP BY key, val with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube +SELECT key, val, count(1) FROM T1_n50 GROUP BY key, val with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -330,7 +330,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n50 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: 
string), val (type: string) @@ -398,13 +398,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube +PREHOOK: query: SELECT key, val, count(1) FROM T1_n50 GROUP BY key, val with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n50 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube +POSTHOOK: query: SELECT key, val, count(1) FROM T1_n50 GROUP BY key, val with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n50 #### A masked pattern was here #### 1 11 1 1 NULL 1 @@ -425,10 +425,10 @@ NULL 18 1 NULL 28 1 NULL NULL 6 PREHOOK: query: EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with cube +SELECT key, count(distinct val) FROM T1_n50 GROUP BY key with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with cube +SELECT key, count(distinct val) FROM T1_n50 GROUP BY key with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -440,7 +440,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n50 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -507,13 +507,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, count(distinct val) FROM T1 GROUP BY key with cube +PREHOOK: query: SELECT key, count(distinct val) FROM T1_n50 GROUP BY key with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n50 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, count(distinct val) FROM T1 GROUP BY key with cube +POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n50 GROUP BY key with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n50 #### A masked pattern was here #### 1 1 2 1 @@ -521,31 +521,31 @@ POSTHOOK: Input: default@t1 7 1 8 2 NULL 6 -PREHOOK: query: CREATE TABLE T2(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T2_n32(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE +PREHOOK: Output: default@T2_n32 +POSTHOOK: query: CREATE TABLE T2_n32(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: CREATE TABLE T3(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE +POSTHOOK: Output: default@T2_n32 +PREHOOK: query: CREATE TABLE T3_n10(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: CREATE TABLE T3(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE +PREHOOK: Output: default@T3_n10 +POSTHOOK: query: CREATE TABLE T3_n10(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 +POSTHOOK: Output: default@T3_n10 PREHOOK: query: EXPLAIN -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with cube +FROM T1_n50 +INSERT OVERWRITE TABLE T2_n32 SELECT key, val, count(1) group 
by key, val with cube +INSERT OVERWRITE TABLE T3_n10 SELECT key, val, sum(1) group by key, val with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with cube +FROM T1_n50 +INSERT OVERWRITE TABLE T2_n32 SELECT key, val, count(1) group by key, val with cube +INSERT OVERWRITE TABLE T3_n10 SELECT key, val, sum(1) group by key, val with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -564,7 +564,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n50 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -641,7 +641,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 + name: default.t2_n32 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int) outputColumnNames: key1, key2, val @@ -666,7 +666,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 + name: default.t2_n32 Stage: Stage-4 Stats Work @@ -674,7 +674,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, val Column Types: string, string, int - Table: default.t2 + Table: default.t2_n32 Stage: Stage-5 Map Reduce @@ -704,7 +704,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, val Column Types: string, string, int - Table: default.t3 + Table: default.t3_n10 Stage: Stage-6 Map Reduce @@ -759,7 +759,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t3 + name: default.t3_n10 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int) outputColumnNames: key1, key2, val @@ -784,7 +784,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t3 + name: default.t3_n10 Stage: Stage-9 Map Reduce @@ -808,23 +808,23 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with cube +PREHOOK: query: FROM T1_n50 +INSERT OVERWRITE TABLE T2_n32 SELECT key, val, count(1) group by key, val with cube +INSERT OVERWRITE TABLE T3_n10 SELECT key, val, sum(1) group by key, val with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -POSTHOOK: query: FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with cube +PREHOOK: Input: default@t1_n50 +PREHOOK: Output: default@t2_n32 +PREHOOK: Output: default@t3_n10 +POSTHOOK: query: FROM T1_n50 +INSERT OVERWRITE TABLE T2_n32 SELECT key, val, 
count(1) group by key, val with cube +INSERT OVERWRITE TABLE T3_n10 SELECT key, val, sum(1) group by key, val with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t3 -POSTHOOK: Lineage: t2.key1 SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -POSTHOOK: Lineage: t2.val EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: t3.key1 SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t3.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -POSTHOOK: Lineage: t3.val EXPRESSION [(t1)t1.null, ] +POSTHOOK: Input: default@t1_n50 +POSTHOOK: Output: default@t2_n32 +POSTHOOK: Output: default@t3_n10 +POSTHOOK: Lineage: t2_n32.key1 SIMPLE [(t1_n50)t1_n50.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n32.key2 SIMPLE [(t1_n50)t1_n50.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n32.val EXPRESSION [(t1_n50)t1_n50.null, ] +POSTHOOK: Lineage: t3_n10.key1 SIMPLE [(t1_n50)t1_n50.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t3_n10.key2 SIMPLE [(t1_n50)t1_n50.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: t3_n10.val EXPRESSION [(t1_n50)t1_n50.null, ] diff --git a/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out b/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out index 6498769cc7..b4d3094bb1 100644 --- a/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out +++ b/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: create table t1 like src +PREHOOK: query: create table t1_n7 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 like src +PREHOOK: Output: default@t1_n7 +POSTHOOK: query: create table t1_n7 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2 like src +POSTHOOK: Output: default@t1_n7 +PREHOOK: query: create table t2_n3 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2 like src +PREHOOK: Output: default@t2_n3 +POSTHOOK: query: create table t2_n3 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2_n3 PREHOOK: query: explain from src -insert into table t1 select +insert into table t1_n7 select key, GROUPING__ID group by cube(key, value) -insert into table t2 select +insert into table t2_n3 select key, value group by key, value grouping sets ((key), (key, value)) PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert into table t1 select +insert into table t1_n7 select key, GROUPING__ID group by cube(key, value) -insert into table t2 select +insert into table t2_n3 select key, value group by key, value grouping sets ((key), (key, value)) POSTHOOK: type: QUERY @@ -94,7 +94,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 + name: default.t1_n7 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -119,7 +119,7 @@ STAGE PLANS: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 + name: default.t1_n7 Stage: Stage-3 Stats Work @@ -127,7 +127,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.t1 + Table: default.t1_n7 Stage: Stage-4 Map Reduce @@ -158,7 +158,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.t2 + Table: default.t2_n3 Stage: Stage-5 Map Reduce @@ -184,7 +184,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 + name: default.t2_n3 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -209,7 +209,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 + name: default.t2_n3 Stage: Stage-7 Map Reduce diff --git a/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out b/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out index 4c5b0f803f..13ae4a9165 100644 --- a/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out +++ b/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: create table t1 (int1 int, int2 int, str1 string, str2 string) +PREHOOK: query: create table t1_n22 (int1_n22 int, int2 int, str1 string, str2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (int1 int, int2 int, str1 string, str2 string) +PREHOOK: Output: default@t1_n22 +POSTHOOK: query: create table t1_n22 (int1_n22 int, int2 int, str1 string, str2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: insert into table t1 select cast(key as int), cast(key as int), value, value from src where key < 6 +POSTHOOK: Output: default@t1_n22 +PREHOOK: query: insert into table t1_n22 select cast(key as int), cast(key as int), value, value from src where key < 6 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@t1 -POSTHOOK: query: insert into table t1 select cast(key as int), cast(key as int), value, value from src where key < 6 +PREHOOK: Output: default@t1_n22 +POSTHOOK: query: insert into table t1_n22 select cast(key as int), cast(key as int), value, value from src where key < 6 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.int1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1.int2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1.str1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: t1.str2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain select Q1.int1, sum(distinct Q1.int1) from (select * from t1 order by int1) Q1 group by Q1.int1 +POSTHOOK: Output: default@t1_n22 +POSTHOOK: Lineage: t1_n22.int1_n22 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n22.int2 EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n22.str1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n22.str2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select Q1.int1_n22, sum(distinct Q1.int1_n22) from (select * from t1_n22 order by int1_n22) Q1 group by Q1.int1_n22 PREHOOK: type: QUERY -POSTHOOK: query: explain select Q1.int1, sum(distinct Q1.int1) from (select * from t1 order by int1) Q1 group by Q1.int1 +POSTHOOK: query: explain select Q1.int1_n22, sum(distinct Q1.int1_n22) from (select * from t1_n22 order by int1_n22) Q1 group by Q1.int1_n22 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -31,15 +31,15 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n22 Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: int1 (type: int) - outputColumnNames: int1 + expressions: int1_n22 (type: int) + outputColumnNames: int1_n22 Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(DISTINCT int1) - keys: int1 (type: int) + aggregations: sum(DISTINCT int1_n22) + keys: int1_n22 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE @@ -69,9 +69,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select int1, sum(distinct int1) from t1 group by int1 +PREHOOK: query: explain select int1_n22, sum(distinct int1_n22) from t1_n22 group by int1_n22 PREHOOK: type: QUERY -POSTHOOK: query: explain select int1, sum(distinct int1) from t1 group by int1 +POSTHOOK: query: explain select int1_n22, sum(distinct int1_n22) from t1_n22 group by int1_n22 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -82,15 +82,15 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n22 Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: int1 (type: int) - outputColumnNames: int1 + expressions: int1_n22 (type: int) + outputColumnNames: int1_n22 Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(DISTINCT int1) - keys: int1 (type: int) + aggregations: sum(DISTINCT int1_n22) + keys: int1_n22 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE @@ -120,35 +120,35 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select Q1.int1, sum(distinct Q1.int1) from (select * from t1 order by int1) Q1 group by Q1.int1 +PREHOOK: query: select Q1.int1_n22, sum(distinct Q1.int1_n22) from (select * from t1_n22 order by int1_n22) Q1 group by Q1.int1_n22 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n22 #### A masked pattern was here #### -POSTHOOK: query: select Q1.int1, sum(distinct Q1.int1) from (select * from t1 order by int1) Q1 group by Q1.int1 +POSTHOOK: query: select Q1.int1_n22, sum(distinct Q1.int1_n22) from (select * from t1_n22 order by int1_n22) Q1 group by Q1.int1_n22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n22 #### A masked pattern was here #### 0 0 2 2 4 4 5 5 -PREHOOK: query: select int1, sum(distinct int1) from t1 group by int1 +PREHOOK: query: select int1_n22, sum(distinct int1_n22) from t1_n22 group 
by int1_n22 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n22 #### A masked pattern was here #### -POSTHOOK: query: select int1, sum(distinct int1) from t1 group by int1 +POSTHOOK: query: select int1_n22, sum(distinct int1_n22) from t1_n22 group by int1_n22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n22 #### A masked pattern was here #### 0 0 2 2 4 4 5 5 -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n22 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n22 +PREHOOK: Output: default@t1_n22 +POSTHOOK: query: drop table t1_n22 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n22 +POSTHOOK: Output: default@t1_n22 diff --git a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out index 6182b5748f..cb6a647dde 100644 --- a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out +++ b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out @@ -74,11 +74,11 @@ POSTHOOK: Input: default@src 86 98 PREHOOK: query: explain -create table dummy as +create table dummy_n6 as select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows) PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain -create table dummy as +create table dummy_n6 as select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows) POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: @@ -127,7 +127,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dummy + name: default.dummy_n6 Stage: Stage-0 Move Operator @@ -142,34 +142,34 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dummy + name: default.dummy_n6 Stage: Stage-2 Stats Work Basic Stats Work: -PREHOOK: query: create table dummy as +PREHOOK: query: create table dummy_n6 as select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows) PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@dummy -POSTHOOK: query: create table dummy as +PREHOOK: Output: default@dummy_n6 +POSTHOOK: query: create table dummy_n6 as select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows) POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@dummy -POSTHOOK: Lineage: dummy.dummy1 SIMPLE [] -POSTHOOK: Lineage: dummy.dummy2 SIMPLE [] -POSTHOOK: Lineage: dummy.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select key,dummy1,dummy2 from dummy +POSTHOOK: Output: default@dummy_n6 +POSTHOOK: Lineage: dummy_n6.dummy1 SIMPLE [] +POSTHOOK: Lineage: dummy_n6.dummy2 SIMPLE [] +POSTHOOK: Lineage: dummy_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select key,dummy1,dummy2 from dummy_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@dummy +PREHOOK: Input: default@dummy_n6 #### A masked pattern was here #### -POSTHOOK: query: select 
key,dummy1,dummy2 from dummy +POSTHOOK: query: select key,dummy1,dummy2 from dummy_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dummy +POSTHOOK: Input: default@dummy_n6 #### A masked pattern was here #### 165 X X 238 X X diff --git a/ql/src/test/results/clientpositive/groupby_empty.q.out b/ql/src/test/results/clientpositive/groupby_empty.q.out index 27d6e9fda3..22a2060837 100644 --- a/ql/src/test/results/clientpositive/groupby_empty.q.out +++ b/ql/src/test/results/clientpositive/groupby_empty.q.out @@ -1,23 +1,23 @@ -PREHOOK: query: create table t (a int) +PREHOOK: query: create table t_n34 (a int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (a int) +PREHOOK: Output: default@t_n34 +POSTHOOK: query: create table t_n34 (a int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1),(1),(2) +POSTHOOK: Output: default@t_n34 +PREHOOK: query: insert into t_n34 values (1),(1),(2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1),(1),(2) +PREHOOK: Output: default@t_n34 +POSTHOOK: query: insert into t_n34 values (1),(1),(2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -PREHOOK: query: explain select count(*) from t group by () +POSTHOOK: Output: default@t_n34 +POSTHOOK: Lineage: t_n34.a SCRIPT [] +PREHOOK: query: explain select count(*) from t_n34 group by () PREHOOK: type: QUERY -POSTHOOK: query: explain select count(*) from t group by () +POSTHOOK: query: explain select count(*) from t_n34 group by () POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -29,12 +29,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from t group by () +PREHOOK: query: select count(*) from t_n34 group by () PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n34 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from t group by () +POSTHOOK: query: select count(*) from t_n34 group by () POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n34 #### A masked pattern was here #### 3 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_id1.q.out b/ql/src/test/results/clientpositive/groupby_grouping_id1.q.out index 8b203e7dbc..81acdccbec 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_id1.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_id1.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n101(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n101 +POSTHOOK: query: CREATE TABLE T1_n101(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n101 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n101 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE 
T1 +PREHOOK: Output: default@t1_n101 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n101 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: SELECT key, val, GROUPING__ID from T1 group by key, val with cube +POSTHOOK: Output: default@t1_n101 +PREHOOK: query: SELECT key, val, GROUPING__ID from T1_n101 group by key, val with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n101 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, GROUPING__ID from T1 group by key, val with cube +POSTHOOK: query: SELECT key, val, GROUPING__ID from T1_n101 group by key, val with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n101 #### A masked pattern was here #### 1 11 0 1 NULL 1 @@ -40,13 +40,13 @@ NULL 17 2 NULL 18 2 NULL 28 2 NULL NULL 3 -PREHOOK: query: SELECT key, val, GROUPING__ID from T1 group by cube(key, val) +PREHOOK: query: SELECT key, val, GROUPING__ID from T1_n101 group by cube(key, val) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n101 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, GROUPING__ID from T1 group by cube(key, val) +POSTHOOK: query: SELECT key, val, GROUPING__ID from T1_n101 group by cube(key, val) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n101 #### A masked pattern was here #### 1 11 0 1 NULL 1 @@ -66,13 +66,13 @@ NULL 17 2 NULL 18 2 NULL 28 2 NULL NULL 3 -PREHOOK: query: SELECT GROUPING__ID, key, val from T1 group by key, val with rollup +PREHOOK: query: SELECT GROUPING__ID, key, val from T1_n101 group by key, val with rollup PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n101 #### A masked pattern was here #### -POSTHOOK: query: SELECT GROUPING__ID, key, val from T1 group by key, val with rollup +POSTHOOK: query: SELECT GROUPING__ID, key, val from T1_n101 group by key, val with rollup POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n101 #### A masked pattern was here #### 0 1 11 0 2 12 @@ -86,13 +86,13 @@ POSTHOOK: Input: default@t1 1 7 NULL 1 8 NULL 3 NULL NULL -PREHOOK: query: SELECT GROUPING__ID, key, val from T1 group by rollup (key, val) +PREHOOK: query: SELECT GROUPING__ID, key, val from T1_n101 group by rollup (key, val) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n101 #### A masked pattern was here #### -POSTHOOK: query: SELECT GROUPING__ID, key, val from T1 group by rollup (key, val) +POSTHOOK: query: SELECT GROUPING__ID, key, val from T1_n101 group by rollup (key, val) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n101 #### A masked pattern was here #### 0 1 11 0 2 12 @@ -106,13 +106,13 @@ POSTHOOK: Input: default@t1 1 7 NULL 1 8 NULL 3 NULL NULL -PREHOOK: query: SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by key, val with cube +PREHOOK: query: SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n101 group by key, val with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n101 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, GROUPING__ID, 
CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by key, val with cube +POSTHOOK: query: SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n101 group by key, val with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n101 #### A masked pattern was here #### 1 11 0 0 1 NULL 1 1 @@ -132,13 +132,13 @@ NULL 17 2 2 NULL 18 2 2 NULL 28 2 2 NULL NULL 3 3 -PREHOOK: query: SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by cube(key, val) +PREHOOK: query: SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n101 group by cube(key, val) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n101 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1 group by cube(key, val) +POSTHOOK: query: SELECT key, val, GROUPING__ID, CASE WHEN GROUPING__ID == 0 THEN "0" WHEN GROUPING__ID == 1 THEN "1" WHEN GROUPING__ID == 2 THEN "2" WHEN GROUPING__ID == 3 THEN "3" ELSE "nothing" END from T1_n101 group by cube(key, val) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n101 #### A masked pattern was here #### 1 11 0 0 1 NULL 1 1 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out index f3f2458d25..d45c0c462c 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n54(key INT, value INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n54 +POSTHOOK: query: CREATE TABLE T1_n54(key INT, value INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n54 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n54 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n54 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n54 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n54 PREHOOK: query: EXPLAIN SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n54 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1 PREHOOK: type: QUERY POSTHOOK: query: 
EXPLAIN SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n54 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1 @@ -37,7 +37,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n54 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int) @@ -85,20 +85,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n54 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n54 #### A masked pattern was here #### POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n54 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n54 #### A masked pattern was here #### 1 NULL 1 2 2 NULL 1 1 @@ -106,14 +106,14 @@ POSTHOOK: Input: default@t1 4 NULL 1 1 PREHOOK: query: EXPLAIN SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n54 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n54 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1 @@ -127,7 +127,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n54 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int) @@ -174,20 +174,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n54 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n54 #### A masked pattern was here #### POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*) -FROM T1 +FROM T1_n54 GROUP BY key, value GROUPING SETS ((), (key)) HAVING GROUPING__ID = 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n54 #### A masked pattern was here #### 1 NULL 1 2 2 NULL 1 1 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out index 50ede0486b..b67cd0deee 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n26(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: Output: default@T1_n26 +POSTHOOK: query: CREATE TABLE T1_n26(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n26 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n26 PREHOOK: type: LOAD #### 
A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n26 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n26 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: SELECT * FROM T1 +POSTHOOK: Output: default@t1_n26 +PREHOOK: query: SELECT * FROM T1_n26 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n26 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM T1 +POSTHOOK: query: SELECT * FROM T1_n26 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n26 #### A masked pattern was here #### -t1.a t1.b t1.c +t1_n26.a t1_n26.b t1_n26.c 1 1 3 2 2 4 2 3 5 @@ -30,10 +30,10 @@ t1.a t1.b t1.c 5 2 2 8 1 1 PREHOOK: query: EXPLAIN -SELECT a, b, count(*) from T1 group by a, b with cube +SELECT a, b, count(*) from T1_n26 group by a, b with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, count(*) from T1 group by a, b with cube +SELECT a, b, count(*) from T1_n26 group by a, b with cube POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -45,7 +45,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n26 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -89,13 +89,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, count(*) from T1 group by a, b with cube +PREHOOK: query: SELECT a, b, count(*) from T1_n26 group by a, b with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n26 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, count(*) from T1 group by a, b with cube +POSTHOOK: query: SELECT a, b, count(*) from T1_n26 group by a, b with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n26 #### A masked pattern was here #### a b _c2 1 1 1 @@ -114,10 +114,10 @@ NULL 2 3 NULL 3 1 NULL NULL 6 PREHOOK: query: EXPLAIN -SELECT a, b, count(*) from T1 group by cube(a, b) +SELECT a, b, count(*) from T1_n26 group by cube(a, b) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, count(*) from T1 group by cube(a, b) +SELECT a, b, count(*) from T1_n26 group by cube(a, b) POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -129,7 +129,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n26 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -173,13 +173,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, count(*) from T1 group by cube(a, b) +PREHOOK: query: SELECT a, b, count(*) from T1_n26 group by cube(a, b) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n26 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, count(*) from T1 group by cube(a, b) +POSTHOOK: query: SELECT a, b, count(*) from T1_n26 group by cube(a, b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n26 #### A masked pattern was here #### a b _c2 1 1 1 @@ -198,10 +198,10 @@ NULL 2 3 NULL 3 1 NULL NULL 6 PREHOOK: query: EXPLAIN -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) +SELECT a, b, count(*) FROM T1_n26 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) PREHOOK: 
type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) +SELECT a, b, count(*) FROM T1_n26 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -213,7 +213,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n26 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -257,13 +257,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) +PREHOOK: query: SELECT a, b, count(*) FROM T1_n26 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n26 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) +POSTHOOK: query: SELECT a, b, count(*) FROM T1_n26 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n26 #### A masked pattern was here #### a b _c2 1 1 1 @@ -282,10 +282,10 @@ NULL 2 3 NULL 3 1 NULL NULL 6 PREHOOK: query: EXPLAIN -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) +SELECT a, b, count(*) FROM T1_n26 GROUP BY a, b GROUPING SETS (a, (a, b)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) +SELECT a, b, count(*) FROM T1_n26 GROUP BY a, b GROUPING SETS (a, (a, b)) POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -297,7 +297,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n26 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -341,13 +341,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) +PREHOOK: query: SELECT a, b, count(*) FROM T1_n26 GROUP BY a, b GROUPING SETS (a, (a, b)) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n26 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) +POSTHOOK: query: SELECT a, b, count(*) FROM T1_n26 GROUP BY a, b GROUPING SETS (a, (a, b)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n26 #### A masked pattern was here #### a b _c2 1 1 1 @@ -362,10 +362,10 @@ a b _c2 8 1 1 8 NULL 1 PREHOOK: query: EXPLAIN -SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) +SELECT a FROM T1_n26 GROUP BY a, b, c GROUPING SETS (a, b, c) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) +SELECT a FROM T1_n26 GROUP BY a, b, c GROUPING SETS (a, b, c) POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -377,7 +377,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n26 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string), c (type: string) @@ -418,13 +418,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) +PREHOOK: query: SELECT a FROM T1_n26 GROUP BY a, b, c GROUPING SETS (a, b, c) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n26 #### A masked 
pattern was here #### -POSTHOOK: query: SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) +POSTHOOK: query: SELECT a FROM T1_n26 GROUP BY a, b, c GROUPING SETS (a, b, c) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n26 #### A masked pattern was here #### a 1 @@ -442,10 +442,10 @@ NULL NULL NULL PREHOOK: query: EXPLAIN -SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) +SELECT a FROM T1_n26 GROUP BY a GROUPING SETS ((a), (a)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) +SELECT a FROM T1_n26 GROUP BY a GROUPING SETS ((a), (a)) POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -457,7 +457,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n26 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string) @@ -493,13 +493,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) +PREHOOK: query: SELECT a FROM T1_n26 GROUP BY a GROUPING SETS ((a), (a)) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n26 #### A masked pattern was here #### -POSTHOOK: query: SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) +POSTHOOK: query: SELECT a FROM T1_n26 GROUP BY a GROUPING SETS ((a), (a)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n26 #### A masked pattern was here #### a 1 @@ -508,10 +508,10 @@ a 5 8 PREHOOK: query: EXPLAIN -SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) +SELECT a + b, count(*) FROM T1_n26 GROUP BY a + b GROUPING SETS (a+b) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) +SELECT a + b, count(*) FROM T1_n26 GROUP BY a + b GROUPING SETS (a+b) POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -523,7 +523,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n26 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (UDFToDouble(a) + UDFToDouble(b)) (type: double) @@ -562,13 +562,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) +PREHOOK: query: SELECT a + b, count(*) FROM T1_n26 GROUP BY a + b GROUPING SETS (a+b) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n26 #### A masked pattern was here #### -POSTHOOK: query: SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) +POSTHOOK: query: SELECT a + b, count(*) FROM T1_n26 GROUP BY a + b GROUPING SETS (a+b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n26 #### A masked pattern was here #### _c0 _c1 2.0 1 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out index a41c7b8332..c1b1b338e0 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n49(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE 
T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: Output: default@T1_n49 +POSTHOOK: query: CREATE TABLE T1_n49(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n49 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n49 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n49 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n49 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n49 PREHOOK: query: EXPLAIN -SELECT a, b, count(*) from T1 group by a, b with cube +SELECT a, b, count(*) from T1_n49 group by a, b with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, count(*) from T1 group by a, b with cube +SELECT a, b, count(*) from T1_n49 group by a, b with cube POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -31,7 +31,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n49 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -100,10 +100,10 @@ STAGE PLANS: ListSink PREHOOK: query: EXPLAIN -SELECT a, b, count(*) from T1 group by cube(a, b) +SELECT a, b, count(*) from T1_n49 group by cube(a, b) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, count(*) from T1 group by cube(a, b) +SELECT a, b, count(*) from T1_n49 group by cube(a, b) POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -116,7 +116,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n49 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -184,13 +184,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, count(*) from T1 group by a, b with cube +PREHOOK: query: SELECT a, b, count(*) from T1_n49 group by a, b with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n49 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, count(*) from T1 group by a, b with cube +POSTHOOK: query: SELECT a, b, count(*) from T1_n49 group by a, b with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n49 #### A masked pattern was here #### a b _c2 1 1 1 @@ -209,10 +209,10 @@ NULL 2 3 NULL 3 1 NULL NULL 6 PREHOOK: query: EXPLAIN -SELECT a, b, sum(c) from T1 group by a, b with cube +SELECT a, b, sum(c) from T1_n49 group by a, b with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, sum(c) from T1 group by a, b with cube +SELECT a, b, sum(c) from T1_n49 group by a, b with cube POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -225,7 +225,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n49 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string), c (type: string) @@ -293,13 +293,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: 
SELECT a, b, sum(c) from T1 group by a, b with cube +PREHOOK: query: SELECT a, b, sum(c) from T1_n49 group by a, b with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n49 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, sum(c) from T1 group by a, b with cube +POSTHOOK: query: SELECT a, b, sum(c) from T1_n49 group by a, b with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n49 #### A masked pattern was here #### a b _c2 1 1 3.0 @@ -317,34 +317,34 @@ NULL 1 4.0 NULL 2 14.0 NULL 3 5.0 NULL NULL 23.0 -PREHOOK: query: CREATE TABLE T2(a STRING, b STRING, c int, d int) +PREHOOK: query: CREATE TABLE T2_n31(a STRING, b STRING, c int, d int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(a STRING, b STRING, c int, d int) +PREHOOK: Output: default@T2_n31 +POSTHOOK: query: CREATE TABLE T2_n31(a STRING, b STRING, c int, d int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: INSERT OVERWRITE TABLE T2 -SELECT a, b, c, c from T1 +POSTHOOK: Output: default@T2_n31 +PREHOOK: query: INSERT OVERWRITE TABLE T2_n31 +SELECT a, b, c, c from T1_n49 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: INSERT OVERWRITE TABLE T2 -SELECT a, b, c, c from T1 +PREHOOK: Input: default@t1_n49 +PREHOOK: Output: default@t2_n31 +POSTHOOK: query: INSERT OVERWRITE TABLE T2_n31 +SELECT a, b, c, c from T1_n49 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.a SIMPLE [(t1)t1.FieldSchema(name:a, type:string, comment:null), ] -POSTHOOK: Lineage: t2.b SIMPLE [(t1)t1.FieldSchema(name:b, type:string, comment:null), ] -POSTHOOK: Lineage: t2.c EXPRESSION [(t1)t1.FieldSchema(name:c, type:string, comment:null), ] -POSTHOOK: Lineage: t2.d EXPRESSION [(t1)t1.FieldSchema(name:c, type:string, comment:null), ] +POSTHOOK: Input: default@t1_n49 +POSTHOOK: Output: default@t2_n31 +POSTHOOK: Lineage: t2_n31.a SIMPLE [(t1_n49)t1_n49.FieldSchema(name:a, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n31.b SIMPLE [(t1_n49)t1_n49.FieldSchema(name:b, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n31.c EXPRESSION [(t1_n49)t1_n49.FieldSchema(name:c, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n31.d EXPRESSION [(t1_n49)t1_n49.FieldSchema(name:c, type:string, comment:null), ] _col0 _col1 _col2 _col3 PREHOOK: query: EXPLAIN -SELECT a, b, sum(c+d) from T2 group by a, b with cube +SELECT a, b, sum(c+d) from T2_n31 group by a, b with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, sum(c+d) from T2 group by a, b with cube +SELECT a, b, sum(c+d) from T2_n31 group by a, b with cube POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -357,7 +357,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n31 Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string), (c + d) (type: int) @@ -425,13 +425,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, sum(c+d) from T2 group by a, b with cube +PREHOOK: query: SELECT a, b, sum(c+d) from T2_n31 group by a, b with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2_n31 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, sum(c+d) from T2 group by a, b with cube +POSTHOOK: query: SELECT a, 
b, sum(c+d) from T2_n31 group by a, b with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2_n31 #### A masked pattern was here #### a b _c2 1 1 6 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out index 5d27f4ca2c..717035ceed 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n76(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: Output: default@T1_n76 +POSTHOOK: query: CREATE TABLE T1_n76(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n76 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_n76 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n76 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_n76 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1 +POSTHOOK: Output: default@t1_n76 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_n76 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n76 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_n76 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n76 PREHOOK: query: EXPLAIN -SELECT a, b, avg(c), count(*) from T1 group by a, b with cube +SELECT a, b, avg(c), count(*) from T1_n76 group by a, b with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, avg(c), count(*) from T1 group by a, b with cube +SELECT a, b, avg(c), count(*) from T1_n76 group by a, b with cube POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -38,7 +38,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n76 Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string), c (type: string) @@ -83,10 +83,10 @@ STAGE PLANS: ListSink PREHOOK: query: EXPLAIN -SELECT a, b, avg(c), count(*) from T1 group by cube(a, b) +SELECT a, b, avg(c), count(*) from T1_n76 group by cube(a, b) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, avg(c), count(*) from T1 group by cube(a, b) +SELECT a, b, avg(c), count(*) from T1_n76 group by cube(a, b) POSTHOOK: type: QUERY Explain 
STAGE DEPENDENCIES: @@ -98,7 +98,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n76 Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string), c (type: string) @@ -142,13 +142,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, avg(c), count(*) from T1 group by a, b with cube +PREHOOK: query: SELECT a, b, avg(c), count(*) from T1_n76 group by a, b with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n76 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, avg(c), count(*) from T1 group by a, b with cube +POSTHOOK: query: SELECT a, b, avg(c), count(*) from T1_n76 group by a, b with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n76 #### A masked pattern was here #### a b _c2 _c3 1 1 3.0 2 @@ -168,10 +168,10 @@ NULL 2 5.2 5 NULL 3 5.0 2 NULL NULL 3.8333333333333335 12 PREHOOK: query: EXPLAIN -SELECT a, b, avg(c), count(*) from T1 group by a, b with cube +SELECT a, b, avg(c), count(*) from T1_n76 group by a, b with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a, b, avg(c), count(*) from T1 group by a, b with cube +SELECT a, b, avg(c), count(*) from T1_n76 group by a, b with cube POSTHOOK: type: QUERY Explain STAGE DEPENDENCIES: @@ -184,7 +184,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n76 Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string), c (type: string) @@ -252,13 +252,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, avg(c), count(*) from T1 group by a, b with cube +PREHOOK: query: SELECT a, b, avg(c), count(*) from T1_n76 group by a, b with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n76 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, avg(c), count(*) from T1 group by a, b with cube +POSTHOOK: query: SELECT a, b, avg(c), count(*) from T1_n76 group by a, b with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n76 #### A masked pattern was here #### a b _c2 _c3 1 1 3.0 2 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out index 0caaf67235..7a92ce6685 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out @@ -1,31 +1,31 @@ -PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n92(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: Output: default@T1_n92 +POSTHOOK: query: CREATE TABLE T1_n92(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n92 +PREHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/grouping_sets.txt' INTO TABLE T1_n92 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n92 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n92 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n92 PREHOOK: query: EXPLAIN SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -39,7 +39,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n92 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(a) < 3.0D) (type: boolean) @@ -116,7 +116,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n92 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(a) < 3.0D) (type: boolean) @@ -163,16 +163,16 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by cube(a, b) ) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by cube(a, b) ) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by cube(a, b) ) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by cube(a, b) ) subq2 on subq1.a = subq2.a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by cube(a, b) ) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by cube(a, b) ) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by cube(a, b) ) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by cube(a, b) ) subq2 on subq1.a = subq2.a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -186,7 +186,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n92 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(a) < 3.0D) (type: boolean) @@ -263,7 +263,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n92 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(a) < 3.0D) (type: boolean) @@ -309,20 +309,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a PREHOOK: type: QUERY -PREHOOK: Input: 
default@t1 +PREHOOK: Input: default@t1_n92 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n92 #### A masked pattern was here #### 1 1 1 1 1 1 1 1 1 1 NULL 1 @@ -339,16 +339,16 @@ POSTHOOK: Input: default@t1 2 NULL 2 2 NULL 2 PREHOOK: query: EXPLAIN SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -364,7 +364,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n92 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(a) < 3.0D) (type: boolean) @@ -465,7 +465,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n92 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(a) < 3.0D) (type: boolean) @@ -535,20 +535,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n92 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq1 join -(SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 +(SELECT a, b, count(*) from T1_n92 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n92 #### A masked pattern was here #### 1 1 1 1 1 1 1 1 1 1 NULL 1 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out index fcd394178d..9941613c40 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n13(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED 
FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: Output: default@T1_n13 +POSTHOOK: query: CREATE TABLE T1_n13(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n13 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n13 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n13 PREHOOK: query: EXPLAIN SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by a, b with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by a, b with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -32,7 +32,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n13 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -105,11 +105,11 @@ STAGE PLANS: PREHOOK: query: EXPLAIN SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by cube(a, b) +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by cube(a, b) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by cube(a, b) +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by cube(a, b) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -121,7 +121,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n13 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -193,14 +193,14 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by a, b with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### POSTHOOK: query: SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by a, b with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### 1 1 1 1 NULL 1 @@ -219,11 +219,11 @@ NULL 3 1 NULL NULL 6 PREHOOK: query: EXPLAIN SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with 
cube +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by a, b with cube PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by a, b with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -236,7 +236,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n13 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -332,14 +332,14 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by a, b with cube PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n13 #### A masked pattern was here #### POSTHOOK: query: SELECT a, b, count(*) FROM -(SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube +(SELECT a, b, count(1) from T1_n13 group by a, b) subq1 group by a, b with cube POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n13 #### A masked pattern was here #### 1 1 1 1 NULL 1 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out index 6e7c568f93..5fee9f1382 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out @@ -1,27 +1,27 @@ -PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n46(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: Output: default@T1_n46 +POSTHOOK: query: CREATE TABLE T1_n46(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n46 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n46 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n46 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n46 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n46 PREHOOK: query: EXPLAIN SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n46 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n46 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -33,7 +33,7 @@ STAGE PLANS: Map Reduce Map Operator 
Tree: TableScan - alias: t1 + alias: t1_n46 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(a) = 5.0D) (type: boolean) @@ -70,27 +70,27 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n46 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n46 #### A masked pattern was here #### POSTHOOK: query: SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n46 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n46 #### A masked pattern was here #### 5 2 5 NULL PREHOOK: query: EXPLAIN SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n46 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n46 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -102,7 +102,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n46 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(a) = 5.0D) (type: boolean) @@ -139,16 +139,16 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n46 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n46 #### A masked pattern was here #### POSTHOOK: query: SELECT a, b FROM -(SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res +(SELECT a, b from T1_n46 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n46 #### A masked pattern was here #### 5 2 5 NULL diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out index 9e0c9cb95b..e896afe15c 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out @@ -1,27 +1,27 @@ -PREHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n39(key INT, value INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n39 +POSTHOOK: query: CREATE TABLE T1_n39(key INT, value INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n39 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n39 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n39 +POSTHOOK: 
query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n39 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n39 PREHOOK: query: explain select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n39 group by rollup(key, value) PREHOOK: type: QUERY POSTHOOK: query: explain select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n39 group by rollup(key, value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -33,7 +33,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n39 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int) @@ -74,16 +74,16 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n39 group by rollup(key, value) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n39 #### A masked pattern was here #### POSTHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n39 group by rollup(key, value) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n39 #### A masked pattern was here #### 1 1 0 0 0 1 NULL 0 0 0 @@ -98,12 +98,12 @@ POSTHOOK: Input: default@t1 NULL NULL 3 1 1 PREHOOK: query: explain select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n39 group by cube(key, value) PREHOOK: type: QUERY POSTHOOK: query: explain select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n39 group by cube(key, value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -115,7 +115,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n39 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int) @@ -156,16 +156,16 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n39 group by cube(key, value) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n39 #### A masked pattern was here #### POSTHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value) -from T1 +from T1_n39 group by cube(key, value) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n39 #### A masked pattern was here #### 1 1 0 0 0 1 NULL 0 0 0 @@ -185,13 +185,13 @@ NULL NULL 2 1 0 NULL NULL 3 1 1 PREHOOK: query: explain select key, value -from T1 +from T1_n39 group by cube(key, value) having grouping(key) = 1 PREHOOK: type: QUERY POSTHOOK: query: explain select key, value -from T1 +from T1_n39 group by cube(key, value) having grouping(key) = 1 POSTHOOK: type: QUERY @@ -204,7 +204,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n39 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int) @@ -248,18 +248,18 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value -from T1 +from T1_n39 group by cube(key, value) having grouping(key) = 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n39 #### A masked pattern was here #### POSTHOOK: query: select key, value -from T1 +from T1_n39 group by cube(key, value) having grouping(key) = 1 POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 NULL 1
 NULL 2
@@ -269,14 +269,14 @@ NULL NULL
 NULL NULL
 PREHOOK: query: explain
 select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end
@@ -291,7 +291,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -356,20 +356,20 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 NULL 1
 2 NULL 1
@@ -383,12 +383,12 @@ NULL NULL 1
 NULL NULL 2
 PREHOOK: query: explain
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by rollup(key, value)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by rollup(key, value)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -400,7 +400,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -441,16 +441,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by rollup(key, value)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by rollup(key, value)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1 0 0 0
 1 NULL 0 0 0
@@ -465,12 +465,12 @@ POSTHOOK: Input: default@t1
 NULL NULL 3 1 1
 PREHOOK: query: explain
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by cube(key, value)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by cube(key, value)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -482,7 +482,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -523,16 +523,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by cube(key, value)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by cube(key, value)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1 0 0 0
 1 NULL 0 0 0
@@ -552,13 +552,13 @@ NULL NULL 2 1 0
 NULL NULL 3 1 1
 PREHOOK: query: explain
 select key, value
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1
 POSTHOOK: type: QUERY
@@ -571,7 +571,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -612,18 +612,18 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 NULL 1
 NULL 2
@@ -633,14 +633,14 @@ NULL NULL
 NULL NULL
 PREHOOK: query: explain
 select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end
@@ -655,7 +655,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -720,20 +720,20 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, grouping(key)+grouping(value) as x
-from T1
+from T1_n39
 group by cube(key, value)
 having grouping(key) = 1 OR grouping(value) = 1
 order by x desc, case when x = 1 then key end
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 NULL 1
 2 NULL 1
@@ -747,12 +747,12 @@ NULL NULL 1
 NULL NULL 2
 PREHOOK: query: explain
 select key, value, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by key, value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by key, value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -764,7 +764,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -805,16 +805,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by key, value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, grouping(key), grouping(value)
-from T1
+from T1_n39
 group by key, value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1 0 0
 1 NULL 0 0
@@ -824,12 +824,12 @@ POSTHOOK: Input: default@t1
 4 5 0 0
 PREHOOK: query: explain
 select key, value, grouping(value)
-from T1
+from T1_n39
 group by key, value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, grouping(value)
-from T1
+from T1_n39
 group by key, value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -841,7 +841,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -882,16 +882,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, grouping(value)
-from T1
+from T1_n39
 group by key, value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, grouping(value)
-from T1
+from T1_n39
 group by key, value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1 0
 1 NULL 0
@@ -901,13 +901,13 @@ POSTHOOK: Input: default@t1
 4 5 0
 PREHOOK: query: explain
 select key, value
-from T1
+from T1_n39
 group by key, value
 having grouping(key) = 0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value
-from T1
+from T1_n39
 group by key, value
 having grouping(key) = 0
 POSTHOOK: type: QUERY
@@ -920,7 +920,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -957,18 +957,18 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value
-from T1
+from T1_n39
 group by key, value
 having grouping(key) = 0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value
-from T1
+from T1_n39
 group by key, value
 having grouping(key) = 0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1
 1 NULL
@@ -978,12 +978,12 @@ POSTHOOK: Input: default@t1
 4 5
 PREHOOK: query: explain
 select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n39
 group by cube(key, value)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n39
 group by cube(key, value)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -995,7 +995,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -1036,16 +1036,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n39
 group by cube(key, value)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n39
 group by cube(key, value)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1 0 0
 1 NULL 0 0
@@ -1065,12 +1065,12 @@ NULL NULL 2 2
 NULL NULL 3 3
 PREHOOK: query: explain
 select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n39
 group by cube(key, value)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n39
 group by cube(key, value)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -1082,7 +1082,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -1123,16 +1123,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n39
 group by cube(key, value)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n39
 group by cube(key, value)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1 0 0
 1 NULL 0 0
@@ -1152,12 +1152,12 @@ NULL NULL 2 1
 NULL NULL 3 3
 PREHOOK: query: explain
 select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n39
 group by rollup(key, value)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n39
 group by rollup(key, value)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -1169,7 +1169,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -1210,16 +1210,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n39
 group by rollup(key, value)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, `grouping__id`, grouping(key, value)
-from T1
+from T1_n39
 group by rollup(key, value)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1 0 0
 1 NULL 0 0
@@ -1234,12 +1234,12 @@ POSTHOOK: Input: default@t1
 NULL NULL 3 3
 PREHOOK: query: explain
 select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n39
 group by rollup(key, value)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n39
 group by rollup(key, value)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -1251,7 +1251,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n39
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
@@ -1292,16 +1292,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n39
 group by rollup(key, value)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 POSTHOOK: query: select key, value, `grouping__id`, grouping(value, key)
-from T1
+from T1_n39
 group by rollup(key, value)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n39
 #### A masked pattern was here ####
 1 1 0 0
 1 NULL 0 0
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out
index eea589cb67..3d63b2aeb0 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out
@@ -1,24 +1,24 @@
-PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n91(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n91
+POSTHOOK: query: CREATE TABLE T1_n91(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n91
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n91
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n91
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n91
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
+POSTHOOK: Output: default@t1_n91
 PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1 group by a, b with cube LIMIT 10
+SELECT a, b, count(*) from T1_n91 group by a, b with cube LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1 group by a, b with cube LIMIT 10
+SELECT a, b, count(*) from T1_n91 group by a, b with cube LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -29,7 +29,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n91
             Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string)
@@ -78,13 +78,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a, b, count(*) from T1 group by a, b with cube LIMIT 10
+PREHOOK: query: SELECT a, b, count(*) from T1_n91 group by a, b with cube LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) from T1 group by a, b with cube LIMIT 10
+POSTHOOK: query: SELECT a, b, count(*) from T1_n91 group by a, b with cube LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
 1 1 1
 1 NULL 1
@@ -97,10 +97,10 @@ NULL 2 3
 NULL 3 1
 NULL NULL 6
 PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10
+SELECT a, b, count(*) FROM T1_n91 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10
+SELECT a, b, count(*) FROM T1_n91 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -111,7 +111,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n91
             Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string)
@@ -160,13 +160,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10
+PREHOOK: query: SELECT a, b, count(*) FROM T1_n91 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10
+POSTHOOK: query: SELECT a, b, count(*) FROM T1_n91 GROUP BY a, b GROUPING SETS (a, (a, b), b, ()) LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
 1 1 1
 1 NULL 1
@@ -179,10 +179,10 @@ NULL 2 3
 NULL 3 1
 NULL NULL 6
 PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
+SELECT a, b, count(*) FROM T1_n91 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
+SELECT a, b, count(*) FROM T1_n91 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -193,7 +193,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n91
             Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string)
@@ -242,13 +242,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
+PREHOOK: query: SELECT a, b, count(*) FROM T1_n91 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) FROM T1 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
+POSTHOOK: query: SELECT a, b, count(*) FROM T1_n91 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
 1 1 1
 1 NULL 1
@@ -261,10 +261,10 @@ POSTHOOK: Input: default@t1
 5 NULL 1
 8 NULL 1
 PREHOOK: query: EXPLAIN
-SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
+SELECT a FROM T1_n91 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
+SELECT a FROM T1_n91 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -275,7 +275,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n91
             Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string), c (type: string)
@@ -321,13 +321,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
+PREHOOK: query: SELECT a FROM T1_n91 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM T1 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
+POSTHOOK: query: SELECT a FROM T1_n91 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
 1
 NULL
@@ -340,10 +340,10 @@ NULL
 NULL
 NULL
 PREHOOK: query: EXPLAIN
-SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
+SELECT a FROM T1_n91 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
+SELECT a FROM T1_n91 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -354,7 +354,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n91
             Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string)
@@ -395,13 +395,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
+PREHOOK: query: SELECT a FROM T1_n91 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM T1 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
+POSTHOOK: query: SELECT a FROM T1_n91 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
 1
 2
@@ -409,10 +409,10 @@ POSTHOOK: Input: default@t1
 5
 8
 PREHOOK: query: EXPLAIN
-SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
+SELECT a + b, count(*) FROM T1_n91 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
+SELECT a + b, count(*) FROM T1_n91 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -423,7 +423,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n91
             Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: (UDFToDouble(a) + UDFToDouble(b)) (type: double)
@@ -467,13 +467,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
+PREHOOK: query: SELECT a + b, count(*) FROM T1_n91 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a + b, count(*) FROM T1 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
+POSTHOOK: query: SELECT a + b, count(*) FROM T1_n91 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n91
 #### A masked pattern was here ####
 2.0 1
 4.0 1
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_window.q.out b/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
index 7a88215e6e..f50b8dc662 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
@@ -1,32 +1,32 @@
-PREHOOK: query: create table t(category int, live int, comments int)
+PREHOOK: query: create table t_n33(category int, live int, comments int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t
-POSTHOOK: query: create table t(category int, live int, comments int)
+PREHOOK: Output: default@t_n33
+POSTHOOK: query: create table t_n33(category int, live int, comments int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t
-PREHOOK: query: insert into table t select key, 0, 2 from src tablesample(3 rows)
+POSTHOOK: Output: default@t_n33
+PREHOOK: query: insert into table t_n33 select key, 0, 2 from src tablesample(3 rows)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into table t select key, 0, 2 from src tablesample(3 rows)
+PREHOOK: Output: default@t_n33
+POSTHOOK: query: insert into table t_n33 select key, 0, 2 from src tablesample(3 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.category EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t.comments SIMPLE []
-POSTHOOK: Lineage: t.live SIMPLE []
+POSTHOOK: Output: default@t_n33
+POSTHOOK: Lineage: t_n33.category EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t_n33.comments SIMPLE []
+POSTHOOK: Lineage: t_n33.live SIMPLE []
 PREHOOK: query: explain
 select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1
-FROM t
+FROM t_n33
 GROUP BY category
 GROUPING SETS ((), (category))
 HAVING max(comments) > 0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1
-FROM t
+FROM t_n33
 GROUP BY category
 GROUPING SETS ((), (category))
 HAVING max(comments) > 0
@@ -41,7 +41,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t
+            alias: t_n33
             Statistics: Num rows: 3 Data size: 20 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: category (type: int), live (type: int), comments (type: int)
@@ -134,69 +134,69 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1
-FROM t
+FROM t_n33
 GROUP BY category
 GROUPING SETS ((), (category))
 HAVING max(comments) > 0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t
+PREHOOK: Input: default@t_n33
 #### A masked pattern was here ####
 POSTHOOK: query: select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1
-FROM t
+FROM t_n33
 GROUP BY category
 GROUPING SETS ((), (category))
 HAVING max(comments) > 0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t
+POSTHOOK: Input: default@t_n33
 #### A masked pattern was here ####
 NULL 0 2 1
 86 0 2 1
 238 0 2 1
 311 0 2 1
 PREHOOK: query: SELECT grouping(category), lead(live) over(partition by grouping(category))
-FROM t
+FROM t_n33
 GROUP BY category, live
 GROUPING SETS ((), (category))
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t
+PREHOOK: Input: default@t_n33
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT grouping(category), lead(live) over(partition by grouping(category))
-FROM t
+FROM t_n33
 GROUP BY category, live
 GROUPING SETS ((), (category))
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t
+POSTHOOK: Input: default@t_n33
 #### A masked pattern was here ####
 0 NULL
 0 NULL
 0 NULL
 1 NULL
 PREHOOK: query: SELECT grouping(category), lead(live) over(partition by grouping(category))
-FROM t
+FROM t_n33
 GROUP BY category, live
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t
+PREHOOK: Input: default@t_n33
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT grouping(category), lead(live) over(partition by grouping(category))
-FROM t
+FROM t_n33
 GROUP BY category, live
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t
+POSTHOOK: Input: default@t_n33
 #### A masked pattern was here ####
 0 0
 0 0
 0 NULL
 PREHOOK: query: SELECT grouping(category), lag(live) over(partition by grouping(category))
-FROM t
+FROM t_n33
 GROUP BY category, live
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t
+PREHOOK: Input: default@t_n33
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT grouping(category), lag(live) over(partition by grouping(category))
-FROM t
+FROM t_n33
 GROUP BY category, live
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t
+POSTHOOK: Input: default@t_n33
 #### A masked pattern was here ####
 0 NULL
 0 0
diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
index e9830e4097..2d517cf49b 100644
--- a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
@@ -1,21 +1,21 @@
-PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n120(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n120
+POSTHOOK: query: CREATE TABLE dest1_n120(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n120
 PREHOOK: query: EXPLAIN EXTENDED
 FROM srcpart src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n120
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5)))
 WHERE src.ds = '2008-04-08'
 GROUP BY substr(src.key,1,1)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
 FROM srcpart src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n120
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5)))
 WHERE src.ds = '2008-04-08'
 GROUP BY substr(src.key,1,1)
@@ -188,17 +188,17 @@ STAGE PLANS:
               columns.comments
               columns.types string:int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n120
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { string key, i32 c1, string c2}
+              serialization.ddl struct dest1_n120 { string key, i32 c1, string c2}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n120
           TotalFiles: 1
           GatherStats: true
          MultiFileSpray: false
@@ -247,17 +247,17 @@ STAGE PLANS:
               columns.comments
               columns.types string:int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n120
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { string key, i32 c1, string c2}
+              serialization.ddl struct dest1_n120 { string key, i32 c1, string c2}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n120
   Stage: Stage-2
     Stats Work
@@ -266,7 +266,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, c1, c2
           Column Types: string, int, string
-          Table: default.dest1
+          Table: default.dest1_n120
           Is Table Level Stats: true
   Stage: Stage-3
@@ -340,7 +340,7 @@ STAGE PLANS:
           MultiFileSpray: false
 PREHOOK: query: FROM srcpart src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n120
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5)))
 WHERE src.ds = '2008-04-08'
 GROUP BY substr(src.key,1,1)
@@ -348,9 +348,9 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n120
 POSTHOOK: query: FROM srcpart src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n120
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5)))
 WHERE src.ds = '2008-04-08'
 GROUP BY substr(src.key,1,1)
@@ -358,17 +358,17 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c2 EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), (srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n120
+POSTHOOK: Lineage: dest1_n120.c1 EXPRESSION [(srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n120.c2 EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), (srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n120.key EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n120.* FROM dest1_n120
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n120
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n120.* FROM dest1_n120
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n120
 #### A masked pattern was here ####
 0 1 00.0
 1 71 132828.0
diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
index 5757e4ca9d..8a44122ddb 100644
--- a/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
@@ -1,21 +1,21 @@
-PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n146(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n146
+POSTHOOK: query: CREATE TABLE dest1_n146(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n146
 PREHOOK: query: EXPLAIN EXTENDED
 FROM srcpart src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n146
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value)
 WHERE src.ds = '2008-04-08'
 GROUP BY substr(src.key,1,1)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
 FROM srcpart src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n146
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value)
 WHERE src.ds = '2008-04-08'
 GROUP BY substr(src.key,1,1)
@@ -188,17 +188,17 @@ STAGE PLANS:
               columns.comments
               columns.types string:int:string:int:int
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n146
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { string key, i32 c1, string c2, i32 c3, i32 c4}
+              serialization.ddl struct dest1_n146 { string key, i32 c1, string c2, i32 c3, i32 c4}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n146
           TotalFiles: 1
           GatherStats: true
          MultiFileSpray: false
@@ -247,17 +247,17 @@ STAGE PLANS:
               columns.comments
               columns.types string:int:string:int:int
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n146
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { string key, i32 c1, string c2, i32 c3, i32 c4}
+              serialization.ddl struct dest1_n146 { string key, i32 c1, string c2, i32 c3, i32 c4}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n146
   Stage: Stage-2
     Stats Work
@@ -266,7 +266,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, c1, c2, c3, c4
           Column Types: string, int, string, int, int
-          Table: default.dest1
+          Table: default.dest1_n146
           Is Table Level Stats: true
   Stage: Stage-3
@@ -340,7 +340,7 @@ STAGE PLANS:
           MultiFileSpray: false
 PREHOOK: query: FROM srcpart src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n146
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value)
 WHERE src.ds = '2008-04-08'
 GROUP BY substr(src.key,1,1)
@@ -348,9 +348,9 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n146
 POSTHOOK: query: FROM srcpart src
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n146
 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value)
 WHERE src.ds = '2008-04-08'
 GROUP BY substr(src.key,1,1)
@@ -358,19 +358,19 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c2 EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), (srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(srcpart)src.null, ]
-POSTHOOK: Lineage: dest1.c4 EXPRESSION [(srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n146
+POSTHOOK: Lineage: dest1_n146.c1 EXPRESSION [(srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n146.c2 EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), (srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n146.c3 EXPRESSION [(srcpart)src.null, ]
+POSTHOOK: Lineage: dest1_n146.c4 EXPRESSION [(srcpart)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n146.key EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n146.* FROM dest1_n146
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n146
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n146.* FROM dest1_n146
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n146
 #### A masked pattern was here ####
 0 1 00.0 0 1
 1 71 132828.0 10044 71
diff --git a/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out b/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
index e53b16b2e9..536b261374 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: create table dest1(key int, cnt int)
+PREHOOK: query: create table dest1_n83(key int, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: create table dest1(key int, cnt int)
+PREHOOK: Output: default@dest1_n83
+POSTHOOK: query: create table dest1_n83(key int, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: create table dest2(key int, cnt int)
+POSTHOOK: Output: default@dest1_n83
+PREHOOK: query: create table dest2_n13(key int, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest2
-POSTHOOK: query: create table dest2(key int, cnt int)
+PREHOOK: Output: default@dest2_n13
+POSTHOOK: query: create table dest2_n13(key int, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest2
+POSTHOOK: Output: default@dest2_n13
 PREHOOK: query: explain
 from src
-insert overwrite table dest1 select key, count(distinct value) group by key
-insert overwrite table dest2 select key+key, count(distinct value) group by key+key
+insert overwrite table dest1_n83 select key, count(distinct value) group by key
+insert overwrite table dest2_n13 select key+key, count(distinct value) group by key+key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 from src
-insert overwrite table dest1 select key, count(distinct value) group by key
-insert overwrite table dest2 select key+key, count(distinct value) group by key+key
+insert overwrite table dest1_n83 select key, count(distinct value) group by key
+insert overwrite table dest2_n13 select key+key, count(distinct value) group by key+key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -90,7 +90,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n83
          Select Operator
            expressions: _col0 (type: int), _col1 (type: int)
            outputColumnNames: key, cnt
@@ -115,7 +115,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n83
   Stage: Stage-3
     Stats Work
@@ -123,7 +123,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, cnt
          Column Types: int, int
-          Table: default.dest1
+          Table: default.dest1_n83
   Stage: Stage-4
     Map Reduce
@@ -154,7 +154,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, cnt
          Column Types: int, int
-          Table: default.dest2
+          Table: default.dest2_n13
   Stage: Stage-5
     Map Reduce
@@ -184,7 +184,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest2
+              name: default.dest2_n13
          Select Operator
            expressions: _col0 (type: int), _col1 (type: int)
            outputColumnNames: key, cnt
@@ -209,7 +209,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest2
+              name: default.dest2_n13
   Stage: Stage-7
     Map Reduce
@@ -235,30 +235,30 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 PREHOOK: query: from src
-insert overwrite table dest1 select key, count(distinct value) group by key
-insert overwrite table dest2 select key+key, count(distinct value) group by key+key
+insert overwrite table dest1_n83 select key, count(distinct value) group by key
+insert overwrite table dest2_n13 select key+key, count(distinct value) group by key+key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n83
+PREHOOK: Output: default@dest2_n13
 POSTHOOK: query: from src
-insert overwrite table dest1 select key, count(distinct value) group by key
-insert overwrite table dest2 select key+key, count(distinct value) group by key+key
+insert overwrite table dest1_n83 select key, count(distinct value) group by key
+insert overwrite table dest2_n13 select key+key, count(distinct value) group by key+key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.cnt EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.cnt EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: select * from dest1 where key < 10
+POSTHOOK: Output: default@dest1_n83
+POSTHOOK: Output: default@dest2_n13
+POSTHOOK: Lineage: dest1_n83.cnt EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n83.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n13.cnt EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n13.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: select * from dest1_n83 where key < 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n83
 #### A masked pattern was here ####
-POSTHOOK: query: select * from dest1 where key < 10
+POSTHOOK: query: select * from dest1_n83 where key < 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n83
 #### A masked pattern was here ####
 0 1
 2 1
@@ -266,13 +266,13 @@ POSTHOOK: Input: default@dest1
 5 1
 8 1
 9 1
-PREHOOK: query: select * from dest2 where key < 20 order by key limit 10
+PREHOOK: query: select * from dest2_n13 where key < 20 order by key limit 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n13
 #### A masked pattern was here ####
-POSTHOOK: query: select * from dest2 where key < 20 order by key limit 10
+POSTHOOK: query: select * from dest2_n13 where key < 20 order by key limit 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n13
 #### A masked pattern was here ####
 0 1
 10 1
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
index 262f4f17d7..65540a6bc9 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest_g2_n4(key STRING, c1 INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_g2
-POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest_g2_n4
+POSTHOOK: query: CREATE TABLE dest_g2_n4(key STRING, c1 INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_g2
-PREHOOK: query: CREATE TABLE dest_g3(key STRING, c1 INT, c2 INT) STORED AS TEXTFILE
+POSTHOOK: Output: default@dest_g2_n4
+PREHOOK: query: CREATE TABLE dest_g3_n0(key STRING, c1 INT, c2 INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_g3
-POSTHOOK: query: CREATE TABLE dest_g3(key STRING, c1 INT, c2 INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest_g3_n0
+POSTHOOK: query: CREATE TABLE dest_g3_n0(key STRING, c1 INT, c2 INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_g3
+POSTHOOK: Output: default@dest_g3_n0
 PREHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1)
+INSERT OVERWRITE TABLE dest_g2_n4 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
+INSERT OVERWRITE TABLE dest_g3_n0 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1)
+INSERT OVERWRITE TABLE dest_g2_n4 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
+INSERT OVERWRITE TABLE dest_g3_n0 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -73,7 +73,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest_g2
+              name: default.dest_g2_n4
          Select Operator
            expressions: _col0 (type: string), _col1 (type: int)
            outputColumnNames: key, c1
@@ -109,7 +109,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest_g3
+              name: default.dest_g3_n0
          Select Operator
            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int)
            outputColumnNames: key, c1, c2
@@ -134,7 +134,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest_g2
+              name: default.dest_g2_n4
   Stage: Stage-3
     Stats Work
@@ -142,7 +142,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, c1
          Column Types: string, int
-          Table: default.dest_g2
+          Table: default.dest_g2_n4
   Stage: Stage-4
     Map Reduce
@@ -173,7 +173,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, c1, c2
          Column Types: string, int, int
-          Table: default.dest_g3
+          Table: default.dest_g3_n0
   Stage: Stage-1
     Move Operator
@@ -183,7 +183,7 @@ STAGE PLANS:
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.dest_g3
+          name: default.dest_g3_n0
   Stage: Stage-6
     Map Reduce
@@ -209,63 +209,63 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1)
+INSERT OVERWRITE TABLE dest_g2_n4 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
+INSERT OVERWRITE TABLE dest_g3_n0 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest_g2
-PREHOOK: Output: default@dest_g3
+PREHOOK: Output: default@dest_g2_n4
+PREHOOK: Output: default@dest_g3_n0
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1)
+INSERT OVERWRITE TABLE dest_g2_n4 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
+INSERT OVERWRITE TABLE dest_g3_n0 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest_g2
-POSTHOOK: Output: default@dest_g3
-POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_g3.c1 EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: dest_g3.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_g3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM dest_g2
+POSTHOOK: Output: default@dest_g2_n4
+POSTHOOK: Output: default@dest_g3_n0
+POSTHOOK: Lineage: dest_g2_n4.c1 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest_g2_n4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g3_n0.c1 EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: dest_g3_n0.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g3_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM dest_g2_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_g2
+PREHOOK: Input: default@dest_g2_n4
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM dest_g2
+POSTHOOK: query: SELECT * FROM dest_g2_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_g2
+POSTHOOK: Input: default@dest_g2_n4
 #### A masked pattern was here ####
 5 6
 6 5
 7 6
 8 8
 9 7
-PREHOOK: query: SELECT * FROM dest_g3
+PREHOOK: query: SELECT * FROM dest_g3_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_g3
+PREHOOK: Input: default@dest_g3_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM dest_g3
+POSTHOOK: query: SELECT * FROM dest_g3_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_g3
+POSTHOOK: Input: default@dest_g3_n0
 #### A masked pattern was here ####
 0 1 3
 1 71 115
 2 69 111
 3 62 99
 4 74 124
-PREHOOK: query: DROP TABLE dest_g2
+PREHOOK: query: DROP TABLE dest_g2_n4
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dest_g2
-PREHOOK: Output: default@dest_g2
-POSTHOOK: query: DROP TABLE dest_g2
+PREHOOK: Input: default@dest_g2_n4
+PREHOOK: Output: default@dest_g2_n4
+POSTHOOK: query: DROP TABLE dest_g2_n4
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dest_g2
-POSTHOOK: Output: default@dest_g2
+POSTHOOK: Input: default@dest_g2_n4
+POSTHOOK: Output: default@dest_g2_n4
query: DROP TABLE dest_g3 +POSTHOOK: Input: default@dest_g2_n4 +POSTHOOK: Output: default@dest_g2_n4 +PREHOOK: query: DROP TABLE dest_g3_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest_g3 -PREHOOK: Output: default@dest_g3 -POSTHOOK: query: DROP TABLE dest_g3 +PREHOOK: Input: default@dest_g3_n0 +PREHOOK: Output: default@dest_g3_n0 +POSTHOOK: query: DROP TABLE dest_g3_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest_g3 -POSTHOOK: Output: default@dest_g3 +POSTHOOK: Input: default@dest_g3_n0 +POSTHOOK: Output: default@dest_g3_n0 diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out index 4dcf89267f..aa76313630 100644 --- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out +++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out @@ -1,37 +1,37 @@ -PREHOOK: query: create table e1 (key string, count int) +PREHOOK: query: create table e1_n1 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e1 -POSTHOOK: query: create table e1 (key string, count int) +PREHOOK: Output: default@e1_n1 +POSTHOOK: query: create table e1_n1 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e1 -PREHOOK: query: create table e2 (key string, count int) +POSTHOOK: Output: default@e1_n1 +PREHOOK: query: create table e2_n2 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e2 -POSTHOOK: query: create table e2 (key string, count int) +PREHOOK: Output: default@e2_n2 +POSTHOOK: query: create table e2_n2 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e2 +POSTHOOK: Output: default@e2_n2 PREHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key @@ -84,7 +84,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n1 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -120,7 +120,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n2 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -145,7 +145,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n1 Stage: Stage-3 Stats Work @@ -153,7 +153,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e1 + Table: default.e1_n1 Stage: Stage-4 Map Reduce @@ -184,7 +184,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e2 + Table: default.e2_n2 Stage: Stage-1 Move Operator @@ -194,7 +194,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n2 Stage: Stage-6 Map Reduce @@ -220,72 +220,72 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e1_n1 +PREHOOK: Output: default@e2_n2 POSTHOOK: query: from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Lineage: e1.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n1 +POSTHOOK: Output: default@e2_n2 +POSTHOOK: Lineage: e1_n1.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e1_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n2.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e2_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from e1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@e1 +PREHOOK: Input: default@e1_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n1 #### A masked pattern was here #### 100 2 200 2 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n2 #### A masked pattern was here #### 400 1 PREHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 
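Note: the e1_n1/e2_n2 hunks here all exercise Hive's multi-insert form, in which a single scan of src feeds several INSERT ... SELECT ... GROUP BY branches. A minimal sketch of the pattern, with hypothetical table names (dst_a and dst_b are not part of this change):

FROM src
INSERT OVERWRITE TABLE dst_a
  SELECT key, count(*) WHERE key IN (100, 150, 200) GROUP BY key
INSERT OVERWRITE TABLE dst_b
  SELECT key, count(*) WHERE key IN (400, 450) GROUP BY key;

Each branch applies its own WHERE and GROUP BY, but src is read only once, which is why both default.e1_n1 and default.e2_n2 appear as sinks within a single EXPLAIN plan in this file.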
+insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value @@ -339,7 +339,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n1 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -375,7 +375,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n2 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -400,7 +400,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n1 Stage: Stage-3 Stats Work @@ -408,7 +408,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e1 + Table: default.e1_n1 Stage: Stage-4 Map Reduce @@ -439,7 +439,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e2 + Table: default.e2_n2 Stage: Stage-1 Move Operator @@ -449,7 +449,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n2 Stage: Stage-6 Map Reduce @@ -475,72 +475,72 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e1_n1 +PREHOOK: Output: default@e2_n2 POSTHOOK: query: from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Lineage: e1.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: e2.count EXPRESSION [(src)src.null, ] -POSTHOOK: 
Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n1 +POSTHOOK: Output: default@e2_n2 +POSTHOOK: Lineage: e1_n1.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e1_n1.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n2.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e2_n2.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from e1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@e1 +PREHOOK: Input: default@e1_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n1 #### A masked pattern was here #### val_100 2 val_200 2 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n2 #### A masked pattern was here #### val_200 2 PREHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key @@ -593,7 +593,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n1 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -629,7 +629,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n2 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -654,7 +654,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n1 Stage: Stage-3 Stats Work @@ -662,7 +662,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e1 + Table: default.e1_n1 Stage: Stage-4 Map Reduce @@ -693,7 +693,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e2 + Table: default.e2_n2 Stage: Stage-1 Move Operator @@ -703,7 +703,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n2 Stage: Stage-6 Map Reduce @@ -729,72 +729,72 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e1_n1 +PREHOOK: Output: default@e2_n2 POSTHOOK: query: from src -insert overwrite table e1 +insert overwrite table e1_n1 select key, count(*) where src.value in ('val_100', 'val_200', 'val_300') AND key in (100, 150, 200) group by key -insert overwrite table e2 +insert overwrite table e2_n2 select key, count(*) where src.value in ('val_400', 'val_500') AND key in (400, 450) group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Lineage: e1.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n1 +POSTHOOK: Output: default@e2_n2 +POSTHOOK: Lineage: e1_n1.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e1_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n2.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e2_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from e1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@e1 +PREHOOK: Input: default@e1_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n1 #### A masked pattern was here #### 100 2 200 2 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n2 #### A masked pattern was here #### 400 1 PREHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value @@ -848,7 +848,7 @@ STAGE PLANS: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n1 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -884,7 +884,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n2 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -909,7 +909,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n1 Stage: Stage-3 Stats Work @@ -917,7 +917,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e1 + Table: default.e1_n1 Stage: Stage-4 Map Reduce @@ -948,7 +948,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e2 + Table: default.e2_n2 Stage: Stage-1 Move Operator @@ -958,7 +958,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n2 Stage: Stage-6 Map Reduce @@ -984,51 +984,51 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e1_n1 +PREHOOK: Output: default@e2_n2 POSTHOOK: query: from src -insert overwrite table e1 +insert overwrite table e1_n1 select value, count(*) where src.key + src.key = 200 or src.key - 100 = 100 or src.key = 300 AND VALUE IS NOT NULL group by value -insert overwrite table e2 +insert overwrite table e2_n2 select value, count(*) where src.key + src.key = 400 or src.key - 100 = 500 AND VALUE IS NOT NULL group by value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Lineage: e1.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: e2.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n1 +POSTHOOK: Output: default@e2_n2 +POSTHOOK: Lineage: e1_n1.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e1_n1.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n2.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e2_n2.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from e1_n1 PREHOOK: type: QUERY -PREHOOK: Input: 
default@e1 +PREHOOK: Input: default@e1_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n1 #### A masked pattern was here #### val_100 2 val_200 2 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n2 #### A masked pattern was here #### val_200 2 diff --git a/ql/src/test/results/clientpositive/groupby_multialias.q.out b/ql/src/test/results/clientpositive/groupby_multialias.q.out index 37e072e0f3..bdb9a75ee7 100644 --- a/ql/src/test/results/clientpositive/groupby_multialias.q.out +++ b/ql/src/test/results/clientpositive/groupby_multialias.q.out @@ -1,20 +1,20 @@ -PREHOOK: query: create table t1 (a int) +PREHOOK: query: create table t1_n54 (a int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (a int) +PREHOOK: Output: default@t1_n54 +POSTHOOK: query: create table t1_n54 (a int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n54 PREHOOK: query: explain -select t1.a as a1, min(t1.a) as a -from t1 -group by t1.a +select t1_n54.a as a1, min(t1_n54.a) as a +from t1_n54 +group by t1_n54.a PREHOOK: type: QUERY POSTHOOK: query: explain -select t1.a as a1, min(t1.a) as a -from t1 -group by t1.a +select t1_n54.a as a1, min(t1_n54.a) as a +from t1_n54 +group by t1_n54.a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -25,7 +25,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n54 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: a (type: int) diff --git a/ql/src/test/results/clientpositive/groupby_ppd.q.out b/ql/src/test/results/clientpositive/groupby_ppd.q.out index 024ec6d473..a2292bd789 100644 --- a/ql/src/test/results/clientpositive/groupby_ppd.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppd.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: create table invites (id int, foo int, bar int) +PREHOOK: query: create table invites_n0 (id int, foo int, bar int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@invites -POSTHOOK: query: create table invites (id int, foo int, bar int) +PREHOOK: Output: default@invites_n0 +POSTHOOK: query: create table invites_n0 (id int, foo int, bar int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@invites -PREHOOK: query: explain select * from (select foo, bar from (select bar, foo from invites c union all select bar, foo from invites d) b) a group by bar, foo having bar=1 +POSTHOOK: Output: default@invites_n0 +PREHOOK: query: explain select * from (select foo, bar from (select bar, foo from invites_n0 c union all select bar, foo from invites_n0 d) b) a group by bar, foo having bar=1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from (select foo, bar from (select bar, foo from invites c union all select bar, foo from invites d) b) a group by bar, foo having bar=1 +POSTHOOK: query: explain select * from (select foo, bar from (select bar, foo from invites_n0 c union all select bar, foo from 
invites_n0 d) b) a group by bar, foo having bar=1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -86,11 +86,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: drop table invites +PREHOOK: query: drop table invites_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@invites -PREHOOK: Output: default@invites -POSTHOOK: query: drop table invites +PREHOOK: Input: default@invites_n0 +PREHOOK: Output: default@invites_n0 +POSTHOOK: query: drop table invites_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@invites -POSTHOOK: Output: default@invites +POSTHOOK: Input: default@invites_n0 +POSTHOOK: Output: default@invites_n0 diff --git a/ql/src/test/results/clientpositive/groupby_ppr.q.out b/ql/src/test/results/clientpositive/groupby_ppr.q.out index 231c7039e2..231caa6bd3 100644 --- a/ql/src/test/results/clientpositive/groupby_ppr.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppr.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n68(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n68 +POSTHOOK: query: CREATE TABLE dest1_n68(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n68 PREHOOK: query: EXPLAIN EXTENDED FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n68 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n68 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1) @@ -182,17 +182,17 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n68 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { string key, i32 c1, string c2} + serialization.ddl struct dest1_n68 { string key, i32 c1, string c2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n68 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -236,17 +236,17 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n68 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { string key, i32 c1, string c2} + serialization.ddl struct dest1_n68 { string key, i32 c1, string c2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n68 Stage: Stage-2 Stats Work @@ -255,7 +255,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, c1, c2 Column Types: string, int, string - Table: default.dest1 + Table: 
default.dest1_n68 Is Table Level Stats: true Stage: Stage-3 @@ -329,7 +329,7 @@ STAGE PLANS: MultiFileSpray: false PREHOOK: query: FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n68 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1) @@ -337,9 +337,9 @@ PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n68 POSTHOOK: query: FROM srcpart src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n68 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) WHERE src.ds = '2008-04-08' GROUP BY substr(src.key,1,1) @@ -347,17 +347,17 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(srcpart)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), (srcpart)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n68 +POSTHOOK: Lineage: dest1_n68.c1 EXPRESSION [(srcpart)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n68.c2 EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), (srcpart)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n68.key EXPRESSION [(srcpart)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n68.* FROM dest1_n68 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n68 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n68.* FROM dest1_n68 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n68 #### A masked pattern was here #### 0 1 00.0 1 71 132828.0 diff --git a/ql/src/test/results/clientpositive/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/groupby_rollup1.q.out index 0c622dba6e..aff6c0bccc 100644 --- a/ql/src/test/results/clientpositive/groupby_rollup1.q.out +++ b/ql/src/test/results/clientpositive/groupby_rollup1.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n59(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n59 +POSTHOOK: query: CREATE TABLE T1_n59(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n59 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n59 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n59 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n59 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n59 PREHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup +SELECT key, val, count(1) FROM T1_n59 GROUP BY key, val with rollup PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup +SELECT key, val, count(1) FROM T1_n59 GROUP BY key, val with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -29,7 +29,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n59 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -73,13 +73,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup +PREHOOK: query: SELECT key, val, count(1) FROM T1_n59 GROUP BY key, val with rollup PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n59 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup +POSTHOOK: query: SELECT key, val, count(1) FROM T1_n59 GROUP BY key, val with rollup POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n59 #### A masked pattern was here #### 1 11 1 1 NULL 1 @@ -94,10 +94,10 @@ POSTHOOK: Input: default@t1 8 NULL 2 NULL NULL 6 PREHOOK: query: EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup +SELECT key, count(distinct val) FROM T1_n59 GROUP BY key with rollup PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup +SELECT key, count(distinct val) FROM T1_n59 GROUP BY key with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -108,7 +108,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n59 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -151,13 +151,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup +PREHOOK: query: SELECT key, count(distinct val) FROM T1_n59 GROUP BY key with rollup PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n59 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup +POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n59 GROUP BY key with rollup POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n59 #### A masked pattern was here #### 1 1 2 1 @@ -166,10 +166,10 @@ POSTHOOK: Input: default@t1 8 2 NULL 6 PREHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup +SELECT key, val, count(1) FROM T1_n59 GROUP BY key, val with rollup PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup +SELECT key, val, count(1) FROM T1_n59 GROUP BY key, val with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -181,7 +181,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n59 
Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -249,13 +249,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup +PREHOOK: query: SELECT key, val, count(1) FROM T1_n59 GROUP BY key, val with rollup PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n59 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup +POSTHOOK: query: SELECT key, val, count(1) FROM T1_n59 GROUP BY key, val with rollup POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n59 #### A masked pattern was here #### 1 11 1 1 NULL 1 @@ -270,10 +270,10 @@ POSTHOOK: Input: default@t1 8 NULL 2 NULL NULL 6 PREHOOK: query: EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup +SELECT key, count(distinct val) FROM T1_n59 GROUP BY key with rollup PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup +SELECT key, count(distinct val) FROM T1_n59 GROUP BY key with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -285,7 +285,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n59 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -352,13 +352,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup +PREHOOK: query: SELECT key, count(distinct val) FROM T1_n59 GROUP BY key with rollup PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n59 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, count(distinct val) FROM T1 GROUP BY key with rollup +POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n59 GROUP BY key with rollup POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n59 #### A masked pattern was here #### 1 1 2 1 @@ -366,31 +366,31 @@ POSTHOOK: Input: default@t1 7 1 8 2 NULL 6 -PREHOOK: query: CREATE TABLE T2(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T2_n37(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE +PREHOOK: Output: default@T2_n37 +POSTHOOK: query: CREATE TABLE T2_n37(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: CREATE TABLE T3(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE +POSTHOOK: Output: default@T2_n37 +PREHOOK: query: CREATE TABLE T3_n14(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: CREATE TABLE T3(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE +PREHOOK: Output: default@T3_n14 +POSTHOOK: query: CREATE TABLE T3_n14(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 +POSTHOOK: Output: default@T3_n14 PREHOOK: query: EXPLAIN -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val 
with rollup -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by rollup(key, val) +FROM T1_n59 +INSERT OVERWRITE TABLE T2_n37 SELECT key, val, count(1) group by key, val with rollup +INSERT OVERWRITE TABLE T3_n14 SELECT key, val, sum(1) group by rollup(key, val) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by rollup(key, val) +FROM T1_n59 +INSERT OVERWRITE TABLE T2_n37 SELECT key, val, count(1) group by key, val with rollup +INSERT OVERWRITE TABLE T3_n14 SELECT key, val, sum(1) group by rollup(key, val) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -409,7 +409,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n59 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -486,7 +486,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 + name: default.t2_n37 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int) outputColumnNames: key1, key2, val @@ -511,7 +511,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 + name: default.t2_n37 Stage: Stage-4 Stats Work @@ -519,7 +519,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, val Column Types: string, string, int - Table: default.t2 + Table: default.t2_n37 Stage: Stage-5 Map Reduce @@ -549,7 +549,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, val Column Types: string, string, int - Table: default.t3 + Table: default.t3_n14 Stage: Stage-6 Map Reduce @@ -604,7 +604,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t3 + name: default.t3_n14 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int) outputColumnNames: key1, key2, val @@ -629,7 +629,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t3 + name: default.t3_n14 Stage: Stage-9 Map Reduce @@ -653,23 +653,23 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with rollup +PREHOOK: query: FROM T1_n59 +INSERT OVERWRITE TABLE T2_n37 SELECT key, val, count(1) group by key, val with rollup +INSERT OVERWRITE TABLE T3_n14 SELECT key, val, sum(1) group by key, val with rollup PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -POSTHOOK: query: FROM T1 -INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup -INSERT OVERWRITE TABLE T3 SELECT key, val, sum(1) group by key, val with rollup 
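Note: the groupby_rollup1 hunks mix the two equivalent spellings of rollup (GROUP BY key, val WITH ROLLUP in one branch, GROUP BY ROLLUP(key, val) in the other). Both expand to the same grouping sets; a sketch against a hypothetical table t(key, val):

SELECT key, val, count(1) FROM t GROUP BY key, val WITH ROLLUP;
-- is the same query as
SELECT key, val, count(1) FROM t GROUP BY ROLLUP(key, val);
-- and both are shorthand for
SELECT key, val, count(1) FROM t GROUP BY key, val GROUPING SETS ((key, val), key, ());

That equivalence is why the test can spell the rollup differently in the two branches of one multi-insert and still expect identical plans and results.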
+PREHOOK: Input: default@t1_n59 +PREHOOK: Output: default@t2_n37 +PREHOOK: Output: default@t3_n14 +POSTHOOK: query: FROM T1_n59 +INSERT OVERWRITE TABLE T2_n37 SELECT key, val, count(1) group by key, val with rollup +INSERT OVERWRITE TABLE T3_n14 SELECT key, val, sum(1) group by key, val with rollup POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t3 -POSTHOOK: Lineage: t2.key1 SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -POSTHOOK: Lineage: t2.val EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: t3.key1 SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t3.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -POSTHOOK: Lineage: t3.val EXPRESSION [(t1)t1.null, ] +POSTHOOK: Input: default@t1_n59 +POSTHOOK: Output: default@t2_n37 +POSTHOOK: Output: default@t3_n14 +POSTHOOK: Lineage: t2_n37.key1 SIMPLE [(t1_n59)t1_n59.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n37.key2 SIMPLE [(t1_n59)t1_n59.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n37.val EXPRESSION [(t1_n59)t1_n59.null, ] +POSTHOOK: Lineage: t3_n14.key1 SIMPLE [(t1_n59)t1_n59.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t3_n14.key2 SIMPLE [(t1_n59)t1_n59.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: t3_n14.val EXPRESSION [(t1_n59)t1_n59.null, ] diff --git a/ql/src/test/results/clientpositive/groupby_rollup_empty.q.out b/ql/src/test/results/clientpositive/groupby_rollup_empty.q.out index 8263dbd969..be068d12f8 100644 --- a/ql/src/test/results/clientpositive/groupby_rollup_empty.q.out +++ b/ql/src/test/results/clientpositive/groupby_rollup_empty.q.out @@ -1,143 +1,143 @@ -PREHOOK: query: drop table if exists tx1 +PREHOOK: query: drop table if exists tx1_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists tx1 +POSTHOOK: query: drop table if exists tx1_n2 POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table if exists tx2 +PREHOOK: query: drop table if exists tx2_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists tx2 +POSTHOOK: query: drop table if exists tx2_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tx1 (a integer,b integer,c integer) +PREHOOK: query: create table tx1_n2 (a integer,b integer,c integer) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tx1 -POSTHOOK: query: create table tx1 (a integer,b integer,c integer) +PREHOOK: Output: default@tx1_n2 +POSTHOOK: query: create table tx1_n2 (a integer,b integer,c integer) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tx1 +POSTHOOK: Output: default@tx1_n2 PREHOOK: query: select sum(c) -from tx1 +from tx1_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@tx1 +PREHOOK: Input: default@tx1_n2 #### A masked pattern was here #### POSTHOOK: query: select sum(c) -from tx1 +from tx1_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx1 +POSTHOOK: Input: default@tx1_n2 #### A masked pattern was here #### NULL PREHOOK: query: select sum(c), grouping(b), 'NULL,1' as expected -from tx1 +from tx1_n2 where a<0 group by a,b grouping sets ((), b, a) PREHOOK: type: QUERY -PREHOOK: Input: default@tx1 +PREHOOK: Input: default@tx1_n2 #### A masked pattern was here #### POSTHOOK: query: select sum(c), grouping(b), 'NULL,1' as expected -from tx1 
+from tx1_n2 where a<0 group by a,b grouping sets ((), b, a) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx1 +POSTHOOK: Input: default@tx1_n2 #### A masked pattern was here #### NULL 1 NULL,1 PREHOOK: query: select sum(c), grouping(b), 'NULL,1' as expected -from tx1 +from tx1_n2 where a<0 group by rollup (b) PREHOOK: type: QUERY -PREHOOK: Input: default@tx1 +PREHOOK: Input: default@tx1_n2 #### A masked pattern was here #### POSTHOOK: query: select sum(c), grouping(b), 'NULL,1' as expected -from tx1 +from tx1_n2 where a<0 group by rollup (b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx1 +POSTHOOK: Input: default@tx1_n2 #### A masked pattern was here #### NULL 1 NULL,1 -PREHOOK: query: select '2 rows expected',sum(c) from tx1 group by rollup (a) +PREHOOK: query: select '2 rows expected',sum(c) from tx1_n2 group by rollup (a) union all -select '2 rows expected',sum(c) from tx1 group by rollup (a) +select '2 rows expected',sum(c) from tx1_n2 group by rollup (a) PREHOOK: type: QUERY -PREHOOK: Input: default@tx1 +PREHOOK: Input: default@tx1_n2 #### A masked pattern was here #### -POSTHOOK: query: select '2 rows expected',sum(c) from tx1 group by rollup (a) +POSTHOOK: query: select '2 rows expected',sum(c) from tx1_n2 group by rollup (a) union all -select '2 rows expected',sum(c) from tx1 group by rollup (a) +select '2 rows expected',sum(c) from tx1_n2 group by rollup (a) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx1 +POSTHOOK: Input: default@tx1_n2 #### A masked pattern was here #### 2 rows expected NULL 2 rows expected NULL -PREHOOK: query: insert into tx1 values (1,1,1) +PREHOOK: query: insert into tx1_n2 values (1,1,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@tx1 -POSTHOOK: query: insert into tx1 values (1,1,1) +PREHOOK: Output: default@tx1_n2 +POSTHOOK: query: insert into tx1_n2 values (1,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@tx1 -POSTHOOK: Lineage: tx1.a SCRIPT [] -POSTHOOK: Lineage: tx1.b SCRIPT [] -POSTHOOK: Lineage: tx1.c SCRIPT [] +POSTHOOK: Output: default@tx1_n2 +POSTHOOK: Lineage: tx1_n2.a SCRIPT [] +POSTHOOK: Lineage: tx1_n2.b SCRIPT [] +POSTHOOK: Lineage: tx1_n2.c SCRIPT [] PREHOOK: query: select sum(c), grouping(b), 'NULL,1' as expected -from tx1 +from tx1_n2 where a<0 group by rollup (b) PREHOOK: type: QUERY -PREHOOK: Input: default@tx1 +PREHOOK: Input: default@tx1_n2 #### A masked pattern was here #### POSTHOOK: query: select sum(c), grouping(b), 'NULL,1' as expected -from tx1 +from tx1_n2 where a<0 group by rollup (b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx1 +POSTHOOK: Input: default@tx1_n2 #### A masked pattern was here #### NULL 1 NULL,1 PREHOOK: query: select sum(c), grouping(b), '1,1 and 1,0' as expected -from tx1 +from tx1_n2 group by rollup (b) PREHOOK: type: QUERY -PREHOOK: Input: default@tx1 +PREHOOK: Input: default@tx1_n2 #### A masked pattern was here #### POSTHOOK: query: select sum(c), grouping(b), '1,1 and 1,0' as expected -from tx1 +from tx1_n2 group by rollup (b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx1 +POSTHOOK: Input: default@tx1_n2 #### A masked pattern was here #### 1 1 1,1 and 1,0 1 0 1,1 and 1,0 -PREHOOK: query: create table tx2 (a integer,b integer,c integer,d double,u string,bi binary) stored as orc +PREHOOK: query: create table tx2_n1 (a integer,b integer,c integer,d double,u string,bi binary) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tx2 
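Note: the 'NULL,1' expected column in these groupby_rollup_empty hunks encodes standard grouping-sets semantics over an input that the WHERE clause empties out: the per-b grouping set contributes no rows, but the empty grouping set () always emits exactly one grand-total row, in which sum(c) over zero rows is NULL and grouping(b) is 1 (b aggregated away). Schematically, for the table tx1_n2(a, b, c) from this diff:

SELECT sum(c), grouping(b) FROM tx1_n2 WHERE a < 0 GROUP BY ROLLUP (b);
-- no row satisfies a < 0, so only the () grouping set fires:
-- the result is the single row (NULL, 1), matching the 'NULL,1' expected marker.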
-POSTHOOK: query: create table tx2 (a integer,b integer,c integer,d double,u string,bi binary) stored as orc +PREHOOK: Output: default@tx2_n1 +POSTHOOK: query: create table tx2_n1 (a integer,b integer,c integer,d double,u string,bi binary) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tx2 +POSTHOOK: Output: default@tx2_n1 PREHOOK: query: explain select sum(c), grouping(b), 'NULL,1' as expected -from tx2 +from tx2_n1 where a<0 group by a,b grouping sets ((), b, a) PREHOOK: type: QUERY @@ -145,7 +145,7 @@ POSTHOOK: query: explain select sum(c), grouping(b), 'NULL,1' as expected -from tx2 +from tx2_n1 where a<0 group by a,b grouping sets ((), b, a) POSTHOOK: type: QUERY @@ -158,7 +158,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: tx2 + alias: tx2_n1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (a < 0) (type: boolean) @@ -206,14 +206,14 @@ STAGE PLANS: ListSink PREHOOK: query: select sum(c),'NULL' as expected -from tx2 +from tx2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@tx2 +PREHOOK: Input: default@tx2_n1 #### A masked pattern was here #### POSTHOOK: query: select sum(c),'NULL' as expected -from tx2 +from tx2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx2 +POSTHOOK: Input: default@tx2_n1 #### A masked pattern was here #### NULL NULL PREHOOK: query: select sum(c), @@ -221,67 +221,67 @@ PREHOOK: query: select sum(c), 'asd', grouping(b), 'NULL,1' as expected -from tx2 +from tx2_n1 where a<0 group by a,b,d grouping sets ((), b, a, d) PREHOOK: type: QUERY -PREHOOK: Input: default@tx2 +PREHOOK: Input: default@tx2_n1 #### A masked pattern was here #### POSTHOOK: query: select sum(c), max(u), 'asd', grouping(b), 'NULL,1' as expected -from tx2 +from tx2_n1 where a<0 group by a,b,d grouping sets ((), b, a, d) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx2 +POSTHOOK: Input: default@tx2_n1 #### A masked pattern was here #### NULL NULL asd 1 NULL,1 -PREHOOK: query: select '2 rows expected',sum(c) from tx2 group by rollup (a) +PREHOOK: query: select '2 rows expected',sum(c) from tx2_n1 group by rollup (a) union all -select '2 rows expected',sum(c) from tx2 group by rollup (a) +select '2 rows expected',sum(c) from tx2_n1 group by rollup (a) PREHOOK: type: QUERY -PREHOOK: Input: default@tx2 +PREHOOK: Input: default@tx2_n1 #### A masked pattern was here #### -POSTHOOK: query: select '2 rows expected',sum(c) from tx2 group by rollup (a) +POSTHOOK: query: select '2 rows expected',sum(c) from tx2_n1 group by rollup (a) union all -select '2 rows expected',sum(c) from tx2 group by rollup (a) +select '2 rows expected',sum(c) from tx2_n1 group by rollup (a) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx2 +POSTHOOK: Input: default@tx2_n1 #### A masked pattern was here #### 2 rows expected NULL 2 rows expected NULL -PREHOOK: query: insert into tx2 values +PREHOOK: query: insert into tx2_n1 values (1,2,3,1.1,'x','b'), (3,2,3,1.1,'y','b') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@tx2 -POSTHOOK: query: insert into tx2 values +PREHOOK: Output: default@tx2_n1 +POSTHOOK: query: insert into tx2_n1 values (1,2,3,1.1,'x','b'), (3,2,3,1.1,'y','b') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@tx2 -POSTHOOK: Lineage: tx2.a SCRIPT [] -POSTHOOK: Lineage: tx2.b SCRIPT [] -POSTHOOK: Lineage: tx2.bi SCRIPT [] -POSTHOOK: Lineage: tx2.c SCRIPT [] -POSTHOOK: Lineage: tx2.d SCRIPT 
[] -POSTHOOK: Lineage: tx2.u SCRIPT [] +POSTHOOK: Output: default@tx2_n1 +POSTHOOK: Lineage: tx2_n1.a SCRIPT [] +POSTHOOK: Lineage: tx2_n1.b SCRIPT [] +POSTHOOK: Lineage: tx2_n1.bi SCRIPT [] +POSTHOOK: Lineage: tx2_n1.c SCRIPT [] +POSTHOOK: Lineage: tx2_n1.d SCRIPT [] +POSTHOOK: Lineage: tx2_n1.u SCRIPT [] PREHOOK: query: select sum(a), u, bi, 'asd', grouping(bi), 'NULL,1' as expected -from tx2 +from tx2_n1 where a=2 group by a,u,bi grouping sets ( u, (), bi) PREHOOK: type: QUERY -PREHOOK: Input: default@tx2 +PREHOOK: Input: default@tx2_n1 #### A masked pattern was here #### POSTHOOK: query: select sum(a), u, @@ -289,10 +289,10 @@ POSTHOOK: query: select sum(a), 'asd', grouping(bi), 'NULL,1' as expected -from tx2 +from tx2_n1 where a=2 group by a,u,bi grouping sets ( u, (), bi) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx2 +POSTHOOK: Input: default@tx2_n1 #### A masked pattern was here #### NULL NULL NULL asd 1 NULL,1 diff --git a/ql/src/test/results/clientpositive/groupby_sort_10.q.out b/ql/src/test/results/clientpositive/groupby_sort_10.q.out index cba8433b8f..09ef04ac58 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_10.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_10.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE T1_n94(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: Output: default@T1_n94 +POSTHOOK: query: CREATE TABLE T1_n94(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +POSTHOOK: Output: default@T1_n94 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n94 PARTITION (ds='1') SELECT * from src where key = 0 or key = 11 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +PREHOOK: Output: default@t1_n94@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n94 PARTITION (ds='1') SELECT * from src where key = 0 or key = 11 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1@ds=1 -POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: EXPLAIN select distinct key from T1 +POSTHOOK: Output: default@t1_n94@ds=1 +POSTHOOK: Lineage: t1_n94 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n94 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: EXPLAIN select distinct key from T1_n94 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN select distinct key from T1 +POSTHOOK: query: EXPLAIN select distinct key from T1_n94 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -33,7 +33,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n94 Statistics: Num rows: 4 Data size: 30 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -59,33 
+59,33 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-PREHOOK: query: select distinct key from T1
+PREHOOK: query: select distinct key from T1_n94
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
+PREHOOK: Input: default@t1_n94
+PREHOOK: Input: default@t1_n94@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select distinct key from T1
+POSTHOOK: query: select distinct key from T1_n94
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
+POSTHOOK: Input: default@t1_n94
+POSTHOOK: Input: default@t1_n94@ds=1
 #### A masked pattern was here ####
 0
 11
-PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='2')
+PREHOOK: query: INSERT OVERWRITE TABLE T1_n94 PARTITION (ds='2')
 SELECT * from src where key = 0 or key = 11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@t1@ds=2
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='2')
+PREHOOK: Output: default@t1_n94@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE T1_n94 PARTITION (ds='2')
 SELECT * from src where key = 0 or key = 11
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1@ds=2
-POSTHOOK: Lineage: t1 PARTITION(ds=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1 PARTITION(ds=2).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN select distinct key from T1
+POSTHOOK: Output: default@t1_n94@ds=2
+POSTHOOK: Lineage: t1_n94 PARTITION(ds=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n94 PARTITION(ds=2).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN select distinct key from T1_n94
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select distinct key from T1
+POSTHOOK: query: EXPLAIN select distinct key from T1_n94
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -96,7 +96,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: t1
+            alias: t1_n94
             Statistics: Num rows: 8 Data size: 60 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -134,25 +134,25 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-PREHOOK: query: select distinct key from T1
+PREHOOK: query: select distinct key from T1_n94
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
-PREHOOK: Input: default@t1@ds=2
+PREHOOK: Input: default@t1_n94
+PREHOOK: Input: default@t1_n94@ds=1
+PREHOOK: Input: default@t1_n94@ds=2
 #### A masked pattern was here ####
-POSTHOOK: query: select distinct key from T1
+POSTHOOK: query: select distinct key from T1_n94
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
-POSTHOOK: Input: default@t1@ds=2
+POSTHOOK: Input: default@t1_n94
+POSTHOOK: Input: default@t1_n94@ds=1
+POSTHOOK: Input: default@t1_n94@ds=2
 #### A masked pattern was here ####
 0
 11
-PREHOOK: query: DROP TABLE T1
+PREHOOK: query: DROP TABLE T1_n94
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: DROP TABLE T1
+PREHOOK: Input: default@t1_n94
+PREHOOK: Output: default@t1_n94
+POSTHOOK: query: DROP TABLE T1_n94
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
+POSTHOOK: Input: default@t1_n94
+POSTHOOK: Output: default@t1_n94
diff --git a/ql/src/test/results/clientpositive/groupby_sort_11.q.out b/ql/src/test/results/clientpositive/groupby_sort_11.q.out
index cbdc526c3a..d6addf4b43 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_11.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_11.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
+PREHOOK: query: CREATE TABLE T1_n10(key STRING, val STRING) PARTITIONED BY (ds string)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
+PREHOOK: Output: default@T1_n10
+POSTHOOK: query: CREATE TABLE T1_n10(key STRING, val STRING) PARTITIONED BY (ds string)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1')
+POSTHOOK: Output: default@T1_n10
+PREHOOK: query: INSERT OVERWRITE TABLE T1_n10 PARTITION (ds='1')
 SELECT * from src where key < 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@t1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1')
+PREHOOK: Output: default@t1_n10@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE T1_n10 PARTITION (ds='1')
 SELECT * from src where key < 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1@ds=1
-POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN select count(distinct key) from T1
+POSTHOOK: Output: default@t1_n10@ds=1
+POSTHOOK: Lineage: t1_n10 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n10 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN select count(distinct key) from T1_n10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(distinct key) from T1
+POSTHOOK: query: EXPLAIN select count(distinct key) from T1_n10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -33,7 +33,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: t1
+            alias: t1_n10
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -71,20 +71,20 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-PREHOOK: query: select count(distinct key) from T1
+PREHOOK: query: select count(distinct key) from T1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
+PREHOOK: Input: default@t1_n10
+PREHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(distinct key) from T1
+POSTHOOK: query: select count(distinct key) from T1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
+POSTHOOK: Input: default@t1_n10
+POSTHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
 6
-PREHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1
+PREHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1_n10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1
+POSTHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1_n10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -95,7 +95,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n10
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -133,20 +133,20 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-PREHOOK: query: select count(distinct key), count(1), count(key), sum(distinct key) from T1
+PREHOOK: query: select count(distinct key), count(1), count(key), sum(distinct key) from T1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
+PREHOOK: Input: default@t1_n10
+PREHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(distinct key), count(1), count(key), sum(distinct key) from T1
+POSTHOOK: query: select count(distinct key), count(1), count(key), sum(distinct key) from T1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
+POSTHOOK: Input: default@t1_n10
+POSTHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
 6 10 10 28.0
-PREHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key
+PREHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1_n10 group by key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key
+POSTHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1_n10 group by key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -157,7 +157,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n10
            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string)
@@ -201,15 +201,15 @@ STAGE PLANS:
       Processor Tree:
        ListSink

-PREHOOK: query: select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key
+PREHOOK: query: select count(distinct key), count(1), count(key), sum(distinct key) from T1_n10 group by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
+PREHOOK: Input: default@t1_n10
+PREHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key
+POSTHOOK: query: select count(distinct key), count(1), count(key), sum(distinct key) from T1_n10 group by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
+POSTHOOK: Input: default@t1_n10
+POSTHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
 1 1 1 2.0
 1 1 1 4.0
@@ -217,9 +217,9 @@ POSTHOOK: Input: default@t1@ds=1
 1 1 1 9.0
 1 3 3 0.0
 1 3 3 5.0
-PREHOOK: query: EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key
+PREHOOK: query: EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1_n10 group by key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key
+POSTHOOK: query: EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1_n10 group by key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -230,7 +230,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n10
            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string)
@@ -270,15 +270,15 @@ STAGE PLANS:
       Processor Tree:
        ListSink

-PREHOOK: query: select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key
+PREHOOK: query: select key, count(distinct key), count(1), count(key), sum(distinct key) from T1_n10 group by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
+PREHOOK: Input: default@t1_n10
+PREHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key
+POSTHOOK: query: select key, count(distinct key), count(1), count(key), sum(distinct key) from T1_n10 group by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
+POSTHOOK: Input: default@t1_n10
+POSTHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
 0 1 3 3 0.0
 2 1 1 1 2.0
@@ -286,9 +286,9 @@ POSTHOOK: Input: default@t1@ds=1
 5 1 3 3 5.0
 8 1 1 1 8.0
 9 1 1 1 9.0
-PREHOOK: query: EXPLAIN select count(distinct key+key) from T1
+PREHOOK: query: EXPLAIN select count(distinct key+key) from T1_n10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(distinct key+key) from T1
+POSTHOOK: query: EXPLAIN select count(distinct key+key) from T1_n10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -300,7 +300,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n10
            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: (UDFToDouble(key) + UDFToDouble(key)) (type: double)
@@ -362,20 +362,20 @@ STAGE PLANS:
       Processor Tree:
        ListSink

-PREHOOK: query: select count(distinct key+key) from T1
+PREHOOK: query: select count(distinct key+key) from T1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
+PREHOOK: Input: default@t1_n10
+PREHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(distinct key+key) from T1
+POSTHOOK: query: select count(distinct key+key) from T1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
+POSTHOOK: Input: default@t1_n10
+POSTHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
 6
-PREHOOK: query: EXPLAIN select count(distinct 1) from T1
+PREHOOK: query: EXPLAIN select count(distinct 1) from T1_n10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(distinct 1) from T1
+POSTHOOK: query: EXPLAIN select count(distinct 1) from T1_n10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -386,7 +386,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n10
            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
@@ -420,20 +420,20 @@ STAGE PLANS:
       Processor Tree:
        ListSink

-PREHOOK: query: select count(distinct 1) from T1
+PREHOOK: query: select count(distinct 1) from T1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
+PREHOOK: Input: default@t1_n10
+PREHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(distinct 1) from T1
+POSTHOOK: query: select count(distinct 1) from T1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
+POSTHOOK: Input: default@t1_n10
+POSTHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
 1
-PREHOOK: query: EXPLAIN select count(distinct key) from T1
+PREHOOK: query: EXPLAIN select count(distinct key) from T1_n10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(distinct key) from T1
+POSTHOOK: query: EXPLAIN select count(distinct key) from T1_n10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -444,7 +444,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n10
            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string)
@@ -474,14 +474,14 @@ STAGE PLANS:
       Processor Tree:
        ListSink

-PREHOOK: query: select count(distinct key) from T1
+PREHOOK: query: select count(distinct key) from T1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
+PREHOOK: Input: default@t1_n10
+PREHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(distinct key) from T1
+POSTHOOK: query: select count(distinct key) from T1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
+POSTHOOK: Input: default@t1_n10
+POSTHOOK: Input: default@t1_n10@ds=1
 #### A masked pattern was here ####
 6
diff --git a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
index c82aad621b..2accfe52d8 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
@@ -1,46 +1,46 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n48(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n48
+POSTHOOK: query: CREATE TABLE T1_n48(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+POSTHOOK: Output: default@T1_n48
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n48
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+PREHOOK: Output: default@t1_n48
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n48
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: Output: default@t1_n48
+PREHOOK: query: INSERT OVERWRITE TABLE T1_n48 select key, val from T1_n48
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: Input: default@t1_n48
+PREHOOK: Output: default@t1_n48
+POSTHOOK: query: INSERT OVERWRITE TABLE T1_n48 select key, val from T1_n48
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: CREATE TABLE outputTbl1(key int, cnt int)
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Output: default@t1_n48
+POSTHOOK: Lineage: t1_n48.key SIMPLE [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t1_n48.val SIMPLE [(t1_n48)t1_n48.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE outputTbl1_n18(key int, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int)
+PREHOOK: Output: default@outputTbl1_n18
+POSTHOOK: query: CREATE TABLE outputTbl1_n18(key int, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
+POSTHOOK: Output: default@outputTbl1_n18
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key
+INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT key, count(1) FROM T1_n48 GROUP BY key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key
+INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT key, count(1) FROM T1_n48 GROUP BY key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -57,7 +57,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n48
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -93,17 +93,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -128,7 +128,7 @@ STAGE PLANS:
      Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: t1
+            base file name: t1_n48
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -142,11 +142,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
 #### A masked pattern was here ####
-              name default.t1
+              name default.t1_n48
              numFiles 2
              numRows 6
              rawDataSize 24
-              serialization.ddl struct t1 { string key, string val}
+              serialization.ddl struct t1_n48 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -166,20 +166,20 @@ STAGE PLANS:
                columns.comments 
                columns.types string:string
 #### A masked pattern was here ####
-                name default.t1
+                name default.t1_n48
                numFiles 2
                numRows 6
                rawDataSize 24
-                serialization.ddl struct t1 { string key, string val}
+                serialization.ddl struct t1_n48 { string key, string val}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 30
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.t1
-            name: default.t1
+            name: default.t1_n48
+            name: default.t1_n48
      Truncated Path -> Alias:
-        /t1 [t1]
+        /t1_n48 [t1_n48]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -236,17 +236,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18

  Stage: Stage-2
    Stats Work
@@ -255,7 +255,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, cnt
          Column Types: int, int
-          Table: default.outputtbl1
+          Table: default.outputtbl1_n18
          Is Table Level Stats: true

  Stage: Stage-3
@@ -280,17 +280,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -311,11 +311,11 @@ STAGE PLANS:
              columns.comments 
              columns.types int:int
 #### A masked pattern was here ####
-              name default.outputtbl1
+              name default.outputtbl1_n18
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+              serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -333,18 +333,18 @@ STAGE PLANS:
                columns.comments 
                columns.types int:int
 #### A masked pattern was here ####
-                name default.outputtbl1
+                name default.outputtbl1_n18
                numFiles 0
                numRows 0
                rawDataSize 0
-                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 0
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl1
-            name: default.outputtbl1
+            name: default.outputtbl1_n18
+            name: default.outputtbl1_n18
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -370,17 +370,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -401,11 +401,11 @@ STAGE PLANS:
              columns.comments 
              columns.types int:int
 #### A masked pattern was here ####
-              name default.outputtbl1
+              name default.outputtbl1_n18
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+              serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -423,18 +423,18 @@ STAGE PLANS:
                columns.comments 
                columns.types int:int
 #### A masked pattern was here ####
-                name default.outputtbl1
+                name default.outputtbl1_n18
                numFiles 0
                numRows 0
                rawDataSize 0
-                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 0
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl1
-            name: default.outputtbl1
+            name: default.outputtbl1_n18
+            name: default.outputtbl1_n18
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -444,46 +444,46 @@ STAGE PLANS:
          hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT key, count(1) FROM T1_n48 GROUP BY key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key
+PREHOOK: Input: default@t1_n48
+PREHOOK: Output: default@outputtbl1_n18
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT key, count(1) FROM T1_n48 GROUP BY key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Output: default@outputtbl1_n18
+POSTHOOK: Lineage: outputtbl1_n18.cnt EXPRESSION [(t1_n48)t1_n48.null, ]
+POSTHOOK: Lineage: outputtbl1_n18.key EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl1_n18
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n18
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: query: SELECT * FROM outputTbl1_n18
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n18
 #### A masked pattern was here ####
 1 1
 2 1
 3 1
 7 1
 8 2
-PREHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int)
+PREHOOK: query: CREATE TABLE outputTbl2_n5(key1 int, key2 string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl2
-POSTHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int)
+PREHOOK: Output: default@outputTbl2_n5
+POSTHOOK: query: CREATE TABLE outputTbl2_n5(key1 int, key2 string, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl2
+POSTHOOK: Output: default@outputTbl2_n5
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val
+INSERT OVERWRITE TABLE outputTbl2_n5
+SELECT key, val, count(1) FROM T1_n48 GROUP BY key, val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val
+INSERT OVERWRITE TABLE outputTbl2_n5
+SELECT key, val, count(1) FROM T1_n48 GROUP BY key, val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -496,7 +496,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n48
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -524,7 +524,7 @@ STAGE PLANS:
      Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: t1
+            base file name: t1_n48
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -538,11 +538,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
 #### A masked pattern was here ####
-              name default.t1
+              name default.t1_n48
              numFiles 2
              numRows 6
              rawDataSize 24
-              serialization.ddl struct t1 { string key, string val}
+              serialization.ddl struct t1_n48 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -562,20 +562,20 @@ STAGE PLANS:
                columns.comments 
                columns.types string:string
 #### A masked pattern was here ####
-                name default.t1
+                name default.t1_n48
                numFiles 2
                numRows 6
                rawDataSize 24
-                serialization.ddl struct t1 { string key, string val}
+                serialization.ddl struct t1_n48 { string key, string val}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 30
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.t1
-            name: default.t1
+            name: default.t1_n48
+            name: default.t1_n48
      Truncated Path -> Alias:
-        /t1 [t1]
+        /t1_n48 [t1_n48]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -607,17 +607,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:string:int
 #### A masked pattern was here ####
-                  name default.outputtbl2
+                  name default.outputtbl2_n5
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt}
+                  serialization.ddl struct outputtbl2_n5 { i32 key1, string key2, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl2
+                name: default.outputtbl2_n5
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -666,17 +666,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:string:int
 #### A masked pattern was here ####
-                  name default.outputtbl2
+                  name default.outputtbl2_n5
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt}
+                  serialization.ddl struct outputtbl2_n5 { i32 key1, string key2, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl2
+                name: default.outputtbl2_n5

  Stage: Stage-2
    Stats Work
@@ -685,7 +685,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key1, key2, cnt
          Column Types: int, string, int
-          Table: default.outputtbl2
+          Table: default.outputtbl2_n5
          Is Table Level Stats: true

  Stage: Stage-3
@@ -758,26 +758,26 @@ STAGE PLANS:
            GatherStats: false
            MultiFileSpray: false

-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n5
+SELECT key, val, count(1) FROM T1_n48 GROUP BY key, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl2
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val
+PREHOOK: Input: default@t1_n48
+PREHOOK: Output: default@outputtbl2_n5
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n5
+SELECT key, val, count(1) FROM T1_n48 GROUP BY key, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl2
-POSTHOOK: Lineage: outputtbl2.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl2.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl2.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl2
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Output: default@outputtbl2_n5
+POSTHOOK: Lineage: outputtbl2_n5.cnt EXPRESSION [(t1_n48)t1_n48.null, ]
+POSTHOOK: Lineage: outputtbl2_n5.key1 EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl2_n5.key2 SIMPLE [(t1_n48)t1_n48.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl2_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl2
+PREHOOK: Input: default@outputtbl2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl2
+POSTHOOK: query: SELECT * FROM outputTbl2_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl2
+POSTHOOK: Input: default@outputtbl2_n5
 #### A masked pattern was here ####
 1 11 1
 2 12 1
@@ -786,12 +786,12 @@ POSTHOOK: Input: default@outputtbl2
 8 18 1
 8 28 1
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key
+INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n48) subq1 GROUP BY key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key
+INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n48) subq1 GROUP BY key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -808,7 +808,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n48
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -844,17 +844,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 1
                  numRows 5
                  rawDataSize 15
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 20
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -879,7 +879,7 @@ STAGE PLANS:
      Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: t1
+            base file name: t1_n48
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -893,11 +893,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
 #### A masked pattern was here ####
-              name default.t1
+              name default.t1_n48
              numFiles 2
              numRows 6
              rawDataSize 24
-              serialization.ddl struct t1 { string key, string val}
+              serialization.ddl struct t1_n48 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -917,20 +917,20 @@ STAGE PLANS:
                columns.comments 
                columns.types string:string
 #### A masked pattern was here ####
-                name default.t1
+                name default.t1_n48
                numFiles 2
                numRows 6
                rawDataSize 24
-                serialization.ddl struct t1 { string key, string val}
+                serialization.ddl struct t1_n48 { string key, string val}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 30
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.t1
-            name: default.t1
+            name: default.t1_n48
+            name: default.t1_n48
      Truncated Path -> Alias:
-        /t1 [t1]
+        /t1_n48 [t1_n48]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -987,17 +987,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 1
                  numRows 5
                  rawDataSize 15
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 20
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18

  Stage: Stage-2
    Stats Work
@@ -1006,7 +1006,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, cnt
          Column Types: int, int
-          Table: default.outputtbl1
+          Table: default.outputtbl1_n18
          Is Table Level Stats: true

  Stage: Stage-3
@@ -1031,17 +1031,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 1
                  numRows 5
                  rawDataSize 15
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 20
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -1062,11 +1062,11 @@ STAGE PLANS:
              columns.comments 
              columns.types int:int
 #### A masked pattern was here ####
-              name default.outputtbl1
+              name default.outputtbl1_n18
              numFiles 1
              numRows 5
              rawDataSize 15
-              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+              serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 20
@@ -1084,18 +1084,18 @@ STAGE PLANS:
                columns.comments 
                columns.types int:int
 #### A masked pattern was here ####
-                name default.outputtbl1
+                name default.outputtbl1_n18
                numFiles 1
                numRows 5
                rawDataSize 15
-                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 20
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl1
-            name: default.outputtbl1
+            name: default.outputtbl1_n18
+            name: default.outputtbl1_n18
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -1121,17 +1121,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 1
                  numRows 5
                  rawDataSize 15
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 20
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -1152,11 +1152,11 @@ STAGE PLANS:
              columns.comments 
              columns.types int:int
 #### A masked pattern was here ####
-              name default.outputtbl1
+              name default.outputtbl1_n18
              numFiles 1
              numRows 5
              rawDataSize 15
-              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+              serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 20
@@ -1174,18 +1174,18 @@ STAGE PLANS:
                columns.comments 
                columns.types int:int
 #### A masked pattern was here ####
-                name default.outputtbl1
+                name default.outputtbl1_n18
                numFiles 1
                numRows 5
                rawDataSize 15
-                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 20
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl1
-            name: default.outputtbl1
+            name: default.outputtbl1_n18
+            name: default.outputtbl1_n18
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -1195,25 +1195,25 @@ STAGE PLANS:
          hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n48) subq1 GROUP BY key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key
+PREHOOK: Input: default@t1_n48
+PREHOOK: Output: default@outputtbl1_n18
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n48) subq1 GROUP BY key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Output: default@outputtbl1_n18
+POSTHOOK: Lineage: outputtbl1_n18.cnt EXPRESSION [(t1_n48)t1_n48.null, ]
+POSTHOOK: Lineage: outputtbl1_n18.key EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl1_n18
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n18
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: query: SELECT * FROM outputTbl1_n18
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n18
 #### A masked pattern was here ####
 1 1
 2 1
@@ -1221,12 +1221,12 @@ POSTHOOK: Input: default@outputtbl1
 7 1
 8 2
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k
+INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n48) subq1 GROUP BY k
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k
+INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n48) subq1 GROUP BY k
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1243,7 +1243,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n48
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -1279,17 +1279,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 1
                  numRows 5
                  rawDataSize 15
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 20
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -1314,7 +1314,7 @@ STAGE PLANS:
      Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: t1
+            base file name: t1_n48
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -1328,11 +1328,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
 #### A masked pattern was here ####
-              name default.t1
+              name default.t1_n48
              numFiles 2
              numRows 6
              rawDataSize 24
-              serialization.ddl struct t1 { string key, string val}
+              serialization.ddl struct t1_n48 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -1352,20 +1352,20 @@ STAGE PLANS:
                columns.comments 
                columns.types string:string
 #### A masked pattern was here ####
-                name default.t1
+                name default.t1_n48
                numFiles 2
                numRows 6
                rawDataSize 24
-                serialization.ddl struct t1 { string key, string val}
+                serialization.ddl struct t1_n48 { string key, string val}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 30
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.t1
-            name: default.t1
+            name: default.t1_n48
+            name: default.t1_n48
      Truncated Path -> Alias:
-        /t1 [t1]
+        /t1_n48 [t1_n48]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -1422,17 +1422,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 1
                  numRows 5
                  rawDataSize 15
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 20
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18

  Stage: Stage-2
    Stats Work
@@ -1441,7 +1441,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, cnt
          Column Types: int, int
-          Table: default.outputtbl1
+          Table: default.outputtbl1_n18
          Is Table Level Stats: true

  Stage: Stage-3
@@ -1466,17 +1466,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 1
                  numRows 5
                  rawDataSize 15
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 20
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -1497,11 +1497,11 @@ STAGE PLANS:
              columns.comments 
              columns.types int:int
 #### A masked pattern was here ####
-              name default.outputtbl1
+              name default.outputtbl1_n18
              numFiles 1
              numRows 5
              rawDataSize 15
-              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+              serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 20
@@ -1519,18 +1519,18 @@ STAGE PLANS:
                columns.comments 
                columns.types int:int
 #### A masked pattern was here ####
-                name default.outputtbl1
+                name default.outputtbl1_n18
                numFiles 1
                numRows 5
                rawDataSize 15
-                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 20
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl1
-            name: default.outputtbl1
+            name: default.outputtbl1_n18
+            name: default.outputtbl1_n18
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -1556,17 +1556,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int
 #### A masked pattern was here ####
-                  name default.outputtbl1
+                  name default.outputtbl1_n18
                  numFiles 1
                  numRows 5
                  rawDataSize 15
-                  serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                  serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 20
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n18
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -1587,11 +1587,11 @@ STAGE PLANS:
              columns.comments 
              columns.types int:int
 #### A masked pattern was here ####
-              name default.outputtbl1
+              name default.outputtbl1_n18
              numFiles 1
              numRows 5
              rawDataSize 15
-              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+              serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 20
@@ -1609,18 +1609,18 @@ STAGE PLANS:
                columns.comments 
                columns.types int:int
 #### A masked pattern was here ####
-                name default.outputtbl1
+                name default.outputtbl1_n18
                numFiles 1
                numRows 5
                rawDataSize 15
-                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}
+                serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 20
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl1
-            name: default.outputtbl1
+            name: default.outputtbl1_n18
+            name: default.outputtbl1_n18
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -1630,46 +1630,46 @@ STAGE PLANS:
          hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n48) subq1 GROUP BY k
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k
+PREHOOK: Input: default@t1_n48
+PREHOOK: Output: default@outputtbl1_n18
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n48) subq1 GROUP BY k
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Output: default@outputtbl1_n18
+POSTHOOK: Lineage: outputtbl1_n18.cnt EXPRESSION [(t1_n48)t1_n48.null, ]
+POSTHOOK: Lineage: outputtbl1_n18.key EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl1_n18
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n18
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: query: SELECT * FROM outputTbl1_n18
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n18
 #### A masked pattern was here ####
 1 1
 2 1
 3 1
 7 1
 8 2
-PREHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int)
+PREHOOK: query: CREATE TABLE outputTbl3_n2(key1 int, key2 int, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl3
-POSTHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int)
+PREHOOK: Output: default@outputTbl3_n2
+POSTHOOK: query: CREATE TABLE outputTbl3_n2(key1 int, key2 int, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl3
+POSTHOOK: Output: default@outputTbl3_n2
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
+INSERT OVERWRITE TABLE outputTbl3_n2
+SELECT 1, key, count(1) FROM T1_n48 GROUP BY 1, key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
+INSERT OVERWRITE TABLE outputTbl3_n2
+SELECT 1, key, count(1) FROM T1_n48 GROUP BY 1, key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1686,7 +1686,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n48
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -1722,17 +1722,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int:int
 #### A masked pattern was here ####
-                  name default.outputtbl3
+                  name default.outputtbl3_n2
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                  serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl3
+                name: default.outputtbl3_n2
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -1757,7 +1757,7 @@ STAGE PLANS:
      Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: t1
+            base file name: t1_n48
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -1771,11 +1771,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
 #### A masked pattern was here ####
-              name default.t1
+              name default.t1_n48
              numFiles 2
              numRows 6
              rawDataSize 24
-              serialization.ddl struct t1 { string key, string val}
+              serialization.ddl struct t1_n48 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -1795,20 +1795,20 @@ STAGE PLANS:
                columns.comments 
                columns.types string:string
 #### A masked pattern was here ####
-                name default.t1
+                name default.t1_n48
                numFiles 2
                numRows 6
                rawDataSize 24
-                serialization.ddl struct t1 { string key, string val}
+                serialization.ddl struct t1_n48 { string key, string val}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 30
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.t1
-            name: default.t1
+            name: default.t1_n48
+            name: default.t1_n48
      Truncated Path -> Alias:
-        /t1 [t1]
+        /t1_n48 [t1_n48]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -1865,17 +1865,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int:int
 #### A masked pattern was here ####
-                  name default.outputtbl3
+                  name default.outputtbl3_n2
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                  serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl3
+                name: default.outputtbl3_n2

  Stage: Stage-2
    Stats Work
@@ -1884,7 +1884,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key1, key2, cnt
          Column Types: int, int, int
-          Table: default.outputtbl3
+          Table: default.outputtbl3_n2
          Is Table Level Stats: true

  Stage: Stage-3
@@ -1909,17 +1909,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int:int
 #### A masked pattern was here ####
-                  name default.outputtbl3
+                  name default.outputtbl3_n2
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                  serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl3
+                name: default.outputtbl3_n2
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -1940,11 +1940,11 @@ STAGE PLANS:
              columns.comments 
              columns.types int:int:int
 #### A masked pattern was here ####
-              name default.outputtbl3
+              name default.outputtbl3_n2
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+              serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -1962,18 +1962,18 @@ STAGE PLANS:
                columns.comments 
                columns.types int:int:int
 #### A masked pattern was here ####
-                name default.outputtbl3
+                name default.outputtbl3_n2
                numFiles 0
                numRows 0
                rawDataSize 0
-                serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 0
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl3
-            name: default.outputtbl3
+            name: default.outputtbl3_n2
+            name: default.outputtbl3_n2
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -1999,17 +1999,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int:int
 #### A masked pattern was here ####
-                  name default.outputtbl3
+                  name default.outputtbl3_n2
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                  serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl3
+                name: default.outputtbl3_n2
            TotalFiles: 1
            GatherStats: false
            MultiFileSpray: false
@@ -2030,11 +2030,11 @@ STAGE PLANS:
              columns.comments 
              columns.types int:int:int
 #### A masked pattern was here ####
-              name default.outputtbl3
+              name default.outputtbl3_n2
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+              serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -2052,18 +2052,18 @@ STAGE PLANS:
                columns.comments 
                columns.types int:int:int
 #### A masked pattern was here ####
-                name default.outputtbl3
+                name default.outputtbl3_n2
                numFiles 0
                numRows 0
                rawDataSize 0
-                serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 0
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl3
-            name: default.outputtbl3
+            name: default.outputtbl3_n2
+            name: default.outputtbl3_n2
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -2073,47 +2073,47 @@ STAGE PLANS:
          hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3_n2
+SELECT 1, key, count(1) FROM T1_n48 GROUP BY 1, key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl3
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
+PREHOOK: Input: default@t1_n48
+PREHOOK: Output: default@outputtbl3_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3_n2
+SELECT 1, key, count(1) FROM T1_n48 GROUP BY 1, key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
-POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl3
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Output: default@outputtbl3_n2
+POSTHOOK: Lineage: outputtbl3_n2.cnt EXPRESSION [(t1_n48)t1_n48.null, ]
+POSTHOOK: Lineage: outputtbl3_n2.key1 SIMPLE []
+POSTHOOK: Lineage: outputtbl3_n2.key2 EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl3_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl3
+PREHOOK: Input: default@outputtbl3_n2
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl3
+POSTHOOK: query: SELECT * FROM outputTbl3_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl3
+POSTHOOK: Input: default@outputtbl3_n2
 #### A masked pattern was here ####
 1 1 1
 1 2 1
 1 3 1
 1 7 1
 1 8 2
-PREHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int)
+PREHOOK: query: CREATE TABLE outputTbl4_n2(key1 int, key2 int, key3 string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl4
-POSTHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int)
+PREHOOK: Output: default@outputTbl4_n2
+POSTHOOK: query: CREATE TABLE outputTbl4_n2(key1 int, key2 int, key3 string, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl4
+POSTHOOK: Output: default@outputTbl4_n2
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
+INSERT OVERWRITE TABLE outputTbl4_n2
+SELECT key, 1, val, count(1) FROM T1_n48 GROUP BY key, 1, val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
+INSERT OVERWRITE TABLE outputTbl4_n2
+SELECT key, 1, val, count(1) FROM T1_n48 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -2126,7 +2126,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n48
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -2154,7 +2154,7 @@ STAGE PLANS:
      Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: t1
+            base file name: t1_n48
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -2168,11 +2168,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
 #### A masked pattern was here ####
-              name default.t1
+              name default.t1_n48
              numFiles 2
              numRows 6
              rawDataSize 24
-              serialization.ddl struct t1 { string key, string val}
+              serialization.ddl struct t1_n48 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -2192,20 +2192,20 @@ STAGE PLANS:
                columns.comments 
                columns.types string:string
 #### A masked pattern was here ####
-                name default.t1
+                name default.t1_n48
                numFiles 2
                numRows 6
                rawDataSize 24
-                serialization.ddl struct t1 { string key, string val}
+                serialization.ddl struct t1_n48 { string key, string val}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 30
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.t1
-            name: default.t1
+            name: default.t1_n48
+            name: default.t1_n48
      Truncated Path -> Alias:
-        /t1 [t1]
+        /t1_n48 [t1_n48]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -2237,17 +2237,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int:string:int
 #### A masked pattern was here ####
-                  name default.outputtbl4
+                  name default.outputtbl4_n2
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                  serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl4
+                name: default.outputtbl4_n2
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -2296,17 +2296,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int:string:int
 #### A masked pattern was here ####
-                  name default.outputtbl4
+                  name default.outputtbl4_n2
                  numFiles 0
                  numRows 0
                  rawDataSize 0
-                  serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                  serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 0
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl4
+                name: default.outputtbl4_n2

  Stage: Stage-2
    Stats Work
@@ -2315,7 +2315,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key1, key2, key3, cnt
          Column Types: int, int, string, int
-          Table: default.outputtbl4
+          Table: default.outputtbl4_n2
          Is Table Level Stats: true

  Stage: Stage-3
@@ -2388,27 +2388,27 @@ STAGE PLANS:
            GatherStats: false
            MultiFileSpray: false

-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n2
+SELECT key, 1, val, count(1) FROM T1_n48 GROUP BY key, 1, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl4
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
+PREHOOK: Input: default@t1_n48
+PREHOOK: Output: default@outputtbl4_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n2
+SELECT key, 1, val, count(1) FROM T1_n48 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
-POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl4
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Output: default@outputtbl4_n2
+POSTHOOK: Lineage: outputtbl4_n2.cnt EXPRESSION [(t1_n48)t1_n48.null, ]
+POSTHOOK: Lineage: outputtbl4_n2.key1 EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4_n2.key2 SIMPLE []
+POSTHOOK: Lineage: outputtbl4_n2.key3 SIMPLE [(t1_n48)t1_n48.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl4_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl4
+PREHOOK: Input: default@outputtbl4_n2
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl4
+POSTHOOK: query: SELECT * FROM outputTbl4_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl4
+POSTHOOK: Input: default@outputtbl4_n2
 #### A masked pattern was here ####
 1 1 11 1
 2 1 12 1
@@ -2417,12 +2417,12 @@ POSTHOOK: Input: default@outputtbl4
 8 1 18 1
 8 1 28 1
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1
+INSERT OVERWRITE TABLE outputTbl3_n2
+SELECT key, key + 1, count(1) FROM T1_n48 GROUP BY key, key + 1
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1
+INSERT OVERWRITE TABLE outputTbl3_n2
+SELECT key, key + 1, count(1) FROM T1_n48 GROUP BY key, key + 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -2435,7 +2435,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n48
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -2463,7 +2463,7 @@ STAGE PLANS:
      Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: t1
+            base file name: t1_n48
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -2477,11 +2477,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
 #### A masked pattern was here ####
-              name default.t1
+              name default.t1_n48
              numFiles 2
              numRows 6
              rawDataSize 24
-              serialization.ddl struct t1 { string key, string val}
+              serialization.ddl struct t1_n48 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -2501,20 +2501,20 @@ STAGE PLANS:
                columns.comments 
                columns.types string:string
 #### A masked pattern was here ####
-                name default.t1
+                name default.t1_n48
                numFiles 2
                numRows 6
                rawDataSize 24
-                serialization.ddl struct t1 { string key, string val}
+                serialization.ddl struct t1_n48 { string key, string val}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                totalSize 30
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.t1
-            name: default.t1
+            name: default.t1_n48
+            name: default.t1_n48
      Truncated Path -> Alias:
-        /t1 [$hdt$_0:t1]
+        /t1_n48 [$hdt$_0:t1_n48]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -2546,17 +2546,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int:int
 #### A masked pattern was here ####
-                  name default.outputtbl3
+                  name default.outputtbl3_n2
                  numFiles 1
                  numRows 5
                  rawDataSize 25
-                  serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                  serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 30
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl3
+                name: default.outputtbl3_n2
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -2605,17 +2605,17 @@ STAGE PLANS:
                  columns.comments 
                  columns.types int:int:int
 #### A masked pattern was here ####
-                  name default.outputtbl3
+                  name default.outputtbl3_n2
                  numFiles 1
                  numRows 5
                  rawDataSize 25
-                  serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                  serialization.ddl struct outputtbl3_n2 { i32 key1, i32 key2, i32 cnt}
                  serialization.format 1
                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  totalSize 30
 #### A masked pattern was here ####
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl3
+                name: default.outputtbl3_n2

  Stage: Stage-2
    Stats Work
@@ -2624,7 +2624,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key1, key2, cnt
          Column Types: int, int, int
-          Table: default.outputtbl3
+          Table: default.outputtbl3_n2
          Is Table Level Stats: true

  Stage: Stage-3
@@ -2697,26 +2697,26 @@ STAGE PLANS:
            GatherStats: false
            MultiFileSpray: false

-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3_n2
+SELECT key, key + 1, count(1) FROM T1_n48 GROUP BY key, key + 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl3
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1
+PREHOOK: Input: default@t1_n48
+PREHOOK: Output: default@outputtbl3_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3_n2
+SELECT key, key + 1, count(1) FROM T1_n48 GROUP BY key, key + 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl3.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl3
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Output: default@outputtbl3_n2
+POSTHOOK: Lineage: outputtbl3_n2.cnt EXPRESSION [(t1_n48)t1_n48.null, ]
+POSTHOOK: Lineage: outputtbl3_n2.key1 EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl3_n2.key2 EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl3_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl3
+PREHOOK: Input: default@outputtbl3_n2
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl3
+POSTHOOK: query: SELECT * FROM outputTbl3_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl3
+POSTHOOK: Input: default@outputtbl3_n2
 #### A masked pattern was here ####
 1 2 1
 2 3 1
@@ -2724,15 +2724,15 @@ POSTHOOK: Input: default@outputtbl3
 7 8 1
 8 9 2
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n18
 SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq1
 group by key + key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
+INSERT OVERWRITE TABLE outputTbl1_n18
 SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq1
 group by key + key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -2746,7 +2746,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n48
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -2784,7 +2784,7 @@ STAGE PLANS:
      Path -> Partition:
 #### A masked pattern was here ####
          Partition
-            base file name: t1
+            base file name: t1_n48
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -2798,11 +2798,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
 #### A masked pattern was here
#### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -2822,20 +2822,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n48 + name: default.t1_n48 Truncated Path -> Alias: - /t1 [$hdt$_0:t1] + /t1_n48 [$hdt$_0:t1_n48] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -2867,17 +2867,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -2926,17 +2926,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 Stage: Stage-2 Stats Work @@ -2945,7 +2945,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n18 Is Table Level Stats: true Stage: Stage-3 @@ -3018,29 +3018,29 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq1 group by key + key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@t1_n48 +PREHOOK: Output: default@outputtbl1_n18 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 SELECT key + key, sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq1 group by key + key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM 
outputTbl1 +POSTHOOK: Input: default@t1_n48 +POSTHOOK: Output: default@outputtbl1_n18 +POSTHOOK: Lineage: outputtbl1_n18.cnt EXPRESSION [(t1_n48)t1_n48.null, ] +POSTHOOK: Lineage: outputtbl1_n18.key EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### 14 1 16 2 @@ -3048,19 +3048,19 @@ POSTHOOK: Input: default@outputtbl1 4 1 6 1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key ) subq1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key ) subq1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -3078,7 +3078,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -3116,17 +3116,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -3147,7 +3147,7 @@ STAGE PLANS: value expressions: _col0 (type: struct), _col1 (type: struct) auto parallelism: false TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -3185,17 +3185,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -3220,7 +3220,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n48 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -3234,11 +3234,11 @@ STAGE PLANS: 
columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -3258,20 +3258,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n48 + name: default.t1_n48 Truncated Path -> Alias: - /t1 [null-subquery1:$hdt$_0-subquery1:t1, null-subquery2:$hdt$_0-subquery2:t1] + /t1_n48 [null-subquery1:$hdt$_0-subquery1:t1_n48, null-subquery2:$hdt$_0-subquery2:t1_n48] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -3328,17 +3328,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 Stage: Stage-2 Stats Work @@ -3347,7 +3347,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n18 Is Table Level Stats: true Stage: Stage-3 @@ -3372,17 +3372,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -3403,11 +3403,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 @@ -3425,18 +3425,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was 
here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n18 + name: default.outputtbl1_n18 Truncated Path -> Alias: #### A masked pattern was here #### @@ -3462,17 +3462,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -3493,11 +3493,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 @@ -3515,18 +3515,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n18 + name: default.outputtbl1_n18 Truncated Path -> Alias: #### A masked pattern was here #### @@ -3536,33 +3536,33 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key ) subq1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@t1_n48 +PREHOOK: Output: default@outputtbl1_n18 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key ) subq1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n48 +POSTHOOK: Output: default@outputtbl1_n18 +POSTHOOK: Lineage: outputtbl1_n18.cnt EXPRESSION [(t1_n48)t1_n48.null, ] +POSTHOOK: Lineage: outputtbl1_n18.key EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n18 PREHOOK: type: QUERY -PREHOOK: Input: 
default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### 1 1 1 1 @@ -3575,19 +3575,19 @@ POSTHOOK: Input: default@outputtbl1 8 2 8 2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) FROM T1_n48 GROUP BY key + key ) subq1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n48 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) FROM T1_n48 GROUP BY key + key ) subq1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -3606,7 +3606,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -3634,7 +3634,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n48 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -3648,11 +3648,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -3672,20 +3672,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n48 + name: default.t1_n48 Truncated Path -> Alias: - /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1] + /t1_n48 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1_n48] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -3721,7 +3721,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -3759,17 +3759,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -3816,17 +3816,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -3873,7 +3873,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n48 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -3887,11 +3887,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -3911,20 +3911,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n48 + name: default.t1_n48 Truncated Path -> Alias: - /t1 [null-subquery1:$hdt$_0-subquery1:t1] + /t1_n48 [null-subquery1:$hdt$_0-subquery1:t1_n48] #### A masked pattern was here #### Needs Tagging: false Reduce Operator Tree: @@ -3982,17 +3982,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 Stage: Stage-3 Stats Work @@ -4001,7 +4001,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n18 Is Table Level Stats: true Stage: Stage-4 @@ -4026,17 +4026,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct 
outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -4057,11 +4057,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 @@ -4079,18 +4079,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n18 + name: default.outputtbl1_n18 Truncated Path -> Alias: #### A masked pattern was here #### @@ -4116,17 +4116,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -4147,11 +4147,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 @@ -4169,18 +4169,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n18 + name: default.outputtbl1_n18 Truncated Path -> Alias: #### A masked pattern was here #### @@ -4190,33 +4190,33 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key +SELECT key, count(1) as cnt FROM T1_n48 
GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) as cnt FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) as cnt FROM T1_n48 GROUP BY key + key ) subq1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@t1_n48 +PREHOOK: Output: default@outputtbl1_n18 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key +SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) as cnt FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) as cnt FROM T1_n48 GROUP BY key + key ) subq1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n48 +POSTHOOK: Output: default@outputtbl1_n18 +POSTHOOK: Lineage: outputtbl1_n18.cnt EXPRESSION [(t1_n48)t1_n48.null, ] +POSTHOOK: Lineage: outputtbl1_n18.key EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### 1 1 14 1 @@ -4229,19 +4229,19 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n18 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq2 ON subq1.key = subq2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -4255,7 +4255,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -4278,7 +4278,7 @@ STAGE PLANS: value expressions: _col1 (type: bigint) auto parallelism: false TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -4305,7 +4305,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n48 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -4319,11 +4319,11 @@ STAGE PLANS: columns.comments 
columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -4343,20 +4343,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n48 + name: default.t1_n48 Truncated Path -> Alias: - /t1 [$hdt$_0:t1, $hdt$_1:t1] + /t1_n48 [$hdt$_0:t1_n48, $hdt$_1:t1_n48] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -4390,17 +4390,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 32 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 42 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -4449,17 +4449,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 10 rawDataSize 32 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 42 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 Stage: Stage-2 Stats Work @@ -4468,7 +4468,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n18 Is Table Level Stats: true Stage: Stage-3 @@ -4541,33 +4541,33 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@t1_n48 +PREHOOK: Output: default@outputtbl1_n18 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n48 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, 
count(1) as cnt FROM T1_n48 GROUP BY key) subq2 ON subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n48 +POSTHOOK: Output: default@outputtbl1_n18 +POSTHOOK: Lineage: outputtbl1_n18.cnt EXPRESSION [(t1_n48)t1_n48.null, ] +POSTHOOK: Lineage: outputtbl1_n18.key EXPRESSION [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### 1 2 2 2 @@ -4576,16 +4576,16 @@ POSTHOOK: Input: default@outputtbl1 8 4 PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) FROM T1_n48 GROUP BY key) subq1 JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 +(SELECT key, val, count(1) FROM T1_n48 GROUP BY key, val) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) FROM T1_n48 GROUP BY key) subq1 JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 +(SELECT key, val, count(1) FROM T1_n48 GROUP BY key, val) subq2 ON subq1.key = subq2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -4598,7 +4598,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -4626,7 +4626,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n48 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -4640,11 +4640,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -4664,20 +4664,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n48 + name: default.t1_n48 Truncated Path -> Alias: - /t1 [$hdt$_1:t1] + /t1_n48 [$hdt$_1:t1_n48] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -4709,7 +4709,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n48 
Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -4769,7 +4769,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n48 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -4783,11 +4783,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -4807,20 +4807,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n48 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n48 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n48 + name: default.t1_n48 Truncated Path -> Alias: - /t1 [$hdt$_0:t1] + /t1_n48 [$hdt$_0:t1_n48] #### A masked pattern was here #### Needs Tagging: true Reduce Operator Tree: @@ -4861,33 +4861,33 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T2_n30(key STRING, val STRING) CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: Output: default@T2_n30 +POSTHOOK: query: CREATE TABLE T2_n30(key STRING, val STRING) CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 +POSTHOOK: Output: default@T2_n30 +PREHOOK: query: INSERT OVERWRITE TABLE T2_n30 select key, val from T1_n48 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 +PREHOOK: Input: default@t1_n48 +PREHOOK: Output: default@t2_n30 +POSTHOOK: query: INSERT OVERWRITE TABLE T2_n30 select key, val from T1_n48 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Input: default@t1_n48 +POSTHOOK: Output: default@t2_n30 +POSTHOOK: Lineage: t2_n30.key SIMPLE [(t1_n48)t1_n48.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n30.val SIMPLE [(t1_n48)t1_n48.FieldSchema(name:val, type:string, comment:null), ] PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM T2_n30 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT 
OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM T2_n30 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4900,7 +4900,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n30 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -4929,7 +4929,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t2 + base file name: t2_n30 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -4943,11 +4943,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n30 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n30 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -4967,20 +4967,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n30 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n30 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 + name: default.t2_n30 + name: default.t2_n30 Truncated Path -> Alias: - /t2 [t2] + /t2_n30 [t2_n30] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -5012,17 +5012,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -5071,17 +5071,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n18 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n18 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n18 Stage: Stage-2 Stats Work @@ -5090,7 +5090,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n18 Is Table Level Stats: true Stage: Stage-3 @@ -5163,25 +5163,25 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM T2_n30 
GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key +PREHOOK: Input: default@t2_n30 +PREHOOK: Output: default@outputtbl1_n18 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n18 +SELECT key, count(1) FROM T2_n30 GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t2_n30 +POSTHOOK: Output: default@outputtbl1_n18 +POSTHOOK: Lineage: outputtbl1_n18.cnt EXPRESSION [(t2_n30)t2_n30.null, ] +POSTHOOK: Lineage: outputtbl1_n18.key EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n18 #### A masked pattern was here #### 1 1 2 1 @@ -5189,12 +5189,12 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val +INSERT OVERWRITE TABLE outputTbl4_n2 +SELECT key, 1, val, count(1) FROM T2_n30 GROUP BY key, 1, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val +INSERT OVERWRITE TABLE outputTbl4_n2 +SELECT key, 1, val, count(1) FROM T2_n30 GROUP BY key, 1, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -5211,7 +5211,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n30 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -5247,17 +5247,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n2 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n2 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -5282,7 +5282,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t2 + base file name: t2_n30 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -5296,11 +5296,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n30 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n30 { string key, string val} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -5320,20 +5320,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n30 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n30 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 + name: default.t2_n30 + name: default.t2_n30 Truncated Path -> Alias: - /t2 [t2] + /t2_n30 [t2_n30] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -5390,17 +5390,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n2 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n2 Stage: Stage-2 Stats Work @@ -5409,7 +5409,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, key3, cnt Column Types: int, int, string, int - Table: default.outputtbl4 + Table: default.outputtbl4_n2 Is Table Level Stats: true Stage: Stage-3 @@ -5434,17 +5434,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n2 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n2 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -5465,11 +5465,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n2 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 @@ -5487,18 +5487,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n2 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 + name: default.outputtbl4_n2 + name: default.outputtbl4_n2 Truncated Path -> Alias: 
#### A masked pattern was here ####
@@ -5524,17 +5524,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -5555,11 +5555,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
@@ -5577,18 +5577,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
+            name: default.outputtbl4_n2
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -5598,27 +5598,27 @@ STAGE PLANS:
           hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n2
+SELECT key, 1, val, count(1) FROM T2_n30 GROUP BY key, 1, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@outputtbl4
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
+PREHOOK: Input: default@t2_n30
+PREHOOK: Output: default@outputtbl4_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n2
+SELECT key, 1, val, count(1) FROM T2_n30 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
-POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
-POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl4
+POSTHOOK: Input: default@t2_n30
+POSTHOOK: Output: default@outputtbl4_n2
+POSTHOOK: Lineage: outputtbl4_n2.cnt EXPRESSION [(t2_n30)t2_n30.null, ]
+POSTHOOK: Lineage: outputtbl4_n2.key1 EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4_n2.key2 SIMPLE []
+POSTHOOK: Lineage: outputtbl4_n2.key3 SIMPLE [(t2_n30)t2_n30.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl4_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl4
+PREHOOK: Input: default@outputtbl4_n2
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl4
+POSTHOOK: query: SELECT * FROM outputTbl4_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl4
+POSTHOOK: Input: default@outputtbl4_n2
#### A masked pattern was here ####
 1	1	11	1
 2	1	12	1
@@ -5626,21 +5626,21 @@ POSTHOOK: Input: default@outputtbl4
 7	1	17	1
 8	1	18	1
 8	1	28	1
-PREHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int)
+PREHOOK: query: CREATE TABLE outputTbl5_n2(key1 int, key2 int, key3 string, key4 int, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl5
-POSTHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int)
+PREHOOK: Output: default@outputTbl5_n2
+POSTHOOK: query: CREATE TABLE outputTbl5_n2(key1 int, key2 int, key3 string, key4 int, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl5
+POSTHOOK: Output: default@outputTbl5_n2
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
+INSERT OVERWRITE TABLE outputTbl5_n2
+SELECT key, 1, val, 2, count(1) FROM T2_n30 GROUP BY key, 1, val, 2
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
+INSERT OVERWRITE TABLE outputTbl5_n2
+SELECT key, 1, val, 2, count(1) FROM T2_n30 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -5657,7 +5657,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: t2
+            alias: t2_n30
             Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Select Operator
@@ -5693,17 +5693,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int:int
#### A masked pattern was here ####
-              name default.outputtbl5
+              name default.outputtbl5_n2
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.ddl struct outputtbl5_n2 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl5
+            name: default.outputtbl5_n2
           TotalFiles: 1
           GatherStats: true
           MultiFileSpray: false
@@ -5728,7 +5728,7 @@ STAGE PLANS:
       Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: t2
+           base file name: t2_n30
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -5742,11 +5742,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
-             name default.t2
+             name default.t2_n30
              numFiles 2
              numRows 6
              rawDataSize 24
-             serialization.ddl struct t2 { string key, string val}
+             serialization.ddl struct t2_n30 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -5766,20 +5766,20 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
-             name default.t2
+             name default.t2_n30
              numFiles 2
              numRows 6
              rawDataSize 24
-             serialization.ddl struct t2 { string key, string val}
+             serialization.ddl struct t2_n30 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.t2
-           name: default.t2
+           name: default.t2_n30
+           name: default.t2_n30
       Truncated Path -> Alias:
-        /t2 [t2]
+        /t2_n30 [t2_n30]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -5836,17 +5836,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int:int
#### A masked pattern was here ####
-              name default.outputtbl5
+              name default.outputtbl5_n2
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.ddl struct outputtbl5_n2 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl5
+            name: default.outputtbl5_n2
   Stage: Stage-2
     Stats Work
@@ -5855,7 +5855,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key1, key2, key3, key4, cnt
           Column Types: int, int, string, int, int
-          Table: default.outputtbl5
+          Table: default.outputtbl5_n2
           Is Table Level Stats: true
   Stage: Stage-3
@@ -5880,17 +5880,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int:int
#### A masked pattern was here ####
-              name default.outputtbl5
+              name default.outputtbl5_n2
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.ddl struct outputtbl5_n2 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl5
+            name: default.outputtbl5_n2
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -5911,11 +5911,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int:int
#### A masked pattern was here ####
-              name default.outputtbl5
+              name default.outputtbl5_n2
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.ddl struct outputtbl5_n2 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
@@ -5933,18 +5933,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int:int
#### A masked pattern was here ####
-              name default.outputtbl5
+              name default.outputtbl5_n2
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.ddl struct outputtbl5_n2 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl5
-            name: default.outputtbl5
+            name: default.outputtbl5_n2
+            name: default.outputtbl5_n2
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -5970,17 +5970,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int:int
#### A masked pattern was here ####
-              name default.outputtbl5
+              name default.outputtbl5_n2
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.ddl struct outputtbl5_n2 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl5
+            name: default.outputtbl5_n2
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -6001,11 +6001,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int:int
#### A masked pattern was here ####
-              name default.outputtbl5
+              name default.outputtbl5_n2
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.ddl struct outputtbl5_n2 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
@@ -6023,18 +6023,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int:int
#### A masked pattern was here ####
-              name default.outputtbl5
+              name default.outputtbl5_n2
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.ddl struct outputtbl5_n2 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl5
-            name: default.outputtbl5
+            name: default.outputtbl5_n2
+            name: default.outputtbl5_n2
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -6044,30 +6044,30 @@ STAGE PLANS:
           hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5_n2
+SELECT key, 1, val, 2, count(1) FROM T2_n30 GROUP BY key, 1, val, 2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@outputtbl5
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
+PREHOOK: Input: default@t2_n30
+PREHOOK: Output: default@outputtbl5_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl5_n2
+SELECT key, 1, val, 2, count(1) FROM T2_n30 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
-POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
-POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl5.key4 SIMPLE []
-PREHOOK: query: SELECT * FROM outputTbl5
+POSTHOOK: Input: default@t2_n30
+POSTHOOK: Output: default@outputtbl5_n2
+POSTHOOK: Lineage: outputtbl5_n2.cnt EXPRESSION [(t2_n30)t2_n30.null, ]
+POSTHOOK: Lineage: outputtbl5_n2.key1 EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl5_n2.key2 SIMPLE []
+POSTHOOK: Lineage: outputtbl5_n2.key3 SIMPLE [(t2_n30)t2_n30.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl5_n2.key4 SIMPLE []
+PREHOOK: query: SELECT * FROM outputTbl5_n2
 ORDER BY key1, key2, key3, key4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl5
+PREHOOK: Input: default@outputtbl5_n2
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl5
+POSTHOOK: query: SELECT * FROM outputTbl5_n2
 ORDER BY key1, key2, key3, key4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl5
+POSTHOOK: Input: default@outputtbl5_n2
#### A masked pattern was here ####
 1	1	11	2	1
 2	1	12	2	1
@@ -6076,15 +6076,15 @@ POSTHOOK: Input: default@outputtbl5
 8	1	18	2	1
 8	1	28	2	1
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n2
 SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n30)subq
 group by key, constant, val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n2
 SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n30)subq
 group by key, constant, val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -6102,7 +6102,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: t2
+            alias: t2_n30
             Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Select Operator
@@ -6138,17 +6138,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
           TotalFiles: 1
           GatherStats: true
           MultiFileSpray: false
@@ -6173,7 +6173,7 @@ STAGE PLANS:
       Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: t2
+           base file name: t2_n30
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -6187,11 +6187,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
-             name default.t2
+             name default.t2_n30
              numFiles 2
              numRows 6
              rawDataSize 24
-             serialization.ddl struct t2 { string key, string val}
+             serialization.ddl struct t2_n30 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -6211,20 +6211,20 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
-             name default.t2
+             name default.t2_n30
              numFiles 2
              numRows 6
              rawDataSize 24
-             serialization.ddl struct t2 { string key, string val}
+             serialization.ddl struct t2_n30 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.t2
-           name: default.t2
+           name: default.t2_n30
+           name: default.t2_n30
       Truncated Path -> Alias:
-        /t2 [t2]
+        /t2_n30 [t2_n30]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -6281,17 +6281,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
   Stage: Stage-2
     Stats Work
@@ -6300,7 +6300,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key1, key2, key3, cnt
          Column Types: int, int, string, int
-          Table: default.outputtbl4
+          Table: default.outputtbl4_n2
          Is Table Level Stats: true
   Stage: Stage-3
@@ -6325,17 +6325,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -6356,11 +6356,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
@@ -6378,18 +6378,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
+            name: default.outputtbl4_n2
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -6415,17 +6415,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -6446,11 +6446,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
@@ -6468,18 +6468,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
+            name: default.outputtbl4_n2
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -6489,31 +6489,31 @@ STAGE PLANS:
           hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n2
 SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n30)subq
 group by key, constant, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@outputtbl4
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4
+PREHOOK: Input: default@t2_n30
+PREHOOK: Output: default@outputtbl4_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n2
 SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n30)subq
 group by key, constant, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
-POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
-POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl4
+POSTHOOK: Input: default@t2_n30
+POSTHOOK: Output: default@outputtbl4_n2
+POSTHOOK: Lineage: outputtbl4_n2.cnt EXPRESSION [(t2_n30)t2_n30.null, ]
+POSTHOOK: Lineage: outputtbl4_n2.key1 EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4_n2.key2 SIMPLE []
+POSTHOOK: Lineage: outputtbl4_n2.key3 SIMPLE [(t2_n30)t2_n30.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl4_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl4
+PREHOOK: Input: default@outputtbl4_n2
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl4
+POSTHOOK: query: SELECT * FROM outputTbl4_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl4
+POSTHOOK: Input: default@outputtbl4_n2
#### A masked pattern was here ####
 1	1	11	1
 2	1	12	1
@@ -6522,20 +6522,20 @@ POSTHOOK: Input: default@outputtbl4
 8	1	18	1
 8	1	28	1
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n2
 select key, constant3, val, count(1) from
 (
 SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n30)subq
 )subq2
 group by key, constant3, val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
+INSERT OVERWRITE TABLE outputTbl4_n2
 select key, constant3, val, count(1) from
 (
 SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n30)subq
 )subq2
 group by key, constant3, val
 POSTHOOK: type: QUERY
@@ -6554,7 +6554,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-           alias: t2
+           alias: t2_n30
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
@@ -6590,17 +6590,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
           TotalFiles: 1
           GatherStats: true
           MultiFileSpray: false
@@ -6625,7 +6625,7 @@ STAGE PLANS:
       Path -> Partition:
#### A masked pattern was here ####
          Partition
-           base file name: t2
+           base file name: t2_n30
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            properties:
@@ -6639,11 +6639,11 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
-             name default.t2
+             name default.t2_n30
              numFiles 2
              numRows 6
              rawDataSize 24
-             serialization.ddl struct t2 { string key, string val}
+             serialization.ddl struct t2_n30 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
@@ -6663,20 +6663,20 @@ STAGE PLANS:
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
-             name default.t2
+             name default.t2_n30
              numFiles 2
              numRows 6
              rawDataSize 24
-             serialization.ddl struct t2 { string key, string val}
+             serialization.ddl struct t2_n30 { string key, string val}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 30
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-           name: default.t2
-           name: default.t2
+           name: default.t2_n30
+           name: default.t2_n30
       Truncated Path -> Alias:
-        /t2 [t2]
+        /t2_n30 [t2_n30]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -6733,17 +6733,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
   Stage: Stage-2
     Stats Work
@@ -6752,7 +6752,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key1, key2, key3, cnt
          Column Types: int, int, string, int
-          Table: default.outputtbl4
+          Table: default.outputtbl4_n2
          Is Table Level Stats: true
   Stage: Stage-3
@@ -6777,17 +6777,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -6808,11 +6808,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
@@ -6830,18 +6830,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
+            name: default.outputtbl4_n2
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -6867,17 +6867,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -6898,11 +6898,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
@@ -6920,18 +6920,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:int:string:int
#### A masked pattern was here ####
-              name default.outputtbl4
+              name default.outputtbl4_n2
               numFiles 1
               numRows 6
               rawDataSize 48
-              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.ddl struct outputtbl4_n2 { i32 key1, i32 key2, string key3, i32 cnt}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 54
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.outputtbl4
-            name: default.outputtbl4
+            name: default.outputtbl4_n2
+            name: default.outputtbl4_n2
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -6941,37 +6941,37 @@ STAGE PLANS:
           hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n2
 select key, constant3, val, count(1) from
 (
 SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n30)subq
 )subq2
 group by key, constant3, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@outputtbl4
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4
+PREHOOK: Input: default@t2_n30
+PREHOOK: Output: default@outputtbl4_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n2
 select key, constant3, val, count(1) from
 (
 SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2)subq
+(SELECT key, 1 as constant, val from T2_n30)subq
 )subq2
 group by key, constant3, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
-POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
-POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl4
+POSTHOOK: Input: default@t2_n30
+POSTHOOK: Output: default@outputtbl4_n2
+POSTHOOK: Lineage: outputtbl4_n2.cnt EXPRESSION [(t2_n30)t2_n30.null, ]
+POSTHOOK: Lineage: outputtbl4_n2.key1 EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl4_n2.key2 SIMPLE []
+POSTHOOK: Lineage: outputtbl4_n2.key3 SIMPLE [(t2_n30)t2_n30.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl4_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl4
+PREHOOK: Input: default@outputtbl4_n2
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl4
+POSTHOOK: query: SELECT * FROM outputTbl4_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl4
+POSTHOOK: Input: default@outputtbl4_n2
#### A masked pattern was here ####
 1	2	11	1
 2	2	12	1
@@ -6979,31 +6979,31 @@ POSTHOOK: Input: default@outputtbl4
 7	2	17	1
 8	2	18	1
 8	2	28	1
-PREHOOK: query: CREATE TABLE DEST1(key INT, cnt INT)
+PREHOOK: query: CREATE TABLE DEST1_n10(key INT, cnt INT)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1
-POSTHOOK: query: CREATE TABLE DEST1(key INT, cnt INT)
+PREHOOK: Output: default@DEST1_n10
+POSTHOOK: query: CREATE TABLE DEST1_n10(key INT, cnt INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1
-PREHOOK: query: CREATE TABLE DEST2(key INT, val STRING, cnt INT)
+POSTHOOK: Output: default@DEST1_n10
+PREHOOK: query: CREATE TABLE DEST2_n8(key INT, val STRING, cnt INT)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST2
-POSTHOOK: query: CREATE TABLE DEST2(key INT, val STRING, cnt INT)
+PREHOOK: Output: default@DEST2_n8
+POSTHOOK: query: CREATE TABLE DEST2_n8(key INT, val STRING, cnt INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST2
+POSTHOOK: Output: default@DEST2_n8
 PREHOOK: query: EXPLAIN
-FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
+FROM T2_n30
+INSERT OVERWRITE TABLE DEST1_n10 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n8 SELECT key, val, count(1) GROUP BY key, val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
+FROM T2_n30
+INSERT OVERWRITE TABLE DEST1_n10 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n8 SELECT key, val, count(1) GROUP BY key, val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -7019,7 +7019,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: t2
+            alias: t2_n30
             Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -7059,7 +7059,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n8
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int)
             outputColumnNames: key, val, cnt
@@ -7093,7 +7093,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n10
           Select Operator
             expressions: _col0 (type: int), _col1 (type: int)
             outputColumnNames: key, cnt
@@ -7118,7 +7118,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n10
   Stage: Stage-3
     Stats Work
@@ -7126,7 +7126,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, cnt
          Column Types: int, int
-          Table: default.dest1
+          Table: default.dest1_n10
   Stage: Stage-4
     Map Reduce
@@ -7157,7 +7157,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, val, cnt
          Column Types: int, string, int
-          Table: default.dest2
+          Table: default.dest2_n8
   Stage: Stage-1
     Move Operator
@@ -7167,7 +7167,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.dest2
+             name: default.dest2_n8
   Stage: Stage-6
     Map Reduce
@@ -7192,45 +7192,45 @@ STAGE PLANS:
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-PREHOOK: query: FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
+PREHOOK: query: FROM T2_n30
+INSERT OVERWRITE TABLE DEST1_n10 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n8 SELECT key, val, count(1) GROUP BY key, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
-POSTHOOK: query: FROM T2
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
+PREHOOK: Input: default@t2_n30
+PREHOOK: Output: default@dest1_n10
+PREHOOK: Output: default@dest2_n8
+POSTHOOK: query: FROM T2_n30
+INSERT OVERWRITE TABLE DEST1_n10 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n8 SELECT key, val, count(1) GROUP BY key, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.cnt EXPRESSION [(t2)t2.null, ]
-POSTHOOK: Lineage: dest1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: dest2.cnt EXPRESSION [(t2)t2.null, ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: dest2.val SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: select * from DEST1
+POSTHOOK: Input: default@t2_n30
+POSTHOOK: Output: default@dest1_n10
+POSTHOOK: Output: default@dest2_n8
+POSTHOOK: Lineage: dest1_n10.cnt EXPRESSION [(t2_n30)t2_n30.null, ]
+POSTHOOK: Lineage: dest1_n10.key EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2_n8.cnt EXPRESSION [(t2_n30)t2_n30.null, ]
+POSTHOOK: Lineage: dest2_n8.key EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2_n8.val SIMPLE [(t2_n30)t2_n30.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: select * from DEST1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n10
#### A masked pattern was here ####
-POSTHOOK: query: select * from DEST1
+POSTHOOK: query: select * from DEST1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n10
#### A masked pattern was here ####
 1	1
 2	1
 3	1
 7	1
 8	2
-PREHOOK: query: select * from DEST2
+PREHOOK: query: select * from DEST2_n8
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n8
#### A masked pattern was here ####
-POSTHOOK: query: select * from DEST2
+POSTHOOK: query: select * from DEST2_n8
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n8
#### A masked pattern was here ####
 1	11	1
 2	12	1
@@ -7239,14 +7239,14 @@ POSTHOOK: Input: default@dest2
 8	18	1
 8	28	1
 PREHOOK: query: EXPLAIN
-FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
+FROM (select key, val from T2_n30 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n10 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n8 SELECT key, val, count(1) GROUP BY key, val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
+FROM (select key, val from T2_n30 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n10 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n8 SELECT key, val, count(1) GROUP BY key, val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -7262,7 +7262,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-           alias: t2
+           alias: t2_n30
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (UDFToDouble(key) = 8.0D) (type: boolean)
@@ -7305,7 +7305,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest2
+                name: default.dest2_n8
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int)
             outputColumnNames: key, val, cnt
@@ -7339,7 +7339,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n10
           Select Operator
             expressions: _col0 (type: int), _col1 (type: int)
             outputColumnNames: key, cnt
@@ -7364,7 +7364,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.dest1
+                name: default.dest1_n10
   Stage: Stage-3
     Stats Work
@@ -7372,7 +7372,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, cnt
          Column Types: int, int
-          Table: default.dest1
+          Table: default.dest1_n10
   Stage: Stage-4
     Map Reduce
@@ -7403,7 +7403,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, val, cnt
          Column Types: int, string, int
-          Table: default.dest2
+          Table: default.dest2_n8
   Stage: Stage-1
     Move Operator
@@ -7413,7 +7413,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.dest2
+             name: default.dest2_n8
   Stage: Stage-6
     Map Reduce
@@ -7438,41 +7438,41 @@ STAGE PLANS:
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-PREHOOK: query: FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
+PREHOOK: query: FROM (select key, val from T2_n30 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n10 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n8 SELECT key, val, count(1) GROUP BY key, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
-POSTHOOK: query: FROM (select key, val from T2 where key = 8) x
-INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val
+PREHOOK: Input: default@t2_n30
+PREHOOK: Output: default@dest1_n10
+PREHOOK: Output: default@dest2_n8
+POSTHOOK: query: FROM (select key, val from T2_n30 where key = 8) x
+INSERT OVERWRITE TABLE DEST1_n10 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2_n8 SELECT key, val, count(1) GROUP BY key, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.cnt EXPRESSION [(t2)t2.null, ]
-POSTHOOK: Lineage: dest1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: dest2.cnt EXPRESSION [(t2)t2.null, ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: dest2.val SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: select * from DEST1
+POSTHOOK: Input: default@t2_n30
+POSTHOOK: Output: default@dest1_n10
+POSTHOOK: Output: default@dest2_n8
+POSTHOOK: Lineage: dest1_n10.cnt EXPRESSION [(t2_n30)t2_n30.null, ]
+POSTHOOK: Lineage: dest1_n10.key EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2_n8.cnt EXPRESSION [(t2_n30)t2_n30.null, ]
+POSTHOOK: Lineage: dest2_n8.key EXPRESSION [(t2_n30)t2_n30.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2_n8.val SIMPLE [(t2_n30)t2_n30.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: select * from DEST1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n10
#### A masked pattern was here ####
-POSTHOOK: query: select * from DEST1
+POSTHOOK: query: select * from DEST1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n10
#### A masked pattern was here ####
 8	2
-PREHOOK: query: select * from DEST2
+PREHOOK: query: select * from DEST2_n8
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n8
#### A masked pattern was here ####
-POSTHOOK: query: select * from DEST2
+POSTHOOK: query: select * from DEST2_n8
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n8
#### A masked pattern was here ####
 8	18	1
 8	28	1
diff --git a/ql/src/test/results/clientpositive/groupby_sort_2.q.out b/ql/src/test/results/clientpositive/groupby_sort_2.q.out
index 437c56f85a..9d7f810520 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_2.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_2.q.out
@@ -1,46 +1,46 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n33(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (val) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n33
+POSTHOOK: query: CREATE TABLE T1_n33(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (val) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+POSTHOOK: Output: default@T1_n33
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n33
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+PREHOOK: Output: default@t1_n33
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n33
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: Output: default@t1_n33
+PREHOOK: query: INSERT OVERWRITE TABLE T1_n33 select key, val from T1_n33
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: Input: default@t1_n33
+PREHOOK: Output: default@t1_n33
+POSTHOOK: query: INSERT OVERWRITE TABLE T1_n33 select key, val from T1_n33
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: CREATE TABLE outputTbl1(val string, cnt int)
+POSTHOOK: Input: default@t1_n33
+POSTHOOK: Output: default@t1_n33
+POSTHOOK: Lineage: t1_n33.key SIMPLE [(t1_n33)t1_n33.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t1_n33.val SIMPLE [(t1_n33)t1_n33.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE outputTbl1_n10(val string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: CREATE TABLE outputTbl1(val string, cnt int)
+PREHOOK: Output: default@outputTbl1_n10
+POSTHOOK: query: CREATE TABLE outputTbl1_n10(val string, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
+POSTHOOK: Output: default@outputTbl1_n10
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT val, count(1) FROM T1 GROUP BY val
+INSERT OVERWRITE TABLE outputTbl1_n10
+SELECT val, count(1) FROM T1_n33 GROUP BY val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT val, count(1) FROM T1 GROUP BY val
+INSERT OVERWRITE TABLE outputTbl1_n10
+SELECT val, count(1) FROM T1_n33 GROUP BY val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -53,7 +53,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: t1
+            alias: t1_n33
             Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: val (type: string)
@@ -91,7 +91,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n10
           Select Operator
             expressions: _col0 (type: string), _col1 (type: int)
             outputColumnNames: val, cnt
@@ -116,7 +116,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n10
   Stage: Stage-2
     Stats Work
@@ -124,7 +124,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: val, cnt
          Column Types: string, int
-          Table: default.outputtbl1
+          Table: default.outputtbl1_n10
   Stage: Stage-3
     Map Reduce
@@ -149,25 +149,25 @@ STAGE PLANS:
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT val, count(1) FROM T1 GROUP BY val
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n10
+SELECT val, count(1) FROM T1_n33 GROUP BY val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT val, count(1) FROM T1 GROUP BY val
+PREHOOK: Input: default@t1_n33
+PREHOOK: Output: default@outputtbl1_n10
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n10
+SELECT val, count(1) FROM T1_n33 GROUP BY val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: Input: default@t1_n33
+POSTHOOK: Output: default@outputtbl1_n10
+POSTHOOK: Lineage: outputtbl1_n10.cnt EXPRESSION [(t1_n33)t1_n33.null, ]
+POSTHOOK: Lineage: outputtbl1_n10.val SIMPLE [(t1_n33)t1_n33.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n10
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: query: SELECT * FROM outputTbl1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n10
#### A masked pattern was here ####
 11	1
 12	1
diff --git a/ql/src/test/results/clientpositive/groupby_sort_3.q.out b/ql/src/test/results/clientpositive/groupby_sort_3.q.out
index 4ea5047bda..636b316d46 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_3.q.out
@@ -1,46 +1,46 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n57(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n57
+POSTHOOK: query: CREATE TABLE T1_n57(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+POSTHOOK: Output: default@T1_n57
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n57
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+PREHOOK: Output: default@t1_n57
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n57
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: Output: default@t1_n57
+PREHOOK: query: INSERT OVERWRITE TABLE T1_n57 select key, val from T1_n57
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: Input: default@t1_n57
+PREHOOK: Output: default@t1_n57
+POSTHOOK: query: INSERT OVERWRITE TABLE T1_n57 select key, val from T1_n57
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: CREATE TABLE outputTbl1(key string, val string, cnt int)
+POSTHOOK: Input: default@t1_n57
+POSTHOOK: Output: default@t1_n57
+POSTHOOK: Lineage: t1_n57.key SIMPLE [(t1_n57)t1_n57.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t1_n57.val SIMPLE [(t1_n57)t1_n57.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE outputTbl1_n20(key string, val string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: CREATE TABLE outputTbl1(key string, val string, cnt int)
+PREHOOK: Output: default@outputTbl1_n20
+POSTHOOK: query: CREATE TABLE outputTbl1_n20(key string, val string, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
+POSTHOOK: Output: default@outputTbl1_n20
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val
+INSERT OVERWRITE TABLE outputTbl1_n20
+SELECT key, val, count(1) FROM T1_n57 GROUP BY key, val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val
+INSERT OVERWRITE TABLE outputTbl1_n20
+SELECT key, val, count(1) FROM T1_n57 GROUP BY key, val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -57,7 +57,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-           alias: t1
+           alias: t1_n57
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string), val (type: string)
@@ -80,7 +80,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n20
           Select Operator
            expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
            outputColumnNames: key, val, cnt
@@ -125,7 +125,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl1
+                name: default.outputtbl1_n20
   Stage: Stage-2
     Stats Work
@@ -133,7 +133,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, val, cnt
          Column Types: string, string, int
-          Table: default.outputtbl1
+          Table: default.outputtbl1_n20
   Stage: Stage-3
     Map Reduce
@@ -145,7 +145,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.outputtbl1
+             name: default.outputtbl1_n20
   Stage: Stage-5
     Map Reduce
@@ -157,7 +157,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.outputtbl1
+             name: default.outputtbl1_n20
   Stage: Stage-6
     Move Operator
@@ -165,26 +165,26 @@ STAGE PLANS:
           hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n20
+SELECT key, val, count(1) FROM T1_n57 GROUP BY key, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1
-SELECT key, val, count(1) FROM T1 GROUP BY key, val
+PREHOOK: Input: default@t1_n57
+PREHOOK: Output: default@outputtbl1_n20
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n20
+SELECT key, val, count(1) FROM T1_n57 GROUP BY key, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: Input: default@t1_n57
+POSTHOOK: Output: default@outputtbl1_n20
+POSTHOOK: Lineage: outputtbl1_n20.cnt EXPRESSION [(t1_n57)t1_n57.null, ]
+POSTHOOK: Lineage: outputtbl1_n20.key SIMPLE [(t1_n57)t1_n57.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n20.val SIMPLE [(t1_n57)t1_n57.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl1_n20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n20
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl1
+POSTHOOK: query: SELECT * FROM outputTbl1_n20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n20
#### A masked pattern was here ####
 1	11	1
 2	12	1
@@ -192,21 +192,21 @@ POSTHOOK: Input: default@outputtbl1
 7	17	1
 8	18	1
 8	28	1
-PREHOOK: query: CREATE TABLE outputTbl2(key string, cnt int)
+PREHOOK: query: CREATE TABLE outputTbl2_n7(key string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl2
-POSTHOOK: query: CREATE TABLE outputTbl2(key string, cnt int)
+PREHOOK: Output: default@outputTbl2_n7
+POSTHOOK: query: CREATE TABLE outputTbl2_n7(key string, cnt int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl2
+POSTHOOK: Output: default@outputTbl2_n7
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, count(1) FROM T1 GROUP BY key
+INSERT OVERWRITE TABLE outputTbl2_n7
+SELECT key, count(1) FROM T1_n57 GROUP BY key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, count(1) FROM T1 GROUP BY key
+INSERT OVERWRITE TABLE outputTbl2_n7
+SELECT key, count(1) FROM T1_n57 GROUP BY key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -223,7 +223,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-           alias: t1
+           alias: t1_n57
            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string)
@@ -246,7 +246,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl2
+                name: default.outputtbl2_n7
           Select Operator
            expressions: _col0 (type: string), _col1 (type: int)
            outputColumnNames: key, cnt
@@ -291,7 +291,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.outputtbl2
+                name: default.outputtbl2_n7
   Stage: Stage-2
     Stats Work
@@ -299,7 +299,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, cnt
          Column Types: string, int
-          Table: default.outputtbl2
+          Table: default.outputtbl2_n7
   Stage: Stage-3
     Map Reduce
@@ -311,7 +311,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.outputtbl2
+             name: default.outputtbl2_n7
   Stage: Stage-5
     Map Reduce
@@ -323,7 +323,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.outputtbl2
+             name: default.outputtbl2_n7
   Stage: Stage-6
     Move Operator
@@ -331,25 +331,25 @@ STAGE PLANS:
           hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2
-SELECT key, count(1) FROM T1 GROUP BY key
+PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n7
+SELECT key, count(1) FROM T1_n57 GROUP BY key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@outputtbl2
-POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2
-SELECT key, count(1) FROM T1 GROUP BY key
+PREHOOK: Input: default@t1_n57
+PREHOOK: Output: default@outputtbl2_n7
+POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n7
+SELECT key, count(1) FROM T1_n57 GROUP BY key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@outputtbl2
-POSTHOOK: Lineage: outputtbl2.cnt EXPRESSION [(t1)t1.null, ]
-POSTHOOK: Lineage: outputtbl2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM outputTbl2
+POSTHOOK: Input: default@t1_n57
+POSTHOOK: Output: default@outputtbl2_n7
+POSTHOOK: Lineage: outputtbl2_n7.cnt EXPRESSION [(t1_n57)t1_n57.null, ]
+POSTHOOK: Lineage: outputtbl2_n7.key SIMPLE [(t1_n57)t1_n57.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM outputTbl2_n7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl2
+PREHOOK: Input: default@outputtbl2_n7
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM outputTbl2
+POSTHOOK: query: SELECT * FROM outputTbl2_n7
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl2
+POSTHOOK: Input: default@outputtbl2_n7
#### A masked pattern was here ####
 1	1
 2	1
diff --git a/ql/src/test/results/clientpositive/groupby_sort_4.q.out b/ql/src/test/results/clientpositive/groupby_sort_4.q.out
index 6b8efd398c..9dca4c468f 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_4.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_4.q.out
@@ -1,46 +1,46 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n85(key STRING, val STRING)
 CLUSTERED BY (key, val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n85
+POSTHOOK: query: CREATE TABLE T1_n85(key STRING, val STRING)
 CLUSTERED BY (key, val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+POSTHOOK: Output: default@T1_n85
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n85
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+PREHOOK: Output: default@t1_n85
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n85
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+POSTHOOK: Output: default@t1_n85
+PREHOOK: query: INSERT OVERWRITE TABLE T1_n85 select key, val from T1_n85
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1
+PREHOOK: Input: default@t1_n85
+PREHOOK: Output: default@t1_n85
+POSTHOOK:
query: INSERT OVERWRITE TABLE T1_n85 select key, val from T1_n85 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE outputTbl1(key STRING, cnt INT) +POSTHOOK: Input: default@t1_n85 +POSTHOOK: Output: default@t1_n85 +POSTHOOK: Lineage: t1_n85.key SIMPLE [(t1_n85)t1_n85.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n85.val SIMPLE [(t1_n85)t1_n85.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE outputTbl1_n31(key STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: CREATE TABLE outputTbl1(key STRING, cnt INT) +PREHOOK: Output: default@outputTbl1_n31 +POSTHOOK: query: CREATE TABLE outputTbl1_n31(key STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 +POSTHOOK: Output: default@outputTbl1_n31 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n31 +SELECT key, count(1) FROM T1_n85 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n31 +SELECT key, count(1) FROM T1_n85 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -53,7 +53,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n85 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -91,7 +91,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n31 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, cnt @@ -116,7 +116,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n31 Stage: Stage-2 Stats Work @@ -124,7 +124,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: string, int - Table: default.outputtbl1 + Table: default.outputtbl1_n31 Stage: Stage-3 Map Reduce @@ -149,46 +149,46 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n31 +SELECT key, count(1) FROM T1_n85 GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +PREHOOK: Input: default@t1_n85 +PREHOOK: Output: default@outputtbl1_n31 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n31 +SELECT key, count(1) FROM T1_n85 GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt 
EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n85 +POSTHOOK: Output: default@outputtbl1_n31 +POSTHOOK: Lineage: outputtbl1_n31.cnt EXPRESSION [(t1_n85)t1_n85.null, ] +POSTHOOK: Lineage: outputtbl1_n31.key SIMPLE [(t1_n85)t1_n85.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n31 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n31 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n31 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n31 #### A masked pattern was here #### 1 1 2 1 3 1 7 1 8 2 -PREHOOK: query: CREATE TABLE outputTbl2(key STRING, val STRING, cnt INT) +PREHOOK: query: CREATE TABLE outputTbl2_n8(key STRING, val STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: CREATE TABLE outputTbl2(key STRING, val STRING, cnt INT) +PREHOOK: Output: default@outputTbl2_n8 +POSTHOOK: query: CREATE TABLE outputTbl2_n8(key STRING, val STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl2 +POSTHOOK: Output: default@outputTbl2_n8 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl2_n8 +SELECT key, val, count(1) FROM T1_n85 GROUP BY key, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl2_n8 +SELECT key, val, count(1) FROM T1_n85 GROUP BY key, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -201,7 +201,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n85 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -238,7 +238,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n8 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int) outputColumnNames: key, val, cnt @@ -263,7 +263,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n8 Stage: Stage-2 Stats Work @@ -271,7 +271,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val, cnt Column Types: string, string, int - Table: default.outputtbl2 + Table: default.outputtbl2_n8 Stage: Stage-3 Map Reduce @@ -296,26 +296,26 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n8 +SELECT key, val, count(1) FROM T1_n85 GROUP BY key, val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: 
default@outputtbl2 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +PREHOOK: Input: default@t1_n85 +PREHOOK: Output: default@outputtbl2_n8 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n8 +SELECT key, val, count(1) FROM T1_n85 GROUP BY key, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl2 -POSTHOOK: Lineage: outputtbl2.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl2 +POSTHOOK: Input: default@t1_n85 +POSTHOOK: Output: default@outputtbl2_n8 +POSTHOOK: Lineage: outputtbl2_n8.cnt EXPRESSION [(t1_n85)t1_n85.null, ] +POSTHOOK: Lineage: outputtbl2_n8.key SIMPLE [(t1_n85)t1_n85.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl2_n8.val SIMPLE [(t1_n85)t1_n85.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl2_n8 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl2 +PREHOOK: Input: default@outputtbl2_n8 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl2 +POSTHOOK: query: SELECT * FROM outputTbl2_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl2 +POSTHOOK: Input: default@outputtbl2_n8 #### A masked pattern was here #### 1 11 1 2 12 1 diff --git a/ql/src/test/results/clientpositive/groupby_sort_5.q.out b/ql/src/test/results/clientpositive/groupby_sort_5.q.out index f1d4cfdd0a..ce3daa4751 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_5.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_5.q.out @@ -1,46 +1,46 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n3(key STRING, val STRING) CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n3 +POSTHOOK: query: CREATE TABLE T1_n3(key STRING, val STRING) CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +POSTHOOK: Output: default@T1_n3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n3 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n3 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: Output: default@t1_n3 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n3 select key, val from T1_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: Input: default@t1_n3 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n3 select key, val from T1_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: 
default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT) +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@t1_n3 +POSTHOOK: Lineage: t1_n3.key SIMPLE [(t1_n3)t1_n3.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n3.val SIMPLE [(t1_n3)t1_n3.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE outputTbl1_n5(key STRING, val STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT) +PREHOOK: Output: default@outputTbl1_n5 +POSTHOOK: query: CREATE TABLE outputTbl1_n5(key STRING, val STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 +POSTHOOK: Output: default@outputTbl1_n5 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl1_n5 +SELECT key, val, count(1) FROM T1_n3 GROUP BY key, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl1_n5 +SELECT key, val, count(1) FROM T1_n3 GROUP BY key, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -57,7 +57,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n3 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -80,7 +80,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int) outputColumnNames: key, val, cnt @@ -125,7 +125,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n5 Stage: Stage-2 Stats Work @@ -133,7 +133,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val, cnt Column Types: string, string, int - Table: default.outputtbl1 + Table: default.outputtbl1_n5 Stage: Stage-3 Map Reduce @@ -145,7 +145,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n5 Stage: Stage-5 Map Reduce @@ -157,7 +157,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n5 Stage: Stage-6 Move Operator @@ -165,26 +165,26 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n5 
+SELECT key, val, count(1) FROM T1_n3 GROUP BY key, val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +PREHOOK: Input: default@t1_n3 +PREHOOK: Output: default@outputtbl1_n5 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n5 +SELECT key, val, count(1) FROM T1_n3 GROUP BY key, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@outputtbl1_n5 +POSTHOOK: Lineage: outputtbl1_n5.cnt EXPRESSION [(t1_n3)t1_n3.null, ] +POSTHOOK: Lineage: outputtbl1_n5.key SIMPLE [(t1_n3)t1_n3.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n5.val SIMPLE [(t1_n3)t1_n3.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n5 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n5 #### A masked pattern was here #### 1 11 1 2 12 1 @@ -192,49 +192,49 @@ POSTHOOK: Input: default@outputtbl1 7 17 1 8 18 1 8 28 1 -PREHOOK: query: DROP TABLE T1 +PREHOOK: query: DROP TABLE T1_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: DROP TABLE T1 +PREHOOK: Input: default@t1_n3 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: DROP TABLE T1_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@t1_n3 +PREHOOK: query: CREATE TABLE T1_n3(key STRING, val STRING) CLUSTERED BY (val, key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n3 +POSTHOOK: query: CREATE TABLE T1_n3(key STRING, val STRING) CLUSTERED BY (val, key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +POSTHOOK: Output: default@T1_n3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n3 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n3 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: Output: default@t1_n3 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n3 select key, val from T1_n3 PREHOOK: type: QUERY -PREHOOK: 
Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: Input: default@t1_n3 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n3 select key, val from T1_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@t1_n3 +POSTHOOK: Lineage: t1_n3.key SIMPLE [(t1_n3)t1_n3.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n3.val SIMPLE [(t1_n3)t1_n3.FieldSchema(name:val, type:string, comment:null), ] PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl1_n5 +SELECT key, val, count(1) FROM T1_n3 GROUP BY key, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl1_n5 +SELECT key, val, count(1) FROM T1_n3 GROUP BY key, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -251,7 +251,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n3 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -274,7 +274,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int) outputColumnNames: key, val, cnt @@ -319,7 +319,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n5 Stage: Stage-2 Stats Work @@ -327,7 +327,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val, cnt Column Types: string, string, int - Table: default.outputtbl1 + Table: default.outputtbl1_n5 Stage: Stage-3 Map Reduce @@ -339,7 +339,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n5 Stage: Stage-5 Map Reduce @@ -351,7 +351,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n5 Stage: Stage-6 Move Operator @@ -359,26 +359,26 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n5 +SELECT key, val, count(1) FROM T1_n3 GROUP BY key, val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +PREHOOK: 
Input: default@t1_n3 +PREHOOK: Output: default@outputtbl1_n5 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n5 +SELECT key, val, count(1) FROM T1_n3 GROUP BY key, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@outputtbl1_n5 +POSTHOOK: Lineage: outputtbl1_n5.cnt EXPRESSION [(t1_n3)t1_n3.null, ] +POSTHOOK: Lineage: outputtbl1_n5.key SIMPLE [(t1_n3)t1_n3.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n5.val SIMPLE [(t1_n3)t1_n3.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n5 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n5 #### A masked pattern was here #### 1 11 1 2 12 1 @@ -386,57 +386,57 @@ POSTHOOK: Input: default@outputtbl1 7 17 1 8 18 1 8 28 1 -PREHOOK: query: DROP TABLE T1 +PREHOOK: query: DROP TABLE T1_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: DROP TABLE T1 +PREHOOK: Input: default@t1_n3 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: DROP TABLE T1_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@t1_n3 +PREHOOK: query: CREATE TABLE T1_n3(key STRING, val STRING) CLUSTERED BY (val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n3 +POSTHOOK: query: CREATE TABLE T1_n3(key STRING, val STRING) CLUSTERED BY (val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +POSTHOOK: Output: default@T1_n3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n3 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n3 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: Output: default@t1_n3 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n3 select key, val from T1_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: Input: default@t1_n3 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n3 select key, val from T1_n3 POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE outputTbl2(key STRING, cnt INT) +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@t1_n3 +POSTHOOK: Lineage: t1_n3.key SIMPLE [(t1_n3)t1_n3.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n3.val SIMPLE [(t1_n3)t1_n3.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE outputTbl2_n1(key STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: CREATE TABLE outputTbl2(key STRING, cnt INT) +PREHOOK: Output: default@outputTbl2_n1 +POSTHOOK: query: CREATE TABLE outputTbl2_n1(key STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl2 +POSTHOOK: Output: default@outputTbl2_n1 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, count(1) FROM T1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl2_n1 +SELECT key, count(1) FROM T1_n3 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, count(1) FROM T1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl2_n1 +SELECT key, count(1) FROM T1_n3 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -449,7 +449,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n3 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -487,7 +487,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n1 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, cnt @@ -512,7 +512,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n1 Stage: Stage-2 Stats Work @@ -520,7 +520,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: string, int - Table: default.outputtbl2 + Table: default.outputtbl2_n1 Stage: Stage-3 Map Reduce @@ -545,36 +545,36 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, count(1) FROM T1 GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n1 +SELECT key, count(1) FROM T1_n3 GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl2 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, count(1) FROM T1 GROUP BY key +PREHOOK: Input: default@t1_n3 +PREHOOK: Output: default@outputtbl2_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n1 +SELECT key, count(1) FROM T1_n3 GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl2 -POSTHOOK: Lineage: outputtbl2.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, 
comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl2 +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@outputtbl2_n1 +POSTHOOK: Lineage: outputtbl2_n1.cnt EXPRESSION [(t1_n3)t1_n3.null, ] +POSTHOOK: Lineage: outputtbl2_n1.key SIMPLE [(t1_n3)t1_n3.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl2 +PREHOOK: Input: default@outputtbl2_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl2 +POSTHOOK: query: SELECT * FROM outputTbl2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl2 +POSTHOOK: Input: default@outputtbl2_n1 #### A masked pattern was here #### 1 1 2 1 3 1 7 1 8 2 -PREHOOK: query: DROP TABLE T1 +PREHOOK: query: DROP TABLE T1_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: DROP TABLE T1 +PREHOOK: Input: default@t1_n3 +PREHOOK: Output: default@t1_n3 +POSTHOOK: query: DROP TABLE T1_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n3 +POSTHOOK: Output: default@t1_n3 diff --git a/ql/src/test/results/clientpositive/groupby_sort_6.q.out b/ql/src/test/results/clientpositive/groupby_sort_6.q.out index fae54ebfb8..db2611ca5a 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_6.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_6.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE T1_n37(key STRING, val STRING) PARTITIONED BY (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: Output: default@T1_n37 +POSTHOOK: query: CREATE TABLE T1_n37(key STRING, val STRING) PARTITIONED BY (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) +POSTHOOK: Output: default@T1_n37 +PREHOOK: query: CREATE TABLE outputTbl1_n15(key int, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) +PREHOOK: Output: default@outputTbl1_n15 +POSTHOOK: query: CREATE TABLE outputTbl1_n15(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 +POSTHOOK: Output: default@outputTbl1_n15 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '1' GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '1' GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -33,7 +33,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n37 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE GatherStats: false Filter Operator @@ -91,17 +91,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n15 numFiles 0 numRows 0 rawDataSize 0 - 
serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n15 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n15 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -150,17 +150,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n15 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n15 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n15 Stage: Stage-2 Stats Work @@ -169,7 +169,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n15 Is Table Level Stats: true Stage: Stage-3 @@ -242,42 +242,42 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '1' GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key +PREHOOK: Input: default@t1_n37 +PREHOOK: Output: default@outputtbl1_n15 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '1' GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n37 +POSTHOOK: Output: default@outputtbl1_n15 +POSTHOOK: Lineage: outputtbl1_n15.cnt EXPRESSION [(t1_n37)t1_n37.null, ] +POSTHOOK: Lineage: outputtbl1_n15.key EXPRESSION [(t1_n37)t1_n37.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n15 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n15 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n15 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n15 #### A masked pattern was here #### -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='2') +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n37 PARTITION (ds='2') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='2') +PREHOOK: Output: default@t1_n37 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n37 PARTITION (ds='2') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: 
default@t1 -POSTHOOK: Output: default@t1@ds=2 +POSTHOOK: Output: default@t1_n37 +POSTHOOK: Output: default@t1_n37@ds=2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '1' GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '1' GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -290,7 +290,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n37 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE GatherStats: false Filter Operator @@ -348,17 +348,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n15 numFiles 1 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n15 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n15 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -407,17 +407,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n15 numFiles 1 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n15 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n15 Stage: Stage-2 Stats Work @@ -426,7 +426,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n15 Is Table Level Stats: true Stage: Stage-3 @@ -499,33 +499,33 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '1' GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key +PREHOOK: Input: default@t1_n37 +PREHOOK: Output: default@outputtbl1_n15 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '1' GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n37 +POSTHOOK: Output: default@outputtbl1_n15 +POSTHOOK: Lineage: outputtbl1_n15.cnt EXPRESSION [(t1_n37)t1_n37.null, ] +POSTHOOK: Lineage: outputtbl1_n15.key EXPRESSION [(t1_n37)t1_n37.FieldSchema(name:key, type:string, 
comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n15 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n15 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n15 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n15 #### A masked pattern was here #### PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '2' GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '2' GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -538,7 +538,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n37 Statistics: Num rows: 1 Data size: 300 Basic stats: PARTIAL Column stats: NONE GatherStats: false Select Operator @@ -578,13 +578,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n37 numFiles 1 numRows 0 partition_columns ds partition_columns.types string rawDataSize 0 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n37 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -601,18 +601,18 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n37 partition_columns ds partition_columns.types string - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n37 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n37 + name: default.t1_n37 Truncated Path -> Alias: - /t1/ds=2 [t1] + /t1_n37/ds=2 [t1_n37] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -644,17 +644,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n15 numFiles 1 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n15 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n15 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -703,17 +703,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n15 numFiles 1 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n15 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: 
default.outputtbl1_n15 Stage: Stage-2 Stats Work @@ -722,7 +722,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n15 Is Table Level Stats: true Stage: Stage-3 @@ -795,27 +795,27 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '2' GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=2 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key +PREHOOK: Input: default@t1_n37 +PREHOOK: Input: default@t1_n37@ds=2 +PREHOOK: Output: default@outputtbl1_n15 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n15 +SELECT key, count(1) FROM T1_n37 where ds = '2' GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=2 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n37 +POSTHOOK: Input: default@t1_n37@ds=2 +POSTHOOK: Output: default@outputtbl1_n15 +POSTHOOK: Lineage: outputtbl1_n15.cnt EXPRESSION [(t1_n37)t1_n37.null, ] +POSTHOOK: Lineage: outputtbl1_n15.key EXPRESSION [(t1_n37)t1_n37.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n15 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n15 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n15 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n15 #### A masked pattern was here #### 1 1 2 1 diff --git a/ql/src/test/results/clientpositive/groupby_sort_7.q.out b/ql/src/test/results/clientpositive/groupby_sort_7.q.out index 377b9e1a58..082944affe 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_7.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_7.q.out @@ -1,49 +1,49 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE T1_n66(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: Output: default@T1_n66 +POSTHOOK: query: CREATE TABLE T1_n66(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1') +POSTHOOK: Output: default@T1_n66 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n66 PARTITION (ds='1') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1') +PREHOOK: Output: 
default@t1_n66 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n66 PARTITION (ds='1') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@ds=1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +POSTHOOK: Output: default@t1_n66 +POSTHOOK: Output: default@t1_n66@ds=1 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n66 PARTITION (ds='1') select key, val from T1_n66 where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=1 -PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +PREHOOK: Input: default@t1_n66 +PREHOOK: Input: default@t1_n66@ds=1 +PREHOOK: Output: default@t1_n66@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n66 PARTITION (ds='1') select key, val from T1_n66 where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=1 -POSTHOOK: Output: default@t1@ds=1 -POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT) +POSTHOOK: Input: default@t1_n66 +POSTHOOK: Input: default@t1_n66@ds=1 +POSTHOOK: Output: default@t1_n66@ds=1 +POSTHOOK: Lineage: t1_n66 PARTITION(ds=1).key SIMPLE [(t1_n66)t1_n66.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n66 PARTITION(ds=1).val SIMPLE [(t1_n66)t1_n66.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE outputTbl1_n26(key STRING, val STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT) +PREHOOK: Output: default@outputTbl1_n26 +POSTHOOK: query: CREATE TABLE outputTbl1_n26(key STRING, val STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 +POSTHOOK: Output: default@outputTbl1_n26 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl1_n26 +SELECT key, val, count(1) FROM T1_n66 where ds = '1' GROUP BY key, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl1_n26 +SELECT key, val, count(1) FROM T1_n66 where ds = '1' GROUP BY key, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -60,7 +60,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n66 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -83,7 +83,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n26 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int) outputColumnNames: key, val, cnt @@ -128,7 +128,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat 
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n26 Stage: Stage-2 Stats Work @@ -136,7 +136,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val, cnt Column Types: string, string, int - Table: default.outputtbl1 + Table: default.outputtbl1_n26 Stage: Stage-3 Map Reduce @@ -148,7 +148,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n26 Stage: Stage-5 Map Reduce @@ -160,7 +160,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n26 Stage: Stage-6 Move Operator @@ -168,28 +168,28 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n26 +SELECT key, val, count(1) FROM T1_n66 where ds = '1' GROUP BY key, val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val +PREHOOK: Input: default@t1_n66 +PREHOOK: Input: default@t1_n66@ds=1 +PREHOOK: Output: default@outputtbl1_n26 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n26 +SELECT key, val, count(1) FROM T1_n66 where ds = '1' GROUP BY key, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n66 +POSTHOOK: Input: default@t1_n66@ds=1 +POSTHOOK: Output: default@outputtbl1_n26 +POSTHOOK: Lineage: outputtbl1_n26.cnt EXPRESSION [(t1_n66)t1_n66.null, ] +POSTHOOK: Lineage: outputtbl1_n26.key SIMPLE [(t1_n66)t1_n66.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n26.val SIMPLE [(t1_n66)t1_n66.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n26 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n26 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n26 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n26 #### A masked pattern was here #### 1 11 1 2 12 1 @@ -197,11 +197,11 @@ POSTHOOK: Input: default@outputtbl1 7 17 1 8 18 1 8 28 1 -PREHOOK: query: DROP TABLE T1 +PREHOOK: query: DROP TABLE T1_n66 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: DROP TABLE T1 +PREHOOK: Input: default@t1_n66 +PREHOOK: Output: default@t1_n66 +POSTHOOK: query: DROP TABLE T1_n66 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 
+POSTHOOK: Input: default@t1_n66 +POSTHOOK: Output: default@t1_n66 diff --git a/ql/src/test/results/clientpositive/groupby_sort_8.q.out b/ql/src/test/results/clientpositive/groupby_sort_8.q.out index 1401c3075a..999cc8648d 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_8.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_8.q.out @@ -1,39 +1,39 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE T1_n28(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: Output: default@T1_n28 +POSTHOOK: query: CREATE TABLE T1_n28(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1') +POSTHOOK: Output: default@T1_n28 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n28 PARTITION (ds='1') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1') +PREHOOK: Output: default@t1_n28 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n28 PARTITION (ds='1') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@ds=1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +POSTHOOK: Output: default@t1_n28 +POSTHOOK: Output: default@t1_n28@ds=1 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n28 PARTITION (ds='1') select key, val from T1_n28 where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=1 -PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +PREHOOK: Input: default@t1_n28 +PREHOOK: Input: default@t1_n28@ds=1 +PREHOOK: Output: default@t1_n28@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n28 PARTITION (ds='1') select key, val from T1_n28 where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=1 -POSTHOOK: Output: default@t1@ds=1 -POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Input: default@t1_n28 +POSTHOOK: Input: default@t1_n28@ds=1 +POSTHOOK: Output: default@t1_n28@ds=1 +POSTHOOK: Lineage: t1_n28 PARTITION(ds=1).key SIMPLE [(t1_n28)t1_n28.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n28 PARTITION(ds=1).val SIMPLE [(t1_n28)t1_n28.FieldSchema(name:val, type:string, comment:null), ] PREHOOK: query: EXPLAIN -select count(distinct key) from T1 +select count(distinct key) from T1_n28 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -select count(distinct key) from T1 +select count(distinct key) from T1_n28 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -44,7 +44,7 @@ STAGE PLANS: Map Reduce 
Map Operator Tree: TableScan - alias: t1 + alias: t1_n28 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -82,22 +82,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(distinct key) from T1 +PREHOOK: query: select count(distinct key) from T1_n28 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=1 +PREHOOK: Input: default@t1_n28 +PREHOOK: Input: default@t1_n28@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select count(distinct key) from T1 +POSTHOOK: query: select count(distinct key) from T1_n28 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=1 +POSTHOOK: Input: default@t1_n28 +POSTHOOK: Input: default@t1_n28@ds=1 #### A masked pattern was here #### 5 -PREHOOK: query: DROP TABLE T1 +PREHOOK: query: DROP TABLE T1_n28 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: DROP TABLE T1 +PREHOOK: Input: default@t1_n28 +PREHOOK: Output: default@t1_n28 +POSTHOOK: query: DROP TABLE T1_n28 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n28 +POSTHOOK: Output: default@t1_n28 diff --git a/ql/src/test/results/clientpositive/groupby_sort_9.q.out b/ql/src/test/results/clientpositive/groupby_sort_9.q.out index ac7946bc35..a0d6049d70 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_9.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_9.q.out @@ -1,51 +1,51 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE T1_n62(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: Output: default@T1_n62 +POSTHOOK: query: CREATE TABLE T1_n62(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1') +POSTHOOK: Output: default@T1_n62 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n62 PARTITION (ds='1') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 PARTITION (ds='1') +PREHOOK: Output: default@t1_n62 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n62 PARTITION (ds='1') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@ds=1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +POSTHOOK: Output: default@t1_n62 +POSTHOOK: Output: default@t1_n62@ds=1 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n62 PARTITION (ds='1') select key, val from T1_n62 where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=1 -PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +PREHOOK: Input: default@t1_n62 
+PREHOOK: Input: default@t1_n62@ds=1 +PREHOOK: Output: default@t1_n62@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n62 PARTITION (ds='1') select key, val from T1_n62 where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=1 -POSTHOOK: Output: default@t1@ds=1 -POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='2') select key, val from T1 where ds = '1' +POSTHOOK: Input: default@t1_n62 +POSTHOOK: Input: default@t1_n62@ds=1 +POSTHOOK: Output: default@t1_n62@ds=1 +POSTHOOK: Lineage: t1_n62 PARTITION(ds=1).key SIMPLE [(t1_n62)t1_n62.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n62 PARTITION(ds=1).val SIMPLE [(t1_n62)t1_n62.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: INSERT OVERWRITE TABLE T1_n62 PARTITION (ds='2') select key, val from T1_n62 where ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=1 -PREHOOK: Output: default@t1@ds=2 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='2') select key, val from T1 where ds = '1' +PREHOOK: Input: default@t1_n62 +PREHOOK: Input: default@t1_n62@ds=1 +PREHOOK: Output: default@t1_n62@ds=2 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n62 PARTITION (ds='2') select key, val from T1_n62 where ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=1 -POSTHOOK: Output: default@t1@ds=2 -POSTHOOK: Lineage: t1 PARTITION(ds=2).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Input: default@t1_n62 +POSTHOOK: Input: default@t1_n62@ds=1 +POSTHOOK: Output: default@t1_n62@ds=2 +POSTHOOK: Lineage: t1_n62 PARTITION(ds=2).key SIMPLE [(t1_n62)t1_n62.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n62 PARTITION(ds=2).val SIMPLE [(t1_n62)t1_n62.FieldSchema(name:val, type:string, comment:null), ] PREHOOK: query: EXPLAIN -select key, count(1) from T1 group by key +select key, count(1) from T1_n62 group by key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -select key, count(1) from T1 group by key +select key, count(1) from T1_n62 group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -56,7 +56,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n62 Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -97,28 +97,28 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select key, count(1) from T1 group by key +PREHOOK: query: select key, count(1) from T1_n62 group by key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=1 -PREHOOK: Input: default@t1@ds=2 +PREHOOK: Input: default@t1_n62 +PREHOOK: Input: default@t1_n62@ds=1 +PREHOOK: Input: default@t1_n62@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select key, count(1) from T1 group by key +POSTHOOK: query: select key, count(1) from T1_n62 group by key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=1 -POSTHOOK: Input: default@t1@ds=2 +POSTHOOK: Input: default@t1_n62 +POSTHOOK: Input: default@t1_n62@ds=1 +POSTHOOK: Input: default@t1_n62@ds=2 #### 
A masked pattern was here #### 1 2 2 2 3 2 7 2 8 4 -PREHOOK: query: DROP TABLE T1 +PREHOOK: query: DROP TABLE T1_n62 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: DROP TABLE T1 +PREHOOK: Input: default@t1_n62 +PREHOOK: Output: default@t1_n62 +POSTHOOK: query: DROP TABLE T1_n62 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n62 +POSTHOOK: Output: default@t1_n62 diff --git a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out index 79228cf1ad..2ca04ec553 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out @@ -1,46 +1,46 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n35(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n35 +POSTHOOK: query: CREATE TABLE T1_n35(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +POSTHOOK: Output: default@T1_n35 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n35 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +PREHOOK: Output: default@t1_n35 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n35 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: Output: default@t1_n35 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n35 select key, val from T1_n35 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@t1_n35 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n35 select key, val from T1_n35 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@t1_n35 +POSTHOOK: Lineage: t1_n35.key SIMPLE [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n35.val SIMPLE [(t1_n35)t1_n35.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE outputTbl1_n13(key int, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) +PREHOOK: Output: default@outputTbl1_n13 +POSTHOOK: query: CREATE TABLE outputTbl1_n13(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 +POSTHOOK: 
Output: default@outputTbl1_n13 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM T1_n35 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM T1_n35 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -57,7 +57,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -93,17 +93,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -128,7 +128,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -142,11 +142,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -166,20 +166,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [t1] + /t1_n35 [t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -236,17 +236,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 Stage: Stage-2 Stats Work @@ -255,7 +255,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n13 Is Table Level Stats: true Stage: 
Stage-3 @@ -280,17 +280,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -311,11 +311,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -333,18 +333,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -370,17 +370,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -401,11 +401,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -423,18 +423,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -444,46 +444,46 @@ STAGE PLANS: hdfs 
directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM T1_n35 GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl1_n13 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM T1_n35 GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl1_n13 +POSTHOOK: Lineage: outputtbl1_n13.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl1_n13.key EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### 1 1 2 1 3 1 7 1 8 2 -PREHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) +PREHOOK: query: CREATE TABLE outputTbl2_n3(key1 int, key2 string, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) +PREHOOK: Output: default@outputTbl2_n3 +POSTHOOK: query: CREATE TABLE outputTbl2_n3(key1 int, key2 string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl2 +POSTHOOK: Output: default@outputTbl2_n3 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl2_n3 +SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +INSERT OVERWRITE TABLE outputTbl2_n3 +SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -497,7 +497,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -525,7 +525,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -539,11 +539,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -563,20 +563,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [t1] + /t1_n35 [t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -677,17 +677,17 @@ STAGE PLANS: columns.comments columns.types int:string:int #### A masked pattern was here #### - name default.outputtbl2 + name default.outputtbl2_n3 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt} + serialization.ddl struct outputtbl2_n3 { i32 key1, string key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n3 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -736,17 +736,17 @@ STAGE PLANS: columns.comments columns.types int:string:int #### A masked pattern was here #### - name default.outputtbl2 + name default.outputtbl2_n3 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt} + serialization.ddl struct outputtbl2_n3 { i32 key1, string key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n3 Stage: Stage-3 Stats Work @@ -755,7 +755,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, cnt Column Types: int, string, int - Table: default.outputtbl2 + Table: default.outputtbl2_n3 Is Table Level Stats: true Stage: Stage-4 @@ -828,26 +828,26 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n3 +SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl2 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2 -SELECT key, val, count(1) FROM T1 GROUP BY key, val +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl2_n3 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl2_n3 +SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl2 -POSTHOOK: Lineage: outputtbl2.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl2.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.key2 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl2 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl2_n3 +POSTHOOK: Lineage: outputtbl2_n3.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl2_n3.key1 EXPRESSION 
[(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl2_n3.key2 SIMPLE [(t1_n35)t1_n35.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl2_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl2 +PREHOOK: Input: default@outputtbl2_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl2 +POSTHOOK: query: SELECT * FROM outputTbl2_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl2 +POSTHOOK: Input: default@outputtbl2_n3 #### A masked pattern was here #### 1 11 1 2 12 1 @@ -856,12 +856,12 @@ POSTHOOK: Input: default@outputtbl2 8 18 1 8 28 1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -878,7 +878,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -914,17 +914,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -949,7 +949,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -963,11 +963,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -987,20 +987,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [t1] + /t1_n35 [t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -1057,17 +1057,17 @@ STAGE 
PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 Stage: Stage-2 Stats Work @@ -1076,7 +1076,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n13 Is Table Level Stats: true Stage: Stage-3 @@ -1101,17 +1101,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -1132,11 +1132,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 @@ -1154,18 +1154,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -1191,17 +1191,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -1222,11 +1222,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 @@ -1244,18 +1244,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -1265,25 +1265,25 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl1_n13 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl1_n13 +POSTHOOK: Lineage: outputtbl1_n13.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl1_n13.key EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### 1 1 2 1 @@ -1291,12 +1291,12 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k +INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k +INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1313,7 +1313,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -1349,17 +1349,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + 
name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -1384,7 +1384,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -1398,11 +1398,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -1422,20 +1422,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [t1] + /t1_n35 [t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -1492,17 +1492,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 Stage: Stage-2 Stats Work @@ -1511,7 +1511,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n13 Is Table Level Stats: true Stage: Stage-3 @@ -1536,17 +1536,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -1567,11 +1567,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - 
serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 @@ -1589,18 +1589,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -1626,17 +1626,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -1657,11 +1657,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 @@ -1679,18 +1679,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -1700,46 +1700,46 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl1_n13 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: 
default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl1_n13 +POSTHOOK: Lineage: outputtbl1_n13.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl1_n13.key EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### 1 1 2 1 3 1 7 1 8 2 -PREHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) +PREHOOK: query: CREATE TABLE outputTbl3_n1(key1 int, key2 int, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl3 -POSTHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) +PREHOOK: Output: default@outputTbl3_n1 +POSTHOOK: query: CREATE TABLE outputTbl3_n1(key1 int, key2 int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl3 +POSTHOOK: Output: default@outputTbl3_n1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key +INSERT OVERWRITE TABLE outputTbl3_n1 +SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key +INSERT OVERWRITE TABLE outputTbl3_n1 +SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1756,7 +1756,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -1792,17 +1792,17 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -1827,7 +1827,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -1841,11 +1841,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe 
totalSize 30 @@ -1865,20 +1865,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [t1] + /t1_n35 [t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -1935,17 +1935,17 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n1 Stage: Stage-2 Stats Work @@ -1954,7 +1954,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, cnt Column Types: int, int, int - Table: default.outputtbl3 + Table: default.outputtbl3_n1 Is Table Level Stats: true Stage: Stage-3 @@ -1979,17 +1979,17 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -2010,11 +2010,11 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -2032,18 +2032,18 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - name: default.outputtbl3 + name: default.outputtbl3_n1 + name: default.outputtbl3_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -2069,17 +2069,17 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name 
default.outputtbl3_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -2100,11 +2100,11 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -2122,18 +2122,18 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - name: default.outputtbl3 + name: default.outputtbl3_n1 + name: default.outputtbl3_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -2143,47 +2143,47 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3_n1 +SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl3 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl3_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3_n1 +SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl3 -POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl3.key1 SIMPLE [] -POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl3 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl3_n1 +POSTHOOK: Lineage: outputtbl3_n1.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl3_n1.key1 SIMPLE [] +POSTHOOK: Lineage: outputtbl3_n1.key2 EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl3_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl3 +PREHOOK: Input: default@outputtbl3_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl3 +POSTHOOK: query: SELECT * FROM outputTbl3_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl3 +POSTHOOK: Input: default@outputtbl3_n1 #### A masked pattern was here #### 1 1 1 1 2 1 1 3 1 1 7 1 1 8 2 -PREHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int) +PREHOOK: query: CREATE TABLE 
outputTbl4_n1(key1 int, key2 int, key3 string, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl4 -POSTHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int) +PREHOOK: Output: default@outputTbl4_n1 +POSTHOOK: query: CREATE TABLE outputTbl4_n1(key1 int, key2 int, key3 string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl4 +POSTHOOK: Output: default@outputTbl4_n1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val +INSERT OVERWRITE TABLE outputTbl4_n1 +SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val +INSERT OVERWRITE TABLE outputTbl4_n1 +SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2197,7 +2197,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -2225,7 +2225,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -2239,11 +2239,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -2263,20 +2263,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [t1] + /t1_n35 [t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -2377,17 +2377,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -2436,17 +2436,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 
0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 Stage: Stage-3 Stats Work @@ -2455,7 +2455,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, key3, cnt Column Types: int, int, string, int - Table: default.outputtbl4 + Table: default.outputtbl4_n1 Is Table Level Stats: true Stage: Stage-4 @@ -2528,27 +2528,27 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n1 +SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl4_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n1 +SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl4_n1 +POSTHOOK: Lineage: outputtbl4_n1.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl4_n1.key1 EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl4_n1.key2 SIMPLE [] +POSTHOOK: Lineage: outputtbl4_n1.key3 SIMPLE [(t1_n35)t1_n35.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl4_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 +PREHOOK: Input: default@outputtbl4_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 +POSTHOOK: query: SELECT * FROM outputTbl4_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 +POSTHOOK: Input: default@outputtbl4_n1 #### A masked pattern was here #### 1 1 11 1 2 1 12 1 @@ -2557,12 +2557,12 @@ POSTHOOK: Input: default@outputtbl4 8 1 18 1 8 1 28 1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 +INSERT OVERWRITE TABLE outputTbl3_n1 +SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 +INSERT OVERWRITE TABLE outputTbl3_n1 +SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2576,7 +2576,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -2604,7 +2604,7 @@ 
STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -2618,11 +2618,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -2642,20 +2642,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [$hdt$_0:t1] + /t1_n35 [$hdt$_0:t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -2756,17 +2756,17 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 1 numRows 5 rawDataSize 25 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -2815,17 +2815,17 @@ STAGE PLANS: columns.comments columns.types int:int:int #### A masked pattern was here #### - name default.outputtbl3 + name default.outputtbl3_n1 numFiles 1 numRows 5 rawDataSize 25 - serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.ddl struct outputtbl3_n1 { i32 key1, i32 key2, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n1 Stage: Stage-3 Stats Work @@ -2834,7 +2834,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, cnt Column Types: int, int, int - Table: default.outputtbl3 + Table: default.outputtbl3_n1 Is Table Level Stats: true Stage: Stage-4 @@ -2907,26 +2907,26 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3_n1 +SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl3 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3 -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl3_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl3_n1 +SELECT key, key + 
1, count(1) FROM T1_n35 GROUP BY key, key + 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl3 -POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl3.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl3 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl3_n1 +POSTHOOK: Lineage: outputtbl3_n1.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl3_n1.key1 EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl3_n1.key2 EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl3_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl3 +PREHOOK: Input: default@outputtbl3_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl3 +POSTHOOK: query: SELECT * FROM outputTbl3_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl3 +POSTHOOK: Input: default@outputtbl3_n1 #### A masked pattern was here #### 1 2 1 2 3 1 @@ -2934,15 +2934,15 @@ POSTHOOK: Input: default@outputtbl3 7 8 1 8 9 2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n13 SELECT cast(key + key as string), sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1 group by key + key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n13 SELECT cast(key + key as string), sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1 group by key + key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -2957,7 +2957,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -2995,7 +2995,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -3009,11 +3009,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -3033,20 +3033,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [$hdt$_0:t1] + /t1_n35 
[$hdt$_0:t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -3147,17 +3147,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -3206,17 +3206,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 Stage: Stage-3 Stats Work @@ -3225,7 +3225,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n13 Is Table Level Stats: true Stage: Stage-4 @@ -3298,29 +3298,29 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 SELECT cast(key + key as string), sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1 group by key + key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl1_n13 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 SELECT cast(key + key as string), sum(cnt) from -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1 group by key + key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl1_n13 +POSTHOOK: Lineage: outputtbl1_n13.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl1_n13.key EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### 14 1 16 2 @@ -3328,19 +3328,19 @@ POSTHOOK: Input: default@outputtbl1 4 1 6 1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n13 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY 
key +SELECT key, count(1) FROM T1_n35 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key ) subq1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n13 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key ) subq1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -3358,7 +3358,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -3396,17 +3396,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -3427,7 +3427,7 @@ STAGE PLANS: value expressions: _col0 (type: struct), _col1 (type: struct) auto parallelism: false TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -3465,17 +3465,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -3500,7 +3500,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -3514,11 +3514,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -3538,20 +3538,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + 
name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [null-subquery1:$hdt$_0-subquery1:t1, null-subquery2:$hdt$_0-subquery2:t1] + /t1_n35 [null-subquery1:$hdt$_0-subquery1:t1_n35, null-subquery2:$hdt$_0-subquery2:t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -3608,17 +3608,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 Stage: Stage-2 Stats Work @@ -3627,7 +3627,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n13 Is Table Level Stats: true Stage: Stage-3 @@ -3652,17 +3652,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -3683,11 +3683,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 @@ -3705,18 +3705,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -3742,17 +3742,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -3773,11 +3773,11 @@ STAGE PLANS: 
columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 @@ -3795,18 +3795,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 17 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 22 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -3816,33 +3816,33 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key ) subq1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl1_n13 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key UNION ALL -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key ) subq1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl1_n13 +POSTHOOK: Lineage: outputtbl1_n13.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl1_n13.key EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### 1 1 1 1 @@ -3855,19 +3855,19 @@ POSTHOOK: Input: default@outputtbl1 8 2 8 2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n13 SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) FROM T1_n35 GROUP BY key + key ) subq1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n13 
SELECT * FROM ( -SELECT key, count(1) FROM T1 GROUP BY key +SELECT key, count(1) FROM T1_n35 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) FROM T1_n35 GROUP BY key + key ) subq1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -3887,7 +3887,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -3915,7 +3915,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -3929,11 +3929,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -3953,20 +3953,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1] + /t1_n35 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -4071,7 +4071,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -4109,17 +4109,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -4166,17 +4166,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -4223,7 +4223,7 @@ STAGE 
PLANS: serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -4237,11 +4237,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -4261,20 +4261,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [null-subquery1:$hdt$_0-subquery1:t1] + /t1_n35 [null-subquery1:$hdt$_0-subquery1:t1_n35] #### A masked pattern was here #### Needs Tagging: false Reduce Operator Tree: @@ -4332,17 +4332,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 Stage: Stage-3 Stats Work @@ -4351,7 +4351,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n13 Is Table Level Stats: true Stage: Stage-4 @@ -4376,17 +4376,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -4407,11 +4407,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 @@ -4429,18 +4429,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 
numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -4466,17 +4466,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -4497,11 +4497,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 @@ -4519,18 +4519,18 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 30 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 40 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 - name: default.outputtbl1 + name: default.outputtbl1_n13 + name: default.outputtbl1_n13 Truncated Path -> Alias: #### A masked pattern was here #### @@ -4540,33 +4540,33 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key +SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) as cnt FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) as cnt FROM T1_n35 GROUP BY key + key ) subq1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl1_n13 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 SELECT * FROM ( -SELECT key, count(1) as cnt FROM T1 GROUP BY key +SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key UNION ALL -SELECT cast(key + key as string) as key, count(1) as cnt FROM T1 GROUP BY key + key +SELECT cast(key + key as string) as key, count(1) as cnt FROM T1_n35 GROUP BY key + key ) subq1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: 
Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl1_n13 +POSTHOOK: Lineage: outputtbl1_n13.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl1_n13.key EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### 1 1 14 1 @@ -4579,19 +4579,19 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n13 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n13 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2 ON subq1.key = subq2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -4605,7 +4605,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -4628,7 +4628,7 @@ STAGE PLANS: value expressions: _col1 (type: bigint) auto parallelism: false TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -4655,7 +4655,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -4669,11 +4669,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -4693,20 +4693,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 
Truncated Path -> Alias: - /t1 [$hdt$_0:t1, $hdt$_1:t1] + /t1_n35 [$hdt$_0:t1_n35, $hdt$_1:t1_n35] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -4740,17 +4740,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 32 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 42 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -4799,17 +4799,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 10 rawDataSize 32 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 42 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 Stage: Stage-2 Stats Work @@ -4818,7 +4818,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n13 Is Table Level Stats: true Stage: Stage-3 @@ -4891,33 +4891,33 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@outputtbl1_n13 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 SELECT subq1.key, subq1.cnt+subq2.cnt FROM -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1 JOIN -(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 +(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2 ON subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t1)t1.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@outputtbl1_n13 +POSTHOOK: Lineage: outputtbl1_n13.cnt EXPRESSION [(t1_n35)t1_n35.null, ] +POSTHOOK: Lineage: outputtbl1_n13.key EXPRESSION [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### 1 2 2 2 @@ -4926,16 +4926,16 @@ POSTHOOK: Input: default@outputtbl1 8 4 PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) FROM T1_n35 GROUP BY key) subq1 JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 +(SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM -(SELECT key, count(1) FROM T1 GROUP BY key) subq1 +(SELECT key, count(1) FROM T1_n35 GROUP BY key) subq1 JOIN -(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 +(SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val) subq2 ON subq1.key = subq2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -4949,7 +4949,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -4977,7 +4977,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -4991,11 +4991,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -5015,20 +5015,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [$hdt$_1:t1] + /t1_n35 [$hdt$_1:t1_n35] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -5129,7 +5129,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n35 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -5189,7 +5189,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe #### A masked pattern was here #### Partition - base file name: t1 + base file name: t1_n35 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -5203,11 +5203,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -5227,20 +5227,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked 
pattern was here #### - name default.t1 + name default.t1_n35 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t1 { string key, string val} + serialization.ddl struct t1_n35 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t1 - name: default.t1 + name: default.t1_n35 + name: default.t1_n35 Truncated Path -> Alias: - /t1 [$hdt$_0:t1] + /t1_n35 [$hdt$_0:t1_n35] #### A masked pattern was here #### Needs Tagging: true Reduce Operator Tree: @@ -5281,33 +5281,33 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T2_n22(key STRING, val STRING) CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: Output: default@T2_n22 +POSTHOOK: query: CREATE TABLE T2_n22(key STRING, val STRING) CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 +POSTHOOK: Output: default@T2_n22 +PREHOOK: query: INSERT OVERWRITE TABLE T2_n22 select key, val from T1_n35 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t2 -POSTHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 +PREHOOK: Input: default@t1_n35 +PREHOOK: Output: default@t2_n22 +POSTHOOK: query: INSERT OVERWRITE TABLE T2_n22 select key, val from T1_n35 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Input: default@t1_n35 +POSTHOOK: Output: default@t2_n22 +POSTHOOK: Lineage: t2_n22.key SIMPLE [(t1_n35)t1_n35.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t2_n22.val SIMPLE [(t1_n35)t1_n35.FieldSchema(name:val, type:string, comment:null), ] PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM T2_n22 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM T2_n22 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -5321,7 +5321,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n22 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -5350,7 +5350,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t2 + base file name: t2_n22 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -5364,11 +5364,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, 
string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -5388,20 +5388,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 + name: default.t2_n22 + name: default.t2_n22 Truncated Path -> Alias: - /t2 [t2] + /t2_n22 [t2_n22] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -5502,17 +5502,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -5561,17 +5561,17 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.outputtbl1 + name default.outputtbl1_n13 numFiles 1 numRows 5 rawDataSize 15 - serialization.ddl struct outputtbl1 { i32 key, i32 cnt} + serialization.ddl struct outputtbl1_n13 { i32 key, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 20 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n13 Stage: Stage-3 Stats Work @@ -5580,7 +5580,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n13 Is Table Level Stats: true Stage: Stage-4 @@ -5653,25 +5653,25 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM T2_n22 GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T2 GROUP BY key +PREHOOK: Input: default@t2_n22 +PREHOOK: Output: default@outputtbl1_n13 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n13 +SELECT key, count(1) FROM T2_n22 GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: Input: default@t2_n22 +POSTHOOK: Output: default@outputtbl1_n13 +POSTHOOK: Lineage: outputtbl1_n13.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: outputtbl1_n13.key EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM 
outputTbl1_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl1 +POSTHOOK: query: SELECT * FROM outputTbl1_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n13 #### A masked pattern was here #### 1 1 2 1 @@ -5679,12 +5679,12 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val +INSERT OVERWRITE TABLE outputTbl4_n1 +SELECT key, 1, val, count(1) FROM T2_n22 GROUP BY key, 1, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val +INSERT OVERWRITE TABLE outputTbl4_n1 +SELECT key, 1, val, count(1) FROM T2_n22 GROUP BY key, 1, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -5701,7 +5701,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n22 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -5737,17 +5737,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -5772,7 +5772,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t2 + base file name: t2_n22 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -5786,11 +5786,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -5810,20 +5810,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 + name: default.t2_n22 + name: default.t2_n22 Truncated Path -> Alias: - /t2 [t2] + /t2_n22 [t2_n22] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -5880,17 +5880,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct 
outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 Stage: Stage-2 Stats Work @@ -5899,7 +5899,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, key3, cnt Column Types: int, int, string, int - Table: default.outputtbl4 + Table: default.outputtbl4_n1 Is Table Level Stats: true Stage: Stage-3 @@ -5924,17 +5924,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -5955,11 +5955,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 @@ -5977,18 +5977,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 + name: default.outputtbl4_n1 + name: default.outputtbl4_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -6014,17 +6014,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -6045,11 +6045,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string 
key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 @@ -6067,18 +6067,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 + name: default.outputtbl4_n1 + name: default.outputtbl4_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -6088,27 +6088,27 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n1 +SELECT key, 1, val, count(1) FROM T2_n22 GROUP BY key, 1, val PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 -SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val +PREHOOK: Input: default@t2_n22 +PREHOOK: Output: default@outputtbl4_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n1 +SELECT key, 1, val, count(1) FROM T2_n22 GROUP BY key, 1, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 +POSTHOOK: Input: default@t2_n22 +POSTHOOK: Output: default@outputtbl4_n1 +POSTHOOK: Lineage: outputtbl4_n1.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: outputtbl4_n1.key1 EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl4_n1.key2 SIMPLE [] +POSTHOOK: Lineage: outputtbl4_n1.key3 SIMPLE [(t2_n22)t2_n22.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl4_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 +PREHOOK: Input: default@outputtbl4_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 +POSTHOOK: query: SELECT * FROM outputTbl4_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 +POSTHOOK: Input: default@outputtbl4_n1 #### A masked pattern was here #### 1 1 11 1 2 1 12 1 @@ -6116,21 +6116,21 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int) +PREHOOK: query: CREATE TABLE outputTbl5_n1(key1 int, key2 int, key3 string, key4 int, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl5 -POSTHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int) +PREHOOK: Output: default@outputTbl5_n1 +POSTHOOK: query: CREATE TABLE outputTbl5_n1(key1 int, key2 int, key3 string, key4 
int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl5 +POSTHOOK: Output: default@outputTbl5_n1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 +INSERT OVERWRITE TABLE outputTbl5_n1 +SELECT key, 1, val, 2, count(1) FROM T2_n22 GROUP BY key, 1, val, 2 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 +INSERT OVERWRITE TABLE outputTbl5_n1 +SELECT key, 1, val, 2, count(1) FROM T2_n22 GROUP BY key, 1, val, 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -6147,7 +6147,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n22 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -6183,17 +6183,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int:int #### A masked pattern was here #### - name default.outputtbl5 + name default.outputtbl5_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} + serialization.ddl struct outputtbl5_n1 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 + name: default.outputtbl5_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -6218,7 +6218,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t2 + base file name: t2_n22 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -6232,11 +6232,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -6256,20 +6256,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 + name: default.t2_n22 + name: default.t2_n22 Truncated Path -> Alias: - /t2 [t2] + /t2_n22 [t2_n22] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -6326,17 +6326,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int:int #### A masked pattern was here #### - name default.outputtbl5 + name default.outputtbl5_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} + serialization.ddl struct outputtbl5_n1 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 + name: default.outputtbl5_n1 Stage: Stage-2 Stats Work @@ -6345,7 +6345,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, key3, key4, cnt Column Types: int, int, string, int, int - Table: default.outputtbl5 + Table: default.outputtbl5_n1 Is Table Level Stats: true Stage: Stage-3 @@ -6370,17 +6370,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int:int #### A masked pattern was here #### - name default.outputtbl5 + name default.outputtbl5_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} + serialization.ddl struct outputtbl5_n1 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 + name: default.outputtbl5_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -6401,11 +6401,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int:int #### A masked pattern was here #### - name default.outputtbl5 + name default.outputtbl5_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} + serialization.ddl struct outputtbl5_n1 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -6423,18 +6423,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int:int #### A masked pattern was here #### - name default.outputtbl5 + name default.outputtbl5_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} + serialization.ddl struct outputtbl5_n1 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - name: default.outputtbl5 + name: default.outputtbl5_n1 + name: default.outputtbl5_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -6460,17 +6460,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int:int #### A masked pattern was here #### - name default.outputtbl5 + name default.outputtbl5_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} + serialization.ddl struct outputtbl5_n1 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 + name: default.outputtbl5_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -6491,11 +6491,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int:int #### A masked pattern was here #### - name default.outputtbl5 + name default.outputtbl5_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} + serialization.ddl struct outputtbl5_n1 { i32 key1, i32 
key2, string key3, i32 key4, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -6513,18 +6513,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int:int #### A masked pattern was here #### - name default.outputtbl5 + name default.outputtbl5_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} + serialization.ddl struct outputtbl5_n1 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl5 - name: default.outputtbl5 + name: default.outputtbl5_n1 + name: default.outputtbl5_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -6534,30 +6534,30 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5_n1 +SELECT key, 1, val, 2, count(1) FROM T2_n22 GROUP BY key, 1, val, 2 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl5 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl5 -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 +PREHOOK: Input: default@t2_n22 +PREHOOK: Output: default@outputtbl5_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl5_n1 +SELECT key, 1, val, 2, count(1) FROM T2_n22 GROUP BY key, 1, val, 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl5 -POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl5.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl5.key4 SIMPLE [] -PREHOOK: query: SELECT * FROM outputTbl5 +POSTHOOK: Input: default@t2_n22 +POSTHOOK: Output: default@outputtbl5_n1 +POSTHOOK: Lineage: outputtbl5_n1.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: outputtbl5_n1.key1 EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl5_n1.key2 SIMPLE [] +POSTHOOK: Lineage: outputtbl5_n1.key3 SIMPLE [(t2_n22)t2_n22.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl5_n1.key4 SIMPLE [] +PREHOOK: query: SELECT * FROM outputTbl5_n1 ORDER BY key1, key2, key3, key4 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl5 +PREHOOK: Input: default@outputtbl5_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl5 +POSTHOOK: query: SELECT * FROM outputTbl5_n1 ORDER BY key1, key2, key3, key4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl5 +POSTHOOK: Input: default@outputtbl5_n1 #### A masked pattern was here #### 1 1 11 2 1 2 1 12 2 1 @@ -6566,15 +6566,15 @@ POSTHOOK: Input: default@outputtbl5 8 1 18 2 1 8 1 28 2 1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 +INSERT OVERWRITE TABLE outputTbl4_n1 SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n22)subq group by key, constant, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 
+INSERT OVERWRITE TABLE outputTbl4_n1 SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n22)subq group by key, constant, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -6592,7 +6592,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n22 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -6628,17 +6628,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -6663,7 +6663,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t2 + base file name: t2_n22 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -6677,11 +6677,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -6701,20 +6701,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 + name: default.t2_n22 + name: default.t2_n22 Truncated Path -> Alias: - /t2 [t2] + /t2_n22 [t2_n22] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -6771,17 +6771,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 Stage: Stage-2 Stats Work @@ -6790,7 +6790,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, key3, cnt Column Types: int, int, string, int - Table: default.outputtbl4 + Table: default.outputtbl4_n1 Is Table Level Stats: true Stage: Stage-3 @@ -6815,17 +6815,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked 
pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -6846,11 +6846,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 @@ -6868,18 +6868,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 + name: default.outputtbl4_n1 + name: default.outputtbl4_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -6905,17 +6905,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -6936,11 +6936,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 @@ -6958,18 +6958,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 + name: default.outputtbl4_n1 + name: default.outputtbl4_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -6979,31 +6979,31 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n1 SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n22)subq group by key, constant, val PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 +PREHOOK: Input: default@t2_n22 +PREHOOK: Output: default@outputtbl4_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n1 SELECT key, constant, val, count(1) from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n22)subq group by key, constant, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 +POSTHOOK: Input: default@t2_n22 +POSTHOOK: Output: default@outputtbl4_n1 +POSTHOOK: Lineage: outputtbl4_n1.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: outputtbl4_n1.key1 EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl4_n1.key2 SIMPLE [] +POSTHOOK: Lineage: outputtbl4_n1.key3 SIMPLE [(t2_n22)t2_n22.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl4_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 +PREHOOK: Input: default@outputtbl4_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 +POSTHOOK: query: SELECT * FROM outputTbl4_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 +POSTHOOK: Input: default@outputtbl4_n1 #### A masked pattern was here #### 1 1 11 1 2 1 12 1 @@ -7012,20 +7012,20 @@ POSTHOOK: Input: default@outputtbl4 8 1 18 1 8 1 28 1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 +INSERT OVERWRITE TABLE outputTbl4_n1 select key, constant3, val, count(1) from ( SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n22)subq )subq2 group by key, constant3, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE outputTbl4 +INSERT OVERWRITE TABLE outputTbl4_n1 select key, constant3, val, count(1) from ( SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n22)subq )subq2 group by key, constant3, val POSTHOOK: type: QUERY @@ -7044,7 +7044,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n22 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -7080,17 +7080,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - 
serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -7115,7 +7115,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: t2 + base file name: t2_n22 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -7129,11 +7129,11 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 @@ -7153,20 +7153,20 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.t2 + name default.t2_n22 numFiles 2 numRows 6 rawDataSize 24 - serialization.ddl struct t2 { string key, string val} + serialization.ddl struct t2_n22 { string key, string val} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 30 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.t2 - name: default.t2 + name: default.t2_n22 + name: default.t2_n22 Truncated Path -> Alias: - /t2 [t2] + /t2_n22 [t2_n22] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -7223,17 +7223,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 Stage: Stage-2 Stats Work @@ -7242,7 +7242,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, key2, key3, cnt Column Types: int, int, string, int - Table: default.outputtbl4 + Table: default.outputtbl4_n1 Is Table Level Stats: true Stage: Stage-3 @@ -7267,17 +7267,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -7298,11 +7298,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### 
A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 @@ -7320,18 +7320,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 + name: default.outputtbl4_n1 + name: default.outputtbl4_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -7357,17 +7357,17 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 + name: default.outputtbl4_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -7388,11 +7388,11 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 @@ -7410,18 +7410,18 @@ STAGE PLANS: columns.comments columns.types int:int:string:int #### A masked pattern was here #### - name default.outputtbl4 + name default.outputtbl4_n1 numFiles 1 numRows 6 rawDataSize 48 - serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.ddl struct outputtbl4_n1 { i32 key1, i32 key2, string key3, i32 cnt} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 54 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl4 - name: default.outputtbl4 + name: default.outputtbl4_n1 + name: default.outputtbl4_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -7431,37 +7431,37 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n1 select key, constant3, val, count(1) from ( SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n22)subq )subq2 group by key, constant3, val PREHOOK: type: QUERY -PREHOOK: Input: 
default@t2 -PREHOOK: Output: default@outputtbl4 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4 +PREHOOK: Input: default@t2_n22 +PREHOOK: Output: default@outputtbl4_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl4_n1 select key, constant3, val, count(1) from ( SELECT key, constant as constant2, val, 2 as constant3 from -(SELECT key, 1 as constant, val from T2)subq +(SELECT key, 1 as constant, val from T2_n22)subq )subq2 group by key, constant3, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@outputtbl4 -POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl4.key2 SIMPLE [] -POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM outputTbl4 +POSTHOOK: Input: default@t2_n22 +POSTHOOK: Output: default@outputtbl4_n1 +POSTHOOK: Lineage: outputtbl4_n1.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: outputtbl4_n1.key1 EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl4_n1.key2 SIMPLE [] +POSTHOOK: Lineage: outputtbl4_n1.key3 SIMPLE [(t2_n22)t2_n22.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM outputTbl4_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl4 +PREHOOK: Input: default@outputtbl4_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM outputTbl4 +POSTHOOK: query: SELECT * FROM outputTbl4_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl4 +POSTHOOK: Input: default@outputtbl4_n1 #### A masked pattern was here #### 1 2 11 1 2 2 12 1 @@ -7469,31 +7469,31 @@ POSTHOOK: Input: default@outputtbl4 7 2 17 1 8 2 18 1 8 2 28 1 -PREHOOK: query: CREATE TABLE DEST1(key INT, cnt INT) +PREHOOK: query: CREATE TABLE DEST1_n6(key INT, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key INT, cnt INT) +PREHOOK: Output: default@DEST1_n6 +POSTHOOK: query: CREATE TABLE DEST1_n6(key INT, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key INT, val STRING, cnt INT) +POSTHOOK: Output: default@DEST1_n6 +PREHOOK: query: CREATE TABLE DEST2_n5(key INT, val STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key INT, val STRING, cnt INT) +PREHOOK: Output: default@DEST2_n5 +POSTHOOK: query: CREATE TABLE DEST2_n5(key INT, val STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n5 PREHOOK: query: EXPLAIN -FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val +FROM T2_n22 +INSERT OVERWRITE TABLE DEST1_n6 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n5 SELECT key, val, count(1) GROUP BY key, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val +FROM T2_n22 +INSERT OVERWRITE TABLE DEST1_n6 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n5 SELECT key, val, count(1) GROUP BY key, val POSTHOOK: 
type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -7510,7 +7510,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n22 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -7550,7 +7550,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n5 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int) outputColumnNames: key, val, cnt @@ -7609,7 +7609,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n6 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: key, cnt @@ -7634,7 +7634,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n6 Stage: Stage-4 Stats Work @@ -7642,7 +7642,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.dest1 + Table: default.dest1_n6 Stage: Stage-5 Map Reduce @@ -7673,7 +7673,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val, cnt Column Types: int, string, int - Table: default.dest2 + Table: default.dest2_n5 Stage: Stage-1 Move Operator @@ -7683,7 +7683,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n5 Stage: Stage-7 Map Reduce @@ -7708,45 +7708,45 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val +PREHOOK: query: FROM T2_n22 +INSERT OVERWRITE TABLE DEST1_n6 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n5 SELECT key, val, count(1) GROUP BY key, val PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -POSTHOOK: query: FROM T2 -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val +PREHOOK: Input: default@t2_n22 +PREHOOK: Output: default@dest1_n6 +PREHOOK: Output: default@dest2_n5 +POSTHOOK: query: FROM T2_n22 +INSERT OVERWRITE TABLE DEST1_n6 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n5 SELECT key, val, count(1) GROUP BY key, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.val SIMPLE [(t2)t2.FieldSchema(name:val, type:string, 
comment:null), ] -PREHOOK: query: select * from DEST1 +POSTHOOK: Input: default@t2_n22 +POSTHOOK: Output: default@dest1_n6 +POSTHOOK: Output: default@dest2_n5 +POSTHOOK: Lineage: dest1_n6.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: dest1_n6.key EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest2_n5.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: dest2_n5.key EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest2_n5.val SIMPLE [(t2_n22)t2_n22.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: select * from DEST1_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from DEST1 +POSTHOOK: query: select * from DEST1_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n6 #### A masked pattern was here #### 1 1 2 1 3 1 7 1 8 2 -PREHOOK: query: select * from DEST2 +PREHOOK: query: select * from DEST2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from DEST2 +POSTHOOK: query: select * from DEST2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n5 #### A masked pattern was here #### 1 11 1 2 12 1 @@ -7755,14 +7755,14 @@ POSTHOOK: Input: default@dest2 8 18 1 8 28 1 PREHOOK: query: EXPLAIN -FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val +FROM (select key, val from T2_n22 where key = 8) x +INSERT OVERWRITE TABLE DEST1_n6 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n5 SELECT key, val, count(1) GROUP BY key, val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val +FROM (select key, val from T2_n22 where key = 8) x +INSERT OVERWRITE TABLE DEST1_n6 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n5 SELECT key, val, count(1) GROUP BY key, val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -7779,7 +7779,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n22 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(key) = 8.0D) (type: boolean) @@ -7822,7 +7822,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n5 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int) outputColumnNames: key, val, cnt @@ -7881,7 +7881,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n6 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: key, cnt @@ -7906,7 +7906,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n6 Stage: Stage-4 Stats Work @@ -7914,7 +7914,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.dest1 + Table: default.dest1_n6 Stage: Stage-5 Map Reduce @@ -7945,7 +7945,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val, cnt Column Types: int, string, int - Table: default.dest2 + Table: default.dest2_n5 Stage: Stage-1 Move Operator @@ -7955,7 +7955,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n5 Stage: Stage-7 Map Reduce @@ -7980,41 +7980,41 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val +PREHOOK: query: FROM (select key, val from T2_n22 where key = 8) x +INSERT OVERWRITE TABLE DEST1_n6 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n5 SELECT key, val, count(1) GROUP BY key, val PREHOOK: type: QUERY -PREHOOK: Input: default@t2 -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -POSTHOOK: query: FROM (select key, val from T2 where key = 8) x -INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key -INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val +PREHOOK: Input: default@t2_n22 +PREHOOK: Output: default@dest1_n6 +PREHOOK: Output: default@dest2_n5 +POSTHOOK: query: FROM (select key, val from T2_n22 where key = 8) x +INSERT OVERWRITE TABLE DEST1_n6 SELECT key, count(1) GROUP BY key +INSERT OVERWRITE TABLE DEST2_n5 SELECT key, val, count(1) GROUP BY key, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest1.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.cnt EXPRESSION [(t2)t2.null, ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest2.val SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: select * from DEST1 +POSTHOOK: Input: default@t2_n22 +POSTHOOK: Output: default@dest1_n6 +POSTHOOK: Output: default@dest2_n5 +POSTHOOK: Lineage: dest1_n6.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: dest1_n6.key EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest2_n5.cnt EXPRESSION [(t2_n22)t2_n22.null, ] +POSTHOOK: Lineage: dest2_n5.key EXPRESSION [(t2_n22)t2_n22.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest2_n5.val SIMPLE [(t2_n22)t2_n22.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: select * from DEST1_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from DEST1 +POSTHOOK: query: select * from DEST1_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n6 #### A masked pattern was here 
#### 8 2 -PREHOOK: query: select * from DEST2 +PREHOOK: query: select * from DEST2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from DEST2 +POSTHOOK: query: select * from DEST2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n5 #### A masked pattern was here #### 8 18 1 8 28 1 diff --git a/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out b/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out index 76adf6fa7d..8e65ce4395 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out @@ -1,46 +1,46 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n106(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n106 +POSTHOOK: query: CREATE TABLE T1_n106(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +POSTHOOK: Output: default@T1_n106 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n106 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1 +PREHOOK: Output: default@t1_n106 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n106 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: Output: default@t1_n106 +PREHOOK: query: INSERT OVERWRITE TABLE T1_n106 select key, val from T1_n106 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: Input: default@t1_n106 +PREHOOK: Output: default@t1_n106 +POSTHOOK: query: INSERT OVERWRITE TABLE T1_n106 select key, val from T1_n106 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) +POSTHOOK: Input: default@t1_n106 +POSTHOOK: Output: default@t1_n106 +POSTHOOK: Lineage: t1_n106.key SIMPLE [(t1_n106)t1_n106.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: t1_n106.val SIMPLE [(t1_n106)t1_n106.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE outputTbl1_n35(key int, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) +PREHOOK: Output: default@outputTbl1_n35 +POSTHOOK: query: CREATE TABLE outputTbl1_n35(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 +POSTHOOK: Output: default@outputTbl1_n35 PREHOOK: query: 
EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n35 +SELECT key, count(1) FROM T1_n106 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 -SELECT key, count(1) FROM T1 GROUP BY key +INSERT OVERWRITE TABLE outputTbl1_n35 +SELECT key, count(1) FROM T1_n106 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -57,7 +57,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n106 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -80,7 +80,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n35 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: key, cnt @@ -125,7 +125,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n35 Stage: Stage-2 Stats Work @@ -133,7 +133,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.outputtbl1 + Table: default.outputtbl1_n35 Stage: Stage-3 Map Reduce @@ -145,7 +145,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n35 Stage: Stage-5 Map Reduce @@ -157,7 +157,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n35 Stage: Stage-6 Move Operator diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out index fee672b3e3..cd1489d86f 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table_n5 (key STRING, value STRING) PARTITIONED BY (part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table -POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: Output: default@test_table_n5 +POSTHOOK: query: CREATE TABLE test_table_n5 (key STRING, value STRING) PARTITIONED BY (part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table -PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: Output: default@test_table_n5 +PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: Output: default@test_table_n5@part=1 +POSTHOOK: 
query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_table@part=1 -POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ] -PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1') +POSTHOOK: Output: default@test_table_n5@part=1 +POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ] +PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1') +PREHOOK: Input: default@test_table_n5 +POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n5 # col_name data_type comment key string value string @@ -35,7 +35,7 @@ part string # Detailed Partition Information Partition Value: [1] Database: default -Table: test_table +Table: test_table_n5 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -55,24 +55,24 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: Output: default@test_table_n5@part=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key, value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_table@part=1 -POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ] -PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1') +POSTHOOK: Output: default@test_table_n5@part=1 +POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ] +PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@test_table -POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1') +PREHOOK: Input: default@test_table_n5 +POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@test_table +POSTHOOK: Input: default@test_table_n5 # col_name data_type comment key string value string @@ -84,7 +84,7 @@ part string # Detailed Partition Information Partition Value: [1] Database: default -Table: test_table +Table: test_table_n5 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE 
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -104,24 +104,24 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)a.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)a.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -133,7 +133,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -153,24 +153,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)a.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)a.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -182,7 +182,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -202,24 +202,24 @@ Bucket Columns: [key, value]
Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, '1' FROM src a JOIN src b ON a.key = b.key AND a.value = b.value
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, '1' FROM src a JOIN src b ON a.key = b.key AND a.value = b.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE []
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE []
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -231,7 +231,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -251,24 +251,24 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)c.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)c.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -280,7 +280,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -300,24 +300,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)c.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)c.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -329,7 +329,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -349,24 +349,24 @@ Bucket Columns: [value]
Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src DISTRIBUTE BY key
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src DISTRIBUTE BY key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -378,7 +378,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -398,24 +398,24 @@ Bucket Columns: [key]
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src SORT BY key ASC
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src SORT BY key ASC
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -427,7 +427,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -447,24 +447,24 @@ Bucket Columns: []
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src SORT BY key DESC
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src SORT BY key DESC
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -476,7 +476,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -496,24 +496,24 @@ Bucket Columns: []
Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src CLUSTER BY key
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src CLUSTER BY key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -525,7 +525,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -545,24 +545,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src DISTRIBUTE BY key SORT BY value
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM src DISTRIBUTE BY key SORT BY value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -574,7 +574,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -594,24 +594,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value from (SELECT a.key, b.value FROM src a JOIN src b ON (a.key = b.key)) subq
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value from (SELECT a.key, b.value FROM src a JOIN src b ON (a.key = b.key)) subq
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -623,7 +623,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -643,24 +643,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT k, value FROM (SELECT a.key as k, b.value FROM src a JOIN src b ON (a.key = b.key)) subq
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT k, value FROM (SELECT a.key as k, b.value FROM src a JOIN src b ON (a.key = b.key)) subq
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -672,7 +672,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -692,24 +692,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, cnt from (SELECT key, count(*) as cnt FROM src GROUP BY key) subq
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, cnt from (SELECT key, count(*) as cnt FROM src GROUP BY key) subq
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -721,7 +721,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -741,24 +741,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT k, cnt FROM (SELECT key as k, count(*) as cnt FROM src GROUP BY key) subq
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT k, cnt FROM (SELECT key as k, count(*) as cnt FROM src GROUP BY key) subq
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -770,7 +770,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -790,24 +790,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -819,7 +819,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -839,24 +839,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value + 1 FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value + 1 FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -868,7 +868,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -888,24 +888,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key FROM src group by key) a lateral view explode(array(1, 2)) value as value
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key FROM src group by key) a lateral view explode(array(1, 2)) value as value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SCRIPT []
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SCRIPT []
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -917,7 +917,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -937,24 +937,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -966,7 +966,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -986,24 +986,24 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT value, key FROM (SELECT key, count(1) as value FROM src group by key) a
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT value, key FROM (SELECT key, count(1) as value FROM src group by key) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -1015,7 +1015,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -1035,24 +1035,24 @@ Bucket Columns: [value]
Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a distribute by key
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a distribute by key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -1064,7 +1064,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -1084,24 +1084,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a sort by key
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a sort by key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value EXPRESSION [(src)src.null, ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value EXPRESSION [(src)src.null, ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -1113,7 +1113,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -1133,24 +1133,24 @@ Bucket Columns: [key]
Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT TRANSFORM (a.key, a.value) USING 'cat' AS (key, value) FROM (SELECT key, count(1) AS value FROM src GROUP BY KEY) a
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT TRANSFORM (a.key, a.value) USING 'cat' AS (key, value) FROM (SELECT key, count(1) AS value FROM src GROUP BY KEY) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.null, ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.null, ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.null, ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.null, ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -1162,7 +1162,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -1182,24 +1182,24 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT concat(key, "a") AS key, value, count(*) FROM src GROUP BY concat(key, "a"), value) a
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
SELECT key, value FROM (SELECT concat(key, "a") AS key, value, count(*) FROM src GROUP BY concat(key, "a"), value) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n5@part=1
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n5 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n5
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n5
# col_name data_type comment
key string
value string
@@ -1211,7 +1211,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n5
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
index 09ab2d9fd2..5cc876eeaf 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
@@ -1,29 +1,29 @@
-PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: query: CREATE TABLE test_table_n11 (key STRING, value STRING) PARTITIONED BY (part STRING)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table
-POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@test_table_n11
+POSTHOOK: query: CREATE TABLE test_table_n11 (key STRING, value STRING) PARTITIONED BY (part STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n11
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n11 PARTITION (part = '1')
SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: Output: default@test_table_n11@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n11 PARTITION (part = '1')
SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n11@part=1
+POSTHOOK: Lineage: test_table_n11 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n11 PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n11 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n11
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n11 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n11
# col_name data_type comment
key string
value string
@@ -35,7 +35,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n11
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
@@ -55,27 +55,27 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+PREHOOK: query: INSERT OVERWRITE TABLE test_table_n11 PARTITION (part = '1')
SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table@part=1
+PREHOOK: Output: default@test_table_n11@part=1
Hive Runtime Error: Map local work exhausted memory
FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask
ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table_n11 PARTITION (part = '1')
SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table@part=1
-POSTHOOK: Lineage: test_table PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+POSTHOOK: Output: default@test_table_n11@part=1
+POSTHOOK: Lineage: test_table_n11 PARTITION(part=1).key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table_n11 PARTITION(part=1).value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESCRIBE FORMATTED test_table_n11 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_table
-POSTHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '1')
+PREHOOK: Input: default@test_table_n11
+POSTHOOK: query: DESCRIBE FORMATTED test_table_n11 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n11
# col_name data_type comment
key string
value string
@@ -87,7 +87,7 @@ part string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: test_table
+Table: test_table_n11
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
diff --git a/ql/src/test/results/clientpositive/infer_join_preds.q.out b/ql/src/test/results/clientpositive/infer_join_preds.q.out
index a35faf31eb..03c91da969 100644
--- a/ql/src/test/results/clientpositive/infer_join_preds.q.out
+++ b/ql/src/test/results/clientpositive/infer_join_preds.q.out
@@ -674,7 +674,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: CREATE TABLE `table1`(
+PREHOOK: query: CREATE TABLE `table1_n7`(
`idp_warehouse_id` bigint,
`idp_audit_id` bigint,
`idp_effective_date` date,
@@ -701,8 +701,8 @@ PREHOOK: query: CREATE TABLE `table1`(
`practsum` decimal(38,20))
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table1
-POSTHOOK: query: CREATE TABLE `table1`(
+PREHOOK: Output: default@table1_n7
+POSTHOOK: query: CREATE TABLE `table1_n7`(
`idp_warehouse_id` bigint,
`idp_audit_id` bigint,
`idp_effective_date` date,
@@ -729,8 +729,8 @@ POSTHOOK: query: CREATE TABLE `table1`(
`practsum` decimal(38,20))
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table1
-PREHOOK: query: CREATE TABLE `table2`(
+POSTHOOK: Output: default@table1_n7
+PREHOOK: query: CREATE TABLE `table2_n4`(
`idp_warehouse_id` bigint,
`idp_audit_id` bigint,
`idp_effective_date` date,
@@ -757,8 +757,8 @@ PREHOOK: query: CREATE TABLE `table2`(
`practsum` decimal(38,20))
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table2
-POSTHOOK: query: CREATE TABLE `table2`(
+PREHOOK: Output: default@table2_n4
+POSTHOOK: query: CREATE TABLE `table2_n4`(
`idp_warehouse_id` bigint,
`idp_audit_id` bigint,
`idp_effective_date` date,
@@ -785,16 +785,16 @@ POSTHOOK: query: CREATE TABLE `table2`(
`practsum` decimal(38,20))
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table2
+POSTHOOK: Output: default@table2_n4
PREHOOK: query: explain SELECT s.idp_warehouse_id AS source_warehouse_id
- FROM table1 s
+ FROM table1_n7 s
JOIN
- table2 d
+ table2_n4 d
ON
(
s.prid = d.prid
)
JOIN
- table2 e
+ table2_n4 e
ON
s.prid = e.prid
WHERE
@@ -949,14 +949,14 @@ PREHOOK: query: explain SELECT s.idp_warehouse_id AS source_warehouse_i
END
)
PREHOOK: type: QUERY
POSTHOOK: query: explain SELECT s.idp_warehouse_id AS source_warehouse_id
- FROM table1 s
+ FROM table1_n7 s
JOIN
- table2 d
+ table2_n4 d
ON
(
s.prid = d.prid
)
JOIN
- table2 e
+ table2_n4 e
ON
s.prid = e.prid
WHERE
@@ -1185,19 +1185,19 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: drop table table2
+PREHOOK: query: drop table table2_n4
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table2
-PREHOOK: Output: default@table2
-POSTHOOK: query: drop table table2
+PREHOOK: Input: default@table2_n4
+PREHOOK: Output: default@table2_n4
+POSTHOOK: query: drop table table2_n4
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table2
-POSTHOOK: Output: default@table2
-PREHOOK: query: drop table table1
+POSTHOOK: Input: default@table2_n4
+POSTHOOK: Output: default@table2_n4
+PREHOOK: query: drop table table1_n7
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@table1
-PREHOOK: Output: default@table1
-POSTHOOK: query: drop table table1
+PREHOOK: Input: default@table1_n7
+PREHOOK: Output: default@table1_n7
+POSTHOOK: query: drop table table1_n7
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@table1
-POSTHOOK: Output: default@table1
+POSTHOOK: Input: default@table1_n7
+POSTHOOK: Output: default@table1_n7
diff --git a/ql/src/test/results/clientpositive/innerjoin.q.out b/ql/src/test/results/clientpositive/innerjoin.q.out
index 55eadef086..779b0742d3 100644
--- a/ql/src/test/results/clientpositive/innerjoin.q.out
+++ b/ql/src/test/results/clientpositive/innerjoin.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest_j1_n20(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest_j1_n20
+POSTHOOK: query: CREATE TABLE dest_j1_n20(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_j1
+POSTHOOK: Output: default@dest_j1_n20
PREHOOK: query: EXPLAIN
FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+INSERT OVERWRITE TABLE dest_j1_n20 SELECT src1.key, src2.value
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+INSERT OVERWRITE TABLE dest_j1_n20 SELECT src1.key, src2.value
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -75,7 +75,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n20
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -100,7 +100,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n20
Stage: Stage-2
Stats Work
@@ -108,7 +108,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.dest_j1
+ Table: default.dest_j1_n20
Stage: Stage-3
Map Reduce
@@ -134,24 +134,24 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
PREHOOK: query: FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+INSERT OVERWRITE TABLE dest_j1_n20 SELECT src1.key, src2.value
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest_j1
+PREHOOK: Output: default@dest_j1_n20
POSTHOOK: query: FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+INSERT OVERWRITE TABLE dest_j1_n20 SELECT src1.key, src2.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest_j1
-POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest_j1.* FROM dest_j1
+POSTHOOK: Output: default@dest_j1_n20
+POSTHOOK: Lineage: dest_j1_n20.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n20.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest_j1_n20.* FROM dest_j1_n20
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_j1
+PREHOOK: Input: default@dest_j1_n20
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest_j1.* FROM dest_j1
+POSTHOOK: query: SELECT dest_j1_n20.* FROM dest_j1_n20
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_j1
+POSTHOOK: Input: default@dest_j1_n20
#### A masked pattern was here ####
0 val_0
0 val_0
@@ -1181,37 +1181,37 @@ POSTHOOK: Input: default@dest_j1
98 val_98
98 val_98
98 val_98
-PREHOOK: query: create table `inner`(i int)
+PREHOOK: query: create table `inner`(i_n2 int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@inner
-POSTHOOK: query: create table `inner`(i int)
+POSTHOOK: query: create table `inner`(i_n2 int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inner
-PREHOOK: query: select i from `inner`
+PREHOOK: query: select i_n2 from `inner`
PREHOOK: type: QUERY
PREHOOK: Input: default@inner
#### A masked pattern was here ####
-POSTHOOK: query: select i from `inner`
+POSTHOOK: query: select i_n2 from `inner`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@inner
#### A masked pattern was here ####
-PREHOOK: query: create table i(`inner` int)
+PREHOOK: query: create table i_n2(`inner` int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@i
-POSTHOOK: query: create table i(`inner` int)
+PREHOOK: Output: default@i_n2
+POSTHOOK: query: create table i_n2(`inner` int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@i
-PREHOOK: query: select `inner` from i
+POSTHOOK: Output: default@i_n2
+PREHOOK: query: select `inner` from i_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@i
+PREHOOK: Input: default@i_n2
#### A masked pattern was here ####
-POSTHOOK: query: select `inner` from i
+POSTHOOK: query: select `inner` from i_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@i
+POSTHOOK: Input: default@i_n2
#### A masked pattern was here ####
PREHOOK: query: explain
select * from (select * from src) `inner` left outer join src
on `inner`.key=src.key
diff --git a/ql/src/test/results/clientpositive/inoutdriver.q.out b/ql/src/test/results/clientpositive/inoutdriver.q.out
index b23a5c2d11..df6ba15557 100644
--- a/ql/src/test/results/clientpositive/inoutdriver.q.out
+++ b/ql/src/test/results/clientpositive/inoutdriver.q.out
@@ -1,17 +1,17 @@
-PREHOOK: query: create table test (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver'
+PREHOOK: query: create table test_n3 (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver'
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test
-POSTHOOK: query: create table test (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver'
+PREHOOK: Output: default@test_n3
+POSTHOOK: query: create table test_n3 (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver'
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test
-PREHOOK: query: desc extended test
+POSTHOOK: Output: default@test_n3
+PREHOOK: query: desc extended test_n3
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test
-POSTHOOK: query: desc extended test
+PREHOOK: Input: default@test_n3
+POSTHOOK: query: desc extended test_n3
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test
+POSTHOOK: Input: default@test_n3
a int
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/input11.q.out b/ql/src/test/results/clientpositive/input11.q.out
index 1e98764506..839360eb5b 100644
--- a/ql/src/test/results/clientpositive/input11.q.out
+++ b/ql/src/test/results/clientpositive/input11.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n127(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n127
+POSTHOOK: query: CREATE TABLE dest1_n127(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n127
PREHOOK: query: EXPLAIN
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100
+INSERT OVERWRITE TABLE dest1_n127 SELECT src.key, src.value WHERE src.key < 100
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100
+INSERT OVERWRITE TABLE dest1_n127 SELECT src.key, src.value WHERE src.key < 100
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -45,7 +45,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n127
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -90,7 +90,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n127
Stage: Stage-2
Stats Work
@@ -98,7 +98,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n127
Stage: Stage-3
Map Reduce
@@ -110,7 +110,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n127
Stage: Stage-5
Map Reduce
@@ -122,7 +122,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n127
Stage: Stage-6
Move Operator
@@ -131,24 +131,24 @@ STAGE PLANS:
#### A masked pattern was here ####
PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100
+INSERT OVERWRITE TABLE dest1_n127 SELECT src.key, src.value WHERE src.key < 100
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n127
POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100
+INSERT OVERWRITE TABLE dest1_n127 SELECT src.key, src.value WHERE src.key < 100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n127
+POSTHOOK: Lineage: dest1_n127.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n127.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n127.* FROM dest1_n127
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n127
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n127.* FROM dest1_n127
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n127
#### A masked pattern was here ####
86 val_86
27 val_27
diff --git a/ql/src/test/results/clientpositive/input11_limit.q.out b/ql/src/test/results/clientpositive/input11_limit.q.out
index e87f67e71e..537f538de2 100644
--- a/ql/src/test/results/clientpositive/input11_limit.q.out
+++ b/ql/src/test/results/clientpositive/input11_limit.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n128(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n128
+POSTHOOK: query: CREATE TABLE dest1_n128(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n128
PREHOOK: query: EXPLAIN
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest1_n128 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest1_n128 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -61,7 +61,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n128
Stage: Stage-0
Move Operator
@@ -71,31 +71,31 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n128
Stage: Stage-2
Stats Work
Basic Stats Work:
PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest1_n128 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n128
POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest1_n128 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n128
+POSTHOOK: Lineage: dest1_n128.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n128.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n128.* FROM dest1_n128
PREHOOK: type: QUERY
-PREHOOK: Input:
default@dest1 +PREHOOK: Input: default@dest1_n128 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n128.* FROM dest1_n128 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n128 #### A masked pattern was here #### 0 val_0 15 val_15 diff --git a/ql/src/test/results/clientpositive/input12.q.out b/ql/src/test/results/clientpositive/input12.q.out index 490e442149..69b3af9d7e 100644 --- a/ql/src/test/results/clientpositive/input12.q.out +++ b/ql/src/test/results/clientpositive/input12.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n102(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n102 +POSTHOOK: query: CREATE TABLE dest1_n102(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@dest1_n102 +PREHOOK: query: CREATE TABLE dest2_n14(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest2 -POSTHOOK: query: CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest2_n14 +POSTHOOK: query: CREATE TABLE dest2_n14(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest2 -PREHOOK: query: CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@dest2_n14 +PREHOOK: query: CREATE TABLE dest3_n4(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest3 -POSTHOOK: query: CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest3_n4 +POSTHOOK: query: CREATE TABLE dest3_n4(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest3 +POSTHOOK: Output: default@dest3_n4 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 +INSERT OVERWRITE TABLE dest1_n102 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n14 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n4 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 +INSERT OVERWRITE TABLE dest1_n102 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n14 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n4 
PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -71,7 +71,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n102 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -99,7 +99,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n14 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -129,7 +129,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 + name: default.dest3_n4 Select Operator expressions: _col0 (type: int) outputColumnNames: key @@ -177,7 +177,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n102 Stage: Stage-4 Stats Work @@ -185,7 +185,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n102 Stage: Stage-5 Map Reduce @@ -197,7 +197,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n102 Stage: Stage-7 Map Reduce @@ -209,7 +209,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n102 Stage: Stage-8 Move Operator @@ -225,7 +225,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n14 Stage: Stage-10 Stats Work @@ -233,7 +233,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest2 + Table: default.dest2_n14 Stage: Stage-11 Map Reduce @@ -264,7 +264,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: int - Table: default.dest3 + Table: default.dest3_n4 Stage: Stage-2 Move Operator @@ -277,7 +277,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 + name: default.dest3_n4 Stage: Stage-13 Map Reduce @@ -310,35 +310,35 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key 
WHERE src.key >= 200 +INSERT OVERWRITE TABLE dest1_n102 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n14 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n4 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 -PREHOOK: Output: default@dest3@ds=2008-04-08/hr=12 +PREHOOK: Output: default@dest1_n102 +PREHOOK: Output: default@dest2_n14 +PREHOOK: Output: default@dest3_n4@ds=2008-04-08/hr=12 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 +INSERT OVERWRITE TABLE dest1_n102 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n14 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n4 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Output: default@dest3@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest3 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n102 +POSTHOOK: Output: default@dest2_n14 +POSTHOOK: Output: default@dest3_n4@ds=2008-04-08/hr=12 +POSTHOOK: Lineage: dest1_n102.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n102.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n14.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n14.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest3_n4 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n102.* FROM dest1_n102 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n102 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n102.* FROM dest1_n102 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n102 #### A masked pattern was here #### 86 val_86 27 val_27 @@ -424,13 +424,13 @@ POSTHOOK: Input: default@dest1 37 val_37 90 val_90 97 val_97 -PREHOOK: query: SELECT dest2.* FROM dest2 +PREHOOK: query: SELECT dest2_n14.* FROM dest2_n14 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n14 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest2.* FROM dest2 +POSTHOOK: query: SELECT dest2_n14.* FROM dest2_n14 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n14 #### A masked 
pattern was here #### 165 val_165 193 val_193 @@ -537,15 +537,15 @@ POSTHOOK: Input: default@dest2 194 val_194 126 val_126 169 val_169 -PREHOOK: query: SELECT dest3.* FROM dest3 +PREHOOK: query: SELECT dest3_n4.* FROM dest3_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@dest3 -PREHOOK: Input: default@dest3@ds=2008-04-08/hr=12 +PREHOOK: Input: default@dest3_n4 +PREHOOK: Input: default@dest3_n4@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest3.* FROM dest3 +POSTHOOK: query: SELECT dest3_n4.* FROM dest3_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest3 -POSTHOOK: Input: default@dest3@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@dest3_n4 +POSTHOOK: Input: default@dest3_n4@ds=2008-04-08/hr=12 #### A masked pattern was here #### 238 2008-04-08 12 311 2008-04-08 12 diff --git a/ql/src/test/results/clientpositive/input13.q.out b/ql/src/test/results/clientpositive/input13.q.out index 74cae4f6da..5b067138ac 100644 --- a/ql/src/test/results/clientpositive/input13.q.out +++ b/ql/src/test/results/clientpositive/input13.q.out @@ -1,39 +1,39 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n67(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n67 +POSTHOOK: query: CREATE TABLE dest1_n67(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@dest1_n67 +PREHOOK: query: CREATE TABLE dest2_n8(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest2 -POSTHOOK: query: CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest2_n8 +POSTHOOK: query: CREATE TABLE dest2_n8(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest2 -PREHOOK: query: CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@dest2_n8 +PREHOOK: query: CREATE TABLE dest3_n1(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest3 -POSTHOOK: query: CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest3_n1 +POSTHOOK: query: CREATE TABLE dest3_n1(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest3 +POSTHOOK: Output: default@dest3_n1 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 +INSERT OVERWRITE TABLE dest1_n67 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n8 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n1 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 INSERT OVERWRITE DIRECTORY 
'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 -INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 -INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 +INSERT OVERWRITE TABLE dest1_n67 SELECT src.* WHERE src.key < 100 +INSERT OVERWRITE TABLE dest2_n8 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 +INSERT OVERWRITE TABLE dest3_n1 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -74,7 +74,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n67 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -102,7 +102,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n8 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -132,7 +132,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest3 + name: default.dest3_n1 Select Operator expressions: _col0 (type: int) outputColumnNames: key @@ -194,7 +194,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n67 Stage: Stage-5 Stats Work @@ -202,7 +202,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n67 Stage: Stage-6 Map Reduce @@ -214,7 +214,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n67 Stage: Stage-8 Map Reduce @@ -226,7 +226,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n67 Stage: Stage-9 Move Operator @@ -242,7 +242,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n8 Stage: Stage-11 Stats Work @@ -250,7 +250,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest2 + Table: default.dest2_n8 Stage: Stage-12 Map Reduce @@ -281,7 +281,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: int - Table: default.dest3 + Table: default.dest3_n1 
Stage: Stage-2
Move Operator
@@ -294,7 +294,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest3
+ name: default.dest3_n1
Stage: Stage-14
Map Reduce
@@ -333,39 +333,39 @@ STAGE PLANS:
destination: target/warehouse/dest4.out
PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
+INSERT OVERWRITE TABLE dest1_n67 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n8 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest3_n1 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
-PREHOOK: Output: default@dest3@ds=2008-04-08/hr=12
+PREHOOK: Output: default@dest1_n67
+PREHOOK: Output: default@dest2_n8
+PREHOOK: Output: default@dest3_n1@ds=2008-04-08/hr=12
PREHOOK: Output: target/warehouse/dest4.out
POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
+INSERT OVERWRITE TABLE dest1_n67 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n8 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest3_n1 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Output: default@dest3@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@dest1_n67
+POSTHOOK: Output: default@dest2_n8
+POSTHOOK: Output: default@dest3_n1@ds=2008-04-08/hr=12
POSTHOOK: Output: target/warehouse/dest4.out
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest3 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Lineage: dest1_n67.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n67.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n8.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n8.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest3_n1 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n67.* FROM dest1_n67
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n67
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n67.* FROM dest1_n67
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n67
#### A masked pattern was here ####
86 val_86
27 val_27
@@ -451,13 +451,13 @@ POSTHOOK: Input: default@dest1
37 val_37
90 val_90
97 val_97
-PREHOOK: query: SELECT dest2.* FROM dest2
+PREHOOK: query: SELECT dest2_n8.* FROM dest2_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n8
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest2.* FROM dest2
+POSTHOOK: query: SELECT dest2_n8.* FROM dest2_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n8
#### A masked pattern was here ####
165 val_165
193 val_193
@@ -564,15 +564,15 @@ POSTHOOK: Input: default@dest2
194 val_194
126 val_126
169 val_169
-PREHOOK: query: SELECT dest3.* FROM dest3
+PREHOOK: query: SELECT dest3_n1.* FROM dest3_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest3
-PREHOOK: Input: default@dest3@ds=2008-04-08/hr=12
+PREHOOK: Input: default@dest3_n1
+PREHOOK: Input: default@dest3_n1@ds=2008-04-08/hr=12
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest3.* FROM dest3
+POSTHOOK: query: SELECT dest3_n1.* FROM dest3_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest3
-POSTHOOK: Input: default@dest3@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@dest3_n1
+POSTHOOK: Input: default@dest3_n1@ds=2008-04-08/hr=12
#### A masked pattern was here ####
238 2008-04-08 12
255 2008-04-08 12
diff --git a/ql/src/test/results/clientpositive/input14.q.out b/ql/src/test/results/clientpositive/input14.q.out
index f5321f4e43..eff0549990 100644
--- a/ql/src/test/results/clientpositive/input14.q.out
+++ b/ql/src/test/results/clientpositive/input14.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n38(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n38
+POSTHOOK: query: CREATE TABLE dest1_n38(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n38
PREHOOK: query: EXPLAIN
FROM (
FROM src
@@ -13,7 +13,7 @@ FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
+INSERT OVERWRITE TABLE dest1_n38 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM (
@@ -22,7 +22,7 @@ FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
+INSERT OVERWRITE TABLE dest1_n38 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -69,7 +69,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n38
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -94,7 +94,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n38
Stage: Stage-2
Stats Work
@@ -102,7 +102,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n38
Stage: Stage-3
Map Reduce
@@ -133,29 +133,29 @@ PREHOOK: query: FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
+INSERT OVERWRITE TABLE dest1_n38 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n38
POSTHOOK: query: FROM (
FROM src
SELECT TRANSFORM(src.key, src.value)
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
+INSERT OVERWRITE TABLE dest1_n38 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n38
+POSTHOOK: Lineage: dest1_n38.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n38.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n38.* FROM dest1_n38
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n38
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n38.* FROM dest1_n38
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n38
#### A masked pattern was here ####
0 val_0
0 val_0
diff --git a/ql/src/test/results/clientpositive/input14_limit.q.out b/ql/src/test/results/clientpositive/input14_limit.q.out
index 2cfae98e41..c32443c78b 100644
--- a/ql/src/test/results/clientpositive/input14_limit.q.out
+++ b/ql/src/test/results/clientpositive/input14_limit.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n12(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n12
+POSTHOOK: query: CREATE TABLE dest1_n12(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n12
PREHOOK: query: EXPLAIN
FROM (
FROM src
@@ -13,7 +13,7 @@ FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey LIMIT 20
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
+INSERT OVERWRITE TABLE dest1_n12 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM (
@@ -22,7 +22,7 @@ FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey LIMIT 20
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
+INSERT OVERWRITE TABLE dest1_n12 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -104,7 +104,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n12
Stage: Stage-0
Move Operator
@@ -114,7 +114,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n12
Stage: Stage-3
Stats Work
@@ -126,29 +126,29 @@ PREHOOK: query: FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey LIMIT 20
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
+INSERT OVERWRITE TABLE dest1_n12 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n12
POSTHOOK: query: FROM (
FROM src
SELECT TRANSFORM(src.key, src.value)
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey LIMIT 20
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
+INSERT OVERWRITE TABLE dest1_n12 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n12
+POSTHOOK: Lineage: dest1_n12.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n12.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n12.* FROM dest1_n12
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n12
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n12.* FROM dest1_n12
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n12
#### A masked pattern was here ####
0 val_0
0 val_0
diff --git a/ql/src/test/results/clientpositive/input17.q.out b/ql/src/test/results/clientpositive/input17.q.out
index 731b81b799..3e4b9938ea 100644
--- a/ql/src/test/results/clientpositive/input17.q.out
+++ b/ql/src/test/results/clientpositive/input17.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n69(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n69
+POSTHOOK: query: CREATE TABLE dest1_n69(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n69
PREHOOK: query: EXPLAIN
FROM (
FROM src_thrift
@@ -13,7 +13,7 @@ FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue
+INSERT OVERWRITE TABLE dest1_n69 SELECT tmap.tkey, tmap.tvalue
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM (
@@ -22,7 +22,7 @@ FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue
+INSERT OVERWRITE TABLE dest1_n69 SELECT tmap.tkey, tmap.tvalue
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -66,7 +66,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n69
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -91,7 +91,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n69
Stage: Stage-2
Stats Work
@@ -99,7 +99,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n69
Stage: Stage-3
Map Reduce
@@ -130,29 +130,29 @@ PREHOOK: query: FROM (
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue
+INSERT OVERWRITE TABLE dest1_n69 SELECT tmap.tkey, tmap.tvalue
PREHOOK: type: QUERY
PREHOOK: Input: default@src_thrift
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n69
POSTHOOK: query: FROM (
FROM src_thrift
SELECT TRANSFORM(src_thrift.aint + src_thrift.lint[0], src_thrift.lintstring[0])
USING 'cat' AS (tkey, tvalue)
CLUSTER BY tkey
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue
+INSERT OVERWRITE TABLE dest1_n69 SELECT tmap.tkey, tmap.tvalue
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_thrift
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n69
+POSTHOOK: Lineage: dest1_n69.key SCRIPT [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
+POSTHOOK: Lineage: dest1_n69.value SCRIPT [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
+PREHOOK: query: SELECT dest1_n69.* FROM dest1_n69
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n69
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n69.* FROM dest1_n69
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n69
#### A masked pattern was here ####
-1461153966 {"myint":49,"mystring":"343","underscore_int":7}
-1952710705 {"myint":25,"mystring":"125","underscore_int":5}
diff --git a/ql/src/test/results/clientpositive/input18.q.out b/ql/src/test/results/clientpositive/input18.q.out
index 06a014f1e5..f28ccba6df 100644
--- a/ql/src/test/results/clientpositive/input18.q.out
+++ b/ql/src/test/results/clientpositive/input18.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n104(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n104
+POSTHOOK: query: CREATE TABLE dest1_n104(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n104
PREHOOK: query: EXPLAIN
FROM (
FROM src
@@ -13,7 +13,7 @@ FROM (
USING 'cat'
CLUSTER BY key
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100
+INSERT OVERWRITE TABLE dest1_n104 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM (
@@ -22,7 +22,7 @@ FROM (
USING 'cat'
CLUSTER BY key
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100
+INSERT OVERWRITE TABLE dest1_n104 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -69,7 +69,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n104
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -94,7 +94,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n104
Stage: Stage-2
Stats Work
@@ -102,7 +102,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n104
Stage: Stage-3
Map Reduce
@@ -133,29 +133,29 @@ PREHOOK: query: FROM (
USING 'cat'
CLUSTER BY key
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100
+INSERT OVERWRITE TABLE dest1_n104 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n104
POSTHOOK: query: FROM (
FROM src
SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
USING 'cat'
CLUSTER BY key
) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100
+INSERT OVERWRITE TABLE dest1_n104 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n104
+POSTHOOK: Lineage: dest1_n104.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n104.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n104.* FROM dest1_n104
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n104
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n104.* FROM dest1_n104
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n104
#### A masked pattern was here ####
0 val_0+3+7
0 val_0+3+7
diff --git a/ql/src/test/results/clientpositive/input1_limit.q.out b/ql/src/test/results/clientpositive/input1_limit.q.out
index df2c8ab5df..2af96e8e39 100644
--- a/ql/src/test/results/clientpositive/input1_limit.q.out
+++ b/ql/src/test/results/clientpositive/input1_limit.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n11(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n11
+POSTHOOK: query: CREATE TABLE dest1_n11(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@dest1_n11
+PREHOOK: query: CREATE TABLE dest2_n1(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest2
-POSTHOOK: query: CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest2_n1
+POSTHOOK: query: CREATE TABLE dest2_n1(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest2
+POSTHOOK: Output: default@dest2_n1
PREHOOK: query: EXPLAIN
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5
+INSERT OVERWRITE TABLE dest1_n11 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest2_n1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5
+INSERT OVERWRITE TABLE dest1_n11 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest2_n1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-2 is a root stage
@@ -91,7 +91,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n11
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -116,7 +116,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n11
Stage: Stage-3
Stats Work
@@ -124,7 +124,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n11
Stage: Stage-4
Map Reduce
@@ -155,7 +155,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.dest2
+ Table: default.dest2_n1
Stage: Stage-5
Map Reduce
@@ -185,7 +185,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest2
+ name: default.dest2_n1
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -210,7 +210,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest2
+ name: default.dest2_n1
Stage: Stage-7
Map Reduce
@@ -236,30 +236,30 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5
+INSERT OVERWRITE TABLE dest1_n11 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest2_n1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n11
+PREHOOK: Output: default@dest2_n1
POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5
+INSERT OVERWRITE TABLE dest1_n11 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest2_n1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n11
+POSTHOOK: Output: default@dest2_n1
+POSTHOOK: Lineage: dest1_n11.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n11.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n11.* FROM dest1_n11
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n11
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n11.* FROM dest1_n11
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n11
#### A masked pattern was here ####
0 val_0
15 val_15
@@ -271,13 +271,13 @@ POSTHOOK: Input: default@dest1
82 val_82
86 val_86
98 val_98
-PREHOOK: query: SELECT dest2.* FROM dest2
+PREHOOK: query: SELECT dest2_n1.* FROM dest2_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest2.* FROM dest2
+POSTHOOK: query: SELECT dest2_n1.* FROM dest2_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n1
#### A masked pattern was here ####
27 val_27
37 val_37
diff --git a/ql/src/test/results/clientpositive/input20.q.out b/ql/src/test/results/clientpositive/input20.q.out
index 06b2d86b73..78322250a1 100644
--- a/ql/src/test/results/clientpositive/input20.q.out
+++ b/ql/src/test/results/clientpositive/input20.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n114(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n114
+POSTHOOK: query: CREATE TABLE dest1_n114(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n114
PREHOOK: query: EXPLAIN
FROM (
FROM src
@@ -13,7 +13,7 @@ FROM (
USING 'cat'
DISTRIBUTE BY key, value
) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n114
REDUCE tmap.key, tmap.value
USING 'python input20_script.py'
AS key, value
@@ -25,7 +25,7 @@ FROM (
USING 'cat'
DISTRIBUTE BY key, value
) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n114
REDUCE tmap.key, tmap.value
USING 'python input20_script.py'
AS key, value
@@ -82,7 +82,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n114
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -107,7 +107,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n114
Stage: Stage-2
 Stats Work
@@ -115,7 +115,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n114
 Stage: Stage-3
 Map Reduce
@@ -146,35 +146,35 @@ PREHOOK: query: FROM (
 USING 'cat'
 DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n114
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n114
 POSTHOOK: query: FROM (
 FROM src
 MAP src.key, src.key
 USING 'cat'
 DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n114
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM dest1 ORDER BY key, value
+POSTHOOK: Output: default@dest1_n114
+POSTHOOK: Lineage: dest1_n114.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n114.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM dest1_n114 ORDER BY key, value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n114
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM dest1 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM dest1_n114 ORDER BY key, value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n114
 #### A masked pattern was here ####
 1	105_105
 1	10_10
diff --git a/ql/src/test/results/clientpositive/input24.q.out b/ql/src/test/results/clientpositive/input24.q.out
index 935fff4047..b30c1fb993 100644
--- a/ql/src/test/results/clientpositive/input24.q.out
+++ b/ql/src/test/results/clientpositive/input24.q.out
@@ -1,23 +1,23 @@
-PREHOOK: query: create table tst(a int, b int) partitioned by (d string)
+PREHOOK: query: create table tst_n1(a int, b int) partitioned by (d string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tst
-POSTHOOK: query: create table tst(a int, b int) partitioned by (d string)
+PREHOOK: Output: default@tst_n1
+POSTHOOK: query: create table tst_n1(a int, b int) partitioned by (d string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tst
-PREHOOK: query: alter table tst add partition (d='2009-01-01')
+POSTHOOK: Output: default@tst_n1
+PREHOOK: query: alter table tst_n1 add partition (d='2009-01-01')
 PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@tst
-POSTHOOK: query: alter table tst add partition (d='2009-01-01')
+PREHOOK: Output: default@tst_n1
+POSTHOOK: query: alter table tst_n1 add partition (d='2009-01-01')
 POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@tst
-POSTHOOK: Output: default@tst@d=2009-01-01
+POSTHOOK: Output: default@tst_n1
+POSTHOOK: Output: default@tst_n1@d=2009-01-01
 PREHOOK: query: explain
-select count(1) from tst x where x.d='2009-01-01'
+select count(1) from tst_n1 x where x.d='2009-01-01'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select count(1) from tst x where x.d='2009-01-01'
+select count(1) from tst_n1 x where x.d='2009-01-01'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -29,12 +29,12 @@ STAGE PLANS:
 Processor Tree:
 ListSink
-PREHOOK: query: select count(1) from tst x where x.d='2009-01-01'
+PREHOOK: query: select count(1) from tst_n1 x where x.d='2009-01-01'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tst
+PREHOOK: Input: default@tst_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from tst x where x.d='2009-01-01'
+POSTHOOK: query: select count(1) from tst_n1 x where x.d='2009-01-01'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tst
+POSTHOOK: Input: default@tst_n1
 #### A masked pattern was here ####
 0
diff --git a/ql/src/test/results/clientpositive/input28.q.out b/ql/src/test/results/clientpositive/input28.q.out
index 227fc65d14..39c07893e8 100644
--- a/ql/src/test/results/clientpositive/input28.q.out
+++ b/ql/src/test/results/clientpositive/input28.q.out
@@ -1,41 +1,41 @@
-PREHOOK: query: create table tst(a string, b string) partitioned by (d string)
+PREHOOK: query: create table tst_n0(a string, b string) partitioned by (d string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tst
-POSTHOOK: query: create table tst(a string, b string) partitioned by (d string)
+PREHOOK: Output: default@tst_n0
+POSTHOOK: query: create table tst_n0(a string, b string) partitioned by (d string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tst
-PREHOOK: query: alter table tst add partition (d='2009-01-01')
+POSTHOOK: Output: default@tst_n0
+PREHOOK: query: alter table tst_n0 add partition (d='2009-01-01')
 PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@tst
-POSTHOOK: query: alter table tst add partition (d='2009-01-01')
+PREHOOK: Output: default@tst_n0
+POSTHOOK: query: alter table tst_n0 add partition (d='2009-01-01')
 POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@tst
-POSTHOOK: Output: default@tst@d=2009-01-01
-PREHOOK: query: insert overwrite table tst partition(d='2009-01-01')
-select tst.a, src.value from tst join src ON (tst.a = src.key)
+POSTHOOK: Output: default@tst_n0
+POSTHOOK: Output: default@tst_n0@d=2009-01-01
+PREHOOK: query: insert overwrite table tst_n0 partition(d='2009-01-01')
+select tst_n0.a, src.value from tst_n0 join src ON (tst_n0.a = src.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Input: default@tst
-PREHOOK: Input: default@tst@d=2009-01-01
-PREHOOK: Output: default@tst@d=2009-01-01
-POSTHOOK: query: insert overwrite table tst partition(d='2009-01-01')
-select tst.a, src.value from tst join src ON (tst.a = src.key)
+PREHOOK: Input: default@tst_n0
+PREHOOK: Input: default@tst_n0@d=2009-01-01
+PREHOOK: Output: default@tst_n0@d=2009-01-01
+POSTHOOK: query: insert overwrite table tst_n0 partition(d='2009-01-01')
+select tst_n0.a, src.value from tst_n0 join src ON (tst_n0.a = src.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Input: default@tst
-POSTHOOK: Input: default@tst@d=2009-01-01
-POSTHOOK: Output: default@tst@d=2009-01-01
-POSTHOOK: Lineage: tst PARTITION(d=2009-01-01).a SIMPLE [(tst)tst.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: tst PARTITION(d=2009-01-01).b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from tst where tst.d='2009-01-01'
+POSTHOOK: Input: default@tst_n0
+POSTHOOK: Input: default@tst_n0@d=2009-01-01
+POSTHOOK: Output: default@tst_n0@d=2009-01-01
+POSTHOOK: Lineage: tst_n0 PARTITION(d=2009-01-01).a SIMPLE [(tst_n0)tst_n0.FieldSchema(name:a, type:string, comment:null), ]
+POSTHOOK: Lineage: tst_n0 PARTITION(d=2009-01-01).b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from tst_n0 where tst_n0.d='2009-01-01'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tst
-PREHOOK: Input: default@tst@d=2009-01-01
+PREHOOK: Input: default@tst_n0
+PREHOOK: Input: default@tst_n0@d=2009-01-01
 #### A masked pattern was here ####
-POSTHOOK: query: select * from tst where tst.d='2009-01-01'
+POSTHOOK: query: select * from tst_n0 where tst_n0.d='2009-01-01'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tst
-POSTHOOK: Input: default@tst@d=2009-01-01
+POSTHOOK: Input: default@tst_n0
+POSTHOOK: Input: default@tst_n0@d=2009-01-01
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/input33.q.out b/ql/src/test/results/clientpositive/input33.q.out
index a84b7dbcaa..6d793a4608 100644
--- a/ql/src/test/results/clientpositive/input33.q.out
+++ b/ql/src/test/results/clientpositive/input33.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n112(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n112
+POSTHOOK: query: CREATE TABLE dest1_n112(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n112
 PREHOOK: query: EXPLAIN
 FROM (
 FROM src
@@ -13,7 +13,7 @@ FROM (
 USING 'cat'
 DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n112
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS (key STRING, value STRING)
@@ -25,7 +25,7 @@ FROM (
 USING 'cat'
 DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n112
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS (key STRING, value STRING)
@@ -82,7 +82,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n112
 Select Operator
 expressions: _col0 (type: int), _col1 (type: string)
 outputColumnNames: key, value
@@ -107,7 +107,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n112
 Stage: Stage-2
 Stats Work
@@ -115,7 +115,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n112
 Stage: Stage-3
 Map Reduce
@@ -146,35 +146,35 @@ PREHOOK: query: FROM (
 USING 'cat'
 DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n112
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS (key STRING, value STRING)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n112
 POSTHOOK: query: FROM (
 FROM src
 MAP src.key, src.key
 USING 'cat'
 DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n112
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS (key STRING, value STRING)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM dest1 ORDER BY key, value
+POSTHOOK: Output: default@dest1_n112
+POSTHOOK: Lineage: dest1_n112.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n112.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM dest1_n112 ORDER BY key, value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n112
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM dest1 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM dest1_n112 ORDER BY key, value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n112
 #### A masked pattern was here ####
 1	105_105
 1	10_10
diff --git a/ql/src/test/results/clientpositive/input34.q.out b/ql/src/test/results/clientpositive/input34.q.out
index b6e0c054dd..264ffccd50 100644
--- a/ql/src/test/results/clientpositive/input34.q.out
+++ b/ql/src/test/results/clientpositive/input34.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n136(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n136
+POSTHOOK: query: CREATE TABLE dest1_n136(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n136
 PREHOOK: query: EXPLAIN
 FROM (
 FROM src
@@ -13,7 +13,7 @@ FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n136 SELECT tkey, tvalue
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -22,7 +22,7 @@ FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n136 SELECT tkey, tvalue
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -63,7 +63,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n136
 Select Operator
 expressions: _col0 (type: int), _col1 (type: string)
 outputColumnNames: key, value
@@ -108,7 +108,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n136
 Stage: Stage-2
 Stats Work
@@ -116,7 +116,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n136
 Stage: Stage-3
 Map Reduce
@@ -128,7 +128,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n136
 Stage: Stage-5
 Map Reduce
@@ -140,7 +140,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n136
 Stage: Stage-6
 Move Operator
@@ -154,29 +154,29 @@ PREHOOK: query: FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n136 SELECT tkey, tvalue
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n136
 POSTHOOK: query: FROM (
 FROM src
 SELECT TRANSFORM(src.key, src.value) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 USING 'cat' AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n136 SELECT tkey, tvalue
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n136
+POSTHOOK: Lineage: dest1_n136.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n136.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n136.* FROM dest1_n136
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n136
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n136.* FROM dest1_n136
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n136
 #### A masked pattern was here ####
 238	val_238
 86	val_86
diff --git a/ql/src/test/results/clientpositive/input35.q.out b/ql/src/test/results/clientpositive/input35.q.out
index 7e89fb477f..659def646a 100644
--- a/ql/src/test/results/clientpositive/input35.q.out
+++ b/ql/src/test/results/clientpositive/input35.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n22(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n22
+POSTHOOK: query: CREATE TABLE dest1_n22(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n22
 PREHOOK: query: EXPLAIN
 FROM (
 FROM src
@@ -13,7 +13,7 @@ FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n22 SELECT tkey, tvalue
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -22,7 +22,7 @@ FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n22 SELECT tkey, tvalue
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -63,7 +63,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n22
 Select Operator
 expressions: _col0 (type: int), _col1 (type: string)
 outputColumnNames: key, value
@@ -108,7 +108,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n22
 Stage: Stage-2
 Stats Work
@@ -116,7 +116,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n22
 Stage: Stage-3
 Map Reduce
@@ -128,7 +128,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n22
 Stage: Stage-5
 Map Reduce
@@ -140,7 +140,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n22
 Stage: Stage-6
 Move Operator
@@ -154,29 +154,29 @@ PREHOOK: query: FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n22 SELECT tkey, tvalue
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n22
 POSTHOOK: query: FROM (
 FROM src
 SELECT TRANSFORM(src.key, src.value) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
 USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n22 SELECT tkey, tvalue
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n22
+POSTHOOK: Lineage: dest1_n22.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n22.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n22.* FROM dest1_n22
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n22
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n22.* FROM dest1_n22
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n22
 #### A masked pattern was here ####
 238	val_238
 86	val_86
diff --git a/ql/src/test/results/clientpositive/input36.q.out b/ql/src/test/results/clientpositive/input36.q.out
index 64e34cc539..95daf03279 100644
--- a/ql/src/test/results/clientpositive/input36.q.out
+++ b/ql/src/test/results/clientpositive/input36.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n61(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n61
+POSTHOOK: query: CREATE TABLE dest1_n61(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n61
 PREHOOK: query: EXPLAIN
 FROM (
 FROM src
@@ -13,7 +13,7 @@ FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n61 SELECT tkey, tvalue
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -22,7 +22,7 @@ FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n61 SELECT tkey, tvalue
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -63,7 +63,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n61
 Select Operator
 expressions: _col0 (type: int), _col1 (type: string)
 outputColumnNames: key, value
@@ -108,7 +108,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n61
 Stage: Stage-2
 Stats Work
@@ -116,7 +116,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: int, string
- Table: default.dest1
+ Table: default.dest1_n61
 Stage: Stage-3
 Map Reduce
@@ -128,7 +128,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n61
 Stage: Stage-5
 Map Reduce
@@ -140,7 +140,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n61
 Stage: Stage-6
 Move Operator
@@ -154,29 +154,29 @@ PREHOOK: query: FROM (
 USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n61 SELECT tkey, tvalue
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n61
 POSTHOOK: query: FROM (
 FROM src
 SELECT TRANSFORM(src.key, src.value) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
 USING 'cat' AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue
+INSERT OVERWRITE TABLE dest1_n61 SELECT tkey, tvalue
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n61
+POSTHOOK: Lineage: dest1_n61.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n61.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n61.* FROM dest1_n61
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n61
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n61.* FROM dest1_n61
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n61
 #### A masked pattern was here ####
 NULL	NULL
 NULL	NULL
diff --git a/ql/src/test/results/clientpositive/input38.q.out b/ql/src/test/results/clientpositive/input38.q.out
index cd459078a2..bf598d3b41 100644
--- a/ql/src/test/results/clientpositive/input38.q.out
+++ b/ql/src/test/results/clientpositive/input38.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n76(key STRING, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n76
+POSTHOOK: query: CREATE TABLE dest1_n76(key STRING, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n76
 PREHOOK: query: EXPLAIN
 FROM (
 FROM src
 SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
 USING 'cat'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value
+INSERT OVERWRITE TABLE dest1_n76 SELECT tmap.key, tmap.value
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -20,7 +20,7 @@ FROM (
 SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
 USING 'cat'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value
+INSERT OVERWRITE TABLE dest1_n76 SELECT tmap.key, tmap.value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -57,7 +57,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n76
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -102,7 +102,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n76
 Stage: Stage-2
 Stats Work
@@ -110,7 +110,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.dest1
+ Table: default.dest1_n76
 Stage: Stage-3
 Map Reduce
@@ -122,7 +122,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n76
 Stage: Stage-5
 Map Reduce
@@ -134,7 +134,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n76
 Stage: Stage-6
 Move Operator
@@ -147,28 +147,28 @@ PREHOOK: query: FROM (
 SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
 USING 'cat'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value
+INSERT OVERWRITE TABLE dest1_n76 SELECT tmap.key, tmap.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n76
 POSTHOOK: query: FROM (
 FROM src
 SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
 USING 'cat'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value
+INSERT OVERWRITE TABLE dest1_n76 SELECT tmap.key, tmap.value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n76
+POSTHOOK: Lineage: dest1_n76.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n76.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n76.* FROM dest1_n76
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n76
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n76.* FROM dest1_n76
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n76
 #### A masked pattern was here ####
 238	val_238	3	7
 86	val_86	3	7
diff --git a/ql/src/test/results/clientpositive/input39.q.out b/ql/src/test/results/clientpositive/input39.q.out
index a6613d0b62..78405cb18f 100644
--- a/ql/src/test/results/clientpositive/input39.q.out
+++ b/ql/src/test/results/clientpositive/input39.q.out
@@ -1,60 +1,60 @@
-PREHOOK: query: create table t1(key string, value string) partitioned by (ds string)
+PREHOOK: query: create table t1_n42(key string, value string) partitioned by (ds string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(key string, value string) partitioned by (ds string)
+PREHOOK: Output: default@t1_n42
+POSTHOOK: query: create table t1_n42(key string, value string) partitioned by (ds string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table t2(key string, value string) partitioned by (ds string)
+POSTHOOK: Output: default@t1_n42
+PREHOOK: query: create table t2_n23(key string, value string) partitioned by (ds string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2(key string, value string) partitioned by (ds string)
+PREHOOK: Output: default@t2_n23
+POSTHOOK: query: create table t2_n23(key string, value string) partitioned by (ds string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-PREHOOK: query: insert overwrite table t1 partition (ds='1')
+POSTHOOK: Output: default@t2_n23
+PREHOOK: query: insert overwrite table t1_n42 partition (ds='1')
 select key, value from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@t1@ds=1
-POSTHOOK: query: insert overwrite table t1 partition (ds='1')
+PREHOOK: Output: default@t1_n42@ds=1
+POSTHOOK: query: insert overwrite table t1_n42 partition (ds='1')
 select key, value from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1@ds=1
-POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table t1 partition (ds='2')
+POSTHOOK: Output: default@t1_n42@ds=1
+POSTHOOK: Lineage: t1_n42 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n42 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table t1_n42 partition (ds='2')
 select key, value from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@t1@ds=2
-POSTHOOK: query: insert overwrite table t1 partition (ds='2')
+PREHOOK: Output: default@t1_n42@ds=2
+POSTHOOK: query: insert overwrite table t1_n42 partition (ds='2')
 select key, value from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1@ds=2
-POSTHOOK: Lineage: t1 PARTITION(ds=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table t2 partition (ds='1')
+POSTHOOK: Output: default@t1_n42@ds=2
+POSTHOOK: Lineage: t1_n42 PARTITION(ds=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n42 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table t2_n23 partition (ds='1')
 select key, value from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@ds=1
-POSTHOOK: query: insert overwrite table t2 partition (ds='1')
+PREHOOK: Output: default@t2_n23@ds=1
+POSTHOOK: query: insert overwrite table t2_n23 partition (ds='1')
 select key, value from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@ds=1
-POSTHOOK: Lineage: t2 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@t2_n23@ds=1
+POSTHOOK: Lineage: t2_n23 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2_n23 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
-select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1'
+select count(1) from t1_n42 join t2_n23 on t1_n42.key=t2_n23.key where t1_n42.ds='1' and t2_n23.ds='1'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1'
+select count(1) from t1_n42 join t2_n23 on t1_n42.key=t2_n23.key where t1_n42.ds='1' and t2_n23.ds='1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -66,7 +66,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: t1
+ alias: t1_n42
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
 predicate: (((hash(rand(460476415)) & 2147483647) % 32) = 0) (type: boolean)
@@ -84,7 +84,7 @@ STAGE PLANS:
 Map-reduce partition columns: _col0 (type: string)
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 TableScan
- alias: t2
+ alias: t2_n23
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
 predicate: (((hash(rand(460476415)) & 2147483647) % 32) = 0) (type: boolean)
@@ -150,19 +150,19 @@ STAGE PLANS:
 Processor Tree:
 ListSink
-PREHOOK: query: select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1'
+PREHOOK: query: select count(1) from t1_n42 join t2_n23 on t1_n42.key=t2_n23.key where t1_n42.ds='1' and t2_n23.ds='1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@ds=1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@ds=1
+PREHOOK: Input: default@t1_n42
+PREHOOK: Input: default@t1_n42@ds=1
+PREHOOK: Input: default@t2_n23
+PREHOOK: Input: default@t2_n23@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1'
+POSTHOOK: query: select count(1) from t1_n42 join t2_n23 on t1_n42.key=t2_n23.key where t1_n42.ds='1' and t2_n23.ds='1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@ds=1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@ds=1
+POSTHOOK: Input: default@t1_n42
+POSTHOOK: Input: default@t1_n42@ds=1
+POSTHOOK: Input: default@t2_n23
+POSTHOOK: Input: default@t2_n23@ds=1
 #### A masked pattern was here ####
 15
 mapreduce.framework.name=yarn
diff --git a/ql/src/test/results/clientpositive/input3_limit.q.out b/ql/src/test/results/clientpositive/input3_limit.q.out
index 9103a51c0d..5afd80bf25 100644
--- a/ql/src/test/results/clientpositive/input3_limit.q.out
+++ b/ql/src/test/results/clientpositive/input3_limit.q.out
@@ -1,40 +1,40 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n47(key STRING, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n47
+POSTHOOK: query: CREATE TABLE T1_n47(key STRING, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n47
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n47
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n47
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n47
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1
+POSTHOOK: Output: default@t1_n47
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1_n47
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n47
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1_n47
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, value STRING)
+POSTHOOK: Output: default@t1_n47
+PREHOOK: query: CREATE TABLE T2_n29(key STRING, value STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, value STRING)
+PREHOOK: Output: default@T2_n29
+POSTHOOK: query: CREATE TABLE T2_n29(key STRING, value STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
+POSTHOOK: Output: default@T2_n29
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
+INSERT OVERWRITE TABLE T2_n29 SELECT * FROM (SELECT * FROM T1_n47 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
+INSERT OVERWRITE TABLE T2_n29 SELECT * FROM (SELECT * FROM T1_n47 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -48,7 +48,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: t1
+ alias: t1_n47
 Statistics: Num rows: 1 Data size: 116030 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -96,7 +96,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t2
+ name: default.t2_n29
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -121,7 +121,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t2
+ name: default.t2_n29
 Stage: Stage-3
 Stats Work
@@ -129,7 +129,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.t2
+ Table: default.t2_n29
 Stage: Stage-4
 Map Reduce
@@ -154,23 +154,23 @@ STAGE PLANS:
 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
+PREHOOK: query: INSERT OVERWRITE TABLE T2_n29 SELECT * FROM (SELECT * FROM T1_n47 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t2
-POSTHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
+PREHOOK: Input: default@t1_n47
+PREHOOK: Output: default@t2_n29
+POSTHOOK: query: INSERT OVERWRITE TABLE T2_n29 SELECT * FROM (SELECT * FROM T1_n47 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t2.value SIMPLE [(t1)t1.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM T2 ORDER BY key, value
+POSTHOOK: Input: default@t1_n47
+POSTHOOK: Output: default@t2_n29
+POSTHOOK: Lineage: t2_n29.key SIMPLE [(t1_n47)t1_n47.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2_n29.value SIMPLE [(t1_n47)t1_n47.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM T2_n29 ORDER BY key, value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2_n29
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM T2 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM T2_n29 ORDER BY key, value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2_n29
 #### A masked pattern was here ####
 0	val_0
 0	val_0
diff --git a/ql/src/test/results/clientpositive/input4.q.out b/ql/src/test/results/clientpositive/input4.q.out
index ae253e993f..8aa83da759 100644
--- a/ql/src/test/results/clientpositive/input4.q.out
+++ b/ql/src/test/results/clientpositive/input4.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE INPUT4_n0(KEY STRING, VALUE STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@INPUT4
-POSTHOOK: query: CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@INPUT4_n0
+POSTHOOK: query: CREATE TABLE INPUT4_n0(KEY STRING, VALUE STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@INPUT4
+POSTHOOK: Output: default@INPUT4_n0
 PREHOOK: query: EXPLAIN
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4_n0
 PREHOOK: type: LOAD
 POSTHOOK: query: EXPLAIN
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4_n0
 POSTHOOK: type: LOAD
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -25,34 +25,34 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.input4
+ name: default.input4_n0
 Stage: Stage-1
 Stats Work
 Basic Stats Work:
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@input4
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
+PREHOOK: Output: default@input4_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@input4
+POSTHOOK: Output: default@input4_n0
 PREHOOK: query: EXPLAIN FORMATTED
-SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4_n0 AS Input4Alias
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN FORMATTED
-SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4_n0 AS Input4Alias
 POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","columns:":["value","key"],"database:":"default","Statistics:":"Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE","table:":"input4","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","columnExprMap:":{"_col0":"value","_col1":"key"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"ListSink":{"OperatorId:":"LIST_SINK_3"}}}}}}}}}}
-PREHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+{"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","columns:":["value","key"],"database:":"default","Statistics:":"Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE","table:":"input4_n0","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","columnExprMap:":{"_col0":"value","_col1":"key"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"ListSink":{"OperatorId:":"LIST_SINK_3"}}}}}}}}}}
+PREHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4_n0 AS Input4Alias
 PREHOOK: type: QUERY
-PREHOOK: Input: default@input4
+PREHOOK: Input: default@input4_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+POSTHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4_n0 AS Input4Alias
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@input4
+POSTHOOK: Input: default@input4_n0
 #### A masked pattern was here ####
 val_238	238
 val_86	86
diff --git a/ql/src/test/results/clientpositive/input44.q.out b/ql/src/test/results/clientpositive/input44.q.out
index ecd116626d..c8dca7b1d4 100644
--- a/ql/src/test/results/clientpositive/input44.q.out
+++ b/ql/src/test/results/clientpositive/input44.q.out
@@ -1,21 +1,21 @@
-PREHOOK: query: CREATE TABLE dest(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest_n0(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest
-POSTHOOK: query: CREATE TABLE dest(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest_n0
+POSTHOOK: query: CREATE TABLE dest_n0(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest
-PREHOOK: query: INSERT OVERWRITE TABLE dest SELECT src.* FROM src
+POSTHOOK: Output: default@dest_n0
+PREHOOK: query: INSERT OVERWRITE TABLE dest_n0 SELECT src.* FROM src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest
-POSTHOOK: query: INSERT OVERWRITE TABLE dest SELECT src.* FROM src
+PREHOOK: Output: default@dest_n0
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_n0 SELECT src.* FROM src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest
-POSTHOOK: Lineage: dest.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@dest_n0
+POSTHOOK: Lineage: dest_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 238val_238
 86val_86
 311val_311
diff --git a/ql/src/test/results/clientpositive/input5.q.out b/ql/src/test/results/clientpositive/input5.q.out
index e57fd16720..772c6ce1fa 100644
--- a/ql/src/test/results/clientpositive/input5.q.out
+++ b/ql/src/test/results/clientpositive/input5.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n79(key STRING, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n79
+POSTHOOK: query: CREATE TABLE dest1_n79(key STRING, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n79
 PREHOOK: query: EXPLAIN
 FROM (
 FROM src_thrift
@@ -13,7 +13,7 @@ FROM (
 USING 'cat' AS (tkey, tvalue)
 CLUSTER BY tkey
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue
+INSERT OVERWRITE TABLE dest1_n79 SELECT tmap.tkey, tmap.tvalue
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -22,7 +22,7 @@ FROM (
 USING 'cat' AS (tkey, tvalue)
 CLUSTER BY tkey
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue
+INSERT OVERWRITE TABLE dest1_n79 SELECT tmap.tkey, tmap.tvalue
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -66,7 +66,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n79
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -91,7 +91,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n79
 Stage: Stage-2
 Stats Work
@@ -99,7 +99,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.dest1
+ Table: default.dest1_n79
 Stage: Stage-3
 Map Reduce
@@ -130,29 +130,29 @@ PREHOOK: query: FROM (
 USING 'cat' AS (tkey, tvalue)
 CLUSTER BY tkey
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue
+INSERT OVERWRITE TABLE dest1_n79 SELECT tmap.tkey, tmap.tvalue
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_thrift
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n79
 POSTHOOK: query: FROM (
 FROM src_thrift
 SELECT TRANSFORM(src_thrift.lint, src_thrift.lintstring)
 USING 'cat' AS (tkey, tvalue)
 CLUSTER BY tkey
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue
+INSERT OVERWRITE TABLE dest1_n79 SELECT tmap.tkey, tmap.tvalue
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_thrift
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n79
+POSTHOOK: Lineage: dest1_n79.key SCRIPT [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
+POSTHOOK: Lineage: dest1_n79.value SCRIPT [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
+PREHOOK: query: SELECT dest1_n79.* FROM dest1_n79
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n79
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n79.* FROM dest1_n79
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n79
 #### A masked pattern was here ####
 NULL	NULL
 [0,0,0]	[{"myint":0,"mystring":"0","underscore_int":0}]
diff --git a/ql/src/test/results/clientpositive/input6.q.out b/ql/src/test/results/clientpositive/input6.q.out
index f8183cdea6..8395bac6a8 100644
--- a/ql/src/test/results/clientpositive/input6.q.out
+++ b/ql/src/test/results/clientpositive/input6.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n31(key STRING, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n31
+POSTHOOK: query: CREATE TABLE dest1_n31(key STRING, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n31
 PREHOOK: query: EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null
+INSERT OVERWRITE TABLE dest1_n31 SELECT src1.key, src1.value WHERE src1.key is null
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null
+INSERT OVERWRITE TABLE dest1_n31 SELECT src1.key, src1.value WHERE src1.key is null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -45,7 +45,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n31
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -90,7 +90,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n31
 Stage: Stage-2
 Stats Work
@@ -98,7 +98,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.dest1
+ Table: default.dest1_n31
 Stage: Stage-3
 Map Reduce
@@ -110,7 +110,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n31
 Stage: Stage-5
 Map Reduce
@@ -122,7 +122,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n31
 Stage: Stage-6
 Move Operator
@@ -131,22 +131,22 @@ STAGE PLANS:
 #### A masked pattern was here ####
 PREHOOK: query: FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null
+INSERT OVERWRITE TABLE dest1_n31 SELECT src1.key, src1.value WHERE src1.key is null
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n31
 POSTHOOK: query: FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null
+INSERT OVERWRITE TABLE dest1_n31 SELECT src1.key, src1.value WHERE src1.key is null
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SIMPLE []
-POSTHOOK: Lineage: dest1.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n31
+POSTHOOK: Lineage: dest1_n31.key SIMPLE []
+POSTHOOK: Lineage: dest1_n31.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n31.* FROM dest1_n31
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n31
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n31.* FROM dest1_n31
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n31
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/input7.q.out b/ql/src/test/results/clientpositive/input7.q.out
index 0b7279a664..b31ff12b14 100644
--- a/ql/src/test/results/clientpositive/input7.q.out
+++ b/ql/src/test/results/clientpositive/input7.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 INT) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n141(c1 DOUBLE, c2 INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n141
+POSTHOOK: query: CREATE TABLE dest1_n141(c1 DOUBLE, c2 INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n141
 PREHOOK: query: EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key
+INSERT OVERWRITE TABLE dest1_n141 SELECT NULL, src1.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key
+INSERT OVERWRITE TABLE dest1_n141 SELECT NULL, src1.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -42,7 +42,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n141
 Select Operator
 expressions: _col0 (type: double), _col1 (type: int)
 outputColumnNames: c1, c2
@@ -87,7 +87,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n141
 Stage: Stage-2
 Stats Work
@@ -95,7 +95,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: c1, c2
 Column Types: double, int
- Table: default.dest1
+ Table: default.dest1_n141
 Stage: Stage-3
 Map Reduce
@@ -107,7 +107,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n141
 Stage: Stage-5
 Map Reduce
@@ -119,7 +119,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n141
 Stage: Stage-6
 Move Operator
@@ -128,24 +128,24 @@ STAGE PLANS:
 #### A masked pattern was here ####
 PREHOOK: query: FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key
+INSERT OVERWRITE TABLE dest1_n141 SELECT NULL, src1.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n141
 POSTHOOK: query: FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key
+INSERT OVERWRITE TABLE dest1_n141 SELECT NULL, src1.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n141
+POSTHOOK: Lineage: dest1_n141.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1_n141.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n141.* FROM dest1_n141
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n141
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n141.* FROM dest1_n141
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n141
 #### A masked pattern was here ####
 NULL	238
 NULL	NULL
diff --git a/ql/src/test/results/clientpositive/input8.q.out b/ql/src/test/results/clientpositive/input8.q.out
index 310195863b..fe30913f5b 100644
--- a/ql/src/test/results/clientpositive/input8.q.out
+++ b/ql/src/test/results/clientpositive/input8.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n25(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n25
+POSTHOOK: query: CREATE TABLE dest1_n25(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n25
 PREHOOK: query: EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
+INSERT OVERWRITE TABLE dest1_n25 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
+INSERT OVERWRITE TABLE dest1_n25 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -42,7 +42,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n25
 Select Operator
 expressions: _col0 (type: string), _col1 (type: int), _col2 (type: double)
 outputColumnNames: c1, c2, c3
@@ -87,7 +87,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n25
 Stage: Stage-2
 Stats Work
@@ -95,7 +95,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: c1, c2, c3
 Column Types: string, int, double
- Table: default.dest1
+ Table: default.dest1_n25
 Stage: Stage-3
 Map Reduce
@@ -107,7 +107,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n25
 Stage: Stage-5
 Map Reduce
@@ -119,7 +119,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n25
 Stage: Stage-6
 Move Operator
@@ -128,25 +128,25 @@ STAGE PLANS:
 #### A masked pattern was here ####
 PREHOOK: query: FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
+INSERT OVERWRITE TABLE dest1_n25 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n25
 POSTHOOK: query: FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
+INSERT OVERWRITE TABLE dest1_n25 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n25
+POSTHOOK: Lineage: dest1_n25.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1_n25.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n25.c3 EXPRESSION []
+PREHOOK: query: SELECT dest1_n25.* FROM dest1_n25
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n25
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n25.* FROM dest1_n25
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n25
 #### A masked pattern was here ####
 NULL	NULL	NULL
 NULL	NULL	NULL
diff --git a/ql/src/test/results/clientpositive/input9.q.out b/ql/src/test/results/clientpositive/input9.q.out
index 32ead4f837..60cbbbb7ca 100644
--- a/ql/src/test/results/clientpositive/input9.q.out
+++ b/ql/src/test/results/clientpositive/input9.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(value STRING, key INT) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n134(value STRING, key INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(value STRING, key INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n134
+POSTHOOK: query: CREATE TABLE dest1_n134(value STRING, key INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n134
 PREHOOK: query: EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key where NULL = NULL
+INSERT OVERWRITE TABLE dest1_n134 SELECT NULL, src1.key where NULL = NULL
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key where NULL = NULL
+INSERT OVERWRITE TABLE dest1_n134 SELECT NULL, src1.key where NULL = NULL
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -45,7 +45,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n134
 Select Operator
 expressions: _col0 (type: string), _col1 (type: int)
 outputColumnNames: value, key
@@ -90,7 +90,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n134
 Stage: Stage-2
 Stats Work
@@ -98,7 +98,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: value, key
 Column Types: string, int
- Table: default.dest1
+ Table: default.dest1_n134
 Stage: Stage-3
 Map Reduce
@@ -110,7 +110,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n134
 Stage: Stage-5
 Map Reduce
@@ -122,7 +122,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n134
 Stage: Stage-6
 Move Operator
@@ -131,22 +131,22 @@ STAGE PLANS:
 #### A masked pattern was here ####
 PREHOOK: query: FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key where NULL = NULL
+INSERT OVERWRITE TABLE dest1_n134 SELECT NULL, src1.key where NULL = NULL
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n134
 POSTHOOK: query: FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key where NULL = NULL
+INSERT OVERWRITE TABLE dest1_n134 SELECT NULL, src1.key where NULL = NULL
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION []
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n134
+POSTHOOK: Lineage:
dest1_n134.key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n134.value EXPRESSION [] +PREHOOK: query: SELECT dest1_n134.* FROM dest1_n134 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n134 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n134.* FROM dest1_n134 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n134 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/input_dynamicserde.q.out b/ql/src/test/results/clientpositive/input_dynamicserde.q.out index 694b99d1cc..cc5bcc59d0 100644 --- a/ql/src/test/results/clientpositive/input_dynamicserde.q.out +++ b/ql/src/test/results/clientpositive/input_dynamicserde.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string) +PREHOOK: query: CREATE TABLE dest1_n96(a array<int>, b array<string>, c map<string,string>, d int, e string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' COLLECTION ITEMS TERMINATED BY '2' @@ -7,8 +7,8 @@ LINES TERMINATED BY '10' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string) +PREHOOK: Output: default@dest1_n96 +POSTHOOK: query: CREATE TABLE dest1_n96(a array<int>, b array<string>, c map<string,string>, d int, e string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' COLLECTION ITEMS TERMINATED BY '2' @@ -17,14 +17,14 @@ LINES TERMINATED BY '10' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n96 PREHOOK: query: EXPLAIN FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring +INSERT OVERWRITE TABLE dest1_n96 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring +INSERT OVERWRITE TABLE dest1_n96 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -54,7 +54,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n96 Execution mode: vectorized Stage: Stage-7 @@ -74,7 +74,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n96 Stage: Stage-2 Stats Work @@ -90,7 +90,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n96 Stage: Stage-5 Map Reduce @@ -102,7 +102,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n96 Stage: Stage-6 Move Operator @@ -111,27 +111,27 @@ STAGE PLANS: #### A masked pattern was here #### PREHOOK: query: FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring +INSERT OVERWRITE TABLE dest1_n96 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n96 POSTHOOK: query: FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring +INSERT OVERWRITE TABLE dest1_n96 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.b SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lstring, type:array, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.c SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.d SIMPLE [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.e SIMPLE [(src_thrift)src_thrift.FieldSchema(name:astring, type:string, comment:from deserializer), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n96 +POSTHOOK: Lineage: dest1_n96.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n96.b SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lstring, type:array, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n96.c SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n96.d SIMPLE [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n96.e SIMPLE [(src_thrift)src_thrift.FieldSchema(name:astring, type:string, comment:from deserializer), ] +PREHOOK: query: SELECT dest1_n96.* FROM dest1_n96 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n96 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n96.* FROM dest1_n96 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n96 #### A masked pattern was here #### [0,0,0] ["0","0","0"] {"key_0":"value_0"} 1712634731 record_0 [1,2,3] ["10","100","1000"] {"key_1":"value_1"} 465985200 record_1 @@ -144,13 +144,13 @@ POSTHOOK: Input: default@dest1 [8,16,24] ["80","800","8000"] {"key_8":"value_8"} 1638581578 record_8 [9,18,27] ["90","900","9000"] {"key_9":"value_9"} 336964413 record_9 NULL NULL NULL 0 NULL -PREHOOK: query: SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 +PREHOOK: query: SELECT dest1_n96.a[0], dest1_n96.b[0], dest1_n96.c['key2'], dest1_n96.d, dest1_n96.e FROM dest1_n96 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n96 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM 
dest1 +POSTHOOK: query: SELECT dest1_n96.a[0], dest1_n96.b[0], dest1_n96.c['key2'], dest1_n96.d, dest1_n96.e FROM dest1_n96 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n96 #### A masked pattern was here #### 0 0 NULL 1712634731 record_0 1 10 NULL 465985200 record_1 diff --git a/ql/src/test/results/clientpositive/input_lazyserde.q.out b/ql/src/test/results/clientpositive/input_lazyserde.q.out index d230abfca8..2d4f29f1fd 100644 --- a/ql/src/test/results/clientpositive/input_lazyserde.q.out +++ b/ql/src/test/results/clientpositive/input_lazyserde.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n39 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE dest1 +POSTHOOK: query: DROP TABLE dest1_n39 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string) +PREHOOK: query: CREATE TABLE dest1_n39(a array<int>, b array<string>, c map<string,string>, d int, e string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' COLLECTION ITEMS TERMINATED BY '2' @@ -11,8 +11,8 @@ LINES TERMINATED BY '10' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string) +PREHOOK: Output: default@dest1_n39 +POSTHOOK: query: CREATE TABLE dest1_n39(a array<int>, b array<string>, c map<string,string>, d int, e string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' COLLECTION ITEMS TERMINATED BY '2' @@ -21,14 +21,14 @@ LINES TERMINATED BY '10' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n39 PREHOOK: query: EXPLAIN FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 +INSERT OVERWRITE TABLE dest1_n39 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 +INSERT OVERWRITE TABLE dest1_n39 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -63,7 +63,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n39 Stage: Stage-0 Move Operator @@ -73,34 +73,34 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n39 Stage: Stage-2 Stats Work Basic Stats Work: PREHOOK: query: FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 +INSERT OVERWRITE TABLE dest1_n39 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n39 POSTHOOK: 
query: FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 +INSERT OVERWRITE TABLE dest1_n39 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.b SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lstring, type:array<string>, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.c SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.d SIMPLE [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.e SIMPLE [(src_thrift)src_thrift.FieldSchema(name:astring, type:string, comment:from deserializer), ] -PREHOOK: query: SELECT dest1.* FROM dest1 CLUSTER BY 1 +POSTHOOK: Output: default@dest1_n39 +POSTHOOK: Lineage: dest1_n39.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n39.b SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lstring, type:array<string>, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n39.c SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n39.d SIMPLE [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n39.e SIMPLE [(src_thrift)src_thrift.FieldSchema(name:astring, type:string, comment:from deserializer), ] +PREHOOK: query: SELECT dest1_n39.* FROM dest1_n39 CLUSTER BY 1 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n39 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 CLUSTER BY 1 +POSTHOOK: query: SELECT dest1_n39.* FROM dest1_n39 CLUSTER BY 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n39 #### A masked pattern was here #### NULL NULL NULL 0 NULL [0,0,0] ["0","0","0"] {"key_0":"value_0"} 1712634731 record_0 @@ -113,13 +113,13 @@ NULL NULL NULL 0 NULL [7,14,21] ["70","700","7000"] {"key_7":"value_7"} -1461153973 record_7 [8,16,24] ["80","800","8000"] {"key_8":"value_8"} 1638581578 record_8 [9,18,27] ["90","900","9000"] {"key_9":"value_9"} 336964413 record_9 -PREHOOK: query: SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 CLUSTER BY 1 +PREHOOK: query: SELECT dest1_n39.a[0], dest1_n39.b[0], dest1_n39.c['key2'], dest1_n39.d, dest1_n39.e FROM dest1_n39 CLUSTER BY 1 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n39 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 CLUSTER BY 1 +POSTHOOK: query: SELECT dest1_n39.a[0], dest1_n39.b[0], dest1_n39.c['key2'], dest1_n39.d, dest1_n39.e FROM dest1_n39 CLUSTER BY 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n39 #### A masked pattern was here #### 0 0 NULL 1712634731 record_0 1 10 NULL 465985200 record_1 @@ -132,38 +132,38 @@ POSTHOOK: Input: default@dest1 8 80 NULL 1638581578 record_8 9 90 NULL 336964413 record_9 NULL NULL NULL 0 NULL -PREHOOK: query: DROP TABLE 
dest1_n39 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n39 +PREHOOK: Output: default@dest1_n39 +POSTHOOK: query: DROP TABLE dest1_n39 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' +POSTHOOK: Input: default@dest1_n39 +POSTHOOK: Output: default@dest1_n39 +PREHOOK: query: CREATE TABLE dest1_n39(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' +PREHOOK: Output: default@dest1_n39 +POSTHOOK: query: CREATE TABLE dest1_n39(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1 +POSTHOOK: Output: default@dest1_n39 +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n39 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1 PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1 +PREHOOK: Output: default@dest1_n39 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n39 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] -PREHOOK: query: SELECT * from dest1 +POSTHOOK: Output: default@dest1_n39 +POSTHOOK: Lineage: dest1_n39.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] +PREHOOK: query: SELECT * from dest1_n39 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n39 #### A masked pattern was here #### -POSTHOOK: query: SELECT * from dest1 +POSTHOOK: query: SELECT * from dest1_n39 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n39 #### A masked pattern was here #### NULL [0,0,0] @@ -176,38 +176,38 @@ NULL [7,14,21] [8,16,24] [9,18,27] -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n39 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n39 +PREHOOK: Output: default@dest1_n39 +POSTHOOK: query: DROP TABLE dest1_n39 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' +POSTHOOK: Input: default@dest1_n39 +POSTHOOK: Output: default@dest1_n39 +PREHOOK: query: CREATE TABLE dest1_n39(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' POSTHOOK: type: 
CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1 +POSTHOOK: Output: default@dest1_n39 +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n39 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1 PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1 +PREHOOK: Output: default@dest1_n39 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n39 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ] -PREHOOK: query: SELECT * from dest1 +POSTHOOK: Output: default@dest1_n39 +POSTHOOK: Lineage: dest1_n39.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ] +PREHOOK: query: SELECT * from dest1_n39 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n39 #### A masked pattern was here #### -POSTHOOK: query: SELECT * from dest1 +POSTHOOK: query: SELECT * from dest1_n39 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n39 #### A masked pattern was here #### NULL {"key_0":"value_0"} @@ -753,40 +753,40 @@ POSTHOOK: query: DROP TABLE destBin POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@destbin POSTHOOK: Output: default@destbin -PREHOOK: query: DROP TABLE dest2 +PREHOOK: query: DROP TABLE dest2_n3 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE dest2 +POSTHOOK: query: DROP TABLE dest2_n3 POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE dest3 PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE dest3 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE dest2 (a map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>>>>) +PREHOOK: query: CREATE TABLE dest2_n3 (a map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>>>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest2 -POSTHOOK: query: CREATE TABLE dest2 (a map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>>>>) +PREHOOK: Output: default@dest2_n3 +POSTHOOK: query: CREATE TABLE dest2_n3 (a map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>>>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest2 -PREHOOK: query: INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift +POSTHOOK: Output: default@dest2_n3 +PREHOOK: query: INSERT OVERWRITE TABLE dest2_n3 SELECT src_thrift.attributes FROM src_thrift PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest2 -POSTHOOK: query: INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift +PREHOOK: Output: default@dest2_n3 +POSTHOOK: query: INSERT OVERWRITE TABLE dest2_n3 SELECT src_thrift.attributes FROM src_thrift POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest2.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:attributes, 
type:map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>>>>, comment:from deserializer), ] -PREHOOK: query: SELECT a from dest2 limit 10 +POSTHOOK: Output: default@dest2_n3 +POSTHOOK: Lineage: dest2_n3.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:attributes, type:map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>>>>, comment:from deserializer), ] +PREHOOK: query: SELECT a from dest2_n3 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT a from dest2 limit 10 +POSTHOOK: query: SELECT a from dest2_n3 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n3 #### A masked pattern was here #### {"key_0":{"erVal0":{"value_0":{3:1.0}}}} {"key_1":{"erVal1":{"value_1":{3:1.0}}}} diff --git a/ql/src/test/results/clientpositive/input_lazyserde2.q.out b/ql/src/test/results/clientpositive/input_lazyserde2.q.out index d136b56130..a14a24e159 100644 --- a/ql/src/test/results/clientpositive/input_lazyserde2.q.out +++ b/ql/src/test/results/clientpositive/input_lazyserde2.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n124 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE dest1 +POSTHOOK: query: DROP TABLE dest1_n124 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string) +PREHOOK: query: CREATE TABLE dest1_n124(a array<int>, b array<string>, c map<string,string>, d int, e string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' COLLECTION ITEMS TERMINATED BY '2' @@ -11,8 +11,8 @@ LINES TERMINATED BY '10' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string) +PREHOOK: Output: default@dest1_n124 +POSTHOOK: query: CREATE TABLE dest1_n124(a array<int>, b array<string>, c map<string,string>, d int, e string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' COLLECTION ITEMS TERMINATED BY '2' @@ -21,14 +21,14 @@ LINES TERMINATED BY '10' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n124 PREHOOK: query: EXPLAIN FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 +INSERT OVERWRITE TABLE dest1_n124 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 +INSERT OVERWRITE TABLE dest1_n124 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -63,7 +63,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n124 Stage: Stage-0 Move Operator @@ -73,34 +73,34 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n124 Stage: Stage-2 Stats Work Basic Stats Work: PREHOOK: query: FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, 
src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 +INSERT OVERWRITE TABLE dest1_n124 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n124 POSTHOOK: query: FROM src_thrift -INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 +INSERT OVERWRITE TABLE dest1_n124 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.b SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lstring, type:array<string>, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.c SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.d SIMPLE [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), ] -POSTHOOK: Lineage: dest1.e SIMPLE [(src_thrift)src_thrift.FieldSchema(name:astring, type:string, comment:from deserializer), ] -PREHOOK: query: SELECT dest1.* FROM dest1 CLUSTER BY 1 +POSTHOOK: Output: default@dest1_n124 +POSTHOOK: Lineage: dest1_n124.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n124.b SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lstring, type:array<string>, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n124.c SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n124.d SIMPLE [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), ] +POSTHOOK: Lineage: dest1_n124.e SIMPLE [(src_thrift)src_thrift.FieldSchema(name:astring, type:string, comment:from deserializer), ] +PREHOOK: query: SELECT dest1_n124.* FROM dest1_n124 CLUSTER BY 1 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n124 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 CLUSTER BY 1 +POSTHOOK: query: SELECT dest1_n124.* FROM dest1_n124 CLUSTER BY 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n124 #### A masked pattern was here #### NULL NULL NULL 0 NULL [0,0,0] ["0","0","0"] {"key_0":"value_0"} 1712634731 record_0 @@ -113,13 +113,13 @@ NULL NULL NULL 0 NULL [7,14,21] ["70","700","7000"] {"key_7":"value_7"} -1461153973 record_7 [8,16,24] ["80","800","8000"] {"key_8":"value_8"} 1638581578 record_8 [9,18,27] ["90","900","9000"] {"key_9":"value_9"} 336964413 record_9 -PREHOOK: query: SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 CLUSTER BY 1 +PREHOOK: query: SELECT dest1_n124.a[0], dest1_n124.b[0], dest1_n124.c['key2'], dest1_n124.d, dest1_n124.e FROM dest1_n124 CLUSTER BY 1 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n124 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 CLUSTER BY 1 +POSTHOOK: query: SELECT dest1_n124.a[0], dest1_n124.b[0], dest1_n124.c['key2'], dest1_n124.d, dest1_n124.e FROM dest1_n124 CLUSTER BY 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n124 #### A masked pattern was here #### 
0 0 NULL 1712634731 record_0 1 10 NULL 465985200 record_1 @@ -132,38 +132,38 @@ POSTHOOK: Input: default@dest1 8 80 NULL 1638581578 record_8 9 90 NULL 336964413 record_9 NULL NULL NULL 0 NULL -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n124 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n124 +PREHOOK: Output: default@dest1_n124 +POSTHOOK: query: DROP TABLE dest1_n124 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' +POSTHOOK: Input: default@dest1_n124 +POSTHOOK: Output: default@dest1_n124 +PREHOOK: query: CREATE TABLE dest1_n124(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' +PREHOOK: Output: default@dest1_n124 +POSTHOOK: query: CREATE TABLE dest1_n124(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1 +POSTHOOK: Output: default@dest1_n124 +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n124 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1 PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1 +PREHOOK: Output: default@dest1_n124 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n124 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] -PREHOOK: query: SELECT * from dest1 +POSTHOOK: Output: default@dest1_n124 +POSTHOOK: Lineage: dest1_n124.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ] -PREHOOK: query: SELECT * from dest1 +PREHOOK: query: SELECT * from dest1_n124 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n124 #### A masked pattern was here #### -POSTHOOK: query: SELECT * from dest1 +POSTHOOK: query: SELECT * from dest1_n124 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n124 #### A masked pattern was here #### NULL [0,0,0] @@ -176,38 +176,38 @@ NULL [7,14,21] [8,16,24] [9,18,27] -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n124 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n124 +PREHOOK: Output: default@dest1_n124 +POSTHOOK: query: DROP TABLE dest1_n124 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' +POSTHOOK: Input: default@dest1_n124 +POSTHOOK: Output: default@dest1_n124 +PREHOOK: query: CREATE TABLE dest1_n124(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: 
default@dest1 -POSTHOOK: query: CREATE TABLE dest1(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' +PREHOOK: Output: default@dest1_n124 +POSTHOOK: query: CREATE TABLE dest1_n124(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1 +POSTHOOK: Output: default@dest1_n124 +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n124 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1 PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1 +PREHOOK: Output: default@dest1_n124 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n124 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ] -PREHOOK: query: SELECT * from dest1 +POSTHOOK: Output: default@dest1_n124 +POSTHOOK: Lineage: dest1_n124.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ] +PREHOOK: query: SELECT * from dest1_n124 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n124 #### A masked pattern was here #### -POSTHOOK: query: SELECT * from dest1 +POSTHOOK: query: SELECT * from dest1_n124 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n124 #### A masked pattern was here #### NULL {"key_0":"value_0"} @@ -220,30 +220,30 @@ NULL {"key_7":"value_7"} {"key_8":"value_8"} {"key_9":"value_9"} -PREHOOK: query: CREATE TABLE destBin(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE +PREHOOK: query: CREATE TABLE destBin_n0(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@destBin -POSTHOOK: query: CREATE TABLE destBin(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE +PREHOOK: Output: default@destBin_n0 +POSTHOOK: query: CREATE TABLE destBin_n0(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@destBin -PREHOOK: query: INSERT OVERWRITE TABLE destBin SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2 +POSTHOOK: Output: default@destBin_n0 +PREHOOK: query: INSERT OVERWRITE TABLE destBin_n0 SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 
END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2 PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket2 -PREHOOK: Output: default@destbin -POSTHOOK: query: INSERT OVERWRITE TABLE destBin SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2 +PREHOOK: Output: default@destbin_n0 +POSTHOOK: query: INSERT OVERWRITE TABLE destBin_n0 SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket2 -POSTHOOK: Output: default@destbin -POSTHOOK: Lineage: destbin.a EXPRESSION [(srcbucket2)srcbucket2.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: SELECT * from destBin +POSTHOOK: Output: default@destbin_n0 +POSTHOOK: Lineage: destbin_n0.a EXPRESSION [(srcbucket2)srcbucket2.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: SELECT * from destBin_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@destbin +PREHOOK: Input: default@destbin_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT * from destBin +POSTHOOK: query: SELECT * from destBin_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@destbin +POSTHOOK: Input: default@destbin_n0 #### A masked pattern was here #### {0:0} {0:0} @@ -745,48 +745,48 @@ POSTHOOK: Input: default@destbin {3:{"col1":5,"col2":"five"}} {3:{"col1":5,"col2":"five"}} {3:{"col1":5,"col2":"five"}} -PREHOOK: query: DROP TABLE destBin +PREHOOK: query: DROP TABLE destBin_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@destbin -PREHOOK: Output: default@destbin -POSTHOOK: query: DROP TABLE destBin +PREHOOK: Input: default@destbin_n0 +PREHOOK: Output: default@destbin_n0 +POSTHOOK: query: DROP TABLE destBin_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@destbin -POSTHOOK: Output: default@destbin -PREHOOK: query: DROP TABLE dest2 +POSTHOOK: Input: default@destbin_n0 +POSTHOOK: Output: default@destbin_n0 +PREHOOK: query: DROP TABLE dest2_n17 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE dest2 +POSTHOOK: query: DROP TABLE dest2_n17 POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE dest3 +PREHOOK: query: DROP TABLE dest3_n5 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE dest3 +POSTHOOK: query: DROP TABLE dest3_n5 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE dest2 (a map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>>>>) +PREHOOK: query: CREATE TABLE dest2_n17 (a map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>>>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest2 -POSTHOOK: query: CREATE TABLE dest2 (a map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>>>>) +PREHOOK: Output: default@dest2_n17 +POSTHOOK: query: CREATE TABLE dest2_n17 (a map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>>>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest2 -PREHOOK: query: INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift +POSTHOOK: Output: default@dest2_n17 +PREHOOK: query: INSERT OVERWRITE TABLE dest2_n17 SELECT src_thrift.attributes FROM src_thrift PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest2 -POSTHOOK: query: INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift +PREHOOK: Output: default@dest2_n17 +POSTHOOK: query: INSERT OVERWRITE TABLE dest2_n17 SELECT src_thrift.attributes FROM src_thrift POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest2.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:attributes, type:map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>>>>, 
comment:from deserializer), ] -PREHOOK: query: SELECT a from dest2 limit 10 +POSTHOOK: Output: default@dest2_n17 +POSTHOOK: Lineage: dest2_n17.a SIMPLE [(src_thrift)src_thrift.FieldSchema(name:attributes, type:map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>>>>, comment:from deserializer), ] +PREHOOK: query: SELECT a from dest2_n17 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n17 #### A masked pattern was here #### -POSTHOOK: query: SELECT a from dest2 limit 10 +POSTHOOK: query: SELECT a from dest2_n17 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n17 #### A masked pattern was here #### {"key_0":{"erVal0":{"value_0":{3:1.0}}}} {"key_1":{"erVal1":{"value_1":{3:1.0}}}} @@ -798,40 +798,40 @@ POSTHOOK: Input: default@dest2 {"key_7":{"erVal7":{"value_7":{3:1.0}}}} {"key_8":{"erVal8":{"value_8":{3:1.0}}}} {"key_9":{"erVal9":{"value_9":{3:1.0}}}} -PREHOOK: query: CREATE TABLE dest3 ( +PREHOOK: query: CREATE TABLE dest3_n5 ( unionfield1 uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>, unionfield2 uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>, unionfield3 uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>> ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest3 -POSTHOOK: query: CREATE TABLE dest3 ( +PREHOOK: Output: default@dest3_n5 +POSTHOOK: query: CREATE TABLE dest3_n5 ( unionfield1 uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>, unionfield2 uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>>, unionfield3 uniontype<int,bigint,string,double,boolean,array<string>, map<string,string>> ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest3 -PREHOOK: query: INSERT OVERWRITE TABLE dest3 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift +POSTHOOK: Output: default@dest3_n5 +PREHOOK: query: INSERT OVERWRITE TABLE dest3_n5 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift PREHOOK: type: QUERY PREHOOK: Input: default@src_thrift -PREHOOK: Output: default@dest3 -POSTHOOK: query: INSERT OVERWRITE TABLE dest3 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift +PREHOOK: Output: default@dest3_n5 +POSTHOOK: query: INSERT OVERWRITE TABLE dest3_n5 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift POSTHOOK: type: QUERY POSTHOOK: Input: default@src_thrift -POSTHOOK: Output: default@dest3 -POSTHOOK: Lineage: dest3.unionfield1 SIMPLE [(src_thrift)src_thrift.FieldSchema(name:unionfield1, type:uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>, comment:from deserializer), ] -POSTHOOK: Lineage: dest3.unionfield2 SIMPLE [(src_thrift)src_thrift.FieldSchema(name:unionfield2, type:uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>, comment:from deserializer), ] -POSTHOOK: Lineage: dest3.unionfield3 SIMPLE [(src_thrift)src_thrift.FieldSchema(name:unionfield3, type:uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>, comment:from deserializer), ] -PREHOOK: query: SELECT unionfield1, unionField2, unionfield3 from dest3 limit 10 +POSTHOOK: Output: default@dest3_n5 +POSTHOOK: Lineage: dest3_n5.unionfield1 SIMPLE [(src_thrift)src_thrift.FieldSchema(name:unionfield1, type:uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>, comment:from deserializer), ] +POSTHOOK: Lineage: dest3_n5.unionfield2 SIMPLE [(src_thrift)src_thrift.FieldSchema(name:unionfield2, type:uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>, comment:from deserializer), ] +POSTHOOK: Lineage: dest3_n5.unionfield3 SIMPLE [(src_thrift)src_thrift.FieldSchema(name:unionfield3, type:uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>, comment:from deserializer), ] +PREHOOK: query: SELECT unionfield1, unionField2, 
unionfield3 from dest3_n5 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@dest3 +PREHOOK: Input: default@dest3_n5 #### A masked pattern was here #### -POSTHOOK: query: SELECT unionfield1, unionField2, unionfield3 from dest3 limit 10 +POSTHOOK: query: SELECT unionfield1, unionField2, unionfield3 from dest3_n5 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest3 +POSTHOOK: Input: default@dest3_n5 #### A masked pattern was here #### {2:"test0"} {6:{"key_0":"value_0"}} {5:["0","0","0"]} {2:"test1"} {6:{"key_1":"value_1"}} {5:["10","100","1000"]} diff --git a/ql/src/test/results/clientpositive/input_part1.q.out b/ql/src/test/results/clientpositive/input_part1.q.out index 9a322d3c17..71720c53c7 100644 --- a/ql/src/test/results/clientpositive/input_part1.q.out +++ b/ql/src/test/results/clientpositive/input_part1.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n41(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n41 +POSTHOOK: query: CREATE TABLE dest1_n41(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n41 PREHOOK: query: EXPLAIN EXTENDED FROM srcpart -INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' +INSERT OVERWRITE TABLE dest1_n41 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED FROM srcpart -INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' +INSERT OVERWRITE TABLE dest1_n41 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -59,17 +59,17 @@ STAGE PLANS: columns.comments columns.types int:string:string:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n41 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value, string hr, string ds} + serialization.ddl struct dest1_n41 { i32 key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n41 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -199,17 +199,17 @@ STAGE PLANS: columns.comments columns.types int:string:string:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n41 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value, string hr, string ds} + serialization.ddl struct dest1_n41 { i32 key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here 
#### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n41 Stage: Stage-2 Stats Work @@ -218,7 +218,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value, hr, ds Column Types: int, string, string, string - Table: default.dest1 + Table: default.dest1_n41 Is Table Level Stats: true Stage: Stage-3 @@ -243,17 +243,17 @@ STAGE PLANS: columns.comments columns.types int:string:string:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n41 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value, string hr, string ds} + serialization.ddl struct dest1_n41 { i32 key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n41 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -274,11 +274,11 @@ STAGE PLANS: columns.comments columns.types int:string:string:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n41 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value, string hr, string ds} + serialization.ddl struct dest1_n41 { i32 key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -296,18 +296,18 @@ STAGE PLANS: columns.comments columns.types int:string:string:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n41 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value, string hr, string ds} + serialization.ddl struct dest1_n41 { i32 key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n41 + name: default.dest1_n41 Truncated Path -> Alias: #### A masked pattern was here #### @@ -333,17 +333,17 @@ STAGE PLANS: columns.comments columns.types int:string:string:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n41 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value, string hr, string ds} + serialization.ddl struct dest1_n41 { i32 key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n41 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -364,11 +364,11 @@ STAGE PLANS: columns.comments columns.types int:string:string:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n41 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value, string hr, string ds} + serialization.ddl struct dest1_n41 { i32 key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -386,18 +386,18 @@ STAGE PLANS: columns.comments columns.types int:string:string:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n41 
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n41 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
-            name: default.dest1
+            name: default.dest1_n41
+            name: default.dest1_n41
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -408,28 +408,28 @@ STAGE PLANS:
#### A masked pattern was here ####
 PREHOOK: query: FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest1_n41 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n41
 POSTHOOK: query: FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest1_n41 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.ds SIMPLE []
-POSTHOOK: Lineage: dest1.hr SIMPLE []
-POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n41
+POSTHOOK: Lineage: dest1_n41.ds SIMPLE []
+POSTHOOK: Lineage: dest1_n41.hr SIMPLE []
+POSTHOOK: Lineage: dest1_n41.key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n41.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n41.* FROM dest1_n41
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n41
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n41.* FROM dest1_n41
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n41
#### A masked pattern was here ####
 86 val_86 12 2008-04-08
 27 val_27 12 2008-04-08
diff --git a/ql/src/test/results/clientpositive/input_part2.q.out b/ql/src/test/results/clientpositive/input_part2.q.out
index 0dfbf60665..9a68feee6b 100644
--- a/ql/src/test/results/clientpositive/input_part2.q.out
+++ b/ql/src/test/results/clientpositive/input_part2.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n71(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n71
+POSTHOOK: query: CREATE TABLE dest1_n71(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: CREATE TABLE dest2(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@dest1_n71
+PREHOOK: query: CREATE TABLE dest2_n9(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest2
-POSTHOOK: query: CREATE TABLE dest2(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest2_n9
+POSTHOOK: query: CREATE TABLE dest2_n9(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest2
+POSTHOOK: Output: default@dest2_n9
 PREHOOK: query: EXPLAIN EXTENDED
 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest1_n71 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest2_n9 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest1_n71 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest2_n9 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -72,17 +72,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n71
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n71 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n71
           TotalFiles: 1
           GatherStats: true
           MultiFileSpray: false
@@ -129,17 +129,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest2
+              name default.dest2_n9
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest2_n9 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest2
+            name: default.dest2_n9
           TotalFiles: 1
           GatherStats: true
           MultiFileSpray: false
@@ -330,17 +330,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n71
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n71 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n71

   Stage: Stage-3
     Stats Work
@@ -349,7 +349,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, value, hr, ds
           Column Types: int, string, string, string
-          Table: default.dest1
+          Table: default.dest1_n71
           Is Table Level Stats: true

   Stage: Stage-4
@@ -374,17 +374,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n71
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n71 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n71
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -405,11 +405,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n71
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n71 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
@@ -427,18 +427,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n71
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n71 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
-            name: default.dest1
+            name: default.dest1_n71
+            name: default.dest1_n71
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -464,17 +464,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n71
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n71 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n71
           TotalFiles: 1
           GatherStats: false
           MultiFileSpray: false
@@ -495,11 +495,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n71
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n71 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
@@ -517,18 +517,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n71
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest1_n71 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
-            name: default.dest1
+            name: default.dest1_n71
+            name: default.dest1_n71
       Truncated Path -> Alias:
#### A masked pattern was here ####
@@ -555,17 +555,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string:string
#### A masked pattern was here ####
-              name default.dest2
+              name default.dest2_n9
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}
+              serialization.ddl struct dest2_n9 { i32 key, string value, string hr, string ds}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest2
+            name: default.dest2_n9

   Stage: Stage-9
     Stats Work
@@ -574,7 +574,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, value, hr, ds
           Column Types: int, string, string, string
-          Table: default.dest2
+          Table: default.dest2_n9
           Is Table Level Stats: true

   Stage: Stage-10
@@ -648,38 +648,38 @@ STAGE PLANS:
           MultiFileSpray: false
 PREHOOK: query: FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest1_n71 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest2_n9 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@dest1
-PREHOOK: Output: default@dest2
+PREHOOK: Output: default@dest1_n71
+PREHOOK: Output: default@dest2_n9
 POSTHOOK: query: FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest1_n71 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest2_n9 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@dest1
-POSTHOOK: Output: default@dest2
-POSTHOOK: Lineage: dest1.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
-POSTHOOK: Lineage: dest2.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
-POSTHOOK: Lineage: dest2.key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1 sort by key,value,ds,hr
+POSTHOOK: Output: default@dest1_n71
+POSTHOOK: Output: default@dest2_n9
+POSTHOOK: Lineage: dest1_n71.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n71.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n71.key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n71.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n9.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2_n9.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2_n9.key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest2_n9.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n71.* FROM dest1_n71 sort by key,value,ds,hr
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n71
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1 sort by key,value,ds,hr
+POSTHOOK: query: SELECT dest1_n71.* FROM dest1_n71 sort by key,value,ds,hr
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n71
#### A masked pattern was here ####
 0 val_0 12 2008-04-08
 0 val_0 12 2008-04-08
@@ -765,13 +765,13 @@ POSTHOOK: Input: default@dest1
 97 val_97 12 2008-04-08
 98 val_98 12 2008-04-08
 98 val_98 12 2008-04-08
-PREHOOK: query: SELECT dest2.* FROM dest2 sort by key,value,ds,hr
+PREHOOK: query: SELECT dest2_n9.* FROM dest2_n9 sort by key,value,ds,hr
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2
+PREHOOK: Input: default@dest2_n9
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest2.* FROM dest2 sort by key,value,ds,hr
+POSTHOOK: query: SELECT dest2_n9.* FROM dest2_n9 sort by key,value,ds,hr
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2
+POSTHOOK: Input: default@dest2_n9
#### A masked pattern was here ####
 0 val_0 12 2008-04-09
 0 val_0 12 2008-04-09
diff --git a/ql/src/test/results/clientpositive/input_part5.q.out b/ql/src/test/results/clientpositive/input_part5.q.out
index 0a5be8c66b..3b4c49096f 100644
--- a/ql/src/test/results/clientpositive/input_part5.q.out
+++ b/ql/src/test/results/clientpositive/input_part5.q.out
@@ -1,17 +1,17 @@
-PREHOOK: query: create table tmptable(key string, value string, hr string, ds string)
+PREHOOK: query: create table tmptable_n2(key string, value string, hr string, ds string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tmptable
-POSTHOOK: query: create table tmptable(key string, value string, hr string, ds string)
+PREHOOK: Output: default@tmptable_n2
+POSTHOOK: query: create table tmptable_n2(key string, value string, hr string, ds string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmptable
+POSTHOOK: Output: default@tmptable_n2
 PREHOOK: query: EXPLAIN
-insert overwrite table tmptable
+insert overwrite table tmptable_n2
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-insert overwrite table tmptable
+insert overwrite table tmptable_n2
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -45,7 +45,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.tmptable
+            name: default.tmptable_n2
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
             outputColumnNames: key, value, hr, ds
@@ -90,7 +90,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.tmptable
+            name: default.tmptable_n2

   Stage: Stage-2
     Stats Work
@@ -98,7 +98,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, value, hr, ds
          Column Types: string, string, string, string
-         Table: default.tmptable
+         Table: default.tmptable_n2

   Stage: Stage-3
     Map Reduce
@@ -110,7 +110,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.tmptable
+            name: default.tmptable_n2

   Stage: Stage-5
     Map Reduce
@@ -122,7 +122,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.tmptable
+            name: default.tmptable_n2

   Stage: Stage-6
     Move Operator
@@ -130,31 +130,31 @@ STAGE PLANS:
           hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table tmptable
+PREHOOK: query: insert overwrite table tmptable_n2
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@tmptable
-POSTHOOK: query: insert overwrite table tmptable
+PREHOOK: Output: default@tmptable_n2
+POSTHOOK: query: insert overwrite table tmptable_n2
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@tmptable
-POSTHOOK: Lineage: tmptable.ds SIMPLE [(srcpart)x.FieldSchema(name:hr, type:string, comment:null), ]
-POSTHOOK: Lineage: tmptable.hr SIMPLE []
-POSTHOOK: Lineage: tmptable.key SIMPLE [(srcpart)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmptable.value SIMPLE [(srcpart)x.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from tmptable x sort by x.key,x.value,x.ds,x.hr
+POSTHOOK: Output: default@tmptable_n2
+POSTHOOK: Lineage: tmptable_n2.ds SIMPLE [(srcpart)x.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: tmptable_n2.hr SIMPLE []
+POSTHOOK: Lineage: tmptable_n2.key SIMPLE [(srcpart)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmptable_n2.value SIMPLE [(srcpart)x.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from tmptable_n2 x sort by x.key,x.value,x.ds,x.hr
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tmptable
+PREHOOK: Input: default@tmptable_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from tmptable x sort by x.key,x.value,x.ds,x.hr
+POSTHOOK: query: select * from tmptable_n2 x sort by x.key,x.value,x.ds,x.hr
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tmptable
+POSTHOOK: Input: default@tmptable_n2
#### A masked pattern was here ####
 0 val_0 2008-04-08 11
 0 val_0 2008-04-08 11
diff --git a/ql/src/test/results/clientpositive/input_testxpath.q.out b/ql/src/test/results/clientpositive/input_testxpath.q.out
index 23be3e7436..c0ba678252 100644
--- a/ql/src/test/results/clientpositive/input_testxpath.q.out
+++ b/ql/src/test/results/clientpositive/input_testxpath.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n6(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n6
+POSTHOOK: query: CREATE TABLE dest1_n6(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n6
 PREHOOK: query: EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2']
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2']
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2']
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2']
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -42,7 +42,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n6
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
             outputColumnNames: key, value, mapvalue
@@ -87,7 +87,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n6

   Stage: Stage-2
     Stats Work
@@ -95,7 +95,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, value, mapvalue
          Column Types: int, string, string
-         Table: default.dest1
+         Table: default.dest1_n6

   Stage: Stage-3
     Map Reduce
@@ -107,7 +107,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n6

   Stage: Stage-5
     Map Reduce
@@ -119,7 +119,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n6

   Stage: Stage-6
     Move Operator
@@ -128,25 +128,25 @@ STAGE PLANS:
#### A masked pattern was here ####
 PREHOOK: query: FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2']
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2']
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_thrift
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n6
 POSTHOOK: query: FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2']
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2']
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_thrift
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ]
-POSTHOOK: Lineage: dest1.mapvalue EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ]
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n6
+POSTHOOK: Lineage: dest1_n6.key EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ]
+POSTHOOK: Lineage: dest1_n6.mapvalue EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ]
+POSTHOOK: Lineage: dest1_n6.value EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
+PREHOOK: query: SELECT dest1_n6.* FROM dest1_n6
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n6
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n6.* FROM dest1_n6
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n6
#### A masked pattern was here ####
 0 0 NULL
 2 1 NULL
diff --git a/ql/src/test/results/clientpositive/input_testxpath2.q.out b/ql/src/test/results/clientpositive/input_testxpath2.q.out
index 361fab8276..62a4baf7f8 100644
--- a/ql/src/test/results/clientpositive/input_testxpath2.q.out
+++ b/ql/src/test/results/clientpositive/input_testxpath2.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(lint_size INT, lintstring_size INT, mstringstring_size INT) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n28(lint_size INT, lintstring_size INT, mstringstring_size INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(lint_size INT, lintstring_size INT, mstringstring_size INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n28
+POSTHOOK: query: CREATE TABLE dest1_n28(lint_size INT, lintstring_size INT, mstringstring_size INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n28
 PREHOOK: query: EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL)
+INSERT OVERWRITE TABLE dest1_n28 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL)
+INSERT OVERWRITE TABLE dest1_n28 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -45,7 +45,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n28
           Select Operator
             expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
             outputColumnNames: lint_size, lintstring_size, mstringstring_size
@@ -90,7 +90,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n28

   Stage: Stage-2
     Stats Work
@@ -98,7 +98,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: lint_size, lintstring_size, mstringstring_size
          Column Types: int, int, int
-         Table: default.dest1
+         Table: default.dest1_n28

   Stage: Stage-3
     Map Reduce
@@ -110,7 +110,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n28

   Stage: Stage-5
     Map Reduce
@@ -122,7 +122,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n28

   Stage: Stage-6
     Move Operator
@@ -131,25 +131,25 @@ STAGE PLANS:
#### A masked pattern was here ####
 PREHOOK: query: FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL)
+INSERT OVERWRITE TABLE dest1_n28 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_thrift
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n28
 POSTHOOK: query: FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL)
+INSERT OVERWRITE TABLE dest1_n28 SELECT size(src_thrift.lint), size(src_thrift.lintstring), size(src_thrift.mstringstring) where src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_thrift
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.lint_size EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ]
-POSTHOOK: Lineage: dest1.lintstring_size EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
-POSTHOOK: Lineage: dest1.mstringstring_size EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n28
+POSTHOOK: Lineage: dest1_n28.lint_size EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lint, type:array<int>, comment:from deserializer), ]
+POSTHOOK: Lineage: dest1_n28.lintstring_size EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:lintstring, type:array<struct<myint:int,mystring:string,underscore_int:int>>, comment:from deserializer), ]
+POSTHOOK: Lineage: dest1_n28.mstringstring_size EXPRESSION [(src_thrift)src_thrift.FieldSchema(name:mstringstring, type:map<string,string>, comment:from deserializer), ]
+PREHOOK: query: SELECT dest1_n28.* FROM dest1_n28
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n28
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n28.* FROM dest1_n28
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n28
#### A masked pattern was here ####
 3 1 1
 3 1 1
diff --git a/ql/src/test/results/clientpositive/inputddl7.q.out b/ql/src/test/results/clientpositive/inputddl7.q.out
index 32070f40f5..826f5a4cfe 100644
--- a/ql/src/test/results/clientpositive/inputddl7.q.out
+++ b/ql/src/test/results/clientpositive/inputddl7.q.out
@@ -1,133 +1,133 @@
-PREHOOK: query: CREATE TABLE T1(name STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n75(name STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(name STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n75
+POSTHOOK: query: CREATE TABLE T1_n75(name STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n75
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n75
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n75
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n75
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: SELECT COUNT(1) FROM T1
+POSTHOOK: Output: default@t1_n75
+PREHOOK: query: SELECT COUNT(1) FROM T1_n75
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n75
#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(1) FROM T1
+POSTHOOK: query: SELECT COUNT(1) FROM T1_n75
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n75
#### A masked pattern was here ####
 500
-PREHOOK: query: CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE
+PREHOOK: query: CREATE TABLE T2_n45(name STRING) STORED AS SEQUENCEFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE
+PREHOOK: Output: default@T2_n45
+POSTHOOK: query: CREATE TABLE T2_n45(name STRING) STORED AS SEQUENCEFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2
+POSTHOOK: Output: default@T2_n45
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2_n45
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2
+PREHOOK: Output: default@t2_n45
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2_n45
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: SELECT COUNT(1) FROM T2
+POSTHOOK: Output: default@t2_n45
+PREHOOK: query: SELECT COUNT(1) FROM T2_n45
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2_n45
#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(1) FROM T2
+POSTHOOK: query: SELECT COUNT(1) FROM T2_n45
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2_n45
#### A masked pattern was here ####
 500
-PREHOOK: query: CREATE TABLE T3(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T3_n17(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T3_n17
+POSTHOOK: query: CREATE TABLE T3_n17(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3 PARTITION (ds='2008-04-09')
+POSTHOOK: Output: default@T3_n17
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3_n17 PARTITION (ds='2008-04-09')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3 PARTITION (ds='2008-04-09')
+PREHOOK: Output: default@t3_n17
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3_n17 PARTITION (ds='2008-04-09')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@t3
-POSTHOOK: Output: default@t3@ds=2008-04-09
-PREHOOK: query: SELECT COUNT(1) FROM T3 where T3.ds='2008-04-09'
+POSTHOOK: Output: default@t3_n17
+POSTHOOK: Output: default@t3_n17@ds=2008-04-09
+PREHOOK: query: SELECT COUNT(1) FROM T3_n17 where T3_n17.ds='2008-04-09'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t3
-PREHOOK: Input: default@t3@ds=2008-04-09
+PREHOOK: Input: default@t3_n17
+PREHOOK: Input: default@t3_n17@ds=2008-04-09
#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(1) FROM T3 where T3.ds='2008-04-09'
+POSTHOOK: query: SELECT COUNT(1) FROM T3_n17 where T3_n17.ds='2008-04-09'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t3
-POSTHOOK: Input: default@t3@ds=2008-04-09
+POSTHOOK: Input: default@t3_n17
+POSTHOOK: Input: default@t3_n17@ds=2008-04-09
#### A masked pattern was here ####
 500
-PREHOOK: query: CREATE TABLE T4(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE
+PREHOOK: query: CREATE TABLE T4_n5(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T4
-POSTHOOK: query: CREATE TABLE T4(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE
+PREHOOK: Output: default@T4_n5
+POSTHOOK: query: CREATE TABLE T4_n5(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T4
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4 PARTITION (ds='2008-04-09')
+POSTHOOK: Output: default@T4_n5
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4_n5 PARTITION (ds='2008-04-09')
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@t4
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4 PARTITION (ds='2008-04-09')
+PREHOOK: Output: default@t4_n5
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4_n5 PARTITION (ds='2008-04-09')
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@t4
-POSTHOOK: Output: default@t4@ds=2008-04-09
-PREHOOK: query: SELECT COUNT(1) FROM T4 where T4.ds='2008-04-09'
+POSTHOOK: Output: default@t4_n5
+POSTHOOK: Output: default@t4_n5@ds=2008-04-09
+PREHOOK: query: SELECT COUNT(1) FROM T4_n5 where T4_n5.ds='2008-04-09'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t4
-PREHOOK: Input: default@t4@ds=2008-04-09
+PREHOOK: Input: default@t4_n5
+PREHOOK: Input: default@t4_n5@ds=2008-04-09
#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(1) FROM T4 where T4.ds='2008-04-09'
+POSTHOOK: query: SELECT COUNT(1) FROM T4_n5 where T4_n5.ds='2008-04-09'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t4
-POSTHOOK: Input: default@t4@ds=2008-04-09
+POSTHOOK: Input: default@t4_n5
+POSTHOOK: Input: default@t4_n5@ds=2008-04-09
#### A masked pattern was here ####
 500
-PREHOOK: query: DESCRIBE EXTENDED T1
+PREHOOK: query: DESCRIBE EXTENDED T1_n75
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t1
-POSTHOOK: query: DESCRIBE EXTENDED T1
+PREHOOK: Input: default@t1_n75
+POSTHOOK: query: DESCRIBE EXTENDED T1_n75
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n75
 name string
#### A masked pattern was here ####
-PREHOOK: query: DESCRIBE EXTENDED T2
+PREHOOK: query: DESCRIBE EXTENDED T2_n45
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t2
-POSTHOOK: query: DESCRIBE EXTENDED T2
+PREHOOK: Input: default@t2_n45
+POSTHOOK: query: DESCRIBE EXTENDED T2_n45
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2_n45
 name string
#### A masked pattern was here ####
-PREHOOK: query: DESCRIBE EXTENDED T3 PARTITION (ds='2008-04-09')
+PREHOOK: query: DESCRIBE EXTENDED T3_n17 PARTITION (ds='2008-04-09')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t3
-POSTHOOK: query: DESCRIBE EXTENDED T3 PARTITION (ds='2008-04-09')
+PREHOOK: Input: default@t3_n17
+POSTHOOK: query: DESCRIBE EXTENDED T3_n17 PARTITION (ds='2008-04-09')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t3_n17
 name string
 ds string
@@ -136,12 +136,12 @@ ds string
 ds string
#### A masked pattern was here ####
-PREHOOK: query: DESCRIBE EXTENDED T4 PARTITION (ds='2008-04-09')
+PREHOOK: query: DESCRIBE EXTENDED T4_n5 PARTITION (ds='2008-04-09')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@t4
-POSTHOOK: query: DESCRIBE EXTENDED T4 PARTITION (ds='2008-04-09')
+PREHOOK: Input: default@t4_n5
+POSTHOOK: query: DESCRIBE EXTENDED T4_n5 PARTITION (ds='2008-04-09')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4_n5
 name string
 ds string
diff --git a/ql/src/test/results/clientpositive/insert0.q.out b/ql/src/test/results/clientpositive/insert0.q.out
index e8a48845ee..4c1fa2f14f 100644
--- a/ql/src/test/results/clientpositive/insert0.q.out
+++ b/ql/src/test/results/clientpositive/insert0.q.out
@@ -1,6 +1,6 @@
-PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: query: DROP TABLE insert_into1_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: query: DROP TABLE insert_into1_n1
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: DROP TABLE ctas_table
 PREHOOK: type: DROPTABLE
@@ -10,31 +10,31 @@ PREHOOK: query: DROP TABLE ctas_part
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE ctas_part
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+PREHOOK: query: CREATE TABLE insert_into1_n1 (key int, value string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@insert_into1
-POSTHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+PREHOOK: Output: default@insert_into1_n1
+POSTHOOK: query: CREATE TABLE insert_into1_n1 (key int, value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@insert_into1
-PREHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 10
+POSTHOOK: Output: default@insert_into1_n1
+PREHOOK: query: INSERT OVERWRITE TABLE insert_into1_n1 SELECT * from src ORDER BY key LIMIT 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@insert_into1
-POSTHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 10
+PREHOOK: Output: default@insert_into1_n1
+POSTHOOK: query: INSERT OVERWRITE TABLE insert_into1_n1 SELECT * from src ORDER BY key LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@insert_into1
-POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from insert_into1 order by key
+POSTHOOK: Output: default@insert_into1_n1
+POSTHOOK: Lineage: insert_into1_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from insert_into1_n1 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@insert_into1
+PREHOOK: Input: default@insert_into1_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from insert_into1 order by key
+POSTHOOK: query: select * from insert_into1_n1 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@insert_into1
+POSTHOOK: Input: default@insert_into1_n1
#### A masked pattern was here ####
 0 val_0
 0 val_0
@@ -46,23 +46,23 @@ POSTHOOK: Input: default@insert_into1
 103 val_103
 104 val_104
 104 val_104
-PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key DESC LIMIT 10
+PREHOOK: query: INSERT INTO TABLE insert_into1_n1 SELECT * from src ORDER BY key DESC LIMIT 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@insert_into1
-POSTHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key DESC LIMIT 10
+PREHOOK: Output: default@insert_into1_n1
+POSTHOOK: query: INSERT INTO TABLE insert_into1_n1 SELECT * from src ORDER BY key DESC LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@insert_into1
-POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from insert_into1 order by key
+POSTHOOK: Output: default@insert_into1_n1
+POSTHOOK: Lineage: insert_into1_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from insert_into1_n1 order by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@insert_into1
+PREHOOK: Input: default@insert_into1_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from insert_into1 order by key
+POSTHOOK: query: select * from insert_into1_n1 order by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@insert_into1
+POSTHOOK: Input: default@insert_into1_n1
#### A masked pattern was here ####
 0 val_0
 0 val_0
@@ -184,14 +184,14 @@ POSTHOOK: Input: default@ctas_part@modkey=5
 311 val_311 4
 409 val_409 5
 484 val_484 5
-PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: query: DROP TABLE insert_into1_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@insert_into1
-PREHOOK: Output: default@insert_into1
-POSTHOOK: query: DROP TABLE insert_into1
+PREHOOK: Input: default@insert_into1_n1
+PREHOOK: Output: default@insert_into1_n1
+POSTHOOK: query: DROP TABLE insert_into1_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@insert_into1
-POSTHOOK: Output: default@insert_into1
+POSTHOOK: Input: default@insert_into1_n1
+POSTHOOK: Output: default@insert_into1_n1
 PREHOOK: query: DROP TABLE ctas_table
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@ctas_table
diff --git a/ql/src/test/results/clientpositive/insert1.q.out b/ql/src/test/results/clientpositive/insert1.q.out
index 2500b79529..7272a89d42 100644
--- a/ql/src/test/results/clientpositive/insert1.q.out
+++ b/ql/src/test/results/clientpositive/insert1.q.out
@@ -778,39 +778,39 @@ PREHOOK: Output: database:db1
 POSTHOOK: query: CREATE DATABASE db1
 POSTHOOK: type: CREATEDATABASE
 POSTHOOK: Output: database:db1
-PREHOOK: query: CREATE TABLE db1.result(col1 STRING)
+PREHOOK: query: CREATE TABLE db1.result_n0(col1 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:db1
-PREHOOK: Output: db1@result
-POSTHOOK: query: CREATE TABLE db1.result(col1 STRING)
+PREHOOK: Output: db1@result_n0
+POSTHOOK: query: CREATE TABLE db1.result_n0(col1 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:db1
-POSTHOOK: Output: db1@result
-PREHOOK: query: INSERT OVERWRITE TABLE db1.result SELECT 'db1_insert1' FROM src LIMIT 1
+POSTHOOK: Output: db1@result_n0
+PREHOOK: query: INSERT OVERWRITE TABLE db1.result_n0 SELECT 'db1_insert1' FROM src LIMIT 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: db1@result
-POSTHOOK: query: INSERT OVERWRITE TABLE db1.result SELECT 'db1_insert1' FROM src LIMIT 1
+PREHOOK: Output: db1@result_n0
+POSTHOOK: query: INSERT OVERWRITE TABLE db1.result_n0 SELECT 'db1_insert1' FROM src LIMIT 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: db1@result
-POSTHOOK: Lineage: result.col1 SIMPLE []
-PREHOOK: query: INSERT INTO TABLE db1.result SELECT 'db1_insert2' FROM src LIMIT 1
+POSTHOOK: Output: db1@result_n0
+POSTHOOK: Lineage: result_n0.col1 SIMPLE []
+PREHOOK: query: INSERT INTO TABLE db1.result_n0 SELECT 'db1_insert2' FROM src LIMIT 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: db1@result
-POSTHOOK: query: INSERT INTO TABLE db1.result SELECT 'db1_insert2' FROM src LIMIT 1
+PREHOOK: Output: db1@result_n0
+POSTHOOK: query: INSERT INTO TABLE db1.result_n0 SELECT 'db1_insert2' FROM src LIMIT 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: db1@result
-POSTHOOK: Lineage: result.col1 SIMPLE []
-PREHOOK: query: SELECT * FROM db1.result
+POSTHOOK: Output: db1@result_n0
+POSTHOOK: Lineage: result_n0.col1 SIMPLE []
+PREHOOK: query: SELECT * FROM db1.result_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: db1@result
+PREHOOK: Input: db1@result_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM db1.result
+POSTHOOK: query: SELECT * FROM db1.result_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: db1@result
+POSTHOOK: Input: db1@result_n0
#### A masked pattern was here ####
 db1_insert1
 db1_insert2
diff --git a/ql/src/test/results/clientpositive/insert_acid_not_bucketed.q.out b/ql/src/test/results/clientpositive/insert_acid_not_bucketed.q.out
index 985ae402fc..d1a42f37fc 100644
--- a/ql/src/test/results/clientpositive/insert_acid_not_bucketed.q.out
+++ b/ql/src/test/results/clientpositive/insert_acid_not_bucketed.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+PREHOOK: query: create table acid_notbucketed_n0(a int, b varchar(128)) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_notbucketed
-POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+PREHOOK: Output: default@acid_notbucketed_n0
+POSTHOOK: query: create table acid_notbucketed_n0(a int, b varchar(128)) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_notbucketed
-PREHOOK: query: insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: Output: default@acid_notbucketed_n0
+PREHOOK: query: insert into table acid_notbucketed_n0 select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
-PREHOOK: Output: default@acid_notbucketed
-POSTHOOK: query: insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: Output: default@acid_notbucketed_n0
+POSTHOOK: query: insert into table acid_notbucketed_n0 select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
-POSTHOOK: Output: default@acid_notbucketed
-POSTHOOK: Lineage: acid_notbucketed.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: acid_notbucketed.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-PREHOOK: query: select * from acid_notbucketed
+POSTHOOK: Output: default@acid_notbucketed_n0
+POSTHOOK: Lineage: acid_notbucketed_n0.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_notbucketed_n0.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_notbucketed_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@acid_notbucketed
+PREHOOK: Input: default@acid_notbucketed_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from acid_notbucketed
+POSTHOOK: query: select * from acid_notbucketed_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@acid_notbucketed
+POSTHOOK: Input: default@acid_notbucketed_n0
#### A masked pattern was here ####
 -1073279343 oj1YrV5Wa
 -1073051226 A34p7oRr2WvUJNf
diff --git a/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out b/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
index 3cb55c74f9..82788f5b69 100644
--- a/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
+++ b/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
@@ -1,20 +1,20 @@
-PREHOOK: query: create table studenttab10k (age2 int)
+PREHOOK: query: create table studenttab10k_n0 (age2 int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@studenttab10k
-POSTHOOK: query: create table studenttab10k (age2 int)
+PREHOOK: Output: default@studenttab10k_n0
+POSTHOOK: query: create table studenttab10k_n0 (age2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@studenttab10k
-PREHOOK: query: insert into studenttab10k values(1)
+POSTHOOK: Output: default@studenttab10k_n0
+PREHOOK: query: insert into studenttab10k_n0 values(1)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@studenttab10k
-POSTHOOK: query: insert into studenttab10k values(1)
+PREHOOK: Output: default@studenttab10k_n0
+POSTHOOK: query: insert into studenttab10k_n0 values(1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@studenttab10k
-POSTHOOK: Lineage: studenttab10k.age2 SCRIPT []
+POSTHOOK: Output: default@studenttab10k_n0
+POSTHOOK: Lineage: studenttab10k_n0.age2 SCRIPT []
 PREHOOK: query: create table student_acid (age int, grade int)
 clustered by (age) into 1 buckets
 PREHOOK: type: CREATETABLE
@@ -25,15 +25,15 @@ POSTHOOK: query: create table student_acid (age int, grade int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@student_acid
-PREHOOK: query: insert into student_acid(age) select * from studenttab10k
+PREHOOK: query: insert into student_acid(age) select * from studenttab10k_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@studenttab10k
+PREHOOK: Input: default@studenttab10k_n0
 PREHOOK: Output: default@student_acid
-POSTHOOK: query: insert into student_acid(age) select * from studenttab10k
+POSTHOOK: query: insert into student_acid(age) select * from studenttab10k_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@studenttab10k
+POSTHOOK: Input: default@studenttab10k_n0
 POSTHOOK: Output: default@student_acid
-POSTHOOK: Lineage: student_acid.age SIMPLE [(studenttab10k)studenttab10k.FieldSchema(name:age2, type:int, comment:null), ]
+POSTHOOK: Lineage: student_acid.age SIMPLE [(studenttab10k_n0)studenttab10k_n0.FieldSchema(name:age2, type:int, comment:null), ]
 POSTHOOK: Lineage: student_acid.grade SIMPLE []
 PREHOOK: query: select * from student_acid
 PREHOOK: type: QUERY
@@ -44,15 +44,15 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@student_acid
#### A masked pattern was here ####
 1 NULL
-PREHOOK: query: insert into student_acid(grade, age) select 3 g, * from studenttab10k
+PREHOOK: query: insert into student_acid(grade, age) select 3 g, * from studenttab10k_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@studenttab10k
+PREHOOK: Input: default@studenttab10k_n0
 PREHOOK: Output: default@student_acid
-POSTHOOK: query: insert into student_acid(grade, age) select 3 g, * from studenttab10k
+POSTHOOK: query: insert into student_acid(grade, age) select 3 g, * from studenttab10k_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@studenttab10k
+POSTHOOK: Input: default@studenttab10k_n0
 POSTHOOK: Output: default@student_acid
-POSTHOOK: Lineage: student_acid.age SIMPLE [(studenttab10k)studenttab10k.FieldSchema(name:age2, type:int, comment:null), ]
+POSTHOOK: Lineage: student_acid.age SIMPLE [(studenttab10k_n0)studenttab10k_n0.FieldSchema(name:age2, type:int, comment:null), ]
 POSTHOOK: Lineage: student_acid.grade SIMPLE []
 PREHOOK: query: select * from student_acid
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
index 139053dc28..daf466e5ba 100644
--- a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
+++ b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
@@ -30,32 +30,32 @@ bbb 32 3.00
 ccc 32 3.00
 ddd 35 3.00
 eee 32 3.00
-PREHOOK: query: create table tab1 (name varchar(50), age int, gpa decimal(3, 2))
+PREHOOK: query: create table tab1_n2 (name varchar(50), age int, gpa decimal(3, 2))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tab1
-POSTHOOK: query: create table tab1 (name varchar(50), age int, gpa decimal(3, 2))
+PREHOOK: Output: default@tab1_n2
+POSTHOOK: query: create table tab1_n2 (name varchar(50), age int, gpa decimal(3, 2))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tab1
-PREHOOK: query: insert into table tab1 select * from sample_06 where gpa = 3.00
+POSTHOOK: Output: default@tab1_n2
+PREHOOK: query: insert into table tab1_n2 select * from sample_06 where gpa = 3.00
 PREHOOK: type: QUERY
 PREHOOK: Input: default@sample_06
-PREHOOK: Output: default@tab1
+PREHOOK: Output: default@tab1_n2
-POSTHOOK: query: insert into table tab1 select * from sample_06 where gpa = 3.00
+POSTHOOK: query: insert into table tab1_n2 select * from sample_06 where gpa = 3.00
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@sample_06
-POSTHOOK: Output: default@tab1
+POSTHOOK: Output: default@tab1_n2
-POSTHOOK: Lineage: tab1.age SIMPLE [(sample_06)sample_06.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: tab1.gpa SIMPLE []
-POSTHOOK: Lineage: tab1.name SIMPLE [(sample_06)sample_06.FieldSchema(name:name, type:varchar(50), comment:null), ]
-PREHOOK: query: select * from tab1
+POSTHOOK: Lineage: tab1_n2.age SIMPLE [(sample_06)sample_06.FieldSchema(name:age, type:int, comment:null), ]
+POSTHOOK: Lineage: tab1_n2.gpa SIMPLE []
+POSTHOOK: Lineage: tab1_n2.name SIMPLE [(sample_06)sample_06.FieldSchema(name:name, type:varchar(50), comment:null), ]
+PREHOOK: query: select * from tab1_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab1
+PREHOOK: Input: default@tab1_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from tab1
+POSTHOOK: query: select * from tab1_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab1
+POSTHOOK: Input: default@tab1_n2
#### A masked pattern was here ####
 aaa 35 3.00
 bbb 32 3.00
diff --git a/ql/src/test/results/clientpositive/insert_overwrite_directory.q.out b/ql/src/test/results/clientpositive/insert_overwrite_directory.q.out
index 40441a0c93..94483de590 100644
--- a/ql/src/test/results/clientpositive/insert_overwrite_directory.q.out
+++ b/ql/src/test/results/clientpositive/insert_overwrite_directory.q.out
@@ -1022,37 +1022,37 @@ POSTHOOK: Input: default@src
 400:val_400
 200:val_200
 97:val_97
-PREHOOK: query: create table array_table (a array<string>, b array<string>)
+PREHOOK: query: create table array_table_n1 (a array<string>, b array<string>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ','
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@array_table
-POSTHOOK: query: create table array_table (a array<string>, b array<string>)
+PREHOOK: Output: default@array_table_n1
+POSTHOOK: query: create table array_table_n1 (a array<string>, b array<string>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ','
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@array_table
-PREHOOK: query: load data local inpath "../../data/files/array_table.txt" overwrite into table array_table
+POSTHOOK: Output: default@array_table_n1
+PREHOOK: query: load data local inpath "../../data/files/array_table.txt" overwrite into table array_table_n1
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@array_table
-POSTHOOK: query: load data local inpath "../../data/files/array_table.txt" overwrite into table array_table
+PREHOOK: Output: default@array_table_n1
+POSTHOOK: query: load data local inpath "../../data/files/array_table.txt" overwrite into table array_table_n1
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@array_table
+POSTHOOK: Output: default@array_table_n1
 PREHOOK: query: insert overwrite directory '../../data/files/array_table_1'
-select * from array_table
+select * from array_table_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@array_table
+PREHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/array_table_1'
-select * from array_table
+select * from array_table_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@array_table
+POSTHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 a1a2a3b1b2b3b4
 a21a22a23b21b22b23b24
@@ -1060,17 +1060,17 @@ PREHOOK: query: insert overwrite directory '../../data/files/array_table_2'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select * from array_table
+select * from array_table_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@array_table
+PREHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/array_table_2'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select * from array_table
+select * from array_table_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@array_table
+POSTHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 a1#a2#a3:b1#b2#b3#b4
 a21#a22#a23:b21#b22#b23#b24
@@ -1078,21 +1078,21 @@ PREHOOK: query: insert overwrite directory '../../data/files/array_table_2_withf
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select b,a from array_table
+select b,a from array_table_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@array_table
+PREHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/array_table_2_withfields'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
-select b,a from array_table
+select b,a from array_table_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@array_table
+POSTHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 b1#b2#b3#b4:a1#a2#a3
 b21#b22#b23#b24:a21#a22#a23
-PREHOOK: query: create table map_table (foo STRING , bar MAP<STRING, STRING>)
+PREHOOK: query: create table map_table_n2 (foo STRING , bar MAP<STRING, STRING>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ','
@@ -1100,8 +1100,8 @@ MAP KEYS TERMINATED BY ':'
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@map_table
-POSTHOOK: query: create table map_table (foo STRING , bar MAP<STRING, STRING>)
+PREHOOK: Output: default@map_table_n2
+POSTHOOK: query: create table map_table_n2 (foo STRING , bar MAP<STRING, STRING>)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '\t'
 COLLECTION ITEMS TERMINATED BY ','
@@ -1109,24 +1109,24 @@ MAP KEYS TERMINATED BY ':'
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@map_table
-PREHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table
+POSTHOOK: Output: default@map_table_n2
+PREHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table_n2
 PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@map_table
-POSTHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table
+PREHOOK: Output: default@map_table_n2
+POSTHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table_n2
 POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@map_table
+POSTHOOK: Output: default@map_table_n2
 PREHOOK: query: insert overwrite directory '../../data/files/map_table_1'
-select * from map_table
+select * from map_table_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@map_table
+PREHOOK: Input: default@map_table_n2
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/map_table_1'
-select * from map_table
+select * from map_table_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@map_table
+POSTHOOK: Input: default@map_table_n2
#### A masked pattern was here ####
 foo1k1v1k2v2k3v3
 foo2k21v21k22v22k31v31
@@ -1135,18 +1135,18 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select * from map_table
+select * from map_table_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@map_table
+PREHOOK: Input: default@map_table_n2
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/map_table_2'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select * from map_table
+select * from map_table_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@map_table
+POSTHOOK: Input: default@map_table_n2
#### A masked pattern was here ####
 foo1:k1=v1#k2=v2#k3=v3
 foo2:k21=v21#k22=v22#k31=v31
@@ -1155,34 +1155,34 @@ ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select bar,foo from map_table
+select bar,foo from map_table_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@map_table
+PREHOOK: Input: default@map_table_n2
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/map_table_2_withfields'
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY ':'
 COLLECTION ITEMS TERMINATED BY '#'
 MAP KEYS TERMINATED BY '='
-select bar,foo from map_table
+select bar,foo from map_table_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@map_table
+POSTHOOK: Input: default@map_table_n2
#### A masked pattern was here ####
 k1=v1#k2=v2#k3=v3:foo1
 k21=v21#k22=v22#k31=v31:foo2
 PREHOOK: query: insert overwrite directory '../../data/files/array_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from array_table
+select * from array_table_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@array_table
+PREHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/array_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from array_table
+select * from array_table_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@array_table
+POSTHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 ["a1","a2","a3"]["b1","b2","b3","b4"]
 ["a21","a22","a23"]["b21","b22","b23","b24"]
@@ -1192,9 +1192,9 @@ WITH SERDEPROPERTIES (
 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '(\"|\\[|\\])', 'field.delim'=',',
 'serialization.null.format'='-NA-', 'collection.delim'='#') STORED AS TEXTFILE
-select a, null, b from array_table
+select a, null, b from array_table_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@array_table
+PREHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/array_table_4'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
@@ -1202,25 +1202,25 @@ WITH SERDEPROPERTIES (
 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '(\"|\\[|\\])', 'field.delim'=',',
 'serialization.null.format'='-NA-', 'collection.delim'='#') STORED AS TEXTFILE
-select a, null, b from array_table
+select a, null, b from array_table_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@array_table
+POSTHOOK: Input: default@array_table_n1
#### A masked pattern was here ####
 a1#a2#a3,-NA-,b1#b2#b3#b4
 a21#a22#a23,-NA-,b21#b22#b23#b24
 PREHOOK: query: insert overwrite directory '../../data/files/map_table_3'
 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
 STORED AS TEXTFILE
-select * from map_table
+select * from map_table_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@map_table
+PREHOOK: Input: default@map_table_n2
#### A masked pattern was here ####
 POSTHOOK: query: insert overwrite directory '../../data/files/map_table_3'
 ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe' STORED AS TEXTFILE -select * from map_table +select * from map_table_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_table +POSTHOOK: Input: default@map_table_n2 #### A masked pattern was here #### foo1{"k1":"v1","k2":"v2","k3":"v3"} foo2{"k21":"v21","k22":"v22","k31":"v31"} @@ -1230,9 +1230,9 @@ WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '(\"|\\[|\\])', 'field.delim'=':', 'serialization.null.format'='-NA-', 'collection.delim'='#', 'mapkey.delim'='%') STORED AS TEXTFILE -select foo, null, bar from map_table +select foo, null, bar from map_table_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@map_table +PREHOOK: Input: default@map_table_n2 #### A masked pattern was here #### POSTHOOK: query: insert overwrite directory '../../data/files/map_table_4' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' @@ -1240,9 +1240,9 @@ WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '(\"|\\[|\\])', 'field.delim'=':', 'serialization.null.format'='-NA-', 'collection.delim'='#', 'mapkey.delim'='%') STORED AS TEXTFILE -select foo, null, bar from map_table +select foo, null, bar from map_table_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_table +POSTHOOK: Input: default@map_table_n2 #### A masked pattern was here #### foo1:-NA-:k1%v1#k2%v2#k3%v3 foo2:-NA-:k21%v21#k22%v22#k31%v31 @@ -1794,20 +1794,20 @@ POSTHOOK: query: drop table rctable POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@rctable POSTHOOK: Output: default@rctable -PREHOOK: query: drop table array_table +PREHOOK: query: drop table array_table_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@array_table -PREHOOK: Output: default@array_table -POSTHOOK: query: drop table array_table +PREHOOK: Input: default@array_table_n1 +PREHOOK: Output: default@array_table_n1 +POSTHOOK: query: drop table array_table_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@array_table -POSTHOOK: Output: default@array_table -PREHOOK: query: drop table map_table +POSTHOOK: Input: default@array_table_n1 +POSTHOOK: Output: default@array_table_n1 +PREHOOK: query: drop table map_table_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@map_table -PREHOOK: Output: default@map_table -POSTHOOK: query: drop table map_table +PREHOOK: Input: default@map_table_n2 +PREHOOK: Output: default@map_table_n2 +POSTHOOK: query: drop table map_table_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@map_table -POSTHOOK: Output: default@map_table +POSTHOOK: Input: default@map_table_n2 +POSTHOOK: Output: default@map_table_n2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/insert_overwrite_directory2.q.out b/ql/src/test/results/clientpositive/insert_overwrite_directory2.q.out index c3232e7ea3..e37c665e04 100644 --- a/ql/src/test/results/clientpositive/insert_overwrite_directory2.q.out +++ b/ql/src/test/results/clientpositive/insert_overwrite_directory2.q.out @@ -2,12 +2,12 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@result +PREHOOK: Output: default@result_n0 #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@result +POSTHOOK: Output: default@result_n0 #### A masked pattern was here #### select key from src group by key 
PREHOOK: type: QUERY @@ -17,13 +17,13 @@ select key from src group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -PREHOOK: query: select count(*) from result +PREHOOK: query: select count(*) from result_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@result +PREHOOK: Input: default@result_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from result +POSTHOOK: query: select count(*) from result_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@result +POSTHOOK: Input: default@result_n0 #### A masked pattern was here #### 309 #### A masked pattern was here #### @@ -35,20 +35,20 @@ select key from src group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -PREHOOK: query: select count(*) from result +PREHOOK: query: select count(*) from result_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@result +PREHOOK: Input: default@result_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from result +POSTHOOK: query: select count(*) from result_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@result +POSTHOOK: Input: default@result_n0 #### A masked pattern was here #### 309 -PREHOOK: query: drop table result +PREHOOK: query: drop table result_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@result -PREHOOK: Output: default@result -POSTHOOK: query: drop table result +PREHOOK: Input: default@result_n0 +PREHOOK: Output: default@result_n0 +POSTHOOK: query: drop table result_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@result -POSTHOOK: Output: default@result +POSTHOOK: Input: default@result_n0 +POSTHOOK: Output: default@result_n0 diff --git a/ql/src/test/results/clientpositive/insert_overwrite_local_directory_1.q.out b/ql/src/test/results/clientpositive/insert_overwrite_local_directory_1.q.out index 9a4e861f9a..6ddd85b154 100644 --- a/ql/src/test/results/clientpositive/insert_overwrite_local_directory_1.q.out +++ b/ql/src/test/results/clientpositive/insert_overwrite_local_directory_1.q.out @@ -1022,37 +1022,37 @@ POSTHOOK: Input: default@src 400:val_400 200:val_200 97:val_97 -PREHOOK: query: create table array_table (a array, b array) +PREHOOK: query: create table array_table_n0 (a array, b array) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ',' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@array_table -POSTHOOK: query: create table array_table (a array, b array) +PREHOOK: Output: default@array_table_n0 +POSTHOOK: query: create table array_table_n0 (a array, b array) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ',' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@array_table -PREHOOK: query: load data local inpath "../../data/files/array_table.txt" overwrite into table array_table +POSTHOOK: Output: default@array_table_n0 +PREHOOK: query: load data local inpath "../../data/files/array_table.txt" overwrite into table array_table_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@array_table -POSTHOOK: query: load data local inpath "../../data/files/array_table.txt" overwrite into table array_table +PREHOOK: Output: default@array_table_n0 +POSTHOOK: query: load data local inpath "../../data/files/array_table.txt" overwrite into table array_table_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@array_table +POSTHOOK: Output: 
default@array_table_n0 PREHOOK: query: insert overwrite local directory '../../data/files/local_array_table_1' -select * from array_table +select * from array_table_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@array_table +PREHOOK: Input: default@array_table_n0 #### A masked pattern was here #### POSTHOOK: query: insert overwrite local directory '../../data/files/local_array_table_1' -select * from array_table +select * from array_table_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@array_table +POSTHOOK: Input: default@array_table_n0 #### A masked pattern was here #### a1a2a3b1b2b3b4 a21a22a23b21b22b23b24 @@ -1060,17 +1060,17 @@ PREHOOK: query: insert overwrite local directory '../../data/files/local_array_t ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' -select * from array_table +select * from array_table_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@array_table +PREHOOK: Input: default@array_table_n0 #### A masked pattern was here #### POSTHOOK: query: insert overwrite local directory '../../data/files/local_array_table_2' ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' -select * from array_table +select * from array_table_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@array_table +POSTHOOK: Input: default@array_table_n0 #### A masked pattern was here #### a1#a2#a3:b1#b2#b3#b4 a21#a22#a23:b21#b22#b23#b24 @@ -1078,21 +1078,21 @@ PREHOOK: query: insert overwrite local directory '../../data/files/local_array_t ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' -select b,a from array_table +select b,a from array_table_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@array_table +PREHOOK: Input: default@array_table_n0 #### A masked pattern was here #### POSTHOOK: query: insert overwrite local directory '../../data/files/local_array_table_2_withfields' ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' -select b,a from array_table +select b,a from array_table_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@array_table +POSTHOOK: Input: default@array_table_n0 #### A masked pattern was here #### b1#b2#b3#b4:a1#a2#a3 b21#b22#b23#b24:a21#a22#a23 -PREHOOK: query: create table map_table (foo STRING , bar MAP) +PREHOOK: query: create table map_table_n1 (foo STRING , bar MAP) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ',' @@ -1100,8 +1100,8 @@ MAP KEYS TERMINATED BY ':' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@map_table -POSTHOOK: query: create table map_table (foo STRING , bar MAP) +PREHOOK: Output: default@map_table_n1 +POSTHOOK: query: create table map_table_n1 (foo STRING , bar MAP) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ',' @@ -1109,24 +1109,24 @@ MAP KEYS TERMINATED BY ':' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@map_table -PREHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table +POSTHOOK: Output: default@map_table_n1 +PREHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@map_table -POSTHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table +PREHOOK: Output: default@map_table_n1 +POSTHOOK: query: load data local inpath 
"../../data/files/map_table.txt" overwrite into table map_table_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@map_table +POSTHOOK: Output: default@map_table_n1 PREHOOK: query: insert overwrite local directory '../../data/files/local_map_table_1' -select * from map_table +select * from map_table_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@map_table +PREHOOK: Input: default@map_table_n1 #### A masked pattern was here #### POSTHOOK: query: insert overwrite local directory '../../data/files/local_map_table_1' -select * from map_table +select * from map_table_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_table +POSTHOOK: Input: default@map_table_n1 #### A masked pattern was here #### foo1k1v1k2v2k3v3 foo2k21v21k22v22k31v31 @@ -1135,18 +1135,18 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' MAP KEYS TERMINATED BY '=' -select * from map_table +select * from map_table_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@map_table +PREHOOK: Input: default@map_table_n1 #### A masked pattern was here #### POSTHOOK: query: insert overwrite local directory '../../data/files/local_map_table_2' ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' MAP KEYS TERMINATED BY '=' -select * from map_table +select * from map_table_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_table +POSTHOOK: Input: default@map_table_n1 #### A masked pattern was here #### foo1:k1=v1#k2=v2#k3=v3 foo2:k21=v21#k22=v22#k31=v31 @@ -1155,50 +1155,50 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' MAP KEYS TERMINATED BY '=' -select bar,foo from map_table +select bar,foo from map_table_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@map_table +PREHOOK: Input: default@map_table_n1 #### A masked pattern was here #### POSTHOOK: query: insert overwrite local directory '../../data/files/local_map_table_2_withfields' ROW FORMAT DELIMITED FIELDS TERMINATED BY ':' COLLECTION ITEMS TERMINATED BY '#' MAP KEYS TERMINATED BY '=' -select bar,foo from map_table +select bar,foo from map_table_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_table +POSTHOOK: Input: default@map_table_n1 #### A masked pattern was here #### k1=v1#k2=v2#k3=v3:foo1 k21=v21#k22=v22#k31=v31:foo2 PREHOOK: query: insert overwrite local directory '../../data/files/local_array_table_3' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe' STORED AS TEXTFILE -select * from array_table +select * from array_table_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@array_table +PREHOOK: Input: default@array_table_n0 #### A masked pattern was here #### POSTHOOK: query: insert overwrite local directory '../../data/files/local_array_table_3' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe' STORED AS TEXTFILE -select * from array_table +select * from array_table_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@array_table +POSTHOOK: Input: default@array_table_n0 #### A masked pattern was here #### ["a1","a2","a3"]["b1","b2","b3","b4"] ["a21","a22","a23"]["b21","b22","b23","b24"] PREHOOK: query: insert overwrite local directory '../../data/files/local_map_table_3' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe' STORED AS TEXTFILE -select * from map_table +select * from map_table_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@map_table +PREHOOK: Input: default@map_table_n1 #### A masked pattern was here #### POSTHOOK: query: insert overwrite local directory 
'../../data/files/local_map_table_3' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe' STORED AS TEXTFILE -select * from map_table +select * from map_table_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_table +POSTHOOK: Input: default@map_table_n1 #### A masked pattern was here #### foo1{"k1":"v1","k2":"v2","k3":"v3"} foo2{"k21":"v21","k22":"v22","k31":"v31"} @@ -1750,20 +1750,20 @@ POSTHOOK: query: drop table local_rctable POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@local_rctable POSTHOOK: Output: default@local_rctable -PREHOOK: query: drop table array_table +PREHOOK: query: drop table array_table_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@array_table -PREHOOK: Output: default@array_table -POSTHOOK: query: drop table array_table +PREHOOK: Input: default@array_table_n0 +PREHOOK: Output: default@array_table_n0 +POSTHOOK: query: drop table array_table_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@array_table -POSTHOOK: Output: default@array_table -PREHOOK: query: drop table map_table +POSTHOOK: Input: default@array_table_n0 +POSTHOOK: Output: default@array_table_n0 +PREHOOK: query: drop table map_table_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@map_table -PREHOOK: Output: default@map_table -POSTHOOK: query: drop table map_table +PREHOOK: Input: default@map_table_n1 +PREHOOK: Output: default@map_table_n1 +POSTHOOK: query: drop table map_table_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@map_table -POSTHOOK: Output: default@map_table +POSTHOOK: Input: default@map_table_n1 +POSTHOOK: Output: default@map_table_n1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/insertexternal1.q.out b/ql/src/test/results/clientpositive/insertexternal1.q.out index c3c7a7b1d8..d154ac207e 100644 --- a/ql/src/test/results/clientpositive/insertexternal1.q.out +++ b/ql/src/test/results/clientpositive/insertexternal1.q.out @@ -1,39 +1,39 @@ -PREHOOK: query: create table texternal(key string, val string) partitioned by (insertdate string) +PREHOOK: query: create table texternal_n0(key string, val string) partitioned by (insertdate string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@texternal -POSTHOOK: query: create table texternal(key string, val string) partitioned by (insertdate string) +PREHOOK: Output: default@texternal_n0 +POSTHOOK: query: create table texternal_n0(key string, val string) partitioned by (insertdate string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@texternal +POSTHOOK: Output: default@texternal_n0 #### A masked pattern was here #### PREHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### -PREHOOK: Output: default@texternal +PREHOOK: Output: default@texternal_n0 #### A masked pattern was here #### POSTHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### -POSTHOOK: Output: default@texternal -POSTHOOK: Output: default@texternal@insertdate=2008-01-01 -PREHOOK: query: from src insert overwrite table texternal partition (insertdate='2008-01-01') select * +POSTHOOK: Output: default@texternal_n0 +POSTHOOK: Output: default@texternal_n0@insertdate=2008-01-01 +PREHOOK: query: from src insert overwrite table texternal_n0 partition (insertdate='2008-01-01') select * PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@texternal@insertdate=2008-01-01 -POSTHOOK: query: from src insert overwrite table texternal partition (insertdate='2008-01-01') select * +PREHOOK: 
Output: default@texternal_n0@insertdate=2008-01-01 +POSTHOOK: query: from src insert overwrite table texternal_n0 partition (insertdate='2008-01-01') select * POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@texternal@insertdate=2008-01-01 -POSTHOOK: Lineage: texternal PARTITION(insertdate=2008-01-01).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: texternal PARTITION(insertdate=2008-01-01).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from texternal where insertdate='2008-01-01' +POSTHOOK: Output: default@texternal_n0@insertdate=2008-01-01 +POSTHOOK: Lineage: texternal_n0 PARTITION(insertdate=2008-01-01).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: texternal_n0 PARTITION(insertdate=2008-01-01).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from texternal_n0 where insertdate='2008-01-01' PREHOOK: type: QUERY -PREHOOK: Input: default@texternal -PREHOOK: Input: default@texternal@insertdate=2008-01-01 +PREHOOK: Input: default@texternal_n0 +PREHOOK: Input: default@texternal_n0@insertdate=2008-01-01 #### A masked pattern was here #### -POSTHOOK: query: select * from texternal where insertdate='2008-01-01' +POSTHOOK: query: select * from texternal_n0 where insertdate='2008-01-01' POSTHOOK: type: QUERY -POSTHOOK: Input: default@texternal -POSTHOOK: Input: default@texternal@insertdate=2008-01-01 +POSTHOOK: Input: default@texternal_n0 +POSTHOOK: Input: default@texternal_n0@insertdate=2008-01-01 #### A masked pattern was here #### 238 val_238 2008-01-01 86 val_86 2008-01-01 diff --git a/ql/src/test/results/clientpositive/interval_alt.q.out b/ql/src/test/results/clientpositive/interval_alt.q.out index add690eee7..a8f7292f22 100644 --- a/ql/src/test/results/clientpositive/interval_alt.q.out +++ b/ql/src/test/results/clientpositive/interval_alt.q.out @@ -94,30 +94,30 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 2011-12-02 00:00:00 -PREHOOK: query: create table t (dt int) +PREHOOK: query: create table t_n18 (dt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (dt int) +PREHOOK: Output: default@t_n18 +POSTHOOK: query: create table t_n18 (dt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1),(2) +POSTHOOK: Output: default@t_n18 +PREHOOK: query: insert into t_n18 values (1),(2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1),(2) +PREHOOK: Output: default@t_n18 +POSTHOOK: query: insert into t_n18 values (1),(2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.dt SCRIPT [] +POSTHOOK: Output: default@t_n18 +POSTHOOK: Lineage: t_n18.dt SCRIPT [] PREHOOK: query: explain select date '2012-01-01' + interval (-dt*dt) day, date '2012-01-01' - interval (-dt*dt) day, date '2012-01-01' + 1 day + '2' days, date '2012-01-01' + interval (dt || '-1') year to month - from t + from t_n18 PREHOOK: type: QUERY POSTHOOK: query: explain select @@ -125,7 +125,7 @@ select date '2012-01-01' - interval (-dt*dt) day, date '2012-01-01' + 1 day + '2' days, date '2012-01-01' + interval (dt || '-1') year to month - 
from t + from t_n18 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -136,7 +136,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t + alias: t_n18 Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (DATE'2012-01-01' + IntervalDayLiteralProcessor(((- dt) * dt))) (type: timestamp), (DATE'2012-01-01' - IntervalDayLiteralProcessor(((- dt) * dt))) (type: timestamp), TIMESTAMP'2012-01-04 00:00:00.0' (type: timestamp), (DATE'2012-01-01' + IntervalYearMonthLiteralProcessor(concat(dt, '-1'))) (type: date) @@ -162,18 +162,18 @@ PREHOOK: query: select date '2012-01-01' - interval (-dt*dt) day, date '2012-01-01' + 1 day + '2' days, date '2012-01-01' + interval (dt || '-1') year to month - from t + from t_n18 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n18 #### A masked pattern was here #### POSTHOOK: query: select date '2012-01-01' + interval (-dt*dt) day, date '2012-01-01' - interval (-dt*dt) day, date '2012-01-01' + 1 day + '2' days, date '2012-01-01' + interval (dt || '-1') year to month - from t + from t_n18 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n18 #### A masked pattern was here #### 2011-12-31 00:00:00 2012-01-02 00:00:00 2012-01-04 00:00:00 2013-02-01 2011-12-28 00:00:00 2012-01-05 00:00:00 2012-01-04 00:00:00 2014-02-01 diff --git a/ql/src/test/results/clientpositive/interval_arithmetic.q.out b/ql/src/test/results/clientpositive/interval_arithmetic.q.out index 0e5506a465..7cb7270c29 100644 --- a/ql/src/test/results/clientpositive/interval_arithmetic.q.out +++ b/ql/src/test/results/clientpositive/interval_arithmetic.q.out @@ -1,23 +1,23 @@ -PREHOOK: query: create table interval_arithmetic_1 (dateval date, tsval timestamp) +PREHOOK: query: create table interval_arithmetic_1_n0 (dateval date, tsval timestamp) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@interval_arithmetic_1 -POSTHOOK: query: create table interval_arithmetic_1 (dateval date, tsval timestamp) +PREHOOK: Output: default@interval_arithmetic_1_n0 +POSTHOOK: query: create table interval_arithmetic_1_n0 (dateval date, tsval timestamp) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@interval_arithmetic_1 -PREHOOK: query: insert overwrite table interval_arithmetic_1 +POSTHOOK: Output: default@interval_arithmetic_1_n0 +PREHOOK: query: insert overwrite table interval_arithmetic_1_n0 select cast(ctimestamp1 as date), ctimestamp1 from alltypesorc PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc -PREHOOK: Output: default@interval_arithmetic_1 -POSTHOOK: query: insert overwrite table interval_arithmetic_1 +PREHOOK: Output: default@interval_arithmetic_1_n0 +POSTHOOK: query: insert overwrite table interval_arithmetic_1_n0 select cast(ctimestamp1 as date), ctimestamp1 from alltypesorc POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: default@interval_arithmetic_1 -POSTHOOK: Lineage: interval_arithmetic_1.dateval EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: interval_arithmetic_1.tsval SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +POSTHOOK: Output: default@interval_arithmetic_1_n0 +POSTHOOK: Lineage: interval_arithmetic_1_n0.dateval EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +POSTHOOK: 
Lineage: interval_arithmetic_1_n0.tsval SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] PREHOOK: query: explain select dateval, @@ -27,7 +27,7 @@ select dateval + interval '-2-2' year to month, - interval '2-2' year to month + dateval, interval '2-2' year to month + dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY POSTHOOK: query: explain @@ -39,7 +39,7 @@ select dateval + interval '-2-2' year to month, - interval '2-2' year to month + dateval, interval '2-2' year to month + dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -51,7 +51,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: dateval (type: date), (dateval - INTERVAL'2-2') (type: date), (dateval - INTERVAL'-2-2') (type: date), (dateval + INTERVAL'2-2') (type: date), (dateval + INTERVAL'-2-2') (type: date), (INTERVAL'-2-2' + dateval) (type: date), (INTERVAL'2-2' + dateval) (type: date) @@ -83,10 +83,10 @@ PREHOOK: query: select dateval + interval '-2-2' year to month, - interval '2-2' year to month + dateval, interval '2-2' year to month + dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### POSTHOOK: query: select dateval, @@ -96,10 +96,10 @@ POSTHOOK: query: select dateval + interval '-2-2' year to month, - interval '2-2' year to month + dateval, interval '2-2' year to month + dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 1969-12-31 1967-10-31 1972-02-29 1972-02-29 1967-10-31 1967-10-31 1972-02-29 NULL NULL NULL NULL NULL NULL NULL @@ -109,7 +109,7 @@ select dateval - date '1999-06-07', date '1999-06-07' - dateval, dateval - dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY POSTHOOK: query: explain @@ -118,7 +118,7 @@ select dateval - date '1999-06-07', date '1999-06-07' - dateval, dateval - dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -130,7 +130,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: dateval (type: date), (dateval - DATE'1999-06-07') (type: interval_day_time), (DATE'1999-06-07' - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time) @@ -159,20 +159,20 @@ PREHOOK: query: select dateval - date '1999-06-07', date '1999-06-07' - dateval, dateval - dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### POSTHOOK: query: select dateval, dateval - date '1999-06-07', date '1999-06-07' - dateval, dateval - dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 1969-12-31 -10749 23:00:00.000000000 10749 23:00:00.000000000 0 00:00:00.000000000 NULL NULL NULL NULL @@ -185,7 +185,7 @@ select tsval + interval '-2-2' year to month, - interval '2-2' year to month + tsval, interval '2-2' year to month + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY POSTHOOK: query: explain @@ -197,7 +197,7 @@ select tsval + interval '-2-2' year to month, - interval '2-2' year to month + tsval, interval '2-2' year to month + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -209,7 +209,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: tsval (type: timestamp), (tsval - INTERVAL'2-2') (type: timestamp), (tsval - INTERVAL'-2-2') (type: timestamp), (tsval + INTERVAL'2-2') (type: timestamp), (tsval + INTERVAL'-2-2') (type: timestamp), (INTERVAL'-2-2' + tsval) (type: timestamp), (INTERVAL'2-2' + tsval) (type: timestamp) @@ -241,10 +241,10 @@ PREHOOK: query: select tsval + interval '-2-2' year to month, - interval '2-2' year to month + tsval, interval '2-2' year to month + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### POSTHOOK: query: select tsval, @@ -254,10 +254,10 @@ POSTHOOK: query: select tsval + interval '-2-2' year to month, - interval '2-2' year to month + tsval, interval '2-2' year to month + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 1969-12-31 15:59:46.674 1967-10-31 15:59:46.674 1972-02-29 15:59:46.674 1972-02-29 15:59:46.674 1967-10-31 15:59:46.674 1967-10-31 15:59:46.674 1972-02-29 15:59:46.674 NULL NULL NULL NULL NULL NULL NULL @@ -265,14 +265,14 @@ PREHOOK: query: explain select interval '2-2' year to month + interval '3-3' year to month, interval '2-2' year to month - interval '3-3' year to month -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY POSTHOOK: query: explain select interval '2-2' year to month + interval '3-3' year to month, interval '2-2' year to month - interval '3-3' year to month -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -284,7 +284,7 @@ STAGE PLANS: limit: 2 Processor Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: INTERVAL'5-5' (type: interval_year_month), INTERVAL'-1-1' (type: interval_year_month) @@ -298,18 +298,18 @@ STAGE PLANS: PREHOOK: query: select interval '2-2' year to month + interval '3-3' year to month, interval '2-2' year to month - interval '3-3' year to month -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### POSTHOOK: query: 
select interval '2-2' year to month + interval '3-3' year to month, interval '2-2' year to month - interval '3-3' year to month -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 5-5 -1-1 5-5 -1-1 @@ -322,7 +322,7 @@ select dateval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + dateval, interval '99 11:22:33.123456789' day to second + dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY POSTHOOK: query: explain @@ -334,7 +334,7 @@ select dateval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + dateval, interval '99 11:22:33.123456789' day to second + dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -346,7 +346,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: dateval (type: date), (dateval - INTERVAL'99 11:22:33.123456789') (type: timestamp), (dateval - INTERVAL'-99 11:22:33.123456789') (type: timestamp), (dateval + INTERVAL'99 11:22:33.123456789') (type: timestamp), (dateval + INTERVAL'-99 11:22:33.123456789') (type: timestamp), (INTERVAL'-99 11:22:33.123456789' + dateval) (type: timestamp), (INTERVAL'99 11:22:33.123456789' + dateval) (type: timestamp) @@ -378,10 +378,10 @@ PREHOOK: query: select dateval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + dateval, interval '99 11:22:33.123456789' day to second + dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### POSTHOOK: query: select dateval, @@ -391,10 +391,10 @@ POSTHOOK: query: select dateval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + dateval, interval '99 11:22:33.123456789' day to second + dateval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 1969-12-31 1969-09-22 13:37:26.876543211 1970-04-09 11:22:33.123456789 1970-04-09 11:22:33.123456789 1969-09-22 13:37:26.876543211 1969-09-22 13:37:26.876543211 1970-04-09 11:22:33.123456789 NULL NULL NULL NULL NULL NULL NULL @@ -405,7 +405,7 @@ select dateval - tsval, tsval - dateval, tsval - tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY POSTHOOK: query: explain @@ -415,7 +415,7 @@ select dateval - tsval, tsval - dateval, tsval - tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -427,7 +427,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: dateval (type: date), tsval (type: timestamp), (dateval - tsval) (type: interval_day_time), (tsval - dateval) (type: 
interval_day_time), (tsval - tsval) (type: interval_day_time) @@ -457,10 +457,10 @@ PREHOOK: query: select dateval - tsval, tsval - dateval, tsval - tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### POSTHOOK: query: select dateval, @@ -468,10 +468,10 @@ POSTHOOK: query: select dateval - tsval, tsval - dateval, tsval - tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 1969-12-31 1969-12-31 15:59:46.674 -0 15:59:47.674000000 0 15:59:47.674000000 0 00:00:00.000000000 NULL NULL NULL NULL NULL @@ -484,7 +484,7 @@ select tsval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + tsval, interval '99 11:22:33.123456789' day to second + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY POSTHOOK: query: explain @@ -496,7 +496,7 @@ select tsval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + tsval, interval '99 11:22:33.123456789' day to second + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -508,7 +508,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: tsval (type: timestamp), (tsval - INTERVAL'99 11:22:33.123456789') (type: timestamp), (tsval - INTERVAL'-99 11:22:33.123456789') (type: timestamp), (tsval + INTERVAL'99 11:22:33.123456789') (type: timestamp), (tsval + INTERVAL'-99 11:22:33.123456789') (type: timestamp), (INTERVAL'-99 11:22:33.123456789' + tsval) (type: timestamp), (INTERVAL'99 11:22:33.123456789' + tsval) (type: timestamp) @@ -540,10 +540,10 @@ PREHOOK: query: select tsval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + tsval, interval '99 11:22:33.123456789' day to second + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### POSTHOOK: query: select tsval, @@ -553,10 +553,10 @@ POSTHOOK: query: select tsval + interval '-99 11:22:33.123456789' day to second, -interval '99 11:22:33.123456789' day to second + tsval, interval '99 11:22:33.123456789' day to second + tsval -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 1969-12-31 15:59:46.674 1969-09-23 05:37:13.550543211 1970-04-10 03:22:19.797456789 1970-04-10 03:22:19.797456789 1969-09-23 05:37:13.550543211 1969-09-23 05:37:13.550543211 1970-04-10 03:22:19.797456789 NULL NULL NULL NULL NULL NULL NULL @@ -564,14 +564,14 @@ PREHOOK: query: explain select interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second, interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second -from interval_arithmetic_1 +from interval_arithmetic_1_n0 
limit 2 PREHOOK: type: QUERY POSTHOOK: query: explain select interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second, interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -583,7 +583,7 @@ STAGE PLANS: limit: 2 Processor Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: INTERVAL'109 20:30:40.246913578' (type: interval_day_time), INTERVAL'89 02:14:26.000000000' (type: interval_day_time) @@ -597,26 +597,26 @@ STAGE PLANS: PREHOOK: query: select interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second, interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### POSTHOOK: query: select interval '99 11:22:33.123456789' day to second + interval '10 9:8:7.123456789' day to second, interval '99 11:22:33.123456789' day to second - interval '10 9:8:7.123456789' day to second -from interval_arithmetic_1 +from interval_arithmetic_1_n0 limit 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 109 20:30:40.246913578 89 02:14:26.000000000 109 20:30:40.246913578 89 02:14:26.000000000 PREHOOK: query: explain -select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1 limit 1 +select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1_n0 limit 1 PREHOOK: type: QUERY POSTHOOK: query: explain -select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1 limit 1 +select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1_n0 limit 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -627,7 +627,7 @@ STAGE PLANS: limit: 1 Processor Tree: TableScan - alias: interval_arithmetic_1 + alias: interval_arithmetic_1_n0 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: TIMESTAMP'2016-11-11 03:04:00.0' (type: timestamp) @@ -638,20 +638,20 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1 limit 1 +PREHOOK: query: select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1_n0 limit 1 PREHOOK: type: QUERY -PREHOOK: Input: default@interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 #### A masked 
pattern was here #### -POSTHOOK: query: select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1 limit 1 +POSTHOOK: query: select date '2016-11-08' + interval '1 2:02:00' day to second + interval '2' day + interval '1' hour + interval '1' minute + interval '60' second from interval_arithmetic_1_n0 limit 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 #### A masked pattern was here #### 2016-11-11 03:04:00 -PREHOOK: query: drop table interval_arithmetic_1 +PREHOOK: query: drop table interval_arithmetic_1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@interval_arithmetic_1 -PREHOOK: Output: default@interval_arithmetic_1 -POSTHOOK: query: drop table interval_arithmetic_1 +PREHOOK: Input: default@interval_arithmetic_1_n0 +PREHOOK: Output: default@interval_arithmetic_1_n0 +POSTHOOK: query: drop table interval_arithmetic_1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@interval_arithmetic_1 -POSTHOOK: Output: default@interval_arithmetic_1 +POSTHOOK: Input: default@interval_arithmetic_1_n0 +POSTHOOK: Output: default@interval_arithmetic_1_n0 diff --git a/ql/src/test/results/clientpositive/join14.q.out b/ql/src/test/results/clientpositive/join14.q.out index 5aaf1bcbd5..1c69d40821 100644 --- a/ql/src/test/results/clientpositive/join14.q.out +++ b/ql/src/test/results/clientpositive/join14.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n138(c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n138 +POSTHOOK: query: CREATE TABLE dest1_n138(c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n138 PREHOOK: query: EXPLAIN FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value +INSERT OVERWRITE TABLE dest1_n138 SELECT src.key, srcpart.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value +INSERT OVERWRITE TABLE dest1_n138 SELECT src.key, srcpart.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -75,7 +75,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n138 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: c1, c2 @@ -100,7 +100,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n138 Stage: Stage-2 Stats Work @@ -108,7 +108,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2 Column Types: int, string - Table: default.dest1 + Table: default.dest1_n138 Stage: Stage-3 Map Reduce @@ -134,30 +134,30 @@ STAGE PLANS: serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value +INSERT OVERWRITE TABLE dest1_n138 SELECT src.key, srcpart.value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n138 POSTHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 -INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value +INSERT OVERWRITE TABLE dest1_n138 SELECT src.key, srcpart.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.c2 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select dest1.* from dest1 +POSTHOOK: Output: default@dest1_n138 +POSTHOOK: Lineage: dest1_n138.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n138.c2 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select dest1_n138.* from dest1_n138 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n138 #### A masked pattern was here #### -POSTHOOK: query: select dest1.* from dest1 +POSTHOOK: query: select dest1_n138.* from dest1_n138 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n138 #### A masked pattern was here #### 103 val_103 103 val_103 diff --git a/ql/src/test/results/clientpositive/join17.q.out b/ql/src/test/results/clientpositive/join17.q.out index feb70dd1ec..6e5391f276 100644 --- a/ql/src/test/results/clientpositive/join17.q.out +++ b/ql/src/test/results/clientpositive/join17.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n101(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n101 +POSTHOOK: query: CREATE TABLE dest1_n101(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n101 PREHOOK: query: EXPLAIN EXTENDED FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n101 SELECT src1.*, src2.* PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n101 SELECT src1.*, src2.* POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -152,17 +152,17 @@ STAGE PLANS: columns.comments columns.types int:string:int:string #### A masked pattern was here #### - name default.dest1 + name 
default.dest1_n101 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key1, string value1, i32 key2, string value2} + serialization.ddl struct dest1_n101 { i32 key1, string value1, i32 key2, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n101 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -211,17 +211,17 @@ STAGE PLANS: columns.comments columns.types int:string:int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n101 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key1, string value1, i32 key2, string value2} + serialization.ddl struct dest1_n101 { i32 key1, string value1, i32 key2, string value2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n101 Stage: Stage-2 Stats Work @@ -230,7 +230,7 @@ STAGE PLANS: Column Stats Desc: Columns: key1, value1, key2, value2 Column Types: int, string, int, string - Table: default.dest1 + Table: default.dest1_n101 Is Table Level Stats: true Stage: Stage-3 @@ -304,26 +304,26 @@ STAGE PLANS: MultiFileSpray: false PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n101 SELECT src1.*, src2.* PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n101 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.* +INSERT OVERWRITE TABLE dest1_n101 SELECT src1.*, src2.* POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n101 +POSTHOOK: Lineage: dest1_n101.key1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n101.key2 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n101.value1 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n101.value2 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n101.* FROM dest1_n101 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n101 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n101.* FROM dest1_n101 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n101 #### A masked pattern was here #### 0 val_0 0 val_0 0 val_0 0 val_0 diff --git a/ql/src/test/results/clientpositive/join2.q.out b/ql/src/test/results/clientpositive/join2.q.out index 
b2067421ec..d4560da3b4 100644 --- a/ql/src/test/results/clientpositive/join2.q.out +++ b/ql/src/test/results/clientpositive/join2.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j2_n2(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j2 -POSTHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j2_n2 +POSTHOOK: query: CREATE TABLE dest_j2_n2(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j2 +POSTHOOK: Output: default@dest_j2_n2 PREHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest_j2_n2 SELECT src1.key, src3.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest_j2_n2 SELECT src1.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -116,7 +116,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j2 + name: default.dest_j2_n2 Stage: Stage-0 Move Operator @@ -126,31 +126,31 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j2 + name: default.dest_j2_n2 Stage: Stage-3 Stats Work Basic Stats Work: PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest_j2_n2 SELECT src1.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_j2 +PREHOOK: Output: default@dest_j2_n2 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest_j2_n2 SELECT src1.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_j2 -POSTHOOK: Lineage: dest_j2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest_j2.* FROM dest_j2 +POSTHOOK: Output: default@dest_j2_n2 +POSTHOOK: Lineage: dest_j2_n2.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j2_n2.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest_j2_n2.* FROM dest_j2_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j2 +PREHOOK: Input: default@dest_j2_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest_j2.* FROM dest_j2 +POSTHOOK: query: SELECT dest_j2_n2.* FROM dest_j2_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j2 +POSTHOOK: Input: default@dest_j2_n2 #### A 
masked pattern was here #### 0 val_0 0 val_0 diff --git a/ql/src/test/results/clientpositive/join25.q.out b/ql/src/test/results/clientpositive/join25.q.out index 88b7566f4d..94db4a4ba4 100644 --- a/ql/src/test/results/clientpositive/join25.q.out +++ b/ql/src/test/results/clientpositive/join25.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n18(key INT, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n18 +POSTHOOK: query: CREATE TABLE dest_j1_n18(key INT, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n18 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n18 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n18 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) POSTHOOK: type: QUERY @@ -79,7 +79,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n18 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) outputColumnNames: key, value, val2 @@ -106,7 +106,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n18 Stage: Stage-2 Stats Work @@ -114,7 +114,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value, val2 Column Types: int, string, string - Table: default.dest_j1 + Table: default.dest_j1_n18 Stage: Stage-3 Map Reduce @@ -139,30 +139,30 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n18 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n18 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n18 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 x +POSTHOOK: Output: default@dest_j1_n18 +POSTHOOK: Lineage: dest_j1_n18.key EXPRESSION 
[(src1)x.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n18.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n18.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n18 x PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n18 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 x +POSTHOOK: query: select * from dest_j1_n18 x POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n18 #### A masked pattern was here #### 128 val_128 128 val_128 diff --git a/ql/src/test/results/clientpositive/join26.q.out b/ql/src/test/results/clientpositive/join26.q.out index 0f9c55cee4..06b6d69812 100644 --- a/ql/src/test/results/clientpositive/join26.q.out +++ b/ql/src/test/results/clientpositive/join26.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n10(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n10 +POSTHOOK: query: CREATE TABLE dest_j1_n10(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n10 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n10 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n10 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11) @@ -124,17 +124,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n10 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n10 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n10 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -335,17 +335,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n10 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n10 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n10 Stage: Stage-2 Stats Work @@ -354,7 +354,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, 
value, val2 Column Types: string, string, string - Table: default.dest_j1 + Table: default.dest_j1_n10 Is Table Level Stats: true Stage: Stage-3 @@ -427,7 +427,7 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n10 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11) @@ -436,8 +436,8 @@ PREHOOK: Input: default@src PREHOOK: Input: default@src1 PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n10 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n10 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11) @@ -446,17 +446,17 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 x +POSTHOOK: Output: default@dest_j1_n10 +POSTHOOK: Lineage: dest_j1_n10.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n10.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n10.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n10 x PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 x +POSTHOOK: query: select * from dest_j1_n10 x POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n10 #### A masked pattern was here #### 128 val_128 val_128 128 val_128 val_128 diff --git a/ql/src/test/results/clientpositive/join27.q.out b/ql/src/test/results/clientpositive/join27.q.out index 50cdf87ae9..d21e232032 100644 --- a/ql/src/test/results/clientpositive/join27.q.out +++ b/ql/src/test/results/clientpositive/join27.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n2(key INT, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n2 +POSTHOOK: query: CREATE TABLE dest_j1_n2(key INT, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n2 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n2 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.value = y.value) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE 
dest_j1_n2 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.value = y.value) POSTHOOK: type: QUERY @@ -79,7 +79,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n2 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) outputColumnNames: key, value, val2 @@ -106,7 +106,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n2 Stage: Stage-2 Stats Work @@ -114,7 +114,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value, val2 Column Types: int, string, string - Table: default.dest_j1 + Table: default.dest_j1_n2 Stage: Stage-3 Map Reduce @@ -139,30 +139,30 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n2 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.value = y.value) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n2 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n2 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value FROM src1 x JOIN src y ON (x.value = y.value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: Output: default@dest_j1_n2 +POSTHOOK: Lineage: dest_j1_n2.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n2.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n2.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n2 #### A masked pattern was here #### 146 val_146 val_146 146 val_146 val_146 diff --git a/ql/src/test/results/clientpositive/join28.q.out b/ql/src/test/results/clientpositive/join28.q.out index ad65941d7c..e8098ab247 100644 --- a/ql/src/test/results/clientpositive/join28.q.out +++ b/ql/src/test/results/clientpositive/join28.q.out @@ -1,13 +1,13 @@ -PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n11(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE 
dest_j1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n11 +POSTHOOK: query: CREATE TABLE dest_j1_n11(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n11 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n11 SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 @@ -15,7 +15,7 @@ FROM JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n11 SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 @@ -115,7 +115,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n11 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -142,7 +142,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n11 Stage: Stage-2 Stats Work @@ -150,7 +150,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.dest_j1 + Table: default.dest_j1_n11 Stage: Stage-3 Map Reduce @@ -175,7 +175,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n11 SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 @@ -186,8 +186,8 @@ PREHOOK: Input: default@src PREHOOK: Input: default@src1 PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n11 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n11 SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 @@ -198,16 +198,16 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: Output: default@dest_j1_n11 +POSTHOOK: Lineage: dest_j1_n11.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n11.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n11 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n11 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n11 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n11 
#### A masked pattern was here #### 128 val_128 128 val_128 diff --git a/ql/src/test/results/clientpositive/join29.q.out b/ql/src/test/results/clientpositive/join29.q.out index b2429b623e..6afca35280 100644 --- a/ql/src/test/results/clientpositive/join29.q.out +++ b/ql/src/test/results/clientpositive/join29.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) +PREHOOK: query: CREATE TABLE dest_j1_n6(key STRING, cnt1 INT, cnt2 INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) +PREHOOK: Output: default@dest_j1_n6 +POSTHOOK: query: CREATE TABLE dest_j1_n6(key STRING, cnt1 INT, cnt2 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n6 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n6 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n6 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) @@ -108,7 +108,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n6 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int) outputColumnNames: key, cnt1, cnt2 @@ -135,7 +135,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n6 Stage: Stage-3 Stats Work @@ -143,7 +143,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt1, cnt2 Column Types: string, int, int - Table: default.dest_j1 + Table: default.dest_j1_n6 Stage: Stage-4 Map Reduce @@ -205,7 +205,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n6 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int) outputColumnNames: key, cnt1, cnt2 @@ -261,7 +261,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n6 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int) outputColumnNames: key, cnt1, cnt2 @@ -314,32 +314,32 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n6 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN 
(select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n6 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n6 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.cnt1 EXPRESSION [(src1)x.null, ] -POSTHOOK: Lineage: dest_j1.cnt2 EXPRESSION [(src)y.null, ] -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: Output: default@dest_j1_n6 +POSTHOOK: Lineage: dest_j1_n6.cnt1 EXPRESSION [(src1)x.null, ] +POSTHOOK: Lineage: dest_j1_n6.cnt2 EXPRESSION [(src)y.null, ] +POSTHOOK: Lineage: dest_j1_n6.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n6 #### A masked pattern was here #### 128 1 3 146 1 2 diff --git a/ql/src/test/results/clientpositive/join3.q.out b/ql/src/test/results/clientpositive/join3.q.out index 5b5e5e9cfc..89ecd0ebd2 100644 --- a/ql/src/test/results/clientpositive/join3.q.out +++ b/ql/src/test/results/clientpositive/join3.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n42(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n42 +POSTHOOK: query: CREATE TABLE dest1_n42(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n42 PREHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n42 SELECT src1.key, src3.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n42 SELECT src1.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -92,7 +92,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n42 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -117,7 +117,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n42 Stage: Stage-2 Stats Work @@ -125,7 +125,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n42 Stage: Stage-3 Map Reduce @@ -151,24 +151,24 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n42 SELECT src1.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n42 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n42 SELECT src1.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n42 +POSTHOOK: Lineage: dest1_n42.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n42.value SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n42.* FROM dest1_n42 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n42 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n42.* FROM dest1_n42 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n42 #### A masked pattern was here #### 0 val_0 0 val_0 diff --git a/ql/src/test/results/clientpositive/join30.q.out b/ql/src/test/results/clientpositive/join30.q.out index eadd079f88..0f13aa6a03 100644 --- a/ql/src/test/results/clientpositive/join30.q.out +++ b/ql/src/test/results/clientpositive/join30.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) +PREHOOK: query: CREATE TABLE dest_j1_n0(key INT, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) +PREHOOK: Output: default@dest_j1_n0 +POSTHOOK: query: CREATE TABLE dest_j1_n0(key INT, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n0 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n0 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n0 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -99,7 +99,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n0 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: key, 
cnt @@ -124,7 +124,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n0 Stage: Stage-3 Stats Work @@ -132,7 +132,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: int, int - Table: default.dest_j1 + Table: default.dest_j1_n0 Stage: Stage-4 Map Reduce @@ -157,27 +157,27 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n0 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n0 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n0 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.cnt EXPRESSION [(src1)x.null, (src)y.null, ] -POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: Output: default@dest_j1_n0 +POSTHOOK: Lineage: dest_j1_n0.cnt EXPRESSION [(src1)x.null, (src)y.null, ] +POSTHOOK: Lineage: dest_j1_n0.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n0 #### A masked pattern was here #### 128 3 146 2 diff --git a/ql/src/test/results/clientpositive/join31.q.out b/ql/src/test/results/clientpositive/join31.q.out index e37ffb661e..124a861533 100644 --- a/ql/src/test/results/clientpositive/join31.q.out +++ b/ql/src/test/results/clientpositive/join31.q.out @@ -1,20 +1,20 @@ -PREHOOK: query: CREATE TABLE dest_j1(key STRING, cnt INT) +PREHOOK: query: CREATE TABLE dest_j1_n22(key STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key STRING, cnt INT) +PREHOOK: Output: default@dest_j1_n22 +POSTHOOK: query: CREATE TABLE dest_j1_n22(key STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n22 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n22 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) group by subq1.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n22 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) @@ 
-135,7 +135,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n22 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, cnt @@ -160,7 +160,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n22 Stage: Stage-4 Stats Work @@ -168,7 +168,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, cnt Column Types: string, int - Table: default.dest_j1 + Table: default.dest_j1_n22 Stage: Stage-5 Map Reduce @@ -193,7 +193,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n22 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) @@ -201,8 +201,8 @@ group by subq1.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n22 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n22 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) @@ -210,16 +210,16 @@ group by subq1.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.cnt EXPRESSION [(src1)x.null, ] -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: Output: default@dest_j1_n22 +POSTHOOK: Lineage: dest_j1_n22.cnt EXPRESSION [(src1)x.null, ] +POSTHOOK: Lineage: dest_j1_n22.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n22 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n22 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n22 #### A masked pattern was here #### 128 1 146 1 diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out index 4e9d2058d1..a7af569794 100644 --- a/ql/src/test/results/clientpositive/join32.q.out +++ b/ql/src/test/results/clientpositive/join32.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n12(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n12 +POSTHOOK: query: CREATE TABLE dest_j1_n12(key STRING, value STRING, val2 STRING) STORED AS 
TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n12 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n12 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n12 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) @@ -178,17 +178,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n12 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n12 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n12 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -389,17 +389,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n12 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n12 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n12 Stage: Stage-3 Stats Work @@ -408,7 +408,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value, val2 Column Types: string, string, string - Table: default.dest_j1 + Table: default.dest_j1_n12 Is Table Level Stats: true Stage: Stage-4 @@ -481,7 +481,7 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n12 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) @@ -490,8 +490,8 @@ PREHOOK: Input: default@src PREHOOK: Input: default@src1 PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n12 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n12 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) @@ -500,17 +500,17 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: 
Output: default@dest_j1_n12 +POSTHOOK: Lineage: dest_j1_n12.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n12.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n12.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n12 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n12 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n12 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n12 #### A masked pattern was here #### 146 val_146 val_146 146 val_146 val_146 diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out index 4e9d2058d1..7d44eb175c 100644 --- a/ql/src/test/results/clientpositive/join33.q.out +++ b/ql/src/test/results/clientpositive/join33.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n7(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n7 +POSTHOOK: query: CREATE TABLE dest_j1_n7(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n7 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n7 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n7 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) @@ -178,17 +178,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n7 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n7 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n7 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -389,17 +389,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n7 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n7 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n7 Stage: Stage-3 Stats Work @@ -408,7 +408,7 @@ STAGE 
PLANS: Column Stats Desc: Columns: key, value, val2 Column Types: string, string, string - Table: default.dest_j1 + Table: default.dest_j1_n7 Is Table Level Stats: true Stage: Stage-4 @@ -481,7 +481,7 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n7 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) @@ -490,8 +490,8 @@ PREHOOK: Input: default@src PREHOOK: Input: default@src1 PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n7 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n7 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) @@ -500,17 +500,17 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: Output: default@dest_j1_n7 +POSTHOOK: Lineage: dest_j1_n7.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n7.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n7.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n7 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n7 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n7 #### A masked pattern was here #### 146 val_146 val_146 146 val_146 val_146 diff --git a/ql/src/test/results/clientpositive/join34.q.out b/ql/src/test/results/clientpositive/join34.q.out index ae7459acfd..bb226cb15a 100644 --- a/ql/src/test/results/clientpositive/join34.q.out +++ b/ql/src/test/results/clientpositive/join34.q.out @@ -1,13 +1,13 @@ -PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n1 +POSTHOOK: query: CREATE TABLE dest_j1_n1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n1 SELECT x.key, x.value, subq1.value FROM ( SELECT x.key as key, x.value as value from src x where x.key < 20 @@ -17,7 +17,7 @@ FROM JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY POSTHOOK: 
query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n1 SELECT x.key, x.value, subq1.value FROM ( SELECT x.key as key, x.value as value from src x where x.key < 20 @@ -109,17 +109,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n1 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -196,17 +196,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n1 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -358,17 +358,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n1 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n1 Stage: Stage-2 Stats Work @@ -377,7 +377,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value, val2 Column Types: string, string, string - Table: default.dest_j1 + Table: default.dest_j1_n1 Is Table Level Stats: true Stage: Stage-3 @@ -450,7 +450,7 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n1 SELECT x.key, x.value, subq1.value FROM ( SELECT x.key as key, x.value as value from src x where x.key < 20 @@ -461,8 +461,8 @@ JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n1 SELECT x.key, x.value, subq1.value FROM ( SELECT x.key as key, x.value as value from src x where x.key < 20 @@ -473,17 +473,17 @@ JOIN src1 x ON (x.key = subq1.key) POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)x.FieldSchema(name:value, type:string, comment:default), (src)x1.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value 
SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from dest_j1
+POSTHOOK: Output: default@dest_j1_n1
+POSTHOOK: Lineage: dest_j1_n1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n1.val2 EXPRESSION [(src)x.FieldSchema(name:value, type:string, comment:default), (src)x1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n1.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_j1
+PREHOOK: Input: default@dest_j1_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from dest_j1
+POSTHOOK: query: select * from dest_j1_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_j1
+POSTHOOK: Input: default@dest_j1_n1
 #### A masked pattern was here ####
 128 val_128 128 val_128
diff --git a/ql/src/test/results/clientpositive/join35.q.out b/ql/src/test/results/clientpositive/join35.q.out
index f8af6a8523..9bd3bff8c9 100644
--- a/ql/src/test/results/clientpositive/join35.q.out
+++ b/ql/src/test/results/clientpositive/join35.q.out
@@ -1,13 +1,13 @@
-PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest_j1_n24(key STRING, value STRING, val2 INT) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE
+PREHOOK: Output: default@dest_j1_n24
+POSTHOOK: query: CREATE TABLE dest_j1_n24(key STRING, value STRING, val2 INT) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_j1
+POSTHOOK: Output: default@dest_j1_n24
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n24
 SELECT x.key, x.value, subq1.cnt
 FROM
 ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key
@@ -17,7 +17,7 @@ FROM
 JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n24
 SELECT x.key, x.value, subq1.cnt
 FROM
 ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key
@@ -208,17 +208,17 @@ STAGE PLANS:
 columns.comments
 columns.types string:string:int
 #### A masked pattern was here ####
- name default.dest_j1
+ name default.dest_j1_n24
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest_j1 { string key, string value, i32 val2}
+ serialization.ddl struct dest_j1_n24 { string key, string value, i32 val2}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n24
 TotalFiles: 1
 GatherStats: true
 MultiFileSpray: false
@@ -285,17 +285,17 @@ STAGE PLANS:
 columns.comments
 columns.types string:string:int
 #### A masked pattern was here ####
- name default.dest_j1
+ name default.dest_j1_n24
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest_j1 { string key, string value, i32 val2}
+ serialization.ddl struct dest_j1_n24 { string key, string value, i32 val2}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n24
 TotalFiles: 1
 GatherStats: true
 MultiFileSpray: false
@@ -443,17 +443,17 @@ STAGE PLANS:
 columns.comments
 columns.types string:string:int
 #### A masked pattern was here ####
- name default.dest_j1
+ name default.dest_j1_n24
 numFiles 0
 numRows 0
 rawDataSize 0
- serialization.ddl struct dest_j1 { string key, string value, i32 val2}
+ serialization.ddl struct dest_j1_n24 { string key, string value, i32 val2}
 serialization.format 1
 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 totalSize 0
 #### A masked pattern was here ####
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n24
 Stage: Stage-3
 Stats Work
@@ -462,7 +462,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value, val2
 Column Types: string, string, int
- Table: default.dest_j1
+ Table: default.dest_j1_n24
 Is Table Level Stats: true
 Stage: Stage-4
@@ -642,7 +642,7 @@ STAGE PLANS:
 GatherStats: false
 MultiFileSpray: false
-PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n24
 SELECT x.key, x.value, subq1.cnt
 FROM
 ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key
@@ -653,8 +653,8 @@ JOIN src1 x ON (x.key = subq1.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1
+PREHOOK: Output: default@dest_j1_n24
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n24
 SELECT x.key, x.value, subq1.cnt
 FROM
 ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key
@@ -665,17 +665,17 @@ JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@dest_j1
-POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)x.null, (src)x1.null, ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from dest_j1
+POSTHOOK: Output: default@dest_j1_n24
+POSTHOOK: Lineage: dest_j1_n24.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n24.val2 EXPRESSION [(src)x.null, (src)x1.null, ]
+POSTHOOK: Lineage: dest_j1_n24.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1_n24
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_j1
+PREHOOK: Input: default@dest_j1_n24
 #### A masked pattern was here ####
-POSTHOOK: query: select * from dest_j1
+POSTHOOK: query: select * from dest_j1_n24
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_j1
+POSTHOOK: Input: default@dest_j1_n24
 #### A masked pattern was here ####
 128 3
 146 val_146 2
diff --git a/ql/src/test/results/clientpositive/join36.q.out b/ql/src/test/results/clientpositive/join36.q.out
index 2a2e17c6ce..ded429031b 100644
--- a/ql/src/test/results/clientpositive/join36.q.out
+++ b/ql/src/test/results/clientpositive/join36.q.out
@@ -1,60 +1,60 @@
-PREHOOK: query: CREATE TABLE tmp1(key INT, cnt INT)
+PREHOOK: query: CREATE TABLE tmp1_n0(key INT, cnt INT)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tmp1
-POSTHOOK: query: CREATE TABLE tmp1(key INT, cnt INT)
+PREHOOK: Output: default@tmp1_n0
+POSTHOOK: query: CREATE TABLE tmp1_n0(key INT, cnt INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmp1
-PREHOOK: query: CREATE TABLE tmp2(key INT, cnt INT)
+POSTHOOK: Output: default@tmp1_n0
+PREHOOK: query: CREATE TABLE tmp2_n0(key INT, cnt INT)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tmp2
-POSTHOOK: query: CREATE TABLE tmp2(key INT, cnt INT)
+PREHOOK: Output: default@tmp2_n0
+POSTHOOK: query: CREATE TABLE tmp2_n0(key INT, cnt INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmp2
-PREHOOK: query: CREATE TABLE dest_j1(key INT, value INT, val2 INT)
+POSTHOOK: Output: default@tmp2_n0
+PREHOOK: query: CREATE TABLE dest_j1_n13(key INT, value INT, val2 INT)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: CREATE TABLE dest_j1(key INT, value INT, val2 INT)
+PREHOOK: Output: default@dest_j1_n13
+POSTHOOK: query: CREATE TABLE dest_j1_n13(key INT, value INT, val2 INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_j1
-PREHOOK: query: INSERT OVERWRITE TABLE tmp1
+POSTHOOK: Output: default@dest_j1_n13
+PREHOOK: query: INSERT OVERWRITE TABLE tmp1_n0
 SELECT key, count(1) from src group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@tmp1
-POSTHOOK: query: INSERT OVERWRITE TABLE tmp1
+PREHOOK: Output: default@tmp1_n0
+POSTHOOK: query: INSERT OVERWRITE TABLE tmp1_n0
 SELECT key, count(1) from src group by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tmp1
-POSTHOOK: Lineage: tmp1.cnt EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: tmp1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE tmp2
+POSTHOOK: Output: default@tmp1_n0
+POSTHOOK: Lineage: tmp1_n0.cnt EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: tmp1_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE tmp2_n0
 SELECT key, count(1) from src group by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@tmp2
-POSTHOOK: query: INSERT OVERWRITE TABLE tmp2
+PREHOOK: Output: default@tmp2_n0
+POSTHOOK: query: INSERT OVERWRITE TABLE tmp2_n0
 SELECT key, count(1) from src group by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tmp2
-POSTHOOK: Lineage: tmp2.cnt EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: tmp2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Output: default@tmp2_n0
+POSTHOOK: Lineage: tmp2_n0.cnt EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: tmp2_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n13
 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt
-FROM tmp1 x JOIN tmp2 y ON (x.key = y.key)
+FROM tmp1_n0 x JOIN tmp2_n0 y ON (x.key = y.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n13
 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt
-FROM tmp1 x JOIN tmp2 y ON (x.key = y.key)
+FROM tmp1_n0 x JOIN tmp2_n0 y ON (x.key = y.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-6 is a root stage
@@ -119,7 +119,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n13
 Select Operator
 expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
 outputColumnNames: key, value, val2
@@ -146,7 +146,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n13
 Stage: Stage-2
 Stats Work
@@ -154,7 +154,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value, val2
 Column Types: int, int, int
- Table: default.dest_j1
+ Table: default.dest_j1_n13
 Stage: Stage-3
 Map Reduce
@@ -179,30 +179,30 @@ STAGE PLANS:
 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n13
 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt
-FROM tmp1 x JOIN tmp2 y ON (x.key = y.key)
+FROM tmp1_n0 x JOIN tmp2_n0 y ON (x.key = y.key)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tmp1
-PREHOOK: Input: default@tmp2
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1
+PREHOOK: Input: default@tmp1_n0
+PREHOOK: Input: default@tmp2_n0
+PREHOOK: Output: default@dest_j1_n13
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n13
 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt
-FROM tmp1 x JOIN tmp2 y ON (x.key = y.key)
+FROM tmp1_n0 x JOIN tmp2_n0 y ON (x.key = y.key)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tmp1
-POSTHOOK: Input: default@tmp2
-POSTHOOK: Output: default@dest_j1
-POSTHOOK: Lineage: dest_j1.key SIMPLE [(tmp1)x.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(tmp2)y.FieldSchema(name:cnt, type:int, comment:null), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(tmp1)x.FieldSchema(name:cnt, type:int, comment:null), ]
-PREHOOK: query: select * from dest_j1
+POSTHOOK: Input: default@tmp1_n0
+POSTHOOK: Input: default@tmp2_n0
+POSTHOOK: Output: default@dest_j1_n13
+POSTHOOK: Lineage: dest_j1_n13.key SIMPLE [(tmp1_n0)x.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest_j1_n13.val2 SIMPLE [(tmp2_n0)y.FieldSchema(name:cnt, type:int, comment:null), ]
+POSTHOOK: Lineage: dest_j1_n13.value SIMPLE [(tmp1_n0)x.FieldSchema(name:cnt, type:int, comment:null), ]
+PREHOOK: query: select * from dest_j1_n13
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_j1
+PREHOOK: Input: default@dest_j1_n13
 #### A masked pattern was here ####
-POSTHOOK: query: select * from dest_j1
+POSTHOOK: query: select * from dest_j1_n13
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_j1
+POSTHOOK: Input: default@dest_j1_n13
 #### A masked pattern was here ####
 0 3 3
 10 1 1
diff --git a/ql/src/test/results/clientpositive/join37.q.out b/ql/src/test/results/clientpositive/join37.q.out
index 1f15bf5fff..7b8487600c 100644
--- a/ql/src/test/results/clientpositive/join37.q.out
+++ b/ql/src/test/results/clientpositive/join37.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest_j1_n9(key INT, value STRING, val2 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest_j1_n9
+POSTHOOK: query: CREATE TABLE dest_j1_n9(key INT, value STRING, val2 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_j1
+POSTHOOK: Output: default@dest_j1_n9
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n9
 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n9
 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
 POSTHOOK: type: QUERY
@@ -79,7 +79,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n9
 Select Operator
 expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
 outputColumnNames: key, value, val2
@@ -106,7 +106,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n9
 Stage: Stage-2
 Stats Work
@@ -114,7 +114,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value, val2
 Column Types: int, string, string
- Table: default.dest_j1
+ Table: default.dest_j1_n9
 Stage: Stage-3
 Map Reduce
@@ -139,30 +139,30 @@ STAGE PLANS:
 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n9
 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1
+PREHOOK: Output: default@dest_j1_n9
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n9
 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@dest_j1
-POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from dest_j1
+POSTHOOK: Output: default@dest_j1_n9
+POSTHOOK: Lineage: dest_j1_n9.key EXPRESSION [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n9.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n9.value SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1_n9
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_j1
+PREHOOK: Input: default@dest_j1_n9
 #### A masked pattern was here ####
-POSTHOOK: query: select * from dest_j1
+POSTHOOK: query: select * from dest_j1_n9
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_j1
+POSTHOOK: Input: default@dest_j1_n9
 #### A masked pattern was here ####
 128 val_128 128 val_128
diff --git a/ql/src/test/results/clientpositive/join38.q.out b/ql/src/test/results/clientpositive/join38.q.out
index 2b29b4d115..ce76c306fe 100644
--- a/ql/src/test/results/clientpositive/join38.q.out
+++ b/ql/src/test/results/clientpositive/join38.q.out
@@ -1,49 +1,49 @@
-PREHOOK: query: create table tmp(col0 string, col1 string,col2 string,col3 string,col4 string,col5 string,col6 string,col7 string,col8 string,col9 string,col10 string,col11 string)
+PREHOOK: query: create table tmp_n1(col0 string, col1 string,col2 string,col3 string,col4 string,col5 string,col6 string,col7 string,col8 string,col9 string,col10 string,col11 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tmp
-POSTHOOK: query: create table tmp(col0 string, col1 string,col2 string,col3 string,col4 string,col5 string,col6 string,col7 string,col8 string,col9 string,col10 string,col11 string)
+PREHOOK: Output: default@tmp_n1
+POSTHOOK: query: create table tmp_n1(col0 string, col1 string,col2 string,col3 string,col4 string,col5 string,col6 string,col7 string,col8 string,col9 string,col10 string,col11 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmp
-PREHOOK: query: insert overwrite table tmp select key, cast(key + 1 as int), key +2, key+3, key+4, cast(key+5 as int), key+6, key+7, key+8, key+9, key+10, cast(key+11 as int) from src where key = 100
+POSTHOOK: Output: default@tmp_n1
+PREHOOK: query: insert overwrite table tmp_n1 select key, cast(key + 1 as int), key +2, key+3, key+4, cast(key+5 as int), key+6, key+7, key+8, key+9, key+10, cast(key+11 as int) from src where key = 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@tmp
-POSTHOOK: query: insert overwrite table tmp select key, cast(key + 1 as int), key +2, key+3, key+4, cast(key+5 as int), key+6, key+7, key+8, key+9, key+10, cast(key+11 as int) from src where key = 100
+PREHOOK: Output: default@tmp_n1
+POSTHOOK: query: insert overwrite table tmp_n1 select key, cast(key + 1 as int), key +2, key+3, key+4, cast(key+5 as int), key+6, key+7, key+8, key+9, key+10, cast(key+11 as int) from src where key = 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tmp
-POSTHOOK: Lineage: tmp.col0 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col1 EXPRESSION []
-POSTHOOK: Lineage: tmp.col10 EXPRESSION []
-POSTHOOK: Lineage: tmp.col11 EXPRESSION []
-POSTHOOK: Lineage: tmp.col2 EXPRESSION []
-POSTHOOK: Lineage: tmp.col3 EXPRESSION []
-POSTHOOK: Lineage: tmp.col4 EXPRESSION []
-POSTHOOK: Lineage: tmp.col5 EXPRESSION []
-POSTHOOK: Lineage: tmp.col6 EXPRESSION []
-POSTHOOK: Lineage: tmp.col7 EXPRESSION []
-POSTHOOK: Lineage: tmp.col8 EXPRESSION []
-POSTHOOK: Lineage: tmp.col9 EXPRESSION []
-PREHOOK: query: select * from tmp
+POSTHOOK: Output: default@tmp_n1
+POSTHOOK: Lineage: tmp_n1.col0 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp_n1.col1 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col10 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col11 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col2 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col3 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col4 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col5 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col6 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col7 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col8 EXPRESSION []
+POSTHOOK: Lineage: tmp_n1.col9 EXPRESSION []
+PREHOOK: query: select * from tmp_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tmp
+PREHOOK: Input: default@tmp_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from tmp
+POSTHOOK: query: select * from tmp_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tmp
+POSTHOOK: Input: default@tmp_n1
 #### A masked pattern was here ####
 100 101 102.0 103.0 104.0 105 106.0 107.0 108.0 109.0 110.0 111
 100 101 102.0 103.0 104.0 105 106.0 107.0 108.0 109.0 110.0 111
 PREHOOK: query: explain
-FROM src a JOIN tmp b ON (a.key = b.col11)
+FROM src a JOIN tmp_n1 b ON (a.key = b.col11)
 SELECT /*+ MAPJOIN(a) */ a.value, b.col5, count(1) as count
 where b.col11 = 111
 group by a.value, b.col5
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-FROM src a JOIN tmp b ON (a.key = b.col11)
+FROM src a JOIN tmp_n1 b ON (a.key = b.col11)
 SELECT /*+ MAPJOIN(a) */ a.value, b.col5, count(1) as count
 where b.col11 = 111
 group by a.value, b.col5
@@ -134,20 +134,20 @@ STAGE PLANS:
 Processor Tree:
 ListSink
-PREHOOK: query: FROM src a JOIN tmp b ON (a.key = b.col11)
+PREHOOK: query: FROM src a JOIN tmp_n1 b ON (a.key = b.col11)
 SELECT /*+ MAPJOIN(a) */ a.value, b.col5, count(1) as count
 where b.col11 = 111
 group by a.value, b.col5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Input: default@tmp
+PREHOOK: Input: default@tmp_n1
 #### A masked pattern was here ####
-POSTHOOK: query: FROM src a JOIN tmp b ON (a.key = b.col11)
+POSTHOOK: query: FROM src a JOIN tmp_n1 b ON (a.key = b.col11)
 SELECT /*+ MAPJOIN(a) */ a.value, b.col5, count(1) as count
 where b.col11 = 111
 group by a.value, b.col5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Input: default@tmp
+POSTHOOK: Input: default@tmp_n1
 #### A masked pattern was here ####
 val_111 105 2
diff --git a/ql/src/test/results/clientpositive/join39.q.out b/ql/src/test/results/clientpositive/join39.q.out
index 44e74960cd..78ef5217e8 100644
--- a/ql/src/test/results/clientpositive/join39.q.out
+++ b/ql/src/test/results/clientpositive/join39.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest_j1_n8(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest_j1_n8
+POSTHOOK: query: CREATE TABLE dest_j1_n8(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_j1
+POSTHOOK: Output: default@dest_j1_n8
 PREHOOK: query: explain
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n8
 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value
 FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = y.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-INSERT OVERWRITE TABLE dest_j1
+INSERT OVERWRITE TABLE dest_j1_n8
 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value
 FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = y.key)
 POSTHOOK: type: QUERY
@@ -72,7 +72,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n8
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
 outputColumnNames: key, value, key1, val2
@@ -99,7 +99,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest_j1
+ name: default.dest_j1_n8
 Stage: Stage-2
 Stats Work
@@ -107,7 +107,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value, key1, val2
 Column Types: string, string, string, string
- Table: default.dest_j1
+ Table: default.dest_j1_n8
 Stage: Stage-3
 Map Reduce
@@ -132,29 +132,29 @@ STAGE PLANS:
 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n8
 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value
 FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = y.key)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest_j1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1
+PREHOOK: Output: default@dest_j1_n8
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n8
 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value
 FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = y.key)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest_j1
-POSTHOOK: Lineage: dest_j1.key SIMPLE [(src)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.key1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)x.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from dest_j1
+POSTHOOK: Output: default@dest_j1_n8
+POSTHOOK: Lineage: dest_j1_n8.key SIMPLE [(src)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n8.key1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n8.val2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1_n8.value SIMPLE [(src)x.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1_n8
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_j1
+PREHOOK: Input: default@dest_j1_n8
 #### A masked pattern was here ####
-POSTHOOK: query: select * from dest_j1
+POSTHOOK: query: select * from dest_j1_n8
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_j1
+POSTHOOK: Input: default@dest_j1_n8
 #### A masked pattern was here ####
 0 val_0 0 val_0
 0 val_0 0 val_0
diff --git a/ql/src/test/results/clientpositive/join4.q.out b/ql/src/test/results/clientpositive/join4.q.out
index 1e3fa90526..2ecbeb2ed2 100644
--- a/ql/src/test/results/clientpositive/join4.q.out
+++ b/ql/src/test/results/clientpositive/join4.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n62(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n62
+POSTHOOK: query: CREATE TABLE dest1_n62(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n62
 PREHOOK: query: EXPLAIN
 FROM (
 FROM
@@ -19,7 +19,7 @@ FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n62 SELECT c.c1, c.c2, c.c3, c.c4
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -34,7 +34,7 @@ FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n62 SELECT c.c1, c.c2, c.c3, c.c4
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -98,7 +98,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n62
 Select Operator
 expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string)
 outputColumnNames: c1, c2, c3, c4
@@ -123,7 +123,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.dest1
+ name: default.dest1_n62
 Stage: Stage-2
 Stats Work
@@ -131,7 +131,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: c1, c2, c3, c4
 Column Types: int, string, int, string
- Table: default.dest1
+ Table: default.dest1_n62
 Stage: Stage-3
 Map Reduce
@@ -168,10 +168,10 @@ PREHOOK: query: FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n62 SELECT c.c1, c.c2, c.c3, c.c4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n62
 POSTHOOK: query: FROM (
 FROM
 (
@@ -184,21 +184,21 @@ POSTHOOK: query: FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n62 SELECT c.c1, c.c2, c.c3, c.c4
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n62
+POSTHOOK: Lineage: dest1_n62.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n62.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n62.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n62.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n62.* FROM dest1_n62
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n62
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n62.* FROM dest1_n62
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n62
 #### A masked pattern was here ####
 11 val_11 NULL NULL
 12 val_12 NULL NULL
diff --git a/ql/src/test/results/clientpositive/join42.q.out b/ql/src/test/results/clientpositive/join42.q.out
index 70e22536f0..8cbd4cfbae 100644
--- a/ql/src/test/results/clientpositive/join42.q.out
+++ b/ql/src/test/results/clientpositive/join42.q.out
@@ -9,19 +9,19 @@ POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@L
 POSTHOOK: Lineage: l.id SIMPLE []
-PREHOOK: query: create table LA as select 4436 loan_id, 4748 aid, 4415 pi_id
+PREHOOK: query: create table LA_n1 as select 4436 loan_id, 4748 aid, 4415 pi_id
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: database:default
-PREHOOK: Output: default@LA
-POSTHOOK: query: create table LA as select 4436 loan_id, 4748 aid, 4415 pi_id
+PREHOOK: Output: default@LA_n1
+POSTHOOK: query: create table LA_n1 as select 4436 loan_id, 4748 aid, 4415 pi_id
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@LA
-POSTHOOK: Lineage: la.aid SIMPLE []
-POSTHOOK: Lineage: la.loan_id SIMPLE []
-POSTHOOK: Lineage: la.pi_id SIMPLE []
+POSTHOOK: Output: default@LA_n1
+POSTHOOK: Lineage: la_n1.aid SIMPLE []
+POSTHOOK: Lineage: la_n1.loan_id SIMPLE []
+POSTHOOK: Lineage: la_n1.pi_id SIMPLE []
 PREHOOK: query: create table FR as select 4436 loan_id
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -33,17 +33,17 @@ POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@FR
 POSTHOOK: Lineage: fr.loan_id SIMPLE []
-PREHOOK: query: create table A as select 4748 id
+PREHOOK: query: create table A_n1 as select 4748 id
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: database:default
-PREHOOK: Output: default@A
-POSTHOOK: query: create table A as select 4748 id
+PREHOOK: Output: default@A_n1
+POSTHOOK: query: create table A_n1 as select 4748 id
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@A
-POSTHOOK: Lineage: a.id SIMPLE []
+POSTHOOK: Output: default@A_n1
+POSTHOOK: Lineage: a_n1.id SIMPLE []
 PREHOOK: query: create table PI as select 4415 id
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -95,11 +95,11 @@ PREHOOK: query: explain select
 acct.ACC_N,
 acct.brn
 FROM L
-JOIN LA ON L.id = LA.loan_id
+JOIN LA_n1 ON L.id = LA_n1.loan_id
 JOIN FR ON L.id = FR.loan_id
-JOIN A ON LA.aid = A.id
-JOIN PI ON PI.id = LA.pi_id
-JOIN acct ON A.id = acct.aid
+JOIN A_n1 ON LA_n1.aid = A_n1.id
+JOIN PI ON PI.id = LA_n1.pi_id
+JOIN acct ON A_n1.id = acct.aid
 WHERE
 L.id = 4436
 and acct.brn is not null
@@ -108,11 +108,11 @@ POSTHOOK: query: explain select
 acct.ACC_N,
 acct.brn
 FROM L
-JOIN LA ON L.id = LA.loan_id
+JOIN LA_n1 ON L.id = LA_n1.loan_id
 JOIN FR ON L.id = FR.loan_id
-JOIN A ON LA.aid = A.id
-JOIN PI ON PI.id = LA.pi_id
-JOIN acct ON A.id = acct.aid
+JOIN A_n1 ON LA_n1.aid = A_n1.id
+JOIN PI ON PI.id = LA_n1.pi_id
+JOIN acct ON A_n1.id = acct.aid
 WHERE
 L.id = 4436
 and acct.brn is not null
@@ -223,7 +223,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: la
+ alias: la_n1
 Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
 predicate: ((loan_id = 4436) and aid is not null and pi_id is not null) (type: boolean)
@@ -239,7 +239,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col2 (type: int)
 TableScan
- alias: a
+ alias: a_n1
 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
 predicate: id is not null (type: boolean)
@@ -321,40 +321,40 @@ PREHOOK: query: select
 acct.ACC_N,
 acct.brn
 FROM L
-JOIN LA ON L.id = LA.loan_id
+JOIN LA_n1 ON L.id = LA_n1.loan_id
 JOIN FR ON L.id = FR.loan_id
-JOIN A ON LA.aid = A.id
-JOIN PI ON PI.id = LA.pi_id
-JOIN acct ON A.id = acct.aid
+JOIN A_n1 ON LA_n1.aid = A_n1.id
+JOIN PI ON PI.id = LA_n1.pi_id
+JOIN acct ON A_n1.id = acct.aid
 WHERE
 L.id = 4436
 and acct.brn is not null
 PREHOOK: type: QUERY
-PREHOOK: Input: default@a
+PREHOOK: Input: default@a_n1
 PREHOOK: Input: default@acct
 PREHOOK: Input: default@fr
 PREHOOK: Input: default@l
-PREHOOK: Input: default@la
+PREHOOK: Input: default@la_n1
 PREHOOK: Input: default@pi
 #### A masked pattern was here ####
 POSTHOOK: query: select
 acct.ACC_N,
 acct.brn
 FROM L
-JOIN LA ON L.id = LA.loan_id
+JOIN LA_n1 ON L.id = LA_n1.loan_id
 JOIN FR ON L.id = FR.loan_id
-JOIN A ON LA.aid = A.id
-JOIN PI ON PI.id = LA.pi_id
-JOIN acct ON A.id = acct.aid
+JOIN A_n1 ON LA_n1.aid = A_n1.id
+JOIN PI ON PI.id = LA_n1.pi_id
+JOIN acct ON A_n1.id = acct.aid
 WHERE
 L.id = 4436
 and acct.brn is not null
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@a
+POSTHOOK: Input: default@a_n1
 POSTHOOK: Input: default@acct
 POSTHOOK: Input: default@fr
 POSTHOOK: Input: default@l
-POSTHOOK: Input: default@la
+POSTHOOK: Input: default@la_n1
 POSTHOOK: Input: default@pi
 #### A masked pattern was here ####
 10 122
diff --git a/ql/src/test/results/clientpositive/join44.q.out b/ql/src/test/results/clientpositive/join44.q.out
index 84e44c5f80..1ca04b39aa 100644
--- a/ql/src/test/results/clientpositive/join44.q.out
+++ b/ql/src/test/results/clientpositive/join44.q.out
@@ -1,21 +1,21 @@
-PREHOOK: query: CREATE TABLE mytable(val1 INT, val2 INT, val3 INT)
+PREHOOK: query: CREATE TABLE mytable_n1(val1 INT, val2 INT, val3 INT)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@mytable
-POSTHOOK: query: CREATE TABLE mytable(val1 INT, val2 INT, val3 INT)
+PREHOOK: Output: default@mytable_n1
+POSTHOOK: query: CREATE TABLE mytable_n1(val1 INT, val2 INT, val3 INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@mytable
+POSTHOOK: Output: default@mytable_n1
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM mytable src1, mytable src2
+FROM mytable_n1 src1, mytable_n1 src2
 WHERE src1.val1=src2.val1
 AND src1.val2 between 2450816 and 2451500
 AND src2.val2 between 2450816 and 2451500
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM mytable src1, mytable src2
+FROM mytable_n1 src1, mytable_n1 src2
 WHERE src1.val1=src2.val1
 AND src1.val2 between 2450816 and 2451500
 AND src2.val2 between 2450816 and 2451500
diff --git a/ql/src/test/results/clientpositive/join46.q.out b/ql/src/test/results/clientpositive/join46.q.out
index 0847ca6894..07582e1101 100644
--- a/ql/src/test/results/clientpositive/join46.q.out
+++ b/ql/src/test/results/clientpositive/join46.q.out
@@ -1,54 +1,54 @@
-PREHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING)
+PREHOOK: query: CREATE TABLE test1_n2 (key INT, value INT, col_1 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test1
-POSTHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING)
+PREHOOK: Output: default@test1_n2
+POSTHOOK: query: CREATE TABLE test1_n2 (key INT, value INT, col_1 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test1
-PREHOOK: query: INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
+POSTHOOK: Output: default@test1_n2
+PREHOOK: query: INSERT INTO test1_n2 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
 (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test1
-POSTHOOK: query: INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
+PREHOOK: Output: default@test1_n2
+POSTHOOK: query: INSERT INTO test1_n2 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
 (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test1
-POSTHOOK: Lineage: test1.col_1 SCRIPT []
-POSTHOOK: Lineage: test1.key SCRIPT []
-POSTHOOK: Lineage: test1.value SCRIPT []
-PREHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING)
+POSTHOOK: Output: default@test1_n2
+POSTHOOK: Lineage: test1_n2.col_1 SCRIPT []
+POSTHOOK: Lineage: test1_n2.key SCRIPT []
+POSTHOOK: Lineage: test1_n2.value SCRIPT []
+PREHOOK: query: CREATE TABLE test2_n0 (key INT, value INT, col_2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test2
-POSTHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING)
+PREHOOK: Output: default@test2_n0
+POSTHOOK: query: CREATE TABLE test2_n0 (key INT, value INT, col_2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test2
-PREHOOK: query: INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
+POSTHOOK: Output: default@test2_n0
+PREHOOK: query: INSERT INTO test2_n0 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
 (104, 3, 'Fli'), (105, NULL, 'None')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test2
-POSTHOOK: query: INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
+PREHOOK: Output: default@test2_n0
+POSTHOOK: query: INSERT INTO test2_n0 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
 (104, 3, 'Fli'), (105, NULL, 'None')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test2
-POSTHOOK: Lineage: test2.col_2 SCRIPT []
-POSTHOOK: Lineage: test2.key SCRIPT []
-POSTHOOK: Lineage: test2.value SCRIPT []
+POSTHOOK: Output: default@test2_n0
+POSTHOOK: Lineage: test2_n0.col_2 SCRIPT []
+POSTHOOK: Lineage: test2_n0.key SCRIPT []
+POSTHOOK: Lineage: test2_n0.value SCRIPT []
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -59,7 +59,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -72,7 +72,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -108,18 +108,18 @@ STAGE PLANS:
 ListSink
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 98 NULL None NULL NULL NULL
 NULL NULL None NULL NULL NULL
@@ -131,17 +131,17 @@ NULL NULL None NULL NULL NULL
 99 2 Mat 102 2 Del
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- AND test1.key between 100 and 102
- AND test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND test1_n2.key between 100 and 102
+ AND test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- AND test1.key between 100 and 102
- AND test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND test1_n2.key between 100 and 102
+ AND test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -152,7 +152,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -165,7 +165,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
 predicate: key BETWEEN 100 AND 102 (type: boolean)
@@ -207,22 +207,22 @@ STAGE PLANS:
 ListSink
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- AND test1.key between 100 and 102
- AND test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND test1_n2.key between 100 and 102
+ AND test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- AND test1.key between 100 and 102
- AND test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND test1_n2.key between 100 and 102
+ AND test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 98 NULL None NULL NULL NULL
 NULL NULL None NULL NULL NULL
@@ -233,15 +233,15 @@ NULL NULL None NULL NULL NULL
 Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102
- AND test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102
+ AND test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102
- AND test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102
+ AND test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -252,7 +252,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -263,7 +263,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
 predicate: key BETWEEN 100 AND 102 (type: boolean)
@@ -304,20 +304,20 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102
- AND test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102
+ AND test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102
- AND test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102
+ AND test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 101 2 Car 102 2 Del
 100 1 Bob 102 2 Del
@@ -327,13 +327,13 @@ POSTHOOK: Input: default@test2
 NULL NULL None NULL NULL NULL
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value AND true)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value AND true)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value AND true)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value AND true)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -344,7 +344,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -357,7 +357,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -393,18 +393,18 @@ STAGE PLANS:
 ListSink
 PREHOOK: query: SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value AND true)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value AND true)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value AND true)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value AND true)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 NULL NULL NULL 105 NULL None
 101 2 Car 103 2 Ema
@@ -415,13 +415,13 @@ NULL NULL NULL 104 3 Fli
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -432,7 +432,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -443,7 +443,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -481,18 +481,18 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 101 2 Car 105 NULL None
 101 2 Car 104 3 Fli
@@ -509,17 +509,17 @@ NULL NULL None NULL NULL NULL
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102
- OR test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102
- OR test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -530,7 +530,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -541,7 +541,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -577,22 +577,22 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102
- OR test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102
- OR test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 101 2 Car 105 NULL None
 101 2 Car 104 3 Fli
@@ -610,15 +610,15 @@ NULL NULL None 102 2 Del
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -629,7 +629,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -640,7 +640,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -676,20 +676,20 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 101 2 Car 105 NULL None
 101 2 Car 104 3 Fli
@@ -707,15 +707,15 @@ NULL NULL None NULL NULL NULL
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -726,7 +726,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -737,7 +737,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -773,20 +773,20 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test2.key between 100 and 102)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 101 2 Car 103 2 Ema
 101 2 Car 102 2 Del
@@ -798,17 +798,17 @@ POSTHOOK: Input: default@test2
 NULL NULL None 102 2 Del
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- AND (test1.key between 100 and 102
- OR test2.key between 100 and 102))
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND (test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102))
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- AND (test1.key between 100 and 102
- OR test2.key between 100 and 102))
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND (test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102))
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -819,7 +819,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -832,7 +832,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -869,22 +869,22 @@ STAGE PLANS:
 ListSink
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- AND (test1.key between 100 and 102
- OR test2.key between 100 and 102))
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND (test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102))
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- AND (test1.key between 100 and 102
- OR test2.key between 100 and 102))
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND (test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102))
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 98 NULL None NULL NULL NULL
 NULL NULL None NULL NULL NULL
@@ -896,15 +896,15 @@ NULL NULL None NULL NULL NULL
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT col_1, col_2
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key=test2.key)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key=test2_n0.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT col_1, col_2
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key=test2.key)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key=test2_n0.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -915,7 +915,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -926,7 +926,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -966,20 +966,20 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT col_1, col_2
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key=test2.key)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key=test2_n0.key)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT col_1, col_2
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key=test2.key)
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key=test2_n0.key)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 Car Ema
 Car Del
@@ -992,17 +992,17 @@ None NULL
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102
- OR test2.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102
- OR test2.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -1013,7 +1013,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -1024,7 +1024,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -1060,22 +1060,22 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102
- OR test2.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102
- OR test2.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 101 2 Car 105 NULL None
 101 2 Car 104 3 Fli
@@ -1093,15 +1093,15 @@ NULL NULL None 102 2 Del
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -1112,7 +1112,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -1123,7 +1123,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -1159,20 +1159,20 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test1.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test1_n2.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 101 2 Car 105 NULL None
 101 2 Car 104 3 Fli
@@ -1187,15 +1187,15 @@ POSTHOOK: Input: default@test2
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test2.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test2.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -1206,7 +1206,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -1217,7 +1217,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+ alias: test2_n0
 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -1253,20 +1253,20 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test2.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test2_n0.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n2
+PREHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- OR test2.key between 100 and 102)
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ OR test2_n0.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n2
+POSTHOOK: Input: default@test2_n0
 #### A masked pattern was here ####
 101 2 Car 103 2 Ema
 101 2 Car 102 2 Del
@@ -1280,17 +1280,17 @@ NULL NULL NULL 105 NULL None
 NULL NULL NULL 104 3 Fli
 PREHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- AND (test1.key between 100 and 102
- OR test2.key between 100 and 102))
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND (test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102))
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value
- AND (test1.key between 100 and 102
- OR test2.key between 100 and 102))
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+ AND (test1_n2.key between 100 and 102
+ OR test2_n0.key between 100 and 102))
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -1301,7 +1301,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
- alias: test1
+ alias: test1_n2
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -1314,7 +1314,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
 value expressions: _col0 (type: int), _col2 (type: string)
 TableScan
- alias: test2
+
alias: test2_n0 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1351,22 +1351,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n2 +PREHOOK: Input: default@test2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n2 RIGHT OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n2 +POSTHOOK: Input: default@test2_n0 #### A masked pattern was here #### NULL NULL NULL 105 NULL None 101 2 Car 103 2 Ema @@ -1376,17 +1376,17 @@ NULL NULL NULL 104 3 Fli Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1397,7 +1397,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n2 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1408,7 +1408,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n0 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1444,22 +1444,22 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n2 +PREHOOK: Input: default@test2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON 
(test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n2 +POSTHOOK: Input: default@test2_n0 #### A masked pattern was here #### 101 2 Car 105 NULL None 101 2 Car 104 3 Fli @@ -1477,15 +1477,15 @@ NULL NULL None 102 2 Del Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1496,7 +1496,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n2 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1507,7 +1507,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n0 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1543,20 +1543,20 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n2 +PREHOOK: Input: default@test2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test1_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n2 +POSTHOOK: Input: default@test2_n0 #### A masked pattern was here #### 101 2 Car 105 NULL None 101 2 Car 104 3 Fli @@ -1574,15 +1574,15 @@ NULL NULL None NULL NULL NULL Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON 
(test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1593,7 +1593,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n2 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1604,7 +1604,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n0 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1640,20 +1640,20 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n2 +PREHOOK: Input: default@test2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + OR test2_n0.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n2 +POSTHOOK: Input: default@test2_n0 #### A masked pattern was here #### 101 2 Car 103 2 Ema 101 2 Car 102 2 Del @@ -1667,17 +1667,17 @@ NULL NULL NULL 105 NULL None NULL NULL NULL 104 3 Fli PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1688,7 +1688,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n2 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1701,7 +1701,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n0 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1738,22 +1738,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n2 FULL OUTER JOIN 
test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n2 +PREHOOK: Input: default@test2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n2 FULL OUTER JOIN test2_n0 +ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n2 +POSTHOOK: Input: default@test2_n0 #### A masked pattern was here #### 98 NULL None NULL NULL NULL NULL NULL None NULL NULL NULL @@ -1768,40 +1768,40 @@ Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'S PREHOOK: query: EXPLAIN SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key1, test1_n2.value AS value1, test1_n2.col_1 AS col_1, + test2_n0.key AS key2, test2_n0.value AS value2, test2_n0.col_2 AS col_2 + FROM test1_n2 RIGHT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key3, test1_n2.value AS value3, test1_n2.col_1 AS col_3, + test2_n0.key AS key4, test2_n0.value AS value4, test2_n0.col_2 AS col_4 + FROM test1_n2 LEFT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key1, test1_n2.value AS value1, test1_n2.col_1 AS col_1, + test2_n0.key AS key2, test2_n0.value AS value2, test2_n0.col_2 AS col_2 + FROM test1_n2 RIGHT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key3, test1_n2.value AS value3, test1_n2.col_1 AS col_3, + test2_n0.key AS key4, test2_n0.value AS value4, test2_n0.col_2 AS col_4 + FROM test1_n2 LEFT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR 
test2_n0.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2) POSTHOOK: type: QUERY @@ -1816,7 +1816,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n2 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1829,7 +1829,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n0 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1893,7 +1893,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n2 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1906,7 +1906,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n0 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1944,47 +1944,47 @@ STAGE PLANS: Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key1, test1_n2.value AS value1, test1_n2.col_1 AS col_1, + test2_n0.key AS key2, test2_n0.value AS value2, test2_n0.col_2 AS col_2 + FROM test1_n2 RIGHT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key3, test1_n2.value AS value3, test1_n2.col_1 AS col_3, + test2_n0.key AS key4, test2_n0.value AS value4, test2_n0.col_2 AS col_4 + FROM test1_n2 LEFT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n2 +PREHOOK: Input: default@test2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key1, test1_n2.value AS value1, test1_n2.col_1 AS col_1, + test2_n0.key AS key2, test2_n0.value AS value2, 
test2_n0.col_2 AS col_2 + FROM test1_n2 RIGHT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n2.key AS key3, test1_n2.value AS value3, test1_n2.col_1 AS col_3, + test2_n0.key AS key4, test2_n0.value AS value4, test2_n0.col_2 AS col_4 + FROM test1_n2 LEFT OUTER JOIN test2_n0 + ON (test1_n2.value=test2_n0.value + AND (test1_n2.key between 100 and 102 + OR test2_n0.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n2 +POSTHOOK: Input: default@test2_n0 #### A masked pattern was here #### NULL NULL NULL 104 3 Fli 99 2 Mat 102 2 Del NULL NULL NULL 104 3 Fli 101 2 Car 102 2 Del diff --git a/ql/src/test/results/clientpositive/join5.q.out b/ql/src/test/results/clientpositive/join5.q.out index 277ea58b34..91a4a52499 100644 --- a/ql/src/test/results/clientpositive/join5.q.out +++ b/ql/src/test/results/clientpositive/join5.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n106(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n106 +POSTHOOK: query: CREATE TABLE dest1_n106(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n106 PREHOOK: query: EXPLAIN FROM ( FROM @@ -19,7 +19,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n106 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM ( @@ -34,7 +34,7 @@ FROM ( ON (a.c1 = b.c3) SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 ) c -INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 +INSERT OVERWRITE TABLE dest1_n106 SELECT c.c1, c.c2, c.c3, c.c4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -98,7 +98,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n106 Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string) outputColumnNames: c1, c2, c3, c4 @@ -123,7 +123,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n106 Stage: Stage-2 Stats Work @@ -131,7 +131,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4 Column Types: int, string, int, string - Table: default.dest1 + Table: default.dest1_n106 
  Stage: Stage-3
    Map Reduce
@@ -168,10 +168,10 @@ PREHOOK: query: FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n106 SELECT c.c1, c.c2, c.c3, c.c4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n106
 POSTHOOK: query: FROM (
 FROM
 (
@@ -184,21 +184,21 @@ POSTHOOK: query: FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n106 SELECT c.c1, c.c2, c.c3, c.c4
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n106
+POSTHOOK: Lineage: dest1_n106.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n106.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n106.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n106.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n106.* FROM dest1_n106
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n106
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n106.* FROM dest1_n106
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n106
 #### A masked pattern was here ####
 17	val_17	17	val_17
 18	val_18	18	val_18
diff --git a/ql/src/test/results/clientpositive/join6.q.out b/ql/src/test/results/clientpositive/join6.q.out
index 56cbfa6ce2..88dc1926bb 100644
--- a/ql/src/test/results/clientpositive/join6.q.out
+++ b/ql/src/test/results/clientpositive/join6.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n131(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n131
+POSTHOOK: query: CREATE TABLE dest1_n131(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n131
 PREHOOK: query: EXPLAIN
 FROM (
  FROM
@@ -19,7 +19,7 @@ FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n131 SELECT c.c1, c.c2, c.c3, c.c4
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -34,7 +34,7 @@ FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n131 SELECT c.c1, c.c2, c.c3, c.c4
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -98,7 +98,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n131
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string)
            outputColumnNames: c1, c2, c3, c4
@@ -123,7 +123,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n131
 
  Stage: Stage-2
    Stats Work
@@ -131,7 +131,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: c1, c2, c3, c4
           Column Types: int, string, int, string
-          Table: default.dest1
+          Table: default.dest1_n131
 
   Stage: Stage-3
     Map Reduce
@@ -168,10 +168,10 @@ PREHOOK: query: FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n131 SELECT c.c1, c.c2, c.c3, c.c4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n131
 POSTHOOK: query: FROM (
 FROM
 (
@@ -184,21 +184,21 @@ POSTHOOK: query: FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4
+INSERT OVERWRITE TABLE dest1_n131 SELECT c.c1, c.c2, c.c3, c.c4
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n131
+POSTHOOK: Lineage: dest1_n131.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n131.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n131.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n131.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n131.* FROM dest1_n131
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n131
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n131.* FROM dest1_n131
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n131
 #### A masked pattern was here ####
 11	val_11	NULL	NULL
 12	val_12	NULL	NULL
diff --git a/ql/src/test/results/clientpositive/join7.q.out b/ql/src/test/results/clientpositive/join7.q.out
index e42f849937..6c43fb35ff 100644
--- a/ql/src/test/results/clientpositive/join7.q.out
+++ b/ql/src/test/results/clientpositive/join7.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n15(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n15
+POSTHOOK: query: CREATE TABLE dest1_n15(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n15
 PREHOOK: query: EXPLAIN
 FROM (
  FROM
@@ -24,7 +24,7 @@ FROM (
 ON (a.c1 = c.c5)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6
+INSERT OVERWRITE TABLE dest1_n15 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -44,7 +44,7 @@ FROM (
 ON (a.c1 = c.c5)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6
+INSERT OVERWRITE TABLE dest1_n15 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -126,7 +126,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n15
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: int), _col5 (type: string)
            outputColumnNames: c1, c2, c3, c4, c5, c6
@@ -151,7 +151,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n15
 
  Stage: Stage-2
    Stats Work
@@ -159,7 +159,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: c1, c2, c3, c4, c5, c6
           Column Types: int, string, int, string, int, string
-          Table: default.dest1
+          Table: default.dest1_n15
 
   Stage: Stage-3
     Map Reduce
@@ -201,10 +201,10 @@ PREHOOK: query: FROM (
 ON (a.c1 = c.c5)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6
+INSERT OVERWRITE TABLE dest1_n15 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n15
 POSTHOOK: query: FROM (
 FROM
 (
@@ -222,23 +222,23 @@ POSTHOOK: query: FROM (
 ON (a.c1 = c.c5)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6
+INSERT OVERWRITE TABLE dest1_n15 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src3.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c6 SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n15
+POSTHOOK: Lineage: dest1_n15.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n15.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n15.c3 EXPRESSION [(src)src2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n15.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n15.c5 EXPRESSION [(src)src3.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n15.c6 SIMPLE [(src)src3.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n15.* FROM dest1_n15
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n15
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n15.* FROM dest1_n15
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n15
 #### A masked pattern was here ####
 11	val_11	NULL	NULL	NULL	NULL
 12	val_12	NULL	NULL	NULL	NULL
diff --git a/ql/src/test/results/clientpositive/join8.q.out b/ql/src/test/results/clientpositive/join8.q.out
index 5ca34e92ad..95bd733ce9 100644
--- a/ql/src/test/results/clientpositive/join8.q.out
+++ b/ql/src/test/results/clientpositive/join8.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n145(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n145
+POSTHOOK: query: CREATE TABLE dest1_n145(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n145
 PREHOOK: query: EXPLAIN
 FROM (
  FROM
@@ -19,7 +19,7 @@ FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL
+INSERT OVERWRITE TABLE dest1_n145 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 FROM (
@@ -34,7 +34,7 @@ FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL
+INSERT OVERWRITE TABLE dest1_n145 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -101,7 +101,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n145
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string)
            outputColumnNames: c1, c2, c3, c4
@@ -126,7 +126,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1
+              name: default.dest1_n145
 
  Stage: Stage-2
    Stats Work
@@ -134,7 +134,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: c1, c2, c3, c4
           Column Types: int, string, int, string
-          Table: default.dest1
+          Table: default.dest1_n145
 
   Stage: Stage-3
     Map Reduce
@@ -171,10 +171,10 @@ PREHOOK: query: FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL
+INSERT OVERWRITE TABLE dest1_n145 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n145
 POSTHOOK: query: FROM (
 FROM
 (
@@ -187,21 +187,21 @@ POSTHOOK: query: FROM (
 ON (a.c1 = b.c3)
 SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4
 ) c
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL
+INSERT OVERWRITE TABLE dest1_n145 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n145
+POSTHOOK: Lineage: dest1_n145.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n145.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n145.c3 EXPRESSION []
+POSTHOOK: Lineage: dest1_n145.c4 SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n145.* FROM dest1_n145
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n145
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n145.* FROM dest1_n145
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n145
 #### A masked pattern was here ####
 11	val_11	NULL	NULL
 12	val_12	NULL	NULL
diff --git a/ql/src/test/results/clientpositive/join9.q.out b/ql/src/test/results/clientpositive/join9.q.out
index 0a86a6e12a..dd57d0f64f 100644
--- a/ql/src/test/results/clientpositive/join9.q.out
+++ b/ql/src/test/results/clientpositive/join9.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n35(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n35
+POSTHOOK: query: CREATE TABLE dest1_n35(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n35
 PREHOOK: query: EXPLAIN EXTENDED
 FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
+INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
 FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
+INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -201,17 +201,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n35
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n35 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.dest1
+          name: default.dest1_n35
           TotalFiles: 1
           GatherStats: true
           MultiFileSpray: false
@@ -260,17 +260,17 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
#### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n35
               numFiles 0
               numRows 0
               rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n35 { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 0
#### A masked pattern was here ####
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.dest1
+          name: default.dest1_n35
 
  Stage: Stage-2
    Stats Work
@@ -279,7 +279,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, value
           Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n35
           Is Table Level Stats: true
 
   Stage: Stage-3
@@ -353,28 +353,28 @@ STAGE PLANS:
           MultiFileSpray: false
 PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
+INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n35
 POSTHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
+INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n35
+POSTHOOK: Lineage: dest1_n35.key EXPRESSION [(srcpart)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n35.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n35.* FROM dest1_n35
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n35
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n35.* FROM dest1_n35
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n35
 #### A masked pattern was here ####
 0	val_0	0	val_0
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
index ab62fa451e..9f273dcaed 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table part2( 
+PREHOOK: query: create table part2_n0( 
     p2_partkey INT,
     p2_name STRING,
     p2_mfgr STRING,
@@ -11,8 +11,8 @@ PREHOOK: query: create table part2(
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part2
-POSTHOOK: query: create table part2( 
+PREHOOK: Output: default@part2_n0
+POSTHOOK: query: create table part2_n0( 
     p2_partkey INT,
     p2_name STRING,
     p2_mfgr STRING,
@@ -25,8 +25,8 @@ POSTHOOK: query: create table part2(
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part2
-PREHOOK: query: create table part3( 
+POSTHOOK: Output: default@part2_n0
+PREHOOK: query: create table part3_n0( 
     p3_partkey INT,
     p3_name STRING,
     p3_mfgr STRING,
@@ -39,8 +39,8 @@ PREHOOK: query: create table part3(
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part3
-POSTHOOK: query: create table part3( 
+PREHOOK: Output: default@part3_n0
+POSTHOOK: query: create table part3_n0( 
     p3_partkey INT,
     p3_name STRING,
    p3_mfgr STRING,
@@ -53,12 +53,12 @@ POSTHOOK: query: create table part3(
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part3
+POSTHOOK: Output: default@part3_n0
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name and p2_name = p3_name
+from part p1 join part2_n0 p2 join part3_n0 p3 on p1.p_name = p2_name and p2_name = p3_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name and p2_name = p3_name
+from part p1 join part2_n0 p2 join part3_n0 p3 on p1.p_name = p2_name and p2_name = p3_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -146,10 +146,10 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name and p3_name = p2_name
+from part p1 join part2_n0 p2 join part3_n0 p3 on p2_name = p1.p_name and p3_name = p2_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name and p3_name = p2_name
+from part p1 join part2_n0 p2 join part3_n0 p3 on p2_name = p1.p_name and p3_name = p2_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -238,10 +238,10 @@ STAGE PLANS:
 
 Warning: Shuffle Join JOIN[13][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name
+from part p1 join part2_n0 p2 join part3_n0 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name
+from part p1 join part2_n0 p2 join part3_n0 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -351,10 +351,10 @@ STAGE PLANS:
 
 Warning: Shuffle Join JOIN[13][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p2_partkey = 1 and p3_name = p2_name
+from part p1 join part2_n0 p2 join part3_n0 p3 on p2_partkey = 1 and p3_name = p2_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p2_partkey = 1 and p3_name = p2_name
+from part p1 join part2_n0 p2 join part3_n0 p3 on p2_partkey = 1 and p3_name = p2_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out
index cfb727f7ae..1e27c288b0 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table part2( 
+PREHOOK: query: create table part2_n5( 
     p2_partkey INT,
     p2_name STRING,
     p2_mfgr STRING,
@@ -11,8 +11,8 @@ PREHOOK: query: create table part2(
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part2
-POSTHOOK: query: create table part2( 
+PREHOOK: Output: default@part2_n5
+POSTHOOK: query: create table part2_n5( 
     p2_partkey INT,
     p2_name STRING,
     p2_mfgr STRING,
@@ -25,8 +25,8 @@ POSTHOOK: query: create table part2(
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part2
-PREHOOK: query: create table part3( 
+POSTHOOK: Output: default@part2_n5
+PREHOOK: query: create table part3_n2( 
     p3_partkey INT,
     p3_name STRING,
     p3_mfgr STRING,
@@ -39,8 +39,8 @@ PREHOOK: query: create table part3(
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part3
-POSTHOOK: query: create table part3( 
+PREHOOK: Output: default@part3_n2
+POSTHOOK: query: create table part3_n2( 
     p3_partkey INT,
     p3_name STRING,
     p3_mfgr STRING,
@@ -53,13 +53,13 @@ POSTHOOK: query: create table part3(
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part3
+POSTHOOK: Output: default@part3_n2
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3
+from part p1 join part2_n5 p2 join part3_n2 p3
 where p1.p_name = p2_name and p2_name = p3_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3
+from part p1 join part2_n5 p2 join part3_n2 p3
 where p1.p_name = p2_name and p2_name = p3_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -148,11 +148,11 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3
+from part p1 join part2_n5 p2 join part3_n2 p3
 where p2_name = p1.p_name and p3_name = p2_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3
+from part p1 join part2_n5 p2 join part3_n2 p3
 where p2_name = p1.p_name and p3_name = p2_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -242,11 +242,11 @@ STAGE PLANS:
 
 Warning: Shuffle Join JOIN[13][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3
+from part p1 join part2_n5 p2 join part3_n2 p3
 where p2_partkey + p1.p_partkey = p1.p_partkey and p3_name = p2_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3
+from part p1 join part2_n5 p2 join part3_n2 p3
 where p2_partkey + p1.p_partkey = p1.p_partkey and p3_name = p2_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -357,11 +357,11 @@ STAGE PLANS:
 
 Warning: Shuffle Join JOIN[13][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3
+from part p1 join part2_n5 p2 join part3_n2 p3
 where p2_partkey = 1 and p3_name = p2_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3
+from part p1 join part2_n5 p2 join part3_n2 p3
 where p2_partkey = 1 and p3_name = p2_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
index 59ca4c9383..68c6b81e6b 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table part2( 
+PREHOOK: query: create table part2_n4( 
     p2_partkey INT,
     p2_name STRING,
     p2_mfgr STRING,
@@ -11,8 +11,8 @@ PREHOOK: query: create table part2(
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part2
-POSTHOOK: query: create table part2( 
+PREHOOK: Output: default@part2_n4
+POSTHOOK: query: create table part2_n4( 
     p2_partkey INT,
     p2_name STRING,
     p2_mfgr STRING,
@@ -25,8 +25,8 @@ POSTHOOK: query: create table part2(
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part2
-PREHOOK: query: create table part3( 
+POSTHOOK: Output: default@part2_n4
+PREHOOK: query: create table part3_n1( 
     p3_partkey INT,
     p3_name STRING,
     p3_mfgr STRING,
@@ -39,8 +39,8 @@ PREHOOK: query: create table part3(
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@part3
-POSTHOOK: query: create table part3( 
+PREHOOK: Output: default@part3_n1
+POSTHOOK: query: create table part3_n1( 
     p3_partkey INT,
     p3_name STRING,
     p3_mfgr STRING,
@@ -53,13 +53,13 @@ POSTHOOK: query: create table part3(
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part3
+POSTHOOK: Output: default@part3_n1
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name join part p4
+from part p1 join part2_n4 p2 join part3_n1 p3 on p1.p_name = p2_name join part p4
 where p2_name = p3_name and p1.p_name = p4.p_name
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name join part p4
+from part p1 join part2_n4 p2 join part3_n1 p3 on p1.p_name = p2_name join part p4
 where p2_name = p3_name and p1.p_name = p4.p_name
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -166,12 +166,12 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name join part p4
+from part p1 join part2_n4 p2 join part3_n1 p3 on p2_name = p1.p_name join part p4
 where p2_name = p3_name and p1.p_partkey = p4.p_partkey and p1.p_partkey = p2_partkey
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select *
-from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name join part p4
+from part p1 join part2_n4 p2 join part3_n1 p3 on p2_name = p1.p_name join part p4
 where p2_name = p3_name and p1.p_partkey = p4.p_partkey and p1.p_partkey = p2_partkey
 POSTHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/join_emit_interval.q.out b/ql/src/test/results/clientpositive/join_emit_interval.q.out
index d28b15d78e..7bd8edba75 100644
--- a/ql/src/test/results/clientpositive/join_emit_interval.q.out
+++ b/ql/src/test/results/clientpositive/join_emit_interval.q.out
@@ -1,54 +1,54 @@
-PREHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING)
+PREHOOK: query: CREATE TABLE test1_n6 (key INT, value INT, col_1 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test1
-POSTHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING)
+PREHOOK: Output: default@test1_n6
+POSTHOOK: query: CREATE TABLE test1_n6 (key INT, value INT, col_1 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test1
-PREHOOK: query: INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
+POSTHOOK: Output: default@test1_n6
+PREHOOK: query: INSERT INTO test1_n6 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
 (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test1
-POSTHOOK: query: INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
+PREHOOK: Output: default@test1_n6
+POSTHOOK: query: INSERT INTO test1_n6 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
 (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test1
-POSTHOOK: Lineage: test1.col_1 SCRIPT []
-POSTHOOK: Lineage: test1.key SCRIPT []
-POSTHOOK: Lineage: test1.value SCRIPT []
-PREHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING)
+POSTHOOK: Output: default@test1_n6
+POSTHOOK: Lineage: test1_n6.col_1 SCRIPT []
+POSTHOOK: Lineage: test1_n6.key SCRIPT []
+POSTHOOK: Lineage: test1_n6.value SCRIPT []
+PREHOOK: query: CREATE TABLE test2_n4 (key INT, value INT, col_2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test2
-POSTHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING)
+PREHOOK: Output: default@test2_n4
+POSTHOOK: query: CREATE TABLE test2_n4 (key INT, value INT, col_2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test2
-PREHOOK: query: INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
+POSTHOOK: Output: default@test2_n4
+PREHOOK: query: INSERT INTO test2_n4 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
 (104, 3, 'Fli'), (105, NULL, 'None')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test2
-POSTHOOK: query: INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
+PREHOOK: Output: default@test2_n4
+POSTHOOK: query: INSERT INTO test2_n4 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
 (104, 3, 'Fli'), (105, NULL, 'None')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test2
-POSTHOOK: Lineage: test2.col_2 SCRIPT []
-POSTHOOK: Lineage: test2.key SCRIPT []
-POSTHOOK: Lineage: test2.value SCRIPT []
+POSTHOOK: Output: default@test2_n4
+POSTHOOK: Lineage: test2_n4.col_2 SCRIPT []
+POSTHOOK: Lineage: test2_n4.key SCRIPT []
+POSTHOOK: Lineage: test2_n4.value SCRIPT []
 PREHOOK: query: EXPLAIN SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value AND test1.key between 100 and 102)
+FROM test1_n6 LEFT OUTER JOIN test2_n4
+ON (test1_n6.value=test2_n4.value AND test1_n6.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value AND test1.key between 100 and 102)
+FROM test1_n6 LEFT OUTER JOIN test2_n4
+ON (test1_n6.value=test2_n4.value AND test1_n6.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -59,7 +59,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: test1
+            alias: test1_n6
             Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -72,7 +72,7 @@ STAGE PLANS:
                 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col0 (type: int), _col2 (type: string)
           TableScan
-            alias: test2
+            alias: test2_n4
             Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -111,18 +111,18 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value AND test1.key between 100 and 102)
+FROM test1_n6 LEFT OUTER JOIN test2_n4
+ON (test1_n6.value=test2_n4.value AND test1_n6.key between 100 and 102)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test1
-PREHOOK: Input: default@test2
+PREHOOK: Input: default@test1_n6
+PREHOOK: Input: default@test2_n4
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value AND test1.key between 100 and 102)
+FROM test1_n6 LEFT OUTER JOIN test2_n4
+ON (test1_n6.value=test2_n4.value AND test1_n6.key between 100 and 102)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test1
-POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test1_n6
+POSTHOOK: Input: default@test2_n4
 #### A masked pattern was here ####
 98	NULL	None	NULL	NULL	NULL
 NULL	NULL	None	NULL	NULL	NULL
@@ -134,13 +134,13 @@ NULL	NULL	None	NULL	NULL	NULL
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102)
+FROM test1_n6 LEFT OUTER JOIN test2_n4
+ON (test1_n6.key between 100 and 102)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102)
+FROM test1_n6 LEFT OUTER JOIN test2_n4
+ON (test1_n6.key between 100 and 102)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -151,7 +151,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: test1
+            alias: test1_n6
             Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int), col_1 (type: string)
@@ -162,7 +162,7 @@ STAGE PLANS:
                 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
           TableScan
-            alias: test2
+            alias: test2_n4
             Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int), col_2 (type: string)
@@ -200,18 +200,18 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
-FROM test1 LEFT OUTER
JOIN test2_n4 +ON (test1_n6.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n6 +PREHOOK: Input: default@test2_n4 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n6 LEFT OUTER JOIN test2_n4 +ON (test1_n6.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n6 +POSTHOOK: Input: default@test2_n4 #### A masked pattern was here #### 101 2 Car 105 NULL None 101 2 Car 104 3 Fli diff --git a/ql/src/test/results/clientpositive/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/join_filters_overlap.q.out index 7d4855ae2d..3a9ec0f754 100644 --- a/ql/src/test/results/clientpositive/join_filters_overlap.q.out +++ b/ql/src/test/results/clientpositive/join_filters_overlap.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 +PREHOOK: query: create table a_n3 as SELECT 100 as key, a_n3.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a_n3 as value limit 3 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@a -POSTHOOK: query: create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 +PREHOOK: Output: default@a_n3 +POSTHOOK: query: create table a_n3 as SELECT 100 as key, a_n3.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a_n3 as value limit 3 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@a -POSTHOOK: Lineage: a.key SIMPLE [] -POSTHOOK: Lineage: a.value SCRIPT [] -PREHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +POSTHOOK: Output: default@a_n3 +POSTHOOK: Lineage: a_n3.key SIMPLE [] +POSTHOOK: Lineage: a_n3.value SCRIPT [] +PREHOOK: query: explain extended select * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +POSTHOOK: query: explain extended select * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -23,7 +23,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -86,7 +86,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: a + base file name: a_n3 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -98,11 +98,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 
rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 @@ -120,20 +120,20 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.a - name: default.a + name: default.a_n3 + name: default.a_n3 Truncated Path -> Alias: - /a [$hdt$_0:a, $hdt$_1:b, $hdt$_2:c] + /a_n3 [$hdt$_0:a_n3, $hdt$_1:b, $hdt$_2:c] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -181,31 +181,31 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +PREHOOK: query: select * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +POSTHOOK: query: select * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 40 NULL NULL NULL NULL 100 50 100 50 NULL NULL 100 60 NULL NULL 100 60 -PREHOOK: query: select /*+ MAPJOIN(b,c)*/ * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +PREHOOK: query: select /*+ MAPJOIN(b,c)*/ * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(b,c)*/ * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +POSTHOOK: query: select /*+ MAPJOIN(b,c)*/ * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 40 NULL NULL NULL NULL 100 50 100 50 NULL NULL 100 60 NULL NULL 100 60 -PREHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +PREHOOK: query: explain extended select * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: 
explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +POSTHOOK: query: explain extended select * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -216,7 +216,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -279,7 +279,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: a + base file name: a_n3 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -291,11 +291,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 @@ -313,20 +313,20 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.a - name: default.a + name: default.a_n3 + name: default.a_n3 Truncated Path -> Alias: - /a [$hdt$_0:a, $hdt$_1:b, $hdt$_2:c] + /a_n3 [$hdt$_0:a_n3, $hdt$_1:b, $hdt$_2:c] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -374,31 +374,31 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +PREHOOK: query: select * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +POSTHOOK: query: select * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 50 100 50 NULL NULL NULL NULL 100 40 NULL NULL NULL NULL 100 60 100 60 -PREHOOK: query: select /*+ MAPJOIN(a,c)*/ * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +PREHOOK: query: select /*+ MAPJOIN(a_n3,c)*/ * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) PREHOOK: type: QUERY -PREHOOK: Input: 
default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a,c)*/ * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +POSTHOOK: query: select /*+ MAPJOIN(a_n3,c)*/ * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 50 100 50 NULL NULL NULL NULL 100 40 NULL NULL NULL NULL 100 60 100 60 -PREHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +PREHOOK: query: explain extended select * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50 AND b.value>10) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +POSTHOOK: query: explain extended select * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50 AND b.value>10) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -409,7 +409,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -472,7 +472,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: a + base file name: a_n3 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -484,11 +484,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 @@ -506,20 +506,20 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.a - name: default.a + name: default.a_n3 + name: default.a_n3 Truncated Path -> Alias: - /a [$hdt$_0:a, $hdt$_1:b, $hdt$_2:c] + /a_n3 [$hdt$_0:a_n3, $hdt$_1:b, $hdt$_2:c] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -567,31 +567,31 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +PREHOOK: query: select * from a_n3 
right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50 AND b.value>10) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +POSTHOOK: query: select * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50 AND b.value>10) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 50 100 50 NULL NULL NULL NULL 100 40 NULL NULL NULL NULL 100 60 100 60 -PREHOOK: query: select /*+ MAPJOIN(a,c)*/ * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +PREHOOK: query: select /*+ MAPJOIN(a_n3,c)*/ * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50 AND b.value>10) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(a,c)*/ * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +POSTHOOK: query: select /*+ MAPJOIN(a_n3,c)*/ * from a_n3 right outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50 AND b.value>10) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 50 100 50 NULL NULL NULL NULL 100 40 NULL NULL NULL NULL 100 60 100 60 -PREHOOK: query: explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: explain extended select * from a_n3 full outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) PREHOOK: type: QUERY -POSTHOOK: query: explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +POSTHOOK: query: explain extended select * from a_n3 full outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -602,7 +602,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -682,7 +682,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### 
Partition - base file name: a + base file name: a_n3 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -694,11 +694,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 @@ -716,20 +716,20 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.a - name: default.a + name: default.a_n3 + name: default.a_n3 Truncated Path -> Alias: - /a [$hdt$_0:a, $hdt$_1:b, $hdt$_2:c, $hdt$_3:d] + /a_n3 [$hdt$_0:a_n3, $hdt$_1:b, $hdt$_2:c, $hdt$_3:d] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -781,22 +781,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: select * from a_n3 full outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +POSTHOOK: query: select * from a_n3 full outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 40 NULL NULL NULL NULL 100 40 100 50 100 50 NULL NULL NULL NULL 100 60 NULL NULL NULL NULL NULL NULL NULL NULL 100 40 NULL NULL NULL NULL NULL NULL 100 60 100 60 NULL NULL -PREHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: explain extended select * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) PREHOOK: type: QUERY -POSTHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on 
(a.key=d.key AND a.value=40 AND d.value=40) +POSTHOOK: query: explain extended select * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -807,7 +807,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n3 Statistics: Num rows: 3 Data size: 18 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator @@ -891,7 +891,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: a + base file name: a_n3 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -903,11 +903,11 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 @@ -925,20 +925,20 @@ STAGE PLANS: columns.comments columns.types int:int #### A masked pattern was here #### - name default.a + name default.a_n3 numFiles 1 numRows 3 rawDataSize 18 - serialization.ddl struct a { i32 key, i32 value} + serialization.ddl struct a_n3 { i32 key, i32 value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 21 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.a - name: default.a + name: default.a_n3 + name: default.a_n3 Truncated Path -> Alias: - /a [$hdt$_0:a, $hdt$_1:b, $hdt$_2:c, $hdt$_3:d] + /a_n3 [$hdt$_0:a_n3, $hdt$_1:b, $hdt$_2:c, $hdt$_3:d] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -989,24 +989,24 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: select * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +POSTHOOK: query: select * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 40 NULL NULL NULL NULL 100 40 100 50 100 50 NULL NULL NULL NULL 100 60 NULL NULL 100 60 NULL NULL -PREHOOK: query: select /*+ MAPJOIN(b,c, d)*/ * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer 
join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: select /*+ MAPJOIN(b,c, d)*/ * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(b,c, d)*/ * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +POSTHOOK: query: select /*+ MAPJOIN(b,c, d)*/ * from a_n3 left outer join a_n3 b on (a_n3.key=b.key AND a_n3.value=50 AND b.value=50) left outer join a_n3 c on (a_n3.key=c.key AND a_n3.value=60 AND c.value=60) left outer join a_n3 d on (a_n3.key=d.key AND a_n3.value=40 AND d.value=40) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n3 #### A masked pattern was here #### 100 40 NULL NULL NULL NULL 100 40 100 50 100 50 NULL NULL NULL NULL diff --git a/ql/src/test/results/clientpositive/join_map_ppr.q.out b/ql/src/test/results/clientpositive/join_map_ppr.q.out index b16185e239..8712d81cfd 100644 --- a/ql/src/test/results/clientpositive/join_map_ppr.q.out +++ b/ql/src/test/results/clientpositive/join_map_ppr.q.out @@ -1,20 +1,20 @@ -PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n4(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n4 +POSTHOOK: query: CREATE TABLE dest_j1_n4(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n4 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) WHERE z.ds='2008-04-08' and z.hr=11 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) @@ -118,17 +118,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n4 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -260,17 +260,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name 
default.dest_j1_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n4 Stage: Stage-4 Stats Work @@ -279,7 +279,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value, val2 Column Types: string, string, string - Table: default.dest_j1 + Table: default.dest_j1_n4 Is Table Level Stats: true Stage: Stage-5 @@ -304,17 +304,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n4 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -335,11 +335,11 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -357,18 +357,18 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 - name: default.dest_j1 + name: default.dest_j1_n4 + name: default.dest_j1_n4 Truncated Path -> Alias: #### A masked pattern was here #### @@ -394,17 +394,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n4 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -425,11 +425,11 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, 
string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -447,18 +447,18 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 - name: default.dest_j1 + name: default.dest_j1_n4 + name: default.dest_j1_n4 Truncated Path -> Alias: #### A masked pattern was here #### @@ -468,7 +468,7 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) @@ -478,8 +478,8 @@ PREHOOK: Input: default@src PREHOOK: Input: default@src1 PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n4 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) @@ -489,17 +489,17 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: Output: default@dest_j1_n4 +POSTHOOK: Lineage: dest_j1_n4.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n4.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n4.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n4 #### A masked pattern was here #### 128 val_128 val_128 128 val_128 val_128 @@ -645,14 +645,14 @@ POSTHOOK: Output: default@src1_copy POSTHOOK: Lineage: src1_copy.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: src1_copy.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1_copy x JOIN src_copy y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) WHERE z.ds='2008-04-08' and z.hr=11 PREHOOK: type: QUERY POSTHOOK: 
query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest_j1 +INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1_copy x JOIN src_copy y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) @@ -756,17 +756,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 1 numRows 107 rawDataSize 2018 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2125 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n4 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -898,17 +898,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 1 numRows 107 rawDataSize 2018 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2125 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n4 Stage: Stage-4 Stats Work @@ -917,7 +917,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value, val2 Column Types: string, string, string - Table: default.dest_j1 + Table: default.dest_j1_n4 Is Table Level Stats: true Stage: Stage-5 @@ -942,17 +942,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 1 numRows 107 rawDataSize 2018 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2125 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n4 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -973,11 +973,11 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 1 numRows 107 rawDataSize 2018 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2125 @@ -995,18 +995,18 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 1 numRows 107 rawDataSize 2018 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2125 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 - name: default.dest_j1 + name: default.dest_j1_n4 + name: 
default.dest_j1_n4 Truncated Path -> Alias: #### A masked pattern was here #### @@ -1032,17 +1032,17 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 1 numRows 107 rawDataSize 2018 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2125 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n4 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -1063,11 +1063,11 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 1 numRows 107 rawDataSize 2018 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2125 @@ -1085,18 +1085,18 @@ STAGE PLANS: columns.comments columns.types string:string:string #### A masked pattern was here #### - name default.dest_j1 + name default.dest_j1_n4 numFiles 1 numRows 107 rawDataSize 2018 - serialization.ddl struct dest_j1 { string key, string value, string val2} + serialization.ddl struct dest_j1_n4 { string key, string value, string val2} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 2125 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 - name: default.dest_j1 + name: default.dest_j1_n4 + name: default.dest_j1_n4 Truncated Path -> Alias: #### A masked pattern was here #### @@ -1106,7 +1106,7 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: query: INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1_copy x JOIN src_copy y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) @@ -1116,8 +1116,8 @@ PREHOOK: Input: default@src1_copy PREHOOK: Input: default@src_copy PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1 +PREHOOK: Output: default@dest_j1_n4 +POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1_n4 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value FROM src1_copy x JOIN src_copy y ON (x.key = y.key) JOIN srcpart z ON (x.key = z.key) @@ -1127,17 +1127,17 @@ POSTHOOK: Input: default@src1_copy POSTHOOK: Input: default@src_copy POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1_copy)x.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src_copy)y.FieldSchema(name:value, type:string, comment:null), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from dest_j1 +POSTHOOK: Output: default@dest_j1_n4 +POSTHOOK: Lineage: dest_j1_n4.key SIMPLE [(src1_copy)x.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest_j1_n4.val2 SIMPLE 
[(src_copy)y.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest_j1_n4.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from dest_j1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from dest_j1 +POSTHOOK: query: select * from dest_j1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n4 #### A masked pattern was here #### 128 val_128 val_128 128 val_128 val_128 diff --git a/ql/src/test/results/clientpositive/join_on_varchar.q.out b/ql/src/test/results/clientpositive/join_on_varchar.q.out index 9608240d9e..fb207a0a90 100644 --- a/ql/src/test/results/clientpositive/join_on_varchar.q.out +++ b/ql/src/test/results/clientpositive/join_on_varchar.q.out @@ -1,62 +1,62 @@ -PREHOOK: query: create table tbl1(c1 varchar(10), intcol int) +PREHOOK: query: create table tbl1_n3(c1 varchar(10), intcol int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tbl1 -POSTHOOK: query: create table tbl1(c1 varchar(10), intcol int) +PREHOOK: Output: default@tbl1_n3 +POSTHOOK: query: create table tbl1_n3(c1 varchar(10), intcol int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tbl1 -PREHOOK: query: create table tbl2(c2 varchar(30)) +POSTHOOK: Output: default@tbl1_n3 +PREHOOK: query: create table tbl2_n2(c2 varchar(30)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tbl2 -POSTHOOK: query: create table tbl2(c2 varchar(30)) +PREHOOK: Output: default@tbl2_n2 +POSTHOOK: query: create table tbl2_n2(c2 varchar(30)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tbl2 -PREHOOK: query: insert into table tbl1 select repeat('t', 10), 11 from src limit 1 +POSTHOOK: Output: default@tbl2_n2 +PREHOOK: query: insert into table tbl1_n3 select repeat('t', 10), 11 from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tbl1 -POSTHOOK: query: insert into table tbl1 select repeat('t', 10), 11 from src limit 1 +PREHOOK: Output: default@tbl1_n3 +POSTHOOK: query: insert into table tbl1_n3 select repeat('t', 10), 11 from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tbl1 -POSTHOOK: Lineage: tbl1.c1 EXPRESSION [] -POSTHOOK: Lineage: tbl1.intcol SIMPLE [] -PREHOOK: query: insert into table tbl1 select repeat('s', 10), 22 from src limit 1 +POSTHOOK: Output: default@tbl1_n3 +POSTHOOK: Lineage: tbl1_n3.c1 EXPRESSION [] +POSTHOOK: Lineage: tbl1_n3.intcol SIMPLE [] +PREHOOK: query: insert into table tbl1_n3 select repeat('s', 10), 22 from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tbl1 -POSTHOOK: query: insert into table tbl1 select repeat('s', 10), 22 from src limit 1 +PREHOOK: Output: default@tbl1_n3 +POSTHOOK: query: insert into table tbl1_n3 select repeat('s', 10), 22 from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tbl1 -POSTHOOK: Lineage: tbl1.c1 EXPRESSION [] -POSTHOOK: Lineage: tbl1.intcol SIMPLE [] -PREHOOK: query: insert into table tbl2 select concat(repeat('t', 10), 'ppp') from src limit 1 +POSTHOOK: Output: default@tbl1_n3 +POSTHOOK: Lineage: tbl1_n3.c1 EXPRESSION [] +POSTHOOK: Lineage: tbl1_n3.intcol SIMPLE [] +PREHOOK: query: insert into table tbl2_n2 select 
concat(repeat('t', 10), 'ppp') from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tbl2 -POSTHOOK: query: insert into table tbl2 select concat(repeat('t', 10), 'ppp') from src limit 1 +PREHOOK: Output: default@tbl2_n2 +POSTHOOK: query: insert into table tbl2_n2 select concat(repeat('t', 10), 'ppp') from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tbl2 -POSTHOOK: Lineage: tbl2.c2 EXPRESSION [] -PREHOOK: query: insert into table tbl2 select repeat('s', 10) from src limit 1 +POSTHOOK: Output: default@tbl2_n2 +POSTHOOK: Lineage: tbl2_n2.c2 EXPRESSION [] +PREHOOK: query: insert into table tbl2_n2 select repeat('s', 10) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tbl2 -POSTHOOK: query: insert into table tbl2 select repeat('s', 10) from src limit 1 +PREHOOK: Output: default@tbl2_n2 +POSTHOOK: query: insert into table tbl2_n2 select repeat('s', 10) from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tbl2 -POSTHOOK: Lineage: tbl2.c2 EXPRESSION [] +POSTHOOK: Output: default@tbl2_n2 +POSTHOOK: Lineage: tbl2_n2.c2 EXPRESSION [] PREHOOK: query: explain -select /*+ MAPJOIN(tbl2) */ c1,c2 from tbl1 join tbl2 on (c1 = c2) order by c1,c2 +select /*+ MAPJOIN(tbl2_n2) */ c1,c2 from tbl1_n3 join tbl2_n2 on (c1 = c2) order by c1,c2 PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+ MAPJOIN(tbl2) */ c1,c2 from tbl1 join tbl2 on (c1 = c2) order by c1,c2 +select /*+ MAPJOIN(tbl2_n2) */ c1,c2 from tbl1_n3 join tbl2_n2 on (c1 = c2) order by c1,c2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage @@ -67,13 +67,13 @@ STAGE PLANS: Stage: Stage-5 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:tbl2 + $hdt$_1:tbl2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:tbl2 + $hdt$_1:tbl2_n2 TableScan - alias: tbl2 + alias: tbl2_n2 Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: c2 is not null (type: boolean) @@ -91,7 +91,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: tbl1 + alias: tbl1_n3 Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: c1 is not null (type: boolean) @@ -134,14 +134,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ MAPJOIN(tbl2) */ c1,c2 from tbl1 join tbl2 on (c1 = c2) order by c1,c2 +PREHOOK: query: select /*+ MAPJOIN(tbl2_n2) */ c1,c2 from tbl1_n3 join tbl2_n2 on (c1 = c2) order by c1,c2 PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl1_n3 +PREHOOK: Input: default@tbl2_n2 #### A masked pattern was here #### -POSTHOOK: query: select /*+ MAPJOIN(tbl2) */ c1,c2 from tbl1 join tbl2 on (c1 = c2) order by c1,c2 +POSTHOOK: query: select /*+ MAPJOIN(tbl2_n2) */ c1,c2 from tbl1_n3 join tbl2_n2 on (c1 = c2) order by c1,c2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl1_n3 +POSTHOOK: Input: default@tbl2_n2 #### A masked pattern was here #### ssssssssss ssssssssss diff --git a/ql/src/test/results/clientpositive/join_reorder.q.out b/ql/src/test/results/clientpositive/join_reorder.q.out index 4f00728d64..e0ef36051e 100644 --- a/ql/src/test/results/clientpositive/join_reorder.q.out +++ b/ql/src/test/results/clientpositive/join_reorder.q.out @@ -1,55 +1,55 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val 
STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n22(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n22
+POSTHOOK: query: CREATE TABLE T1_n22(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T1_n22
+PREHOOK: query: CREATE TABLE T2_n15(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n15
+POSTHOOK: query: CREATE TABLE T2_n15(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T2_n15
+PREHOOK: query: CREATE TABLE T3_n5(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T3_n5
+POSTHOOK: query: CREATE TABLE T3_n5(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T3_n5
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n22
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n22
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n22
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@t1_n22
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n15
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n15
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n15
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: Output: default@t2_n15
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n5
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n5
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n5
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
-PREHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+POSTHOOK: Output: default@t3_n5
+PREHOOK: query: EXPLAIN FROM T1_n22 a JOIN src c ON c.key+1=a.key
 SELECT a.key, a.val, c.key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+POSTHOOK: query: EXPLAIN FROM T1_n22 a JOIN src c ON c.key+1=a.key
 SELECT a.key, a.val, c.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -111,10 +111,10 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+PREHOOK: query: EXPLAIN FROM T1_n22 a JOIN src c ON c.key+1=a.key
 SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+POSTHOOK: query: EXPLAIN FROM T1_n22 a JOIN src c ON c.key+1=a.key
 SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -176,46 +176,46 @@ STAGE PLANS:
       Processor Tree:
        ListSink
-PREHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key
+PREHOOK: query: FROM T1_n22 a JOIN src c ON c.key+1=a.key
 SELECT a.key, a.val, c.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n22
 #### A masked pattern was here ####
-POSTHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key
+POSTHOOK: query: FROM T1_n22 a JOIN src c ON c.key+1=a.key
 SELECT a.key, a.val, c.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n22
 #### A masked pattern was here ####
 1	11	0
 1	11	0
 1	11	0
 3	13	2
-PREHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key
+PREHOOK: query: FROM T1_n22 a JOIN src c ON c.key+1=a.key
 SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n22
 #### A masked pattern was here ####
-POSTHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key
+POSTHOOK: query: FROM T1_n22 a JOIN src c ON c.key+1=a.key
 SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n22
 #### A masked pattern was here ####
 1	11	0
 1	11	0
 1	11	0
 3	13	2
-PREHOOK: query: EXPLAIN FROM T1 a
-  LEFT OUTER JOIN T2 b ON (b.key=a.key)
-  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+PREHOOK: query: EXPLAIN FROM T1_n22 a
+  LEFT OUTER JOIN T2_n15 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3_n5 c ON (c.val = a.val)
 SELECT a.key, b.key, a.val, c.val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FROM T1 a
-  LEFT OUTER JOIN T2 b ON (b.key=a.key)
-  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+POSTHOOK: query: EXPLAIN FROM T1_n22 a
+  LEFT OUTER JOIN T2_n15 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3_n5 c ON (c.val = a.val)
 SELECT a.key, b.key, a.val, c.val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -305,14 +305,14 @@ STAGE PLANS:
       Processor Tree:
        ListSink
-PREHOOK: query: EXPLAIN FROM T1 a
-  LEFT OUTER JOIN T2 b ON (b.key=a.key)
-  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+PREHOOK: query: EXPLAIN FROM T1_n22 a
+  LEFT OUTER JOIN T2_n15 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3_n5 c ON (c.val = a.val)
 SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FROM T1 a
-  LEFT OUTER JOIN T2 b ON (b.key=a.key)
-  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+POSTHOOK: query: EXPLAIN FROM T1_n22 a
+  LEFT OUTER JOIN T2_n15 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3_n5 c ON (c.val = a.val)
 SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -402,60 +402,60 @@ STAGE PLANS:
       Processor Tree:
        ListSink
-PREHOOK: query: FROM T1 a
-  LEFT OUTER JOIN T2 b ON (b.key=a.key)
-  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+PREHOOK: query: FROM T1_n22 a
+  LEFT OUTER JOIN T2_n15 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3_n5 c ON (c.val = a.val)
 SELECT a.key, b.key, a.val, c.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n22
+PREHOOK: Input: default@t2_n15
+PREHOOK: Input: default@t3_n5
 #### A masked pattern was here ####
-POSTHOOK: query: FROM T1 a
-  LEFT OUTER JOIN T2 b ON (b.key=a.key)
-  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+POSTHOOK: query: FROM T1_n22 a
+  LEFT OUTER JOIN T2_n15 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3_n5 c ON (c.val = a.val)
 SELECT a.key, b.key, a.val, c.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n22
+POSTHOOK: Input: default@t2_n15
+POSTHOOK: Input: default@t3_n5
 #### A masked pattern was here ####
 2	2	12	12
 7	NULL	17	17
 NULL	NULL	NULL	14
 NULL	NULL	NULL	16
-PREHOOK: query: FROM T1 a
-  LEFT OUTER JOIN T2 b ON (b.key=a.key)
-  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+PREHOOK: query: FROM T1_n22 a
+  LEFT OUTER JOIN T2_n15 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3_n5 c ON (c.val = a.val)
 SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n22
+PREHOOK: Input: default@t2_n15
+PREHOOK: Input: default@t3_n5
 #### A masked pattern was here ####
-POSTHOOK: query: FROM T1 a
-  LEFT OUTER JOIN T2 b ON (b.key=a.key)
-  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+POSTHOOK: query: FROM T1_n22 a
+  LEFT OUTER JOIN T2_n15 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3_n5 c ON (c.val = a.val)
 SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n22
+POSTHOOK: Input: default@t2_n15
+POSTHOOK: Input: default@t3_n5
 #### A masked pattern was here ####
 2	2	12	12
 7	NULL	17	17
 NULL	NULL	NULL	14
 NULL	NULL	NULL	16
 PREHOOK: query: EXPLAIN FROM UNIQUEJOIN
-  PRESERVE T1 a (a.key, a.val),
-  PRESERVE T2 b (b.key, b.val),
-  PRESERVE T3 c (c.key, c.val)
+  PRESERVE T1_n22 a (a.key, a.val),
+  PRESERVE T2_n15 b (b.key, b.val),
+  PRESERVE T3_n5 c (c.key, c.val)
 SELECT a.key, b.key, c.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN FROM UNIQUEJOIN
-  PRESERVE T1 a (a.key, a.val),
-  PRESERVE T2 b (b.key, b.val),
-  PRESERVE T3 c (c.key, c.val)
+  PRESERVE T1_n22 a (a.key, a.val),
+  PRESERVE T2_n15 b (b.key, b.val),
+  PRESERVE T3_n5 c (c.key, c.val)
 SELECT a.key, b.key, c.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -521,15 +521,15 @@ STAGE PLANS:
        ListSink
 PREHOOK: query: EXPLAIN FROM UNIQUEJOIN
-  PRESERVE T1 a (a.key, a.val),
-  PRESERVE T2 b (b.key, b.val),
-  PRESERVE T3 c (c.key, c.val)
+  PRESERVE T1_n22 a (a.key, a.val),
+  PRESERVE T2_n15 b (b.key, b.val),
+  PRESERVE T3_n5 c (c.key, c.val)
 SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN FROM UNIQUEJOIN
-  PRESERVE T1 a (a.key, a.val),
-  PRESERVE T2 b (b.key, b.val),
-  PRESERVE T3 c (c.key, c.val)
+  PRESERVE T1_n22 a (a.key, a.val),
+  PRESERVE T2_n15 b (b.key, b.val),
+  PRESERVE T3_n5 c (c.key, c.val)
 SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -595,24 +595,24 @@ STAGE PLANS:
        ListSink
 PREHOOK: query: FROM UNIQUEJOIN
-  PRESERVE T1 a (a.key, a.val),
-  PRESERVE T2 b (b.key, b.val),
-  PRESERVE T3 c (c.key, c.val)
+  PRESERVE T1_n22 a (a.key, a.val),
+  PRESERVE T2_n15 b (b.key, b.val),
+  PRESERVE T3_n5 c (c.key, c.val)
 SELECT a.key, b.key, c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n22
+PREHOOK: Input: default@t2_n15
+PREHOOK: Input: default@t3_n5
 #### A masked pattern was here ####
 POSTHOOK: query: FROM UNIQUEJOIN
-  PRESERVE T1 a (a.key, a.val),
-  PRESERVE T2 b (b.key, b.val),
-  PRESERVE T3 c (c.key, c.val)
+  PRESERVE T1_n22 a (a.key, a.val),
+  PRESERVE T2_n15 b (b.key, b.val),
+  PRESERVE T3_n5 c (c.key, c.val)
 SELECT a.key, b.key, c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n22
+POSTHOOK: Input: default@t2_n15
+POSTHOOK: Input: default@t3_n5
 #### A masked pattern was here ####
 1	NULL	NULL
 2	NULL	2
@@ -626,24 +626,24 @@ NULL	4	4
 NULL	5	NULL
 NULL	NULL	6
 PREHOOK: query: FROM UNIQUEJOIN
-  PRESERVE T1 a (a.key, a.val),
-  PRESERVE T2 b (b.key, b.val),
-  PRESERVE T3 c (c.key, c.val)
+  PRESERVE T1_n22 a (a.key, a.val),
+  PRESERVE T2_n15 b (b.key, b.val),
+  PRESERVE T3_n5 c (c.key, c.val)
 SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n22
+PREHOOK: Input: default@t2_n15
+PREHOOK: Input: default@t3_n5
 #### A masked pattern was here ####
 POSTHOOK: query: FROM UNIQUEJOIN
-  PRESERVE T1 a (a.key, a.val),
-  PRESERVE T2 b (b.key, b.val),
-  PRESERVE T3 c (c.key, c.val)
+  PRESERVE T1_n22 a (a.key, a.val),
+  PRESERVE T2_n15 b (b.key, b.val),
+  PRESERVE T3_n5 c (c.key, c.val)
 SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n22
+POSTHOOK: Input: default@t2_n15
+POSTHOOK: Input: default@t3_n5
 #### A masked pattern was here ####
 1	NULL	NULL
 2	NULL	2
diff --git a/ql/src/test/results/clientpositive/join_reorder2.q.out b/ql/src/test/results/clientpositive/join_reorder2.q.out
index 7d93aaee28..22ed396be8 100644
--- a/ql/src/test/results/clientpositive/join_reorder2.q.out
+++ b/ql/src/test/results/clientpositive/join_reorder2.q.out
@@ -1,78 +1,78 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n31(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n31
+POSTHOOK: query: CREATE TABLE T1_n31(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T1_n31
+PREHOOK: query: CREATE TABLE T2_n19(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n19
+POSTHOOK: query: CREATE TABLE T2_n19(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T2_n19
+PREHOOK: query: CREATE TABLE T3_n7(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T3_n7
+POSTHOOK: query: CREATE TABLE T3_n7(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T3_n7
+PREHOOK: query: CREATE TABLE T4_n0(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T4
-POSTHOOK: query: CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T4_n0
+POSTHOOK: query: CREATE TABLE T4_n0(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T4
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T4_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n31
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n31
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n31
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@t1_n31
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n19
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n19
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n19
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: Output: default@t2_n19
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n7
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n7
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n7
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4
+POSTHOOK: Output: default@t3_n7
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t4
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4
+PREHOOK: Output: default@t4_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t4
+POSTHOOK: Output: default@t4_n0
 PREHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key
+FROM T1_n31 a JOIN T2_n19 b ON a.key = b.key
+          JOIN T3_n7 c ON b.key = c.key
+          JOIN T4_n0 d ON c.key = d.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key
+FROM T1_n31 a JOIN T2_n19 b ON a.key = b.key
+          JOIN T3_n7 c ON b.key = c.key
+          JOIN T4_n0 d ON c.key = d.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -162,37 +162,37 @@ STAGE PLANS:
        ListSink
 PREHOOK: query: SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key
+FROM T1_n31 a JOIN T2_n19 b ON a.key = b.key
+          JOIN T3_n7 c ON b.key = c.key
+          JOIN T4_n0 d ON c.key = d.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
-PREHOOK: Input: default@t4
+PREHOOK: Input: default@t1_n31
+PREHOOK: Input: default@t2_n19
+PREHOOK: Input: default@t3_n7
+PREHOOK: Input: default@t4_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key
+FROM T1_n31 a JOIN T2_n19 b ON a.key = b.key
+          JOIN T3_n7 c ON b.key = c.key
+          JOIN T4_n0 d ON c.key = d.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
-POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t1_n31
+POSTHOOK: Input: default@t2_n19
+POSTHOOK: Input: default@t3_n7
+POSTHOOK: Input: default@t4_n0
 #### A masked pattern was here ####
 2	12	2	22	2	12	2	12
 PREHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON a.val = c.val
-          JOIN T4 d ON a.key + 1 = d.key + 1
+FROM T1_n31 a JOIN T2_n19 b ON a.key = b.key
+          JOIN T3_n7 c ON a.val = c.val
+          JOIN T4_n0 d ON a.key + 1 = d.key + 1
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON a.val = c.val
-          JOIN T4 d ON a.key + 1 = d.key + 1
+FROM T1_n31 a JOIN T2_n19 b ON a.key = b.key
+          JOIN T3_n7 c ON a.val = c.val
+          JOIN T4_n0 d ON a.key + 1 = d.key + 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -332,23 +332,23 @@ STAGE PLANS:
        ListSink
 PREHOOK: query: SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON a.val = c.val
-          JOIN T4 d ON a.key + 1 = d.key + 1
+FROM T1_n31 a JOIN T2_n19 b ON a.key = b.key
+          JOIN T3_n7 c ON a.val = c.val
+          JOIN T4_n0 d ON a.key + 1 = d.key + 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
-PREHOOK: Input: default@t4
+PREHOOK: Input: default@t1_n31
+PREHOOK: Input: default@t2_n19
+PREHOOK: Input: default@t3_n7
+PREHOOK: Input: default@t4_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON a.val = c.val
-          JOIN T4 d ON a.key + 1 = d.key + 1
+FROM T1_n31 a JOIN T2_n19 b ON a.key = b.key
+          JOIN T3_n7 c ON a.val = c.val
+          JOIN T4_n0 d ON a.key + 1 = d.key + 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
-POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t1_n31
+POSTHOOK: Input: default@t2_n19
+POSTHOOK: Input: default@t3_n7
+POSTHOOK: Input: default@t4_n0
 #### A masked pattern was here ####
 2	12	2	22	2	12	2	12
diff --git a/ql/src/test/results/clientpositive/join_reorder3.q.out b/ql/src/test/results/clientpositive/join_reorder3.q.out
index 898134ecc2..d987914fa6 100644
--- a/ql/src/test/results/clientpositive/join_reorder3.q.out
+++ b/ql/src/test/results/clientpositive/join_reorder3.q.out
@@ -1,78 +1,78 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n60(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n60
+POSTHOOK: query: CREATE TABLE T1_n60(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T1_n60
+PREHOOK: query: CREATE TABLE T2_n38(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n38
+POSTHOOK: query: CREATE TABLE T2_n38(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T2_n38
+PREHOOK: query: CREATE TABLE T3_n15(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T3_n15
+POSTHOOK: query: CREATE TABLE T3_n15(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T3_n15
+PREHOOK: query: CREATE TABLE T4_n3(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T4
-POSTHOOK: query: CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T4_n3
+POSTHOOK: query: CREATE TABLE T4_n3(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T4
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T4_n3
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n60
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n60
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n60
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@t1_n60
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n38
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n38
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n38
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: Output: default@t2_n38
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n15
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n15
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n15
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4
+POSTHOOK: Output: default@t3_n15
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n3
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t4
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4
+PREHOOK: Output: default@t4_n3
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n3
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t4
+POSTHOOK: Output: default@t4_n3
 PREHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key
+FROM T1_n60 a JOIN T2_n38 b ON a.key = b.key
+          JOIN T3_n15 c ON b.key = c.key
+          JOIN T4_n3 d ON c.key = d.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key
+FROM T1_n60 a JOIN T2_n38 b ON a.key = b.key
+          JOIN T3_n15 c ON b.key = c.key
+          JOIN T4_n3 d ON c.key = d.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -162,37 +162,37 @@ STAGE PLANS:
        ListSink
 PREHOOK: query: SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key
+FROM T1_n60 a JOIN T2_n38 b ON a.key = b.key
+          JOIN T3_n15 c ON b.key = c.key
+          JOIN T4_n3 d ON c.key = d.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
-PREHOOK: Input: default@t4
+PREHOOK: Input: default@t1_n60
+PREHOOK: Input: default@t2_n38
+PREHOOK: Input: default@t3_n15
+PREHOOK: Input: default@t4_n3
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key
+FROM T1_n60 a JOIN T2_n38 b ON a.key = b.key
+          JOIN T3_n15 c ON b.key = c.key
+          JOIN T4_n3 d ON c.key = d.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
-POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t1_n60
+POSTHOOK: Input: default@t2_n38
+POSTHOOK: Input: default@t3_n15
+POSTHOOK: Input: default@t4_n3
 #### A masked pattern was here ####
 2	12	2	22	2	12	2	12
 PREHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON a.val = c.val
-          JOIN T4 d ON a.key + 1 = d.key + 1
+FROM T1_n60 a JOIN T2_n38 b ON a.key = b.key
+          JOIN T3_n15 c ON a.val = c.val
+          JOIN T4_n3 d ON a.key + 1 = d.key + 1
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON a.val = c.val
-          JOIN T4 d ON a.key + 1 = d.key + 1
+FROM T1_n60 a JOIN T2_n38 b ON a.key = b.key
+          JOIN T3_n15 c ON a.val = c.val
+          JOIN T4_n3 d ON a.key + 1 = d.key + 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -332,23 +332,23 @@ STAGE PLANS:
        ListSink
 PREHOOK: query: SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON a.val = c.val
-          JOIN T4 d ON a.key + 1 = d.key + 1
+FROM T1_n60 a JOIN T2_n38 b ON a.key = b.key
+          JOIN T3_n15 c ON a.val = c.val
+          JOIN T4_n3 d ON a.key + 1 = d.key + 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
-PREHOOK: Input: default@t4
+PREHOOK: Input: default@t1_n60
+PREHOOK: Input: default@t2_n38
+PREHOOK: Input: default@t3_n15
+PREHOOK: Input: default@t4_n3
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON a.val = c.val
-          JOIN T4 d ON a.key + 1 = d.key + 1
+FROM T1_n60 a JOIN T2_n38 b ON a.key = b.key
+          JOIN T3_n15 c ON a.val = c.val
+          JOIN T4_n3 d ON a.key + 1 = d.key + 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
-POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t1_n60
+POSTHOOK: Input: default@t2_n38
+POSTHOOK: Input: default@t3_n15
+POSTHOOK: Input: default@t4_n3
 #### A masked pattern was here ####
 2	12	2	22	2	12	2	12
diff --git a/ql/src/test/results/clientpositive/join_reorder4.q.out b/ql/src/test/results/clientpositive/join_reorder4.q.out
index faa13724c4..a1c018635b 100644
--- a/ql/src/test/results/clientpositive/join_reorder4.q.out
+++ b/ql/src/test/results/clientpositive/join_reorder4.q.out
@@ -1,54 +1,54 @@
-PREHOOK: query: CREATE TABLE T1(key1 STRING, val1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n86(key1 STRING, val1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key1 STRING, val1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n86
+POSTHOOK: query: CREATE TABLE T1_n86(key1 STRING, val1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: CREATE TABLE T2(key2 STRING, val2 STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T1_n86
+PREHOOK: query: CREATE TABLE T2_n53(key2 STRING, val2 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key2 STRING, val2 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n53
+POSTHOOK: query: CREATE TABLE T2_n53(key2 STRING, val2 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: CREATE TABLE T3(key3 STRING, val3 STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@T2_n53
+PREHOOK: query: CREATE TABLE T3_n23(key3 STRING, val3 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key3 STRING, val3 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T3_n23
+POSTHOOK: query: CREATE TABLE T3_n23(key3 STRING, val3 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T3_n23
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n86
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n86
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n86
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@t1_n86
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n53
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n53
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n53
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: Output: default@t2_n53
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n23
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n23
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n23
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
-PREHOOK: query: explain select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+POSTHOOK: Output: default@t3_n23
+PREHOOK: query: explain select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+POSTHOOK: query: explain select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -123,22 +123,22 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+PREHOOK: query: select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n86
+PREHOOK: Input: default@t2_n53
+PREHOOK: Input: default@t3_n23
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+POSTHOOK: query: select /*+ STREAMTABLE(a) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n86
+POSTHOOK: Input: default@t2_n53
+POSTHOOK: Input: default@t3_n23
 #### A masked pattern was here ####
 2	12	2	22	2	12
-PREHOOK: query: explain select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+PREHOOK: query: explain select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+POSTHOOK: query: explain select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -213,22 +213,22 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+PREHOOK: query: select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n86
+PREHOOK: Input: default@t2_n53
+PREHOOK: Input: default@t3_n23
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+POSTHOOK: query: select /*+ STREAMTABLE(b) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n86
+POSTHOOK: Input: default@t2_n53
+POSTHOOK: Input: default@t3_n23
 #### A masked pattern was here ####
 2	12	2	22	2	12
-PREHOOK: query: explain select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+PREHOOK: query: explain select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+POSTHOOK: query: explain select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -303,16 +303,16 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+PREHOOK: query: select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n86
+PREHOOK: Input: default@t2_n53
+PREHOOK: Input: default@t3_n23
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1 a join T2 b on a.key1=b.key2 join T3 c on a.key1=c.key3
+POSTHOOK: query: select /*+ STREAMTABLE(c) */ a.*, b.*, c.* from T1_n86 a join T2_n53 b on a.key1=b.key2 join T3_n23 c on a.key1=c.key3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n86
+POSTHOOK: Input: default@t2_n53
+POSTHOOK: Input: default@t3_n23
 #### A masked pattern was here ####
 2	12	2	22	2	12
diff --git a/ql/src/test/results/clientpositive/json_serde_tsformat.q.out b/ql/src/test/results/clientpositive/json_serde_tsformat.q.out
index eb5eeb1373..28103d5264 100644
--- a/ql/src/test/results/clientpositive/json_serde_tsformat.q.out
+++ b/ql/src/test/results/clientpositive/json_serde_tsformat.q.out
@@ -1,40 +1,40 @@
-PREHOOK: query: CREATE TABLE t1 (c1 int, c2 string, c3 timestamp)
+PREHOOK: query: CREATE TABLE t1_n55 (c1 int, c2 string, c3 timestamp)
 ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
 WITH SERDEPROPERTIES ('timestamp.formats'='yyyy-MM-dd\'T\'HH:mm:ss')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: CREATE TABLE t1 (c1 int, c2 string, c3 timestamp)
+PREHOOK: Output: default@t1_n55
+POSTHOOK: query: CREATE TABLE t1_n55 (c1 int, c2 string, c3 timestamp)
 ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
 WITH SERDEPROPERTIES ('timestamp.formats'='yyyy-MM-dd\'T\'HH:mm:ss')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/tsformat.json" INTO TABLE t1
+POSTHOOK: Output: default@t1_n55
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/tsformat.json" INTO TABLE t1_n55
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/tsformat.json" INTO TABLE t1
+PREHOOK: Output: default@t1_n55
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/tsformat.json" INTO TABLE t1_n55
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
+POSTHOOK: Output: default@t1_n55
 PREHOOK: query: select a.c1, a.c2, b.c3
-from t1 a join t1 b on a.c1 = b.c1
+from t1_n55 a join t1_n55 b on a.c1 = b.c1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n55
 #### A masked pattern was here ####
 POSTHOOK: query: select a.c1, a.c2, b.c3
-from t1 a join t1 b on a.c1 = b.c1
+from t1_n55 a join t1_n55 b on a.c1 = b.c1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n55
 #### A masked pattern was here ####
 123	abc	2001-02-03 12:34:56
 456	xyz	1906-04-18 05:12:00
-PREHOOK: query: drop table t1
+PREHOOK: query: drop table t1_n55
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: drop table t1
+PREHOOK: Input: default@t1_n55
+PREHOOK: Output: default@t1_n55
+POSTHOOK: query: drop table t1_n55
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
+POSTHOOK: Input: default@t1_n55
+POSTHOOK: Output: default@t1_n55
diff --git a/ql/src/test/results/clientpositive/keyword_2.q.out b/ql/src/test/results/clientpositive/keyword_2.q.out
index c8389f5c6a..f1d63b6e5f 100644
--- a/ql/src/test/results/clientpositive/keyword_2.q.out
+++ b/ql/src/test/results/clientpositive/keyword_2.q.out
@@ -1,51 +1,51 @@
-PREHOOK: query: drop table varchar_udf_1
+PREHOOK: query: drop table varchar_udf_1_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table varchar_udf_1
+POSTHOOK: query: drop table varchar_udf_1_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
+PREHOOK: query: create table varchar_udf_1_n1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
+PREHOOK: Output: default@varchar_udf_1_n1
+POSTHOOK: query: create table varchar_udf_1_n1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_udf_1
-PREHOOK: query: insert overwrite table varchar_udf_1
+POSTHOOK: Output: default@varchar_udf_1_n1
+PREHOOK: query: insert overwrite table varchar_udf_1_n1
 select key, value, key, value from src where key = '238' limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: insert overwrite table varchar_udf_1
+PREHOOK: Output: default@varchar_udf_1_n1
+POSTHOOK: query: insert overwrite table varchar_udf_1_n1
 select key, value, key, value from src where key = '238' limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@varchar_udf_1
-POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@varchar_udf_1_n1
+POSTHOOK: Lineage: varchar_udf_1_n1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1_n1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1_n1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1_n1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1
+from varchar_udf_1_n1 limit 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
+PREHOOK: Input: default@varchar_udf_1_n1
 #### A masked pattern was here ####
 POSTHOOK: query: select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1
+from varchar_udf_1_n1 limit 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
+POSTHOOK: Input: default@varchar_udf_1_n1
 #### A masked pattern was here ####
 true	true	true
-PREHOOK: query: drop table varchar_udf_1
+PREHOOK: query: drop table varchar_udf_1_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@varchar_udf_1
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: drop table varchar_udf_1
+PREHOOK: Input: default@varchar_udf_1_n1
+PREHOOK: Output: default@varchar_udf_1_n1
+POSTHOOK: query: drop table varchar_udf_1_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@varchar_udf_1
-POSTHOOK: Output: default@varchar_udf_1
+POSTHOOK: Input: default@varchar_udf_1_n1
+POSTHOOK: Output: default@varchar_udf_1_n1
diff --git a/ql/src/test/results/clientpositive/lateral_view_multi_lateralviews.q.out b/ql/src/test/results/clientpositive/lateral_view_multi_lateralviews.q.out
index b9c827fe0c..c786a4b8be 100644
--- a/ql/src/test/results/clientpositive/lateral_view_multi_lateralviews.q.out
+++ b/ql/src/test/results/clientpositive/lateral_view_multi_lateralviews.q.out
@@ -1,22 +1,22 @@
-PREHOOK: query: CREATE TABLE t1(x5 STRUCT<x4:STRUCT<x3:ARRAY<STRUCT<x1:STRING,x2:ARRAY<STRING>>>> >)
+PREHOOK: query: CREATE TABLE t1_n47(x5 STRUCT<x4:STRUCT<x3:ARRAY<STRUCT<x1:STRING,x2:ARRAY<STRING>>>> >)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: CREATE TABLE t1(x5 STRUCT<x4:STRUCT<x3:ARRAY<STRUCT<x1:STRING,x2:ARRAY<STRING>>>> >)
+PREHOOK: Output: default@t1_n47
+POSTHOOK: query: CREATE TABLE t1_n47(x5 STRUCT<x4:STRUCT<x3:ARRAY<STRUCT<x1:STRING,x2:ARRAY<STRING>>>> >)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT INTO t1 SELECT NAMED_STRUCT('x4', NAMED_STRUCT('x3', ARRAY(NAMED_STRUCT('x1', 'x1_1', 'x2', ARRAY('x2_1', 'x2_2')))))
+POSTHOOK: Output: default@t1_n47
+PREHOOK: query: INSERT INTO t1_n47 SELECT NAMED_STRUCT('x4', NAMED_STRUCT('x3', ARRAY(NAMED_STRUCT('x1', 'x1_1', 'x2', ARRAY('x2_1', 'x2_2')))))
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT INTO t1 SELECT NAMED_STRUCT('x4', NAMED_STRUCT('x3', ARRAY(NAMED_STRUCT('x1', 'x1_1', 'x2', ARRAY('x2_1', 'x2_2')))))
+PREHOOK: Output: default@t1_n47
+POSTHOOK: query: INSERT INTO t1_n47 SELECT NAMED_STRUCT('x4', NAMED_STRUCT('x3', ARRAY(NAMED_STRUCT('x1', 'x1_1', 'x2', ARRAY('x2_1', 'x2_2')))))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.x5 EXPRESSION []
+POSTHOOK: Output: default@t1_n47
+POSTHOOK: Lineage: t1_n47.x5 EXPRESSION []
 PREHOOK: query: SELECT c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16
-FROM t1
+FROM t1_n47
 LATERAL VIEW EXPLODE(x5.x4.x3) lv as c1
 LATERAL VIEW EXPLODE(c1.x2) lv as c2
 LATERAL VIEW EXPLODE(x5.x4.x3) lv as c3
@@ -35,10 +35,10 @@ LATERAL VIEW EXPLODE(x5.x4.x3) lv as c15
 LATERAL VIEW EXPLODE(c1.x2) lv as c16
 LIMIT 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n47
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16
-FROM t1
+FROM t1_n47
 LATERAL VIEW EXPLODE(x5.x4.x3) lv as c1
 LATERAL VIEW EXPLODE(c1.x2) lv as c2
 LATERAL VIEW EXPLODE(x5.x4.x3) lv as c3
@@ -57,6 +57,6 @@ LATERAL VIEW EXPLODE(x5.x4.x3) lv as c15
 LATERAL VIEW EXPLODE(c1.x2) lv as c16
 LIMIT 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n47
 #### A masked pattern was here ####
 {"x1":"x1_1","x2":["x2_1","x2_2"]}	x2_1	{"x1":"x1_1","x2":["x2_1","x2_2"]}	x2_1	{"x1":"x1_1","x2":["x2_1","x2_2"]}	x2_1	{"x1":"x1_1","x2":["x2_1","x2_2"]}	x2_1	{"x1":"x1_1","x2":["x2_1","x2_2"]}	x2_1	{"x1":"x1_1","x2":["x2_1","x2_2"]}	x2_1	{"x1":"x1_1","x2":["x2_1","x2_2"]}	x2_1	{"x1":"x1_1","x2":["x2_1","x2_2"]}	x2_1
diff --git a/ql/src/test/results/clientpositive/lateral_view_onview.q.out b/ql/src/test/results/clientpositive/lateral_view_onview.q.out
index 6a7907f974..4de4f2efa3 100644
--- a/ql/src/test/results/clientpositive/lateral_view_onview.q.out
+++ b/ql/src/test/results/clientpositive/lateral_view_onview.q.out
@@ -1,37 +1,37 @@
-PREHOOK: query: CREATE TABLE lv_table( c1 STRING, c2 ARRAY<INT>, c3 INT, c4 CHAR(1))
+PREHOOK: query: CREATE TABLE lv_table_n0( c1 STRING, c2 ARRAY<INT>, c3 INT, c4 CHAR(1))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@lv_table
-POSTHOOK: query: CREATE TABLE lv_table( c1 STRING, c2 ARRAY<INT>, c3 INT, c4 CHAR(1))
+PREHOOK: Output: default@lv_table_n0
+POSTHOOK: query: CREATE TABLE lv_table_n0( c1 STRING, c2 ARRAY<INT>, c3 INT, c4 CHAR(1))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@lv_table
-PREHOOK: query: INSERT OVERWRITE TABLE lv_table SELECT 'abc ', array(1,2,3), 100, 't' FROM src
+POSTHOOK: Output: default@lv_table_n0
+PREHOOK: query: INSERT OVERWRITE TABLE lv_table_n0 SELECT 'abc ', array(1,2,3), 100, 't' FROM src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@lv_table
-POSTHOOK: query: INSERT OVERWRITE TABLE lv_table SELECT 'abc ', array(1,2,3), 100, 't' FROM src
+PREHOOK: Output: default@lv_table_n0
+POSTHOOK: query: INSERT OVERWRITE TABLE lv_table_n0 SELECT 'abc ', array(1,2,3), 100, 't' FROM src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@lv_table
-POSTHOOK: Lineage: lv_table.c1 SIMPLE []
-POSTHOOK: Lineage: lv_table.c2 EXPRESSION []
-POSTHOOK: Lineage: lv_table.c3 SIMPLE []
-POSTHOOK: Lineage: lv_table.c4 EXPRESSION []
-PREHOOK: query: CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table
+POSTHOOK: Output: default@lv_table_n0
+POSTHOOK: Lineage: lv_table_n0.c1 SIMPLE []
+POSTHOOK: Lineage: lv_table_n0.c2 EXPRESSION []
+POSTHOOK: Lineage: lv_table_n0.c3 SIMPLE []
+POSTHOOK: Lineage: lv_table_n0.c4 EXPRESSION []
+PREHOOK: query: CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table_n0
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@lv_table
+PREHOOK: Input: default@lv_table_n0
 PREHOOK: Output: database:default
 PREHOOK: Output: default@lv_view
-POSTHOOK: query: CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table
+POSTHOOK: query: CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table_n0
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@lv_table
+POSTHOOK: Input: default@lv_table_n0
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@lv_view
-POSTHOOK: Lineage: lv_view.c1 SIMPLE [(lv_table)lv_table.FieldSchema(name:c1, type:string, comment:null), ]
-POSTHOOK: Lineage: lv_view.c2 SIMPLE [(lv_table)lv_table.FieldSchema(name:c2, type:array<int>, comment:null), ]
-POSTHOOK: Lineage: lv_view.c3 SIMPLE [(lv_table)lv_table.FieldSchema(name:c3, type:int, comment:null), ]
-POSTHOOK: Lineage: lv_view.c4 SIMPLE [(lv_table)lv_table.FieldSchema(name:c4, type:char(1), comment:null), ]
+POSTHOOK: Lineage: lv_view.c1 SIMPLE [(lv_table_n0)lv_table_n0.FieldSchema(name:c1, type:string, comment:null), ]
+POSTHOOK: Lineage: lv_view.c2 SIMPLE [(lv_table_n0)lv_table_n0.FieldSchema(name:c2, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: lv_view.c3 SIMPLE [(lv_table_n0)lv_table_n0.FieldSchema(name:c3, type:int, comment:null), ]
+POSTHOOK: Lineage: lv_view.c4 SIMPLE [(lv_table_n0)lv_table_n0.FieldSchema(name:c4, type:char(1), comment:null), ]
 PREHOOK: query: EXPLAIN SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1
@@ -46,7 +46,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: lv_table
+            alias: lv_table_n0
             Statistics: Num rows: 500 Data size: 8500 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: c1 (type: string), c2 (type: array<int>), c3 (type: int), c4 (type: char(1))
@@ -144,7 +144,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: lv_table
+            alias: lv_table_n0
             Statistics: Num rows: 500 Data size: 8500 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
              Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -213,7 +213,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: lv_table
+            alias: lv_table_n0
            Statistics: Num rows: 500 Data size: 8500 Basic stats: COMPLETE Column stats: COMPLETE
            Select Operator
              Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -348,7 +348,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: lv_table
+            alias: lv_table_n0
            Statistics: Num rows: 500 Data size: 8500 Basic stats: COMPLETE Column stats: COMPLETE
            Select Operator
              Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
@@ -468,23 +468,23 @@ STAGE PLANS:
 PREHOOK: query: SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@lv_table
+PREHOOK: Input: default@lv_table_n0
 PREHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@lv_table
+POSTHOOK: Input: default@lv_table_n0
 POSTHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 abc 	[1,2,3]	100	t	1
 PREHOOK: query: SELECT myTable.* FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@lv_table
+PREHOOK: Input: default@lv_table_n0
 PREHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT myTable.* FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@lv_table
+POSTHOOK: Input: default@lv_table_n0
 POSTHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 1
@@ -492,12 +492,12 @@ POSTHOOK: Input: default@lv_view
 3
 PREHOOK: query: SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9
 PREHOOK: type: QUERY
-PREHOOK: Input: default@lv_table
+PREHOOK: Input: default@lv_table_n0
 PREHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@lv_table
+POSTHOOK: Input: default@lv_table_n0
 POSTHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 1	a
@@ -511,12 +511,12 @@ POSTHOOK: Input: default@lv_view
 3	c
 PREHOOK: query: SELECT myTable2.* FROM lv_view LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@lv_table
+PREHOOK: Input: default@lv_table_n0
 PREHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT myTable2.* FROM lv_view LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@lv_table
+POSTHOOK: Input: default@lv_table_n0
 POSTHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 1
@@ -537,7 +537,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: lv_table
+            alias: lv_table_n0
            Statistics: Num rows: 500 Data size: 8500 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: c1 (type: string), c2 (type: array<int>), c3 (type: int), c4 (type: char(1))
@@ -599,25 +599,25 @@ STAGE PLANS:
 PREHOOK: query: SELECT SIZE(c2),c3,TRIM(c1),c4,myCol from lv_view LATERAL VIEW explode(array(1,2,3)) myTab as myCol limit 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@lv_table
+PREHOOK: Input: default@lv_table_n0
 PREHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT SIZE(c2),c3,TRIM(c1),c4,myCol from lv_view LATERAL VIEW explode(array(1,2,3)) myTab as myCol limit 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@lv_table
+POSTHOOK: Input: default@lv_table_n0
 POSTHOOK: Input: default@lv_view
 #### A masked pattern was here ####
 3	100	abc	t	1
 3	100	abc	t	2
 3	100	abc	t	3
-PREHOOK: query: CREATE TABLE lv_table1( c1 STRING, c3 INT, c4 CHAR(1), c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING)
+PREHOOK: query: CREATE TABLE lv_table1_n0( c1 STRING, c3 INT, c4 CHAR(1), c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING)
 PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default -PREHOOK: Output: default@lv_table1 -POSTHOOK: query: CREATE TABLE lv_table1( c1 STRING, c3 INT, c4 CHAR(1), c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING) +PREHOOK: Output: default@lv_table1_n0 +POSTHOOK: query: CREATE TABLE lv_table1_n0( c1 STRING, c3 INT, c4 CHAR(1), c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@lv_table1 +POSTHOOK: Output: default@lv_table1_n0 PREHOOK: query: CREATE TABLE lv_table2( c1 STRING, c2 ARRAY) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -626,26 +626,26 @@ POSTHOOK: query: CREATE TABLE lv_table2( c1 STRING, c2 ARRAY) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@lv_table2 -PREHOOK: query: INSERT OVERWRITE TABLE lv_table1 SELECT 'abc ', 100, 't', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test' FROM src +PREHOOK: query: INSERT OVERWRITE TABLE lv_table1_n0 SELECT 'abc ', 100, 't', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test' FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@lv_table1 -POSTHOOK: query: INSERT OVERWRITE TABLE lv_table1 SELECT 'abc ', 100, 't', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test' FROM src +PREHOOK: Output: default@lv_table1_n0 +POSTHOOK: query: INSERT OVERWRITE TABLE lv_table1_n0 SELECT 'abc ', 100, 't', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test' FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@lv_table1 -POSTHOOK: Lineage: lv_table1.c1 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c10 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c11 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c12 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c13 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c3 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c4 EXPRESSION [] -POSTHOOK: Lineage: lv_table1.c5 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c6 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c7 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c8 SIMPLE [] -POSTHOOK: Lineage: lv_table1.c9 SIMPLE [] +POSTHOOK: Output: default@lv_table1_n0 +POSTHOOK: Lineage: lv_table1_n0.c1 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c10 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c11 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c12 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c13 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c3 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c4 EXPRESSION [] +POSTHOOK: Lineage: lv_table1_n0.c5 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c6 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c7 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c8 SIMPLE [] +POSTHOOK: Lineage: lv_table1_n0.c9 SIMPLE [] PREHOOK: query: INSERT OVERWRITE TABLE lv_table2 SELECT 'abc ', array(1,2,3) FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -656,9 +656,9 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@lv_table2 POSTHOOK: Lineage: lv_table2.c1 SIMPLE [] POSTHOOK: Lineage: lv_table2.c2 EXPRESSION [] -PREHOOK: query: EXPLAIN WITH lv_view1 AS (SELECT lv_table1.*, c2 FROM lv_table1 JOIN lv_table2 ON lv_table1.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1 +PREHOOK: query: EXPLAIN WITH lv_view1 AS (SELECT lv_table1_n0.*, c2 FROM lv_table1_n0 JOIN lv_table2 ON 
lv_table1_n0.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN WITH lv_view1 AS (SELECT lv_table1.*, c2 FROM lv_table1 JOIN lv_table2 ON lv_table1.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1 +POSTHOOK: query: EXPLAIN WITH lv_view1 AS (SELECT lv_table1_n0.*, c2 FROM lv_table1_n0 JOIN lv_table2 ON lv_table1_n0.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -671,7 +671,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: lv_table1 + alias: lv_table1_n0 Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: c1 is not null (type: boolean) @@ -798,14 +798,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: WITH lv_view1 AS (SELECT lv_table1.*, c2 FROM lv_table1 JOIN lv_table2 ON lv_table1.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1 +PREHOOK: query: WITH lv_view1 AS (SELECT lv_table1_n0.*, c2 FROM lv_table1_n0 JOIN lv_table2 ON lv_table1_n0.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1 PREHOOK: type: QUERY -PREHOOK: Input: default@lv_table1 +PREHOOK: Input: default@lv_table1_n0 PREHOOK: Input: default@lv_table2 #### A masked pattern was here #### -POSTHOOK: query: WITH lv_view1 AS (SELECT lv_table1.*, c2 FROM lv_table1 JOIN lv_table2 ON lv_table1.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1 +POSTHOOK: query: WITH lv_view1 AS (SELECT lv_table1_n0.*, c2 FROM lv_table1_n0 JOIN lv_table2 ON lv_table1_n0.c1 = lv_table2.c1), lv_view2 AS (SELECT * FROM lv_view1 LATERAL VIEW explode(c2) myTable AS myCol) SELECT * FROM lv_view2 SORT BY c1 ASC, myCol ASC LIMIT 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@lv_table1 +POSTHOOK: Input: default@lv_table1_n0 POSTHOOK: Input: default@lv_table2 #### A masked pattern was here #### abc 100 t test test test test test test test test test [1,2,3] 1 diff --git a/ql/src/test/results/clientpositive/lateral_view_onview2.q.out b/ql/src/test/results/clientpositive/lateral_view_onview2.q.out index aec90de7e5..9c825aa4dd 100644 --- a/ql/src/test/results/clientpositive/lateral_view_onview2.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_onview2.q.out @@ -1,37 +1,37 @@ -PREHOOK: query: CREATE TABLE lv_table( c1 STRING, c2 ARRAY, c3 INT, c4 CHAR(1)) +PREHOOK: query: CREATE TABLE lv_table_n1( c1 STRING, c2 ARRAY, c3 INT, c4 CHAR(1)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@lv_table -POSTHOOK: query: CREATE TABLE lv_table( c1 STRING, c2 ARRAY, c3 INT, c4 CHAR(1)) +PREHOOK: Output: default@lv_table_n1 +POSTHOOK: query: CREATE TABLE lv_table_n1( c1 STRING, c2 ARRAY, c3 INT, c4 CHAR(1)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@lv_table -PREHOOK: query: INSERT OVERWRITE TABLE lv_table SELECT 'abc ', array(1,2,3), 100, 't' FROM src 
+POSTHOOK: Output: default@lv_table_n1
+PREHOOK: query: INSERT OVERWRITE TABLE lv_table_n1 SELECT 'abc ', array(1,2,3), 100, 't' FROM src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@lv_table
-POSTHOOK: query: INSERT OVERWRITE TABLE lv_table SELECT 'abc ', array(1,2,3), 100, 't' FROM src
+PREHOOK: Output: default@lv_table_n1
+POSTHOOK: query: INSERT OVERWRITE TABLE lv_table_n1 SELECT 'abc ', array(1,2,3), 100, 't' FROM src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@lv_table
-POSTHOOK: Lineage: lv_table.c1 SIMPLE []
-POSTHOOK: Lineage: lv_table.c2 EXPRESSION []
-POSTHOOK: Lineage: lv_table.c3 SIMPLE []
-POSTHOOK: Lineage: lv_table.c4 EXPRESSION []
-PREHOOK: query: CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table
+POSTHOOK: Output: default@lv_table_n1
+POSTHOOK: Lineage: lv_table_n1.c1 SIMPLE []
+POSTHOOK: Lineage: lv_table_n1.c2 EXPRESSION []
+POSTHOOK: Lineage: lv_table_n1.c3 SIMPLE []
+POSTHOOK: Lineage: lv_table_n1.c4 EXPRESSION []
+PREHOOK: query: CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table_n1
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@lv_table
+PREHOOK: Input: default@lv_table_n1
 PREHOOK: Output: database:default
 PREHOOK: Output: default@lv_view
-POSTHOOK: query: CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table
+POSTHOOK: query: CREATE OR REPLACE VIEW lv_view AS SELECT * FROM lv_table_n1
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@lv_table
+POSTHOOK: Input: default@lv_table_n1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@lv_view
-POSTHOOK: Lineage: lv_view.c1 SIMPLE [(lv_table)lv_table.FieldSchema(name:c1, type:string, comment:null), ]
-POSTHOOK: Lineage: lv_view.c2 SIMPLE [(lv_table)lv_table.FieldSchema(name:c2, type:array, comment:null), ]
-POSTHOOK: Lineage: lv_view.c3 SIMPLE [(lv_table)lv_table.FieldSchema(name:c3, type:int, comment:null), ]
-POSTHOOK: Lineage: lv_view.c4 SIMPLE [(lv_table)lv_table.FieldSchema(name:c4, type:char(1), comment:null), ]
+POSTHOOK: Lineage: lv_view.c1 SIMPLE [(lv_table_n1)lv_table_n1.FieldSchema(name:c1, type:string, comment:null), ]
+POSTHOOK: Lineage: lv_view.c2 SIMPLE [(lv_table_n1)lv_table_n1.FieldSchema(name:c2, type:array, comment:null), ]
+POSTHOOK: Lineage: lv_view.c3 SIMPLE [(lv_table_n1)lv_table_n1.FieldSchema(name:c3, type:int, comment:null), ]
+POSTHOOK: Lineage: lv_view.c4 SIMPLE [(lv_table_n1)lv_table_n1.FieldSchema(name:c4, type:char(1), comment:null), ]
 PREHOOK: query: EXPLAIN SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9
@@ -45,7 +45,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: lv_table
+            alias: lv_table_n1
             Statistics: Num rows: 500 Data size: 8500 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
diff --git a/ql/src/test/results/clientpositive/lb_fs_stats.q.out b/ql/src/test/results/clientpositive/lb_fs_stats.q.out
index 65953640aa..ac4f7b55b4 100644
--- a/ql/src/test/results/clientpositive/lb_fs_stats.q.out
+++ b/ql/src/test/results/clientpositive/lb_fs_stats.q.out
@@ -1,35 +1,35 @@
-PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE
+PREHOOK: query: CREATE TABLE test_tab_n0 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE
+PREHOOK: Output: default@test_tab_n0
+POSTHOOK: query: CREATE TABLE test_tab_n0 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: ALTER TABLE test_tab SKEWED BY (key) ON ("484") STORED AS DIRECTORIES
+POSTHOOK: Output: default@test_tab_n0
+PREHOOK: query: ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484") STORED AS DIRECTORIES
 PREHOOK: type: ALTERTABLE_SKEWED
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: ALTER TABLE test_tab SKEWED BY (key) ON ("484") STORED AS DIRECTORIES
+PREHOOK: Input: default@test_tab_n0
+PREHOOK: Output: default@test_tab_n0
+POSTHOOK: query: ALTER TABLE test_tab_n0 SKEWED BY (key) ON ("484") STORED AS DIRECTORIES
 POSTHOOK: type: ALTERTABLE_SKEWED
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: INSERT OVERWRITE TABLE test_tab PARTITION (part = '1') SELECT * FROM src
+POSTHOOK: Input: default@test_tab_n0
+POSTHOOK: Output: default@test_tab_n0
+PREHOOK: query: INSERT OVERWRITE TABLE test_tab_n0 PARTITION (part = '1') SELECT * FROM src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_tab@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_tab PARTITION (part = '1') SELECT * FROM src
+PREHOOK: Output: default@test_tab_n0@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_tab_n0 PARTITION (part = '1') SELECT * FROM src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_tab@part=1
-POSTHOOK: Lineage: test_tab PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_tab PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted test_tab partition (part='1')
+POSTHOOK: Output: default@test_tab_n0@part=1
+POSTHOOK: Lineage: test_tab_n0 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_tab_n0 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted test_tab_n0 partition (part='1')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: describe formatted test_tab partition (part='1')
+PREHOOK: Input: default@test_tab_n0
+POSTHOOK: query: describe formatted test_tab_n0 partition (part='1')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n0
 # col_name data_type comment
 key string
 value string
@@ -41,7 +41,7 @@ part string
 # Detailed Partition Information
 Partition Value: [1]
 Database: default
-Table: test_tab
+Table: test_tab_n0
 #### A masked pattern was here ####
 Partition Parameters:
 COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -63,6 +63,6 @@ Stored As SubDirectories: Yes
 Skewed Columns: [key]
 Skewed Values: [[484]]
 #### A masked pattern was here ####
-Skewed Value to Truncated Path: {[484]=/test_tab/part=1/key=484}
+Skewed Value to Truncated Path: {[484]=/test_tab_n0/part=1/key=484}
 Storage Desc Params:
 serialization.format 1
diff --git a/ql/src/test/results/clientpositive/leftsemijoin.q.out b/ql/src/test/results/clientpositive/leftsemijoin.q.out
index 28229cd76a..77d50a6d27 100644
--- a/ql/src/test/results/clientpositive/leftsemijoin.q.out
+++ b/ql/src/test/results/clientpositive/leftsemijoin.q.out
@@ -1,113 +1,113 @@
-PREHOOK: query: drop table sales
+PREHOOK: query: drop table sales_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table sales
+POSTHOOK: query: drop table sales_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table things
+PREHOOK: query: drop table things_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table things
+POSTHOOK: query: drop table things_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE sales (name STRING, id INT)
+PREHOOK: query: CREATE TABLE sales_n1 (name STRING, id INT)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@sales
-POSTHOOK: query: CREATE TABLE sales (name STRING, id INT)
+PREHOOK: Output: default@sales_n1
+POSTHOOK: query: CREATE TABLE sales_n1 (name STRING, id INT)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@sales
-PREHOOK: query: CREATE TABLE things (id INT, name STRING) partitioned by (ds string)
+POSTHOOK: Output: default@sales_n1
+PREHOOK: query: CREATE TABLE things_n1 (id INT, name STRING) partitioned by (ds string)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@things
-POSTHOOK: query: CREATE TABLE things (id INT, name STRING) partitioned by (ds string)
+PREHOOK: Output: default@things_n1
+POSTHOOK: query: CREATE TABLE things_n1 (id INT, name STRING) partitioned by (ds string)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@things
-PREHOOK: query: load data local inpath '../../data/files/sales.txt' INTO TABLE sales
+POSTHOOK: Output: default@things_n1
+PREHOOK: query: load data local inpath '../../data/files/sales.txt' INTO TABLE sales_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@sales
-POSTHOOK: query: load data local inpath '../../data/files/sales.txt' INTO TABLE sales
+PREHOOK: Output: default@sales_n1
+POSTHOOK: query: load data local inpath '../../data/files/sales.txt' INTO TABLE sales_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@sales
-PREHOOK: query: load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23')
+POSTHOOK: Output: default@sales_n1
+PREHOOK: query: load data local inpath '../../data/files/things.txt' INTO TABLE things_n1 partition(ds='2011-10-23')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@things
-POSTHOOK: query: load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23')
+PREHOOK: Output: default@things_n1
+POSTHOOK: query: load data local inpath '../../data/files/things.txt' INTO TABLE things_n1 partition(ds='2011-10-23')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@things
-POSTHOOK: Output: default@things@ds=2011-10-23
-PREHOOK: query: load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24')
+POSTHOOK: Output: default@things_n1
+POSTHOOK: Output: default@things_n1@ds=2011-10-23
+PREHOOK: query: load data local inpath '../../data/files/things2.txt' INTO TABLE things_n1 partition(ds='2011-10-24')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@things
-POSTHOOK: query: load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24')
+PREHOOK: Output: default@things_n1
+POSTHOOK: query: load data local inpath '../../data/files/things2.txt' INTO TABLE things_n1 partition(ds='2011-10-24')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@things
-POSTHOOK: Output: default@things@ds=2011-10-24
-PREHOOK: query: SELECT name,id FROM sales
+POSTHOOK: Output: default@things_n1
+POSTHOOK: Output: default@things_n1@ds=2011-10-24
+PREHOOK: query: SELECT name,id FROM sales_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@sales
+PREHOOK: Input: default@sales_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT name,id FROM sales
+POSTHOOK: query: SELECT name,id FROM sales_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@sales
+POSTHOOK: Input: default@sales_n1
 #### A masked pattern was here ####
 Hank 2
 Joe 2
-PREHOOK: query: SELECT id,name FROM things
+PREHOOK: query: SELECT id,name FROM things_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@things
-PREHOOK: Input: default@things@ds=2011-10-23
-PREHOOK: Input: default@things@ds=2011-10-24
+PREHOOK: Input: default@things_n1
+PREHOOK: Input: default@things_n1@ds=2011-10-23
+PREHOOK: Input: default@things_n1@ds=2011-10-24
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT id,name FROM things
+POSTHOOK: query: SELECT id,name FROM things_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@things
-POSTHOOK: Input: default@things@ds=2011-10-23
-POSTHOOK: Input: default@things@ds=2011-10-24
+POSTHOOK: Input: default@things_n1
+POSTHOOK: Input: default@things_n1@ds=2011-10-23
+POSTHOOK: Input: default@things_n1@ds=2011-10-24
 #### A masked pattern was here ####
 2 Tie
 2 Tie
-PREHOOK: query: SELECT name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id)
+PREHOOK: query: SELECT name,id FROM sales_n1 LEFT SEMI JOIN things_n1 ON (sales_n1.id = things_n1.id)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@sales
-PREHOOK: Input: default@things
-PREHOOK: Input: default@things@ds=2011-10-23
-PREHOOK: Input: default@things@ds=2011-10-24
+PREHOOK: Input: default@sales_n1
+PREHOOK: Input: default@things_n1
+PREHOOK: Input: default@things_n1@ds=2011-10-23
+PREHOOK: Input: default@things_n1@ds=2011-10-24
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id)
+POSTHOOK: query: SELECT name,id FROM sales_n1 LEFT SEMI JOIN things_n1 ON (sales_n1.id = things_n1.id)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@sales
-POSTHOOK: Input: default@things
-POSTHOOK: Input: default@things@ds=2011-10-23
-POSTHOOK: Input: default@things@ds=2011-10-24
+POSTHOOK: Input: default@sales_n1
+POSTHOOK: Input: default@things_n1
+POSTHOOK: Input: default@things_n1@ds=2011-10-23
+POSTHOOK: Input: default@things_n1@ds=2011-10-24
 #### A masked pattern was here ####
 Hank 2
 Joe 2
-PREHOOK: query: drop table sales
+PREHOOK: query: drop table sales_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@sales
-PREHOOK: Output: default@sales
-POSTHOOK: query: drop table sales
+PREHOOK: Input: default@sales_n1
+PREHOOK: Output: default@sales_n1
+POSTHOOK: query: drop table sales_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@sales
-POSTHOOK: Output: default@sales
-PREHOOK: query: drop table things
+POSTHOOK: Input: default@sales_n1
+POSTHOOK: Output: default@sales_n1
+PREHOOK: query: drop table things_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@things
-PREHOOK: Output: default@things
-POSTHOOK: query: drop table things
+PREHOOK: Input: default@things_n1
+PREHOOK: Output: default@things_n1
+POSTHOOK: query: drop table things_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@things
-POSTHOOK: Output: default@things
+POSTHOOK: Input: default@things_n1
+POSTHOOK: Output: default@things_n1
 Warning: Shuffle Join JOIN[10][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select part.p_type from part join (select p1.p_name from part p1, part p2 group by p1.p_name) pp ON pp.p_name = part.p_name
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
index e2f18a165d..d2308bb06f 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
@@ -1,22 +1,22 @@
-PREHOOK: query: create table list_bucketing_dynamic_part (key String, value String)
+PREHOOK: query: create table list_bucketing_dynamic_part_n0 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key) on ("484")
 stored as DIRECTORIES
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: create table list_bucketing_dynamic_part (key String, value String)
+PREHOOK: Output: default@list_bucketing_dynamic_part_n0
+POSTHOOK: query: create table list_bucketing_dynamic_part_n0 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key) on ("484")
 stored as DIRECTORIES
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: Output: default@list_bucketing_dynamic_part_n0
 PREHOOK: query: explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
+insert overwrite table list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
+insert overwrite table list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -54,15 +54,15 @@ STAGE PLANS:
                     columns.comments
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.list_bucketing_dynamic_part
+                    name default.list_bucketing_dynamic_part_n0
                     partition_columns ds/hr
                     partition_columns.types string:string
-                    serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                    serialization.ddl struct list_bucketing_dynamic_part_n0 { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.list_bucketing_dynamic_part
+                  name: default.list_bucketing_dynamic_part_n0
             TotalFiles: 1
             GatherStats: true
            MultiFileSpray: false
@@ -243,15 +243,15 @@ STAGE PLANS:
                    columns.comments
                    columns.types string:string
 #### A masked pattern was here ####
-                   name default.list_bucketing_dynamic_part
+                   name default.list_bucketing_dynamic_part_n0
                    partition_columns ds/hr
                    partition_columns.types string:string
-                   serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                   serialization.ddl struct list_bucketing_dynamic_part_n0 { string key, string value}
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                 name: default.list_bucketing_dynamic_part
+                 name: default.list_bucketing_dynamic_part_n0
 
   Stage: Stage-2
     Stats Work
@@ -260,32 +260,32 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, value
          Column Types: string, string
-         Table: default.list_bucketing_dynamic_part
+         Table: default.list_bucketing_dynamic_part_n0
          Is Table Level Stats: false
 
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
+PREHOOK: query: insert overwrite table list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
+PREHOOK: Output: default@list_bucketing_dynamic_part_n0@ds=2008-04-08
+POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: Output: default@list_bucketing_dynamic_part_n0@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@list_bucketing_dynamic_part_n0@ds=2008-04-08/hr=12
+POSTHOOK: Lineage: list_bucketing_dynamic_part_n0 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part_n0 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part_n0 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part_n0 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr='11')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11')
+PREHOOK: Input: default@list_bucketing_dynamic_part_n0
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr='11')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part_n0
 # col_name data_type comment
 key string
 value string
@@ -298,7 +298,7 @@ hr string
 # Detailed Partition Information
 Partition Value: [2008-04-08, 11]
 Database: default
-Table: list_bucketing_dynamic_part
+Table: list_bucketing_dynamic_part_n0
 #### A masked pattern was here ####
 Partition Parameters:
 COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -320,15 +320,15 @@ Stored As SubDirectories: Yes
 Skewed Columns: [key]
 Skewed Values: [[484]]
 #### A masked pattern was here ####
-Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484}
+Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part_n0/ds=2008-04-08/hr=11/key=484}
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12')
+PREHOOK: query: desc formatted list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr='12')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12')
+PREHOOK: Input: default@list_bucketing_dynamic_part_n0
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n0 partition (ds='2008-04-08', hr='12')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part_n0
 # col_name data_type comment
 key string
 value string
@@ -341,7 +341,7 @@ hr string
 # Detailed Partition Information
 Partition Value: [2008-04-08, 12]
 Database: default
-Table: list_bucketing_dynamic_part
+Table: list_bucketing_dynamic_part_n0
 #### A masked pattern was here ####
 Partition Parameters:
 COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -363,7 +363,7 @@ Stored As SubDirectories: Yes
 Skewed Columns: [key]
 Skewed Values: [[484]]
 #### A masked pattern was here ####
-Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=484}
+Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part_n0/ds=2008-04-08/hr=12/key=484}
 Storage Desc Params:
 serialization.format 1
 PREHOOK: query: select count(1) from srcpart where ds='2008-04-08'
@@ -375,13 +375,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 #### A masked pattern was here ####
 1000
-PREHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'
+PREHOOK: query: select count(1) from list_bucketing_dynamic_part_n0 where ds='2008-04-08'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Input: default@list_bucketing_dynamic_part_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'
+POSTHOOK: query: select count(1) from list_bucketing_dynamic_part_n0 where ds='2008-04-08'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part_n0
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select key, value from srcpart where ds='2008-04-08' and hr='11' and key = "484"
@@ -396,10 +396,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 484 val_484
 PREHOOK: query: explain extended
-select key, value from list_bucketing_dynamic_part where ds='2008-04-08' and hr='11' and key = "484"
+select key, value from list_bucketing_dynamic_part_n0 where ds='2008-04-08' and hr='11' and key = "484"
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-select key, value from list_bucketing_dynamic_part where ds='2008-04-08' and hr='11' and key = "484"
+select key, value from list_bucketing_dynamic_part_n0 where ds='2008-04-08' and hr='11' and key = "484"
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -423,13 +423,13 @@ STAGE PLANS:
               columns.comments
               columns.types string:string
 #### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
+              name default.list_bucketing_dynamic_part_n0
               numFiles 2
               numRows 500
               partition_columns ds/hr
               partition_columns.types string:string
               rawDataSize 5312
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.ddl struct list_bucketing_dynamic_part_n0 { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 5812
@@ -446,19 +446,19 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
 #### A masked pattern was here ####
-             name default.list_bucketing_dynamic_part
+             name default.list_bucketing_dynamic_part_n0
              partition_columns ds/hr
              partition_columns.types string:string
-             serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+             serialization.ddl struct list_bucketing_dynamic_part_n0 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.list_bucketing_dynamic_part
-           name: default.list_bucketing_dynamic_part
+             name: default.list_bucketing_dynamic_part_n0
+           name: default.list_bucketing_dynamic_part_n0
       Processor Tree:
        TableScan
-         alias: list_bucketing_dynamic_part
+         alias: list_bucketing_dynamic_part_n0
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
          GatherStats: false
          Filter Operator
@@ -471,22 +471,22 @@ STAGE PLANS:
            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            ListSink
 
-PREHOOK: query: select key, value from list_bucketing_dynamic_part where ds='2008-04-08' and hr='11' and key = "484"
+PREHOOK: query: select key, value from list_bucketing_dynamic_part_n0 where ds='2008-04-08' and hr='11' and key = "484"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
+PREHOOK: Input: default@list_bucketing_dynamic_part_n0
+PREHOOK: Input: default@list_bucketing_dynamic_part_n0@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
-POSTHOOK: query: select key, value from list_bucketing_dynamic_part where ds='2008-04-08' and hr='11' and key = "484"
+POSTHOOK: query: select key, value from list_bucketing_dynamic_part_n0 where ds='2008-04-08' and hr='11' and key = "484"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@list_bucketing_dynamic_part_n0
+POSTHOOK: Input: default@list_bucketing_dynamic_part_n0@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 484 val_484
-PREHOOK: query: drop table list_bucketing_dynamic_part
+PREHOOK: query: drop table list_bucketing_dynamic_part_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: drop table list_bucketing_dynamic_part
+PREHOOK: Input: default@list_bucketing_dynamic_part_n0
+PREHOOK: Output: default@list_bucketing_dynamic_part_n0
+POSTHOOK: query: drop table list_bucketing_dynamic_part_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part_n0
+POSTHOOK: Output: default@list_bucketing_dynamic_part_n0
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
index d708fe8a73..550d378ac0 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
@@ -1,25 +1,25 @@
-PREHOOK: query: create table list_bucketing_static_part (key String, value String)
+PREHOOK: query: create table list_bucketing_static_part_n3 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (value) on ('val_466','val_287','val_82')
 stored as DIRECTORIES
 STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: create table list_bucketing_static_part (key String, value String)
+PREHOOK: Output: default@list_bucketing_static_part_n3
+POSTHOOK: query: create table list_bucketing_static_part_n3 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (value) on ('val_466','val_287','val_82')
 stored as DIRECTORIES
 STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: Output: default@list_bucketing_static_part_n3
 PREHOOK: query: explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n3 partition (ds = '2008-04-08', hr = '11')
 select key, value from src
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n3 partition (ds = '2008-04-08', hr = '11')
 select key, value from src
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -58,15 +58,15 @@ STAGE PLANS:
                    columns.comments
                    columns.types string:string
 #### A masked pattern was here ####
-                   name default.list_bucketing_static_part
+                   name default.list_bucketing_static_part_n3
                    partition_columns ds/hr
                    partition_columns.types string:string
-                   serialization.ddl struct list_bucketing_static_part { string key, string value}
+                   serialization.ddl struct list_bucketing_static_part_n3 { string key, string value}
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                 name: default.list_bucketing_static_part
+                 name: default.list_bucketing_static_part_n3
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -196,15 +196,15 @@ STAGE PLANS:
                    columns.comments
                    columns.types string:string
 #### A masked pattern was here ####
-                   name default.list_bucketing_static_part
+                   name default.list_bucketing_static_part_n3
                    partition_columns ds/hr
                    partition_columns.types string:string
-                   serialization.ddl struct list_bucketing_static_part { string key, string value}
+                   serialization.ddl struct list_bucketing_static_part_n3 { string key, string value}
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                 name: default.list_bucketing_static_part
+                 name: default.list_bucketing_static_part_n3
 
   Stage: Stage-2
    Stats Work
@@ -213,34 +213,34 @@ STAGE PLANS:
      Column Stats Desc:
         Columns: key, value
         Column Types: string, string
-        Table: default.list_bucketing_static_part
+        Table: default.list_bucketing_static_part_n3
        Is Table Level Stats: false
 
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+PREHOOK: query: insert overwrite table list_bucketing_static_part_n3 partition (ds = '2008-04-08', hr = '11')
 select key, value from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+PREHOOK: Output: default@list_bucketing_static_part_n3@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part_n3 partition (ds = '2008-04-08', hr = '11')
 select key, value from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: show partitions list_bucketing_static_part
+POSTHOOK: Output: default@list_bucketing_static_part_n3@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part_n3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part_n3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show partitions list_bucketing_static_part_n3
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: show partitions list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part_n3
+POSTHOOK: query: show partitions list_bucketing_static_part_n3
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part_n3
 ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: query: desc formatted list_bucketing_static_part_n3 partition (ds='2008-04-08', hr='11')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: Input: default@list_bucketing_static_part_n3
+POSTHOOK: query: desc formatted list_bucketing_static_part_n3 partition (ds='2008-04-08', hr='11')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part_n3
 # col_name data_type comment
 key string
 value string
@@ -253,7 +253,7 @@ hr string
 # Detailed Partition Information
 Partition Value: [2008-04-08, 11]
 Database: default
-Table: list_bucketing_static_part
+Table: list_bucketing_static_part_n3
 #### A masked pattern was here ####
 Partition Parameters:
 COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -275,14 +275,14 @@ Stored As SubDirectories: Yes
 Skewed Columns: [value]
 Skewed Values: [[val_287], [val_466], [val_82]]
 #### A masked pattern was here ####
-Skewed Value to Truncated Path: {[val_287]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_287, [val_466]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466, [val_82]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_82}
+Skewed Value to Truncated Path: {[val_287]=/list_bucketing_static_part_n3/ds=2008-04-08/hr=11/value=val_287, [val_466]=/list_bucketing_static_part_n3/ds=2008-04-08/hr=11/value=val_466, [val_82]=/list_bucketing_static_part_n3/ds=2008-04-08/hr=11/value=val_82}
 Storage Desc Params:
 serialization.format 1
 PREHOOK: query: explain extended
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
+select key, value from list_bucketing_static_part_n3 where ds='2008-04-08' and hr='11' and value = "val_466"
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
+select key, value from list_bucketing_static_part_n3 where ds='2008-04-08' and hr='11' and value = "val_466"
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -306,13 +306,13 @@ STAGE PLANS:
              columns.comments
              columns.types string:string
 #### A masked pattern was here ####
-             name default.list_bucketing_static_part
+             name default.list_bucketing_static_part_n3
              numFiles 4
              numRows 500
              partition_columns ds/hr
              partition_columns.types string:string
              rawDataSize 4812
-             serialization.ddl struct list_bucketing_static_part { string key, string value}
+             serialization.ddl struct list_bucketing_static_part_n3 { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
              totalSize 5522
@@ -329,19 +329,19 @@ STAGE PLANS:
             columns.comments
             columns.types string:string
 #### A masked pattern was here ####
-            name default.list_bucketing_static_part
+            name default.list_bucketing_static_part_n3
             partition_columns ds/hr
             partition_columns.types string:string
-            serialization.ddl struct list_bucketing_static_part { string key, string value}
+            serialization.ddl struct list_bucketing_static_part_n3 { string key, string value}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-            name: default.list_bucketing_static_part
-          name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part_n3
+          name: default.list_bucketing_static_part_n3
      Processor Tree:
       TableScan
-        alias: list_bucketing_static_part
+        alias: list_bucketing_static_part_n3
        Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
        GatherStats: false
        Filter Operator
@@ -354,24 +354,24 @@ STAGE PLANS:
          Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
          ListSink
 
-PREHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
+PREHOOK: query: select key, value from list_bucketing_static_part_n3 where ds='2008-04-08' and hr='11' and value = "val_466"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+PREHOOK: Input: default@list_bucketing_static_part_n3
+PREHOOK: Input: default@list_bucketing_static_part_n3@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
-POSTHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
+POSTHOOK: query: select key, value from list_bucketing_static_part_n3 where ds='2008-04-08' and hr='11' and value = "val_466"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@list_bucketing_static_part_n3
+POSTHOOK: Input: default@list_bucketing_static_part_n3@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 466 val_466
 466 val_466
 466 val_466
-PREHOOK: query: drop table list_bucketing_static_part
+PREHOOK: query: drop table list_bucketing_static_part_n3
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: drop table list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part_n3
+PREHOOK: Output: default@list_bucketing_static_part_n3
+POSTHOOK: query: drop table list_bucketing_static_part_n3
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part_n3
+POSTHOOK: Output: default@list_bucketing_static_part_n3
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
index 83d27c152e..07b07fb094 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
@@ -1,25 +1,25 @@
-PREHOOK: query: create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string)
+PREHOOK: query: create table list_bucketing_mul_col_n0 (col1 String, col2 String, col3 String, col4 String, col5 string)
 partitioned by (ds String, hr String)
 skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
 stored as DIRECTORIES
 STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string)
+PREHOOK: Output: default@list_bucketing_mul_col_n0
+POSTHOOK: query: create table list_bucketing_mul_col_n0 (col1 String, col2 String, col3 String, col4 String, col5 string)
 partitioned by (ds String, hr String)
 skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
 stored as DIRECTORIES
 STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_mul_col
+POSTHOOK: Output: default@list_bucketing_mul_col_n0
 PREHOOK: query: explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_mul_col_n0 partition (ds = '2008-04-08', hr = '11')
 select 1, key, 1, value, 1 from src
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_mul_col_n0 partition (ds = '2008-04-08', hr = '11')
 select 1, key, 1, value, 1 from src
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -58,15 +58,15 @@ STAGE PLANS:
                    columns.comments
                    columns.types string:string:string:string:string
 #### A masked pattern was here ####
-                   name default.list_bucketing_mul_col
+                   name default.list_bucketing_mul_col_n0
                    partition_columns ds/hr
                    partition_columns.types string:string
-                   serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                   serialization.ddl struct list_bucketing_mul_col_n0 { string col1, string col2, string col3, string col4, string col5}
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                 name: default.list_bucketing_mul_col
+                 name: default.list_bucketing_mul_col_n0
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -196,15 +196,15 @@ STAGE PLANS:
                    columns.comments
                    columns.types string:string:string:string:string
 #### A masked pattern was here ####
-                   name default.list_bucketing_mul_col
+                   name default.list_bucketing_mul_col_n0
                    partition_columns ds/hr
                    partition_columns.types string:string
-                   serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                   serialization.ddl struct list_bucketing_mul_col_n0 { string col1, string col2, string col3, string col4, string col5}
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                 name: default.list_bucketing_mul_col
+                 name: default.list_bucketing_mul_col_n0
 
  Stage: Stage-2
   Stats Work
@@ -213,37 +213,37 @@ STAGE PLANS:
     Column Stats Desc:
        Columns: col1, col2, col3, col4, col5
        Column Types: string, string, string, string, string
-       Table: default.list_bucketing_mul_col
+       Table: default.list_bucketing_mul_col_n0
       Is Table Level Stats: false
 
-PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
+PREHOOK: query: insert overwrite table list_bucketing_mul_col_n0 partition (ds = '2008-04-08', hr = '11')
 select 1, key, 1, value, 1 from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
+PREHOOK: Output: default@list_bucketing_mul_col_n0@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_mul_col_n0 partition (ds = '2008-04-08', hr = '11')
 select 1, key, 1, value, 1 from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col1 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col2 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col3 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col5 EXPRESSION []
-PREHOOK: query: show partitions list_bucketing_mul_col
+POSTHOOK: Output: default@list_bucketing_mul_col_n0@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_mul_col_n0 PARTITION(ds=2008-04-08,hr=11).col1 EXPRESSION []
+POSTHOOK: Lineage: list_bucketing_mul_col_n0 PARTITION(ds=2008-04-08,hr=11).col2 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_mul_col_n0 PARTITION(ds=2008-04-08,hr=11).col3 EXPRESSION []
+POSTHOOK: Lineage: list_bucketing_mul_col_n0 PARTITION(ds=2008-04-08,hr=11).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_mul_col_n0 PARTITION(ds=2008-04-08,hr=11).col5 EXPRESSION []
+PREHOOK: query: show partitions list_bucketing_mul_col_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: show partitions list_bucketing_mul_col
+PREHOOK: Input: default@list_bucketing_mul_col_n0
+POSTHOOK: query: show partitions list_bucketing_mul_col_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: Input: default@list_bucketing_mul_col_n0
 ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
+PREHOOK: query: desc formatted list_bucketing_mul_col_n0 partition (ds='2008-04-08', hr='11')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
+PREHOOK: Input: default@list_bucketing_mul_col_n0
+POSTHOOK: query: desc formatted list_bucketing_mul_col_n0 partition (ds='2008-04-08', hr='11')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: Input: default@list_bucketing_mul_col_n0
 # col_name data_type comment
 col1 string
 col2 string
@@ -259,7 +259,7 @@ hr string
 # Detailed Partition Information
 Partition Value: [2008-04-08, 11]
 Database: default
-Table: list_bucketing_mul_col
+Table: list_bucketing_mul_col_n0
 #### A masked pattern was here ####
 Partition Parameters:
 COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\",\"col2\":\"true\",\"col3\":\"true\",\"col4\":\"true\",\"col5\":\"true\"}}
@@ -281,15 +281,15 @@ Stored As SubDirectories: Yes
 Skewed Columns: [col2, col4]
 Skewed Values: [[287, val_287], [466, val_466], [82, val_82]]
 #### A masked pattern was here ####
-Skewed Value to Truncated Path: {[287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=287/col4=val_287, [466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=466/col4=val_466, [82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=82/col4=val_82}
+Skewed Value to Truncated Path: {[287, val_287]=/list_bucketing_mul_col_n0/ds=2008-04-08/hr=11/col2=287/col4=val_287, [466, val_466]=/list_bucketing_mul_col_n0/ds=2008-04-08/hr=11/col2=466/col4=val_466, [82, val_82]=/list_bucketing_mul_col_n0/ds=2008-04-08/hr=11/col2=82/col4=val_82}
 Storage Desc Params:
 serialization.format 1
 PREHOOK: query: explain extended
-select * from list_bucketing_mul_col
+select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-select * from list_bucketing_mul_col
+select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -314,13 +314,13 @@ STAGE PLANS:
             columns.comments
             columns.types string:string:string:string:string
 #### A masked pattern was here ####
-            name default.list_bucketing_mul_col
+            name default.list_bucketing_mul_col_n0
             numFiles 4
             numRows 500
             partition_columns ds/hr
             partition_columns.types string:string
            rawDataSize 6312
-            serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+            serialization.ddl struct list_bucketing_mul_col_n0 { string col1, string col2, string col3, string col4, string col5}
            serialization.format 1
            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
            totalSize 7094
@@ -337,19 +337,19 @@ STAGE PLANS:
           columns.comments
           columns.types string:string:string:string:string
 #### A masked pattern was here ####
-          name default.list_bucketing_mul_col
+          name default.list_bucketing_mul_col_n0
           partition_columns ds/hr
           partition_columns.types string:string
-          serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+          serialization.ddl struct list_bucketing_mul_col_n0 { string col1, string col2, string col3, string col4, string col5}
          serialization.format 1
          serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
        serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          name: default.list_bucketing_mul_col
-        name: default.list_bucketing_mul_col
+          name: default.list_bucketing_mul_col_n0
+        name: default.list_bucketing_mul_col_n0
      Processor Tree:
       TableScan
-        alias: list_bucketing_mul_col
+        alias: list_bucketing_mul_col_n0
        Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
        GatherStats: false
        Filter Operator
@@ -362,27 +362,27 @@ STAGE PLANS:
          Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
          ListSink
 
-PREHOOK: query: select * from list_bucketing_mul_col
+PREHOOK: query: select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+PREHOOK: Input: default@list_bucketing_mul_col_n0
+PREHOOK: Input: default@list_bucketing_mul_col_n0@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_mul_col
+POSTHOOK: query: select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@list_bucketing_mul_col_n0
+POSTHOOK: Input: default@list_bucketing_mul_col_n0@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 1 466 1 val_466 1 2008-04-08 11
 1 466 1 val_466 1 2008-04-08 11
 1 466 1 val_466 1 2008-04-08 11
 PREHOOK: query: explain extended
-select * from list_bucketing_mul_col
+select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-select * from list_bucketing_mul_col
+select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -407,13 +407,13 @@ STAGE PLANS:
             columns.comments
             columns.types string:string:string:string:string
 #### A masked pattern was here ####
-            name default.list_bucketing_mul_col
+            name default.list_bucketing_mul_col_n0
             numFiles 4
             numRows 500
             partition_columns ds/hr
             partition_columns.types string:string
            rawDataSize 6312
-            serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+            serialization.ddl struct list_bucketing_mul_col_n0 { string col1, string col2, string col3, string col4, string col5}
            serialization.format 1
            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
            totalSize 7094
@@ -430,19 +430,19 @@ STAGE PLANS:
           columns.comments
           columns.types string:string:string:string:string
 #### A masked pattern was here ####
-          name default.list_bucketing_mul_col
+          name default.list_bucketing_mul_col_n0
           partition_columns ds/hr
           partition_columns.types string:string
-          serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+          serialization.ddl struct list_bucketing_mul_col_n0 { string col1, string col2, string col3, string col4, string col5}
          serialization.format 1
          serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
        serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          name: default.list_bucketing_mul_col
-        name: default.list_bucketing_mul_col
+          name: default.list_bucketing_mul_col_n0
+        name: default.list_bucketing_mul_col_n0
      Processor Tree:
       TableScan
-        alias: list_bucketing_mul_col
+        alias: list_bucketing_mul_col_n0
        Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
        GatherStats: false
        Filter Operator
@@ -455,25 +455,25 @@ STAGE PLANS:
          Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
          ListSink
 
-PREHOOK: query: select * from list_bucketing_mul_col
+PREHOOK: query: select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+PREHOOK: Input: default@list_bucketing_mul_col_n0
+PREHOOK: Input: default@list_bucketing_mul_col_n0@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_mul_col
+POSTHOOK: query: select * from list_bucketing_mul_col_n0
 where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@list_bucketing_mul_col_n0
+POSTHOOK: Input: default@list_bucketing_mul_col_n0@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 1 382 1 val_382 1 2008-04-08 11
 1 382 1 val_382 1 2008-04-08 11
-PREHOOK: query: drop table list_bucketing_mul_col
+PREHOOK: query: drop table list_bucketing_mul_col_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: drop table list_bucketing_mul_col
+PREHOOK: Input: default@list_bucketing_mul_col_n0
+PREHOOK: Output: default@list_bucketing_mul_col_n0
+POSTHOOK: query: drop table list_bucketing_mul_col_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Output: default@list_bucketing_mul_col
+POSTHOOK: Input: default@list_bucketing_mul_col_n0
+POSTHOOK: Output: default@list_bucketing_mul_col_n0
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
index fd8de07cc4..ea621b8e67 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
@@ -1,25 +1,25 @@
-PREHOOK: query: create table list_bucketing_static_part (key String, value String)
+PREHOOK: query: create table list_bucketing_static_part_n4 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
 stored as DIRECTORIES
 STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: create table list_bucketing_static_part (key String, value String)
+PREHOOK: Output: default@list_bucketing_static_part_n4
+POSTHOOK: query: create table list_bucketing_static_part_n4 (key String, value String)
 partitioned by (ds String, hr String)
 skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
 stored as DIRECTORIES
 STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: Output: default@list_bucketing_static_part_n4
 PREHOOK: query: explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n4 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n4 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -58,15 +58,15 @@ STAGE PLANS:
                    columns.comments
                    columns.types string:string
 #### A masked pattern was here ####
-                   name default.list_bucketing_static_part
+                   name default.list_bucketing_static_part_n4
                    partition_columns ds/hr
                    partition_columns.types string:string
-                   serialization.ddl struct list_bucketing_static_part { string key, string value}
+                   serialization.ddl struct list_bucketing_static_part_n4 { string key, string value}
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                 name: default.list_bucketing_static_part
+                 name: default.list_bucketing_static_part_n4
            TotalFiles: 1
            GatherStats: true
            MultiFileSpray: false
@@ -247,15 +247,15 @@ STAGE PLANS:
                    columns.comments
                    columns.types string:string
 #### A masked pattern was here ####
-                   name default.list_bucketing_static_part
+                   name default.list_bucketing_static_part_n4
                    partition_columns ds/hr
                    partition_columns.types string:string
-                   serialization.ddl struct list_bucketing_static_part { string key, string value}
+                   serialization.ddl struct list_bucketing_static_part_n4 { string key, string value}
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
                  serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                 name: default.list_bucketing_static_part
+                 name: default.list_bucketing_static_part_n4
 
  Stage: Stage-2
   Stats Work
@@ -264,38 +264,38 @@ STAGE PLANS:
     Column Stats Desc:
        Columns: key, value
        Column Types: string, string
-       Table: default.list_bucketing_static_part
+       Table: default.list_bucketing_static_part_n4
       Is Table Level Stats: false
 
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+PREHOOK: query: insert overwrite table list_bucketing_static_part_n4 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+PREHOOK: Output: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part_n4 partition (ds = '2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: show partitions list_bucketing_static_part
+POSTHOOK: Output: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part_n4 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part_n4 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show partitions list_bucketing_static_part_n4
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: show partitions list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part_n4
+POSTHOOK: query: show partitions list_bucketing_static_part_n4
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part_n4
 ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: query: desc formatted list_bucketing_static_part_n4 partition (ds='2008-04-08', hr='11')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: Input: default@list_bucketing_static_part_n4
+POSTHOOK: query: desc formatted list_bucketing_static_part_n4 partition (ds='2008-04-08', hr='11')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part_n4
 # col_name data_type comment
 key string
 value string
@@ -308,7 +308,7 @@ hr string
 # Detailed Partition Information
 Partition Value: [2008-04-08, 11]
 Database: default
-Table: list_bucketing_static_part
+Table: list_bucketing_static_part_n4
 #### A masked pattern was here ####
 Partition Parameters:
 COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -330,7 +330,7 @@ Stored As SubDirectories: Yes
 Skewed Columns: [key, value]
 Skewed Values: [[103, val_103], [484, val_484], [51, val_14]]
 #### A masked pattern was here ####
-Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484}
+Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part_n4/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part_n4/ds=2008-04-08/hr=11/key=484/value=val_484}
 Storage Desc Params:
 serialization.format 1
 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
@@ -342,20 +342,20 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 #### A masked pattern was here ####
 1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
+PREHOOK: query: select count(*) from list_bucketing_static_part_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
+POSTHOOK: query: select count(*) from list_bucketing_static_part_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part_n4
 #### A masked pattern was here ####
 1000
 PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484'
+select * from list_bucketing_static_part_n4 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484'
+select * from list_bucketing_static_part_n4 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -379,13 +379,13 @@ STAGE PLANS:
             columns.comments
             columns.types string:string
 #### A masked pattern was here ####
-            name default.list_bucketing_static_part
+            name default.list_bucketing_static_part_n4
             numFiles 6
             numRows 1000
             partition_columns ds/hr
             partition_columns.types string:string
            rawDataSize 9624
-            serialization.ddl struct list_bucketing_static_part { string key, string value}
+            serialization.ddl struct list_bucketing_static_part_n4 { string key, string value}
            serialization.format 1
            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
            totalSize 10898
@@ -402,19 +402,19 @@ STAGE PLANS:
           columns.comments
           columns.types string:string
 #### A masked pattern was here ####
-          name default.list_bucketing_static_part
+          name default.list_bucketing_static_part_n4
           partition_columns ds/hr
           partition_columns.types string:string
-          serialization.ddl struct list_bucketing_static_part { string key, string value}
+          serialization.ddl struct list_bucketing_static_part_n4 { string key, string value}
          serialization.format 1
          serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 #### A masked pattern was here ####
        serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          name: default.list_bucketing_static_part
-        name: default.list_bucketing_static_part
+        name:
default.list_bucketing_static_part_n4 + name: default.list_bucketing_static_part_n4 Processor Tree: TableScan - alias: list_bucketing_static_part + alias: list_bucketing_static_part_n4 Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -427,15 +427,15 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +PREHOOK: query: select * from list_bucketing_static_part_n4 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n4 +PREHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +POSTHOOK: query: select * from list_bucketing_static_part_n4 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n4 +POSTHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 11 @@ -469,15 +469,15 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 51 val_51 2008-04-08 11 51 val_51 2008-04-08 12 51 val_51 2008-04-08 12 -PREHOOK: query: select * from list_bucketing_static_part where key = '51' +PREHOOK: query: select * from list_bucketing_static_part_n4 where key = '51' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n4 +PREHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select * from list_bucketing_static_part where key = '51' +POSTHOOK: query: select * from list_bucketing_static_part_n4 where key = '51' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n4 +POSTHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### 51 val_51 2008-04-08 11 51 val_51 2008-04-08 11 @@ -495,15 +495,15 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -PREHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14' +PREHOOK: query: select * from list_bucketing_static_part_n4 where key = '51' and value = 'val_14' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n4 +PREHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select * from list_bucketing_static_part where 
key = '51' and value = 'val_14' +POSTHOOK: query: select * from list_bucketing_static_part_n4 where key = '51' and value = 'val_14' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n4 +POSTHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key < '51' PREHOOK: type: QUERY @@ -518,15 +518,15 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 910 -PREHOOK: query: select count(1) from list_bucketing_static_part where key < '51' +PREHOOK: query: select count(1) from list_bucketing_static_part_n4 where key < '51' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n4 +PREHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from list_bucketing_static_part where key < '51' +POSTHOOK: query: select count(1) from list_bucketing_static_part_n4 where key < '51' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n4 +POSTHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### 910 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51' @@ -542,15 +542,15 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 914 -PREHOOK: query: select count(1) from list_bucketing_static_part where key <= '51' +PREHOOK: query: select count(1) from list_bucketing_static_part_n4 where key <= '51' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n4 +PREHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from list_bucketing_static_part where key <= '51' +POSTHOOK: query: select count(1) from list_bucketing_static_part_n4 where key <= '51' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n4 +POSTHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### 914 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51' @@ -566,15 +566,15 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 86 -PREHOOK: query: select count(1) from list_bucketing_static_part where key > '51' +PREHOOK: query: select count(1) from list_bucketing_static_part_n4 where key > '51' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n4 +PREHOOK: Input: 
default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from list_bucketing_static_part where key > '51' +POSTHOOK: query: select count(1) from list_bucketing_static_part_n4 where key > '51' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n4 +POSTHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### 86 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51' @@ -590,22 +590,22 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 90 -PREHOOK: query: select count(1) from list_bucketing_static_part where key >= '51' +PREHOOK: query: select count(1) from list_bucketing_static_part_n4 where key >= '51' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n4 +PREHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from list_bucketing_static_part where key >= '51' +POSTHOOK: query: select count(1) from list_bucketing_static_part_n4 where key >= '51' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n4 +POSTHOOK: Input: default@list_bucketing_static_part_n4@ds=2008-04-08/hr=11 #### A masked pattern was here #### 90 -PREHOOK: query: drop table list_bucketing_static_part +PREHOOK: query: drop table list_bucketing_static_part_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: drop table list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n4 +PREHOOK: Output: default@list_bucketing_static_part_n4 +POSTHOOK: query: drop table list_bucketing_static_part_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Output: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n4 +POSTHOOK: Output: default@list_bucketing_static_part_n4 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out index c75d161012..3a6b27be2e 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES +PREHOOK: query: create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES +PREHOOK: Output: default@list_bucketing_static_part_n1 +POSTHOOK: query: create table 
list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@list_bucketing_static_part +POSTHOOK: Output: default@list_bucketing_static_part_n1 PREHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' +insert overwrite table list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' +insert overwrite table list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -48,15 +48,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n1 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -237,15 +237,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n1 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n1 Stage: Stage-2 Stats Work @@ -254,29 +254,29 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_static_part + Table: default.list_bucketing_static_part_n1 Is Table Level Stats: false -PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' +PREHOOK: query: insert overwrite table list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' +PREHOOK: Output: default@list_bucketing_static_part_n1@ds=2008-04-08/hr=11 +POSTHOOK: query: 
insert overwrite table list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +POSTHOOK: Output: default@list_bucketing_static_part_n1@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: list_bucketing_static_part_n1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_static_part_n1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: Input: default@list_bucketing_static_part_n1 +POSTHOOK: query: desc formatted list_bucketing_static_part_n1 partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n1 # col_name data_type comment key string value string @@ -289,7 +289,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: list_bucketing_static_part +Table: list_bucketing_static_part_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -311,7 +311,7 @@ Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[484]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484} +Skewed Value to Truncated Path: {[484]=/list_bucketing_static_part_n1/ds=2008-04-08/hr=11/key=484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds='2008-04-08' @@ -323,13 +323,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart #### A masked pattern was here #### 1000 -PREHOOK: query: select count(1) from list_bucketing_static_part where ds='2008-04-08' +PREHOOK: query: select count(1) from list_bucketing_static_part_n1 where ds='2008-04-08' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n1 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from list_bucketing_static_part where ds='2008-04-08' +POSTHOOK: query: select count(1) from list_bucketing_static_part_n1 where ds='2008-04-08' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n1 #### A masked pattern was here #### 1000 PREHOOK: query: select key, value from srcpart where ds='2008-04-08' and hr='11' and key = "484" @@ -344,10 +344,10 @@ 
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 #### A masked pattern was here #### 484 val_484 PREHOOK: query: explain extended -select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and key = "484" +select key, value from list_bucketing_static_part_n1 where ds='2008-04-08' and hr='11' and key = "484" PREHOOK: type: QUERY POSTHOOK: query: explain extended -select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and key = "484" +select key, value from list_bucketing_static_part_n1 where ds='2008-04-08' and hr='11' and key = "484" POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -371,13 +371,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n1 numFiles 4 numRows 1000 partition_columns ds/hr partition_columns.types string:string rawDataSize 10624 - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 11624 @@ -394,19 +394,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n1 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.list_bucketing_static_part - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n1 + name: default.list_bucketing_static_part_n1 Processor Tree: TableScan - alias: list_bucketing_static_part + alias: list_bucketing_static_part_n1 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -419,23 +419,23 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and key = "484" +PREHOOK: query: select key, value from list_bucketing_static_part_n1 where ds='2008-04-08' and hr='11' and key = "484" PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n1 +PREHOOK: Input: default@list_bucketing_static_part_n1@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and key = "484" +POSTHOOK: query: select key, value from list_bucketing_static_part_n1 where ds='2008-04-08' and hr='11' and key = "484" POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n1 +POSTHOOK: Input: default@list_bucketing_static_part_n1@ds=2008-04-08/hr=11 #### A masked pattern was here #### 484 val_484 484 val_484 -PREHOOK: query: drop table 
list_bucketing_static_part +PREHOOK: query: drop table list_bucketing_static_part_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: drop table list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n1 +PREHOOK: Output: default@list_bucketing_static_part_n1 +POSTHOOK: query: drop table list_bucketing_static_part_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Output: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n1 +POSTHOOK: Output: default@list_bucketing_static_part_n1 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out index 967b84dc42..823e9e1135 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out @@ -1,25 +1,25 @@ -PREHOOK: query: create table list_bucketing_static_part (key String, value String) +PREHOOK: query: create table list_bucketing_static_part_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: create table list_bucketing_static_part (key String, value String) +PREHOOK: Output: default@list_bucketing_static_part_n2 +POSTHOOK: query: create table list_bucketing_static_part_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@list_bucketing_static_part +POSTHOOK: Output: default@list_bucketing_static_part_n2 PREHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -58,15 +58,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n2 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -247,15 +247,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name 
default.list_bucketing_static_part_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n2 Stage: Stage-2 Stats Work @@ -264,38 +264,38 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_static_part + Table: default.list_bucketing_static_part_n2 Is Table Level Stats: false -PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +PREHOOK: query: insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +PREHOOK: Output: default@list_bucketing_static_part_n2@ds=2008-04-08/hr=11 +POSTHOOK: query: insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions list_bucketing_static_part +POSTHOOK: Output: default@list_bucketing_static_part_n2@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: list_bucketing_static_part_n2 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_static_part_n2 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions list_bucketing_static_part_n2 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: show partitions list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n2 +POSTHOOK: query: show partitions list_bucketing_static_part_n2 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n2 ds=2008-04-08/hr=11 -PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc formatted list_bucketing_static_part_n2 partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: Input: 
default@list_bucketing_static_part_n2 +POSTHOOK: query: desc formatted list_bucketing_static_part_n2 partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n2 # col_name data_type comment key string value string @@ -308,7 +308,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: list_bucketing_static_part +Table: list_bucketing_static_part_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -330,15 +330,15 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484} +Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part_n2/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part_n2/ds=2008-04-08/hr=11/key=484/value=val_484} Storage Desc Params: serialization.format 1 PREHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -382,14 +382,14 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n2 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -579,14 +579,14 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n2 Stage: Stage-2 Stats Work @@ -595,7 +595,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_static_part + Table: 
default.list_bucketing_static_part_n2 Is Table Level Stats: false Stage: Stage-3 @@ -618,9 +618,9 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### @@ -636,15 +636,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n2 + name: default.list_bucketing_static_part_n2 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat Truncated Path -> Alias: #### A masked pattern was here #### @@ -669,9 +669,9 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### @@ -687,15 +687,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n2 + name: default.list_bucketing_static_part_n2 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat Truncated Path -> Alias: #### A masked pattern was here #### @@ -706,35 +706,35 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +PREHOOK: query: insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +PREHOOK: Output: default@list_bucketing_static_part_n2@ds=2008-04-08/hr=11 +POSTHOOK: query: insert overwrite table list_bucketing_static_part_n2 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions list_bucketing_static_part +POSTHOOK: Output: default@list_bucketing_static_part_n2@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: list_bucketing_static_part_n2 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_static_part_n2 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions list_bucketing_static_part_n2 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: show partitions list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n2 +POSTHOOK: query: show partitions list_bucketing_static_part_n2 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n2 ds=2008-04-08/hr=11 -PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc formatted list_bucketing_static_part_n2 partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: Input: default@list_bucketing_static_part_n2 +POSTHOOK: query: desc formatted list_bucketing_static_part_n2 partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n2 # col_name data_type comment key string value string @@ -747,7 +747,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: list_bucketing_static_part +Table: list_bucketing_static_part_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -769,7 +769,7 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484} +Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part_n2/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part_n2/ds=2008-04-08/hr=11/key=484/value=val_484} Storage Desc Params: 
serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' @@ -781,20 +781,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart #### A masked pattern was here #### 1000 -PREHOOK: query: select count(*) from list_bucketing_static_part +PREHOOK: query: select count(*) from list_bucketing_static_part_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n2 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from list_bucketing_static_part +POSTHOOK: query: select count(*) from list_bucketing_static_part_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n2 #### A masked pattern was here #### 1000 PREHOOK: query: explain extended -select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +select * from list_bucketing_static_part_n2 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' PREHOOK: type: QUERY POSTHOOK: query: explain extended -select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +select * from list_bucketing_static_part_n2 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -818,13 +818,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 numFiles 4 numRows 1000 partition_columns ds/hr partition_columns.types string:string rawDataSize 9624 - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe totalSize 10786 @@ -841,19 +841,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n2 + name: default.list_bucketing_static_part_n2 Processor Tree: TableScan - alias: list_bucketing_static_part + alias: list_bucketing_static_part_n2 Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -866,15 +866,15 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +PREHOOK: query: select * from list_bucketing_static_part_n2 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: 
default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n2 +PREHOOK: Input: default@list_bucketing_static_part_n2@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +POSTHOOK: query: select * from list_bucketing_static_part_n2 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n2 +POSTHOOK: Input: default@list_bucketing_static_part_n2@ds=2008-04-08/hr=11 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 11 @@ -892,11 +892,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: drop table list_bucketing_static_part +PREHOOK: query: drop table list_bucketing_static_part_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: drop table list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n2 +PREHOOK: Output: default@list_bucketing_static_part_n2 +POSTHOOK: query: drop table list_bucketing_static_part_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Output: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n2 +POSTHOOK: Output: default@list_bucketing_static_part_n2 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out index d67ec0afeb..d71834f94b 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out @@ -1,22 +1,22 @@ -PREHOOK: query: create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: query: create table list_bucketing_dynamic_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: Output: default@list_bucketing_dynamic_part_n1 +POSTHOOK: query: create table list_bucketing_dynamic_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@list_bucketing_dynamic_part +POSTHOOK: Output: default@list_bucketing_dynamic_part_n1 PREHOOK: query: explain extended -insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' +insert overwrite table list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' +insert 
overwrite table list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -54,15 +54,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n1 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -243,15 +243,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n1 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n1 Stage: Stage-2 Stats Work @@ -260,32 +260,32 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_dynamic_part + Table: default.list_bucketing_dynamic_part_n1 Is Table Level Stats: false -PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' +PREHOOK: query: insert overwrite table list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' +PREHOOK: Output: default@list_bucketing_dynamic_part_n1@ds=2008-04-08 +POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part 
PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') +POSTHOOK: Output: default@list_bucketing_dynamic_part_n1@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@list_bucketing_dynamic_part_n1@ds=2008-04-08/hr=12 +POSTHOOK: Lineage: list_bucketing_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') +PREHOOK: Input: default@list_bucketing_dynamic_part_n1 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n1 # col_name data_type comment key string value string @@ -298,7 +298,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -320,15 +320,15 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484/value=val_484} +Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n1/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n1/ds=2008-04-08/hr=11/key=484/value=val_484} Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12') +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr='12') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12') +PREHOOK: Input: default@list_bucketing_dynamic_part_n1 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n1 partition (ds='2008-04-08', hr='12') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n1 # col_name data_type comment key string value string @@ 
-341,7 +341,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 12] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -363,7 +363,7 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=484/value=val_484} +Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n1/ds=2008-04-08/hr=12/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n1/ds=2008-04-08/hr=12/key=484/value=val_484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds='2008-04-08' @@ -375,13 +375,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart #### A masked pattern was here #### 1000 -PREHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08' +PREHOOK: query: select count(1) from list_bucketing_dynamic_part_n1 where ds='2008-04-08' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_dynamic_part +PREHOOK: Input: default@list_bucketing_dynamic_part_n1 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08' +POSTHOOK: query: select count(1) from list_bucketing_dynamic_part_n1 where ds='2008-04-08' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n1 #### A masked pattern was here #### 1000 PREHOOK: query: select key, value from srcpart where ds='2008-04-08' and key = "103" and value ="val_103" @@ -401,10 +401,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 103 val_103 103 val_103 PREHOOK: query: explain extended -select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103" +select key, value, ds, hr from list_bucketing_dynamic_part_n1 where ds='2008-04-08' and key = "103" and value ="val_103" PREHOOK: type: QUERY POSTHOOK: query: explain extended -select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103" +select key, value, ds, hr from list_bucketing_dynamic_part_n1 where ds='2008-04-08' and key = "103" and value ="val_103" POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -428,13 +428,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n1 numFiles 3 numRows 500 partition_columns ds/hr partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -451,16 +451,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n1 partition_columns ds/hr partition_columns.types 
string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.list_bucketing_dynamic_part - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n1 + name: default.list_bucketing_dynamic_part_n1 Partition input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -475,13 +475,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n1 numFiles 3 numRows 500 partition_columns ds/hr partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -498,19 +498,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n1 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.list_bucketing_dynamic_part - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n1 + name: default.list_bucketing_dynamic_part_n1 Processor Tree: TableScan - alias: list_bucketing_dynamic_part + alias: list_bucketing_dynamic_part_n1 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -523,27 +523,27 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103" +PREHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part_n1 where ds='2008-04-08' and key = "103" and value ="val_103" PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_dynamic_part -PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11 -PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12 +PREHOOK: Input: default@list_bucketing_dynamic_part_n1 +PREHOOK: Input: default@list_bucketing_dynamic_part_n1@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_dynamic_part_n1@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103" +POSTHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part_n1 where ds='2008-04-08' and key = "103" and value ="val_103" POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: Input: 
default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n1 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n1@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n1@ds=2008-04-08/hr=12 #### A masked pattern was here #### 103 val_103 2008-04-08 11 103 val_103 2008-04-08 11 103 val_103 2008-04-08 12 103 val_103 2008-04-08 12 -PREHOOK: query: drop table list_bucketing_dynamic_part +PREHOOK: query: drop table list_bucketing_dynamic_part_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: drop table list_bucketing_dynamic_part +PREHOOK: Input: default@list_bucketing_dynamic_part_n1 +PREHOOK: Output: default@list_bucketing_dynamic_part_n1 +POSTHOOK: query: drop table list_bucketing_dynamic_part_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: Output: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n1 +POSTHOOK: Output: default@list_bucketing_dynamic_part_n1 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out index 42afe47d88..cd3228e2a8 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out @@ -1,25 +1,25 @@ -PREHOOK: query: create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: query: create table list_bucketing_dynamic_part_n3 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: Output: default@list_bucketing_dynamic_part_n3 +POSTHOOK: query: create table list_bucketing_dynamic_part_n3 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@list_bucketing_dynamic_part +POSTHOOK: Output: default@list_bucketing_dynamic_part_n3 PREHOOK: query: explain extended -insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -58,15 +58,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + 
serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n3 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -247,15 +247,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n3 Stage: Stage-2 Stats Work @@ -264,42 +264,42 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_dynamic_part + Table: default.list_bucketing_dynamic_part_n3 Is Table Level Stats: false -PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +PREHOOK: query: insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +PREHOOK: Output: default@list_bucketing_dynamic_part_n3@ds=2008-04-08 +POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions list_bucketing_dynamic_part +POSTHOOK: Output: default@list_bucketing_dynamic_part_n3@ds=2008-04-08/hr=a1 +POSTHOOK: Output: default@list_bucketing_dynamic_part_n3@ds=2008-04-08/hr=b1 +POSTHOOK: Lineage: list_bucketing_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=a1).key SIMPLE 
[(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions list_bucketing_dynamic_part_n3 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: show partitions list_bucketing_dynamic_part +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 +POSTHOOK: query: show partitions list_bucketing_dynamic_part_n3 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 ds=2008-04-08/hr=a1 ds=2008-04-08/hr=b1 -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1') +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='a1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1') +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='a1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 # col_name data_type comment key string value string @@ -312,7 +312,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, a1] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n3 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -335,12 +335,12 @@ Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='b1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='b1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 # col_name data_type comment key string value string @@ -353,7 +353,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, b1] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n3 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -375,15 +375,15 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] #### A masked 
pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484} +Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n3/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n3/ds=2008-04-08/hr=b1/key=484/value=val_484} Storage Desc Params: serialization.format 1 PREHOOK: query: explain extended -insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -427,15 +427,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns hr partition_columns.types string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n3 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -625,15 +625,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns hr partition_columns.types string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n3 Stage: Stage-2 Stats Work @@ -642,7 +642,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_dynamic_part + Table: default.list_bucketing_dynamic_part_n3 Is Table Level Stats: false Stage: Stage-3 @@ -665,10 +665,10 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns hr partition_columns.types string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### @@ -684,16 +684,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name 
default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns hr partition_columns.types string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n3 + name: default.list_bucketing_dynamic_part_n3 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat Truncated Path -> Alias: #### A masked pattern was here #### @@ -718,10 +718,10 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns hr partition_columns.types string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### @@ -737,16 +737,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns hr partition_columns.types string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n3 + name: default.list_bucketing_dynamic_part_n3 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat Truncated Path -> Alias: #### A masked pattern was here #### @@ -757,39 +757,39 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +PREHOOK: query: insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +PREHOOK: Output: default@list_bucketing_dynamic_part_n3@ds=2008-04-08 +POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part_n3 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -POSTHOOK: Output: 
default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions list_bucketing_dynamic_part +POSTHOOK: Output: default@list_bucketing_dynamic_part_n3@ds=2008-04-08/hr=a1 +POSTHOOK: Output: default@list_bucketing_dynamic_part_n3@ds=2008-04-08/hr=b1 +POSTHOOK: Lineage: list_bucketing_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions list_bucketing_dynamic_part_n3 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: show partitions list_bucketing_dynamic_part +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 +POSTHOOK: query: show partitions list_bucketing_dynamic_part_n3 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 ds=2008-04-08/hr=a1 ds=2008-04-08/hr=b1 -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1') +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='a1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1') +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='a1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 # col_name data_type comment key string value string @@ -802,7 +802,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, a1] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n3 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -825,12 +825,12 @@ Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='b1') PREHOOK: 
type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n3 partition (ds='2008-04-08', hr='b1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 # col_name data_type comment key string value string @@ -843,7 +843,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, b1] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n3 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -865,7 +865,7 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484} +Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n3/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n3/ds=2008-04-08/hr=b1/key=484/value=val_484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' @@ -877,20 +877,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart #### A masked pattern was here #### 1000 -PREHOOK: query: select count(*) from list_bucketing_dynamic_part +PREHOOK: query: select count(*) from list_bucketing_dynamic_part_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_dynamic_part +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from list_bucketing_dynamic_part +POSTHOOK: query: select count(*) from list_bucketing_dynamic_part_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 #### A masked pattern was here #### 1000 PREHOOK: query: explain extended -select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484' +select * from list_bucketing_dynamic_part_n3 where key = '484' and value = 'val_484' PREHOOK: type: QUERY POSTHOOK: query: explain extended -select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484' +select * from list_bucketing_dynamic_part_n3 where key = '484' and value = 'val_484' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -914,13 +914,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 numFiles 1 numRows 16 partition_columns ds/hr partition_columns.types string:string rawDataSize 136 - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe totalSize 254 @@ -937,16 +937,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name 
default.list_bucketing_dynamic_part_n3 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n3 + name: default.list_bucketing_dynamic_part_n3 Partition input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat @@ -961,13 +961,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 numFiles 4 numRows 984 partition_columns ds/hr partition_columns.types string:string rawDataSize 9488 - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe totalSize 10622 @@ -984,19 +984,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n3 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n3 + name: default.list_bucketing_dynamic_part_n3 Processor Tree: TableScan - alias: list_bucketing_dynamic_part + alias: list_bucketing_dynamic_part_n3 Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -1009,17 +1009,17 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484' +PREHOOK: query: select * from list_bucketing_dynamic_part_n3 where key = '484' and value = 'val_484' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_dynamic_part -PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 +PREHOOK: Input: default@list_bucketing_dynamic_part_n3@ds=2008-04-08/hr=a1 +PREHOOK: Input: default@list_bucketing_dynamic_part_n3@ds=2008-04-08/hr=b1 #### A masked pattern was here #### -POSTHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484' +POSTHOOK: query: select * from list_bucketing_dynamic_part_n3 where key = '484' and value = 'val_484' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -POSTHOOK: 
Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3@ds=2008-04-08/hr=a1 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3@ds=2008-04-08/hr=b1 #### A masked pattern was here #### 484 val_484 2008-04-08 b1 484 val_484 2008-04-08 b1 @@ -1037,11 +1037,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: drop table list_bucketing_dynamic_part +PREHOOK: query: drop table list_bucketing_dynamic_part_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: drop table list_bucketing_dynamic_part +PREHOOK: Input: default@list_bucketing_dynamic_part_n3 +PREHOOK: Output: default@list_bucketing_dynamic_part_n3 +POSTHOOK: query: drop table list_bucketing_dynamic_part_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: Output: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n3 +POSTHOOK: Output: default@list_bucketing_dynamic_part_n3 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out index 6b407ad476..3aa3940132 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out @@ -1,25 +1,25 @@ -PREHOOK: query: create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: query: create table list_bucketing_dynamic_part_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: Output: default@list_bucketing_dynamic_part_n2 +POSTHOOK: query: create table list_bucketing_dynamic_part_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@list_bucketing_dynamic_part +POSTHOOK: Output: default@list_bucketing_dynamic_part_n2 PREHOOK: query: explain extended -insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -58,15 +58,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part 
{ string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n2 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -247,15 +247,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n2 Stage: Stage-2 Stats Work @@ -264,42 +264,42 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_dynamic_part + Table: default.list_bucketing_dynamic_part_n2 Is Table Level Stats: false -PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +PREHOOK: query: insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) +PREHOOK: Output: default@list_bucketing_dynamic_part_n2@ds=2008-04-08 +POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions list_bucketing_dynamic_part +POSTHOOK: Output: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=a1 +POSTHOOK: Output: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=b1 +POSTHOOK: Lineage: list_bucketing_dynamic_part_n2 
PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n2 PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n2 PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_dynamic_part_n2 PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions list_bucketing_dynamic_part_n2 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: show partitions list_bucketing_dynamic_part +PREHOOK: Input: default@list_bucketing_dynamic_part_n2 +POSTHOOK: query: show partitions list_bucketing_dynamic_part_n2 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2 ds=2008-04-08/hr=a1 ds=2008-04-08/hr=b1 -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1') +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='a1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1') +PREHOOK: Input: default@list_bucketing_dynamic_part_n2 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='a1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2 # col_name data_type comment key string value string @@ -312,7 +312,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, a1] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -335,12 +335,12 @@ Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] Storage Desc Params: serialization.format 1 -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') +PREHOOK: Input: default@list_bucketing_dynamic_part_n2 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2 # col_name data_type comment key string value string @@ -353,7 +353,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, b1] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -375,23 +375,23 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, 
val_484], [51, val_14]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484} +Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n2/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n2/ds=2008-04-08/hr=b1/key=484/value=val_484} Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate +PREHOOK: query: alter table list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1') concatenate PREHOOK: type: ALTER_PARTITION_MERGE -PREHOOK: Input: default@list_bucketing_dynamic_part -PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 -POSTHOOK: query: alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate +PREHOOK: Input: default@list_bucketing_dynamic_part_n2 +PREHOOK: Output: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=b1 +POSTHOOK: query: alter table list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1') concatenate POSTHOOK: type: ALTER_PARTITION_MERGE -POSTHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 -PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2 +POSTHOOK: Output: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=b1 +PREHOOK: query: desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') +PREHOOK: Input: default@list_bucketing_dynamic_part_n2 +POSTHOOK: query: desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', hr='b1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2 # col_name data_type comment key string value string @@ -404,7 +404,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, b1] Database: default -Table: list_bucketing_dynamic_part +Table: list_bucketing_dynamic_part_n2 #### A masked pattern was here #### Partition Parameters: numFiles 3 @@ -425,7 +425,7 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484} +Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part_n2/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part_n2/ds=2008-04-08/hr=b1/key=484/value=val_484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' @@ -437,24 +437,24 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart #### A masked pattern was here #### 1000 -PREHOOK: query: select count(*) from list_bucketing_dynamic_part +PREHOOK: query: select count(*) from list_bucketing_dynamic_part_n2 PREHOOK: type: QUERY -PREHOOK: Input: 
default@list_bucketing_dynamic_part -PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 +PREHOOK: Input: default@list_bucketing_dynamic_part_n2 +PREHOOK: Input: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=a1 +PREHOOK: Input: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=b1 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from list_bucketing_dynamic_part +POSTHOOK: query: select count(*) from list_bucketing_dynamic_part_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=a1 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=b1 #### A masked pattern was here #### 1000 PREHOOK: query: explain extended -select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484' +select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 'val_484' PREHOOK: type: QUERY POSTHOOK: query: explain extended -select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484' +select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 'val_484' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -478,13 +478,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n2 numFiles 2 numRows 16 partition_columns ds/hr partition_columns.types string:string rawDataSize 136 - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe totalSize 310 @@ -501,16 +501,16 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n2 + name: default.list_bucketing_dynamic_part_n2 Partition input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat @@ -524,13 +524,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n2 numFiles 3 numRows 984 partition_columns ds/hr partition_columns.types string:string rawDataSize 9488 - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n2 { string key, string value} serialization.format 1 
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe totalSize 10586 @@ -547,19 +547,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_dynamic_part + name default.list_bucketing_dynamic_part_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_dynamic_part { string key, string value} + serialization.ddl struct list_bucketing_dynamic_part_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_dynamic_part - name: default.list_bucketing_dynamic_part + name: default.list_bucketing_dynamic_part_n2 + name: default.list_bucketing_dynamic_part_n2 Processor Tree: TableScan - alias: list_bucketing_dynamic_part + alias: list_bucketing_dynamic_part_n2 Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -572,17 +572,17 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484' +PREHOOK: query: select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 'val_484' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_dynamic_part -PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 +PREHOOK: Input: default@list_bucketing_dynamic_part_n2 +PREHOOK: Input: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=a1 +PREHOOK: Input: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=b1 #### A masked pattern was here #### -POSTHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484' +POSTHOOK: query: select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 'val_484' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1 -POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=a1 +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2@ds=2008-04-08/hr=b1 #### A masked pattern was here #### 484 val_484 2008-04-08 b1 484 val_484 2008-04-08 b1 @@ -600,11 +600,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: drop table list_bucketing_dynamic_part +PREHOOK: query: drop table list_bucketing_dynamic_part_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@list_bucketing_dynamic_part -PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: drop table list_bucketing_dynamic_part +PREHOOK: Input: default@list_bucketing_dynamic_part_n2 +PREHOOK: Output: default@list_bucketing_dynamic_part_n2 +POSTHOOK: query: drop table list_bucketing_dynamic_part_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: Output: default@list_bucketing_dynamic_part +POSTHOOK: Input: default@list_bucketing_dynamic_part_n2 +POSTHOOK: Output: default@list_bucketing_dynamic_part_n2 diff --git 
a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out index 7afece8807..78f1d8ee8f 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out @@ -1,25 +1,25 @@ -PREHOOK: query: create table list_bucketing_static_part (key String, value String) +PREHOOK: query: create table list_bucketing_static_part_n0 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ('484','103') stored as DIRECTORIES STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: create table list_bucketing_static_part (key String, value String) +PREHOOK: Output: default@list_bucketing_static_part_n0 +POSTHOOK: query: create table list_bucketing_static_part_n0 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ('484','103') stored as DIRECTORIES STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@list_bucketing_static_part +POSTHOOK: Output: default@list_bucketing_static_part_n0 PREHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -58,15 +58,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n0 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -247,15 +247,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n0 Stage: Stage-2 Stats Work @@ -264,38 +264,38 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_static_part + Table: default.list_bucketing_static_part_n0 Is Table Level Stats: false 
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +PREHOOK: query: insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +PREHOOK: Output: default@list_bucketing_static_part_n0@ds=2008-04-08/hr=11 +POSTHOOK: query: insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions list_bucketing_static_part +POSTHOOK: Output: default@list_bucketing_static_part_n0@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: list_bucketing_static_part_n0 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_static_part_n0 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions list_bucketing_static_part_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: show partitions list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n0 +POSTHOOK: query: show partitions list_bucketing_static_part_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n0 ds=2008-04-08/hr=11 -PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: Input: default@list_bucketing_static_part_n0 +POSTHOOK: query: desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n0 # col_name data_type comment key string value string @@ -308,7 +308,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: list_bucketing_static_part +Table: list_bucketing_static_part_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -330,15 +330,15 @@ Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[103], [484]] #### A masked 
pattern was here #### -Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484} +Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part_n0/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part_n0/ds=2008-04-08/hr=11/key=484} Storage Desc Params: serialization.format 1 PREHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -382,14 +382,14 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n0 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -579,14 +579,14 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n0 Stage: Stage-2 Stats Work @@ -595,7 +595,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.list_bucketing_static_part + Table: default.list_bucketing_static_part_n0 Is Table Level Stats: false Stage: Stage-3 @@ -618,9 +618,9 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### @@ -636,15 +636,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} 
+ serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n0 + name: default.list_bucketing_static_part_n0 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat Truncated Path -> Alias: #### A masked pattern was here #### @@ -669,9 +669,9 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### @@ -687,15 +687,15 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n0 + name: default.list_bucketing_static_part_n0 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat Truncated Path -> Alias: #### A masked pattern was here #### @@ -706,35 +706,35 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +PREHOOK: query: insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') +PREHOOK: Output: default@list_bucketing_static_part_n0@ds=2008-04-08/hr=11 +POSTHOOK: query: insert overwrite table list_bucketing_static_part_n0 partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions 
list_bucketing_static_part +POSTHOOK: Output: default@list_bucketing_static_part_n0@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: list_bucketing_static_part_n0 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: list_bucketing_static_part_n0 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions list_bucketing_static_part_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: show partitions list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n0 +POSTHOOK: query: show partitions list_bucketing_static_part_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n0 ds=2008-04-08/hr=11 -PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: Input: default@list_bucketing_static_part_n0 +POSTHOOK: query: desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n0 # col_name data_type comment key string value string @@ -747,7 +747,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: list_bucketing_static_part +Table: list_bucketing_static_part_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -769,7 +769,7 @@ Stored As SubDirectories: Yes Skewed Columns: [key] Skewed Values: [[103], [484]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484} +Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part_n0/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part_n0/ds=2008-04-08/hr=11/key=484} Storage Desc Params: serialization.format 1 PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' @@ -781,20 +781,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart #### A masked pattern was here #### 1000 -PREHOOK: query: select count(*) from list_bucketing_static_part +PREHOOK: query: select count(*) from list_bucketing_static_part_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from list_bucketing_static_part +POSTHOOK: query: select count(*) from list_bucketing_static_part_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n0 #### A masked pattern was here #### 1000 PREHOOK: query: explain extended -select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +select * from list_bucketing_static_part_n0 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 
'val_484' PREHOOK: type: QUERY POSTHOOK: query: explain extended -select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +select * from list_bucketing_static_part_n0 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -818,13 +818,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 numFiles 4 numRows 1000 partition_columns ds/hr partition_columns.types string:string rawDataSize 9624 - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe totalSize 10786 @@ -841,19 +841,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.list_bucketing_static_part + name default.list_bucketing_static_part_n0 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct list_bucketing_static_part { string key, string value} + serialization.ddl struct list_bucketing_static_part_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.list_bucketing_static_part - name: default.list_bucketing_static_part + name: default.list_bucketing_static_part_n0 + name: default.list_bucketing_static_part_n0 Processor Tree: TableScan - alias: list_bucketing_static_part + alias: list_bucketing_static_part_n0 Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -866,15 +866,15 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +PREHOOK: query: select * from list_bucketing_static_part_n0 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' PREHOOK: type: QUERY -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +PREHOOK: Input: default@list_bucketing_static_part_n0 +PREHOOK: Input: default@list_bucketing_static_part_n0@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' +POSTHOOK: query: select * from list_bucketing_static_part_n0 where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' POSTHOOK: type: QUERY -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@list_bucketing_static_part_n0 +POSTHOOK: Input: default@list_bucketing_static_part_n0@ds=2008-04-08/hr=11 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 11 @@ -892,11 +892,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: drop table list_bucketing_static_part +PREHOOK: 
query: drop table list_bucketing_static_part_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@list_bucketing_static_part -PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: drop table list_bucketing_static_part +PREHOOK: Input: default@list_bucketing_static_part_n0 +PREHOOK: Output: default@list_bucketing_static_part_n0 +POSTHOOK: query: drop table list_bucketing_static_part_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@list_bucketing_static_part -POSTHOOK: Output: default@list_bucketing_static_part +POSTHOOK: Input: default@list_bucketing_static_part_n0 +POSTHOOK: Output: default@list_bucketing_static_part_n0 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out index 54e9009a9f..a7cf4e90c4 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out @@ -1,35 +1,35 @@ -PREHOOK: query: create table fact_daily (key String, value String) +PREHOOK: query: create table fact_daily_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@fact_daily -POSTHOOK: query: create table fact_daily (key String, value String) +PREHOOK: Output: default@fact_daily_n2 +POSTHOOK: query: create table fact_daily_n2 (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@fact_daily -PREHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '4') +POSTHOOK: Output: default@fact_daily_n2 +PREHOOK: query: insert overwrite table fact_daily_n2 partition (ds = '1', hr = '4') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@fact_daily@ds=1/hr=4 -POSTHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '4') +PREHOOK: Output: default@fact_daily_n2@ds=1/hr=4 +POSTHOOK: query: insert overwrite table fact_daily_n2 partition (ds = '1', hr = '4') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@fact_daily@ds=1/hr=4 -POSTHOOK: Lineage: fact_daily PARTITION(ds=1,hr=4).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: fact_daily PARTITION(ds=1,hr=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted fact_daily PARTITION (ds = '1', hr='4') +POSTHOOK: Output: default@fact_daily_n2@ds=1/hr=4 +POSTHOOK: Lineage: fact_daily_n2 PARTITION(ds=1,hr=4).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fact_daily_n2 PARTITION(ds=1,hr=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted fact_daily_n2 PARTITION (ds = '1', hr='4') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@fact_daily -POSTHOOK: query: describe formatted fact_daily PARTITION (ds = '1', hr='4') +PREHOOK: Input: default@fact_daily_n2 +POSTHOOK: query: describe formatted fact_daily_n2 PARTITION (ds = '1', hr='4') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@fact_daily +POSTHOOK: Input: default@fact_daily_n2 # 
col_name data_type comment key string value string @@ -42,7 +42,7 @@ hr string # Detailed Partition Information Partition Value: [1, 4] Database: default -Table: fact_daily +Table: fact_daily_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -64,21 +64,21 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[238, val_238], [484, val_484]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[238, val_238]=/fact_daily/ds=1/hr=4/key=238/value=val_238, [484, val_484]=/fact_daily/ds=1/hr=4/key=484/value=val_484} +Skewed Value to Truncated Path: {[238, val_238]=/fact_daily_n2/ds=1/hr=4/key=238/value=val_238, [484, val_484]=/fact_daily_n2/ds=1/hr=4/key=484/value=val_484} Storage Desc Params: serialization.format 1 -PREHOOK: query: SELECT count(1) FROM fact_daily WHERE ds='1' and hr='4' +PREHOOK: query: SELECT count(1) FROM fact_daily_n2 WHERE ds='1' and hr='4' PREHOOK: type: QUERY -PREHOOK: Input: default@fact_daily +PREHOOK: Input: default@fact_daily_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT count(1) FROM fact_daily WHERE ds='1' and hr='4' +POSTHOOK: query: SELECT count(1) FROM fact_daily_n2 WHERE ds='1' and hr='4' POSTHOOK: type: QUERY -POSTHOOK: Input: default@fact_daily +POSTHOOK: Input: default@fact_daily_n2 #### A masked pattern was here #### 500 -PREHOOK: query: explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' +PREHOOK: query: explain extended SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and value= 'val_484' PREHOOK: type: QUERY -POSTHOOK: query: explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' +POSTHOOK: query: explain extended SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and value= 'val_484' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -102,13 +102,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n2 numFiles 3 numRows 500 partition_columns ds/hr partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -125,19 +125,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.fact_daily - name: default.fact_daily + name: default.fact_daily_n2 + name: default.fact_daily_n2 Processor Tree: TableScan - alias: fact_daily + alias: fact_daily_n2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -150,20 +150,20 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: SELECT key, value FROM fact_daily WHERE 
ds='1' and hr='4' and value= 'val_484' +PREHOOK: query: SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and value= 'val_484' PREHOOK: type: QUERY -PREHOOK: Input: default@fact_daily -PREHOOK: Input: default@fact_daily@ds=1/hr=4 +PREHOOK: Input: default@fact_daily_n2 +PREHOOK: Input: default@fact_daily_n2@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' +POSTHOOK: query: SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and value= 'val_484' POSTHOOK: type: QUERY -POSTHOOK: Input: default@fact_daily -POSTHOOK: Input: default@fact_daily@ds=1/hr=4 +POSTHOOK: Input: default@fact_daily_n2 +POSTHOOK: Input: default@fact_daily_n2@ds=1/hr=4 #### A masked pattern was here #### 484 val_484 -PREHOOK: query: explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406' +PREHOOK: query: explain extended SELECT key FROM fact_daily_n2 WHERE ds='1' and hr='4' and key= '406' PREHOOK: type: QUERY -POSTHOOK: query: explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406' +POSTHOOK: query: explain extended SELECT key FROM fact_daily_n2 WHERE ds='1' and hr='4' and key= '406' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -187,13 +187,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n2 numFiles 3 numRows 500 partition_columns ds/hr partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -210,19 +210,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.fact_daily - name: default.fact_daily + name: default.fact_daily_n2 + name: default.fact_daily_n2 Processor Tree: TableScan - alias: fact_daily + alias: fact_daily_n2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -235,23 +235,23 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406' +PREHOOK: query: SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and key= '406' PREHOOK: type: QUERY -PREHOOK: Input: default@fact_daily -PREHOOK: Input: default@fact_daily@ds=1/hr=4 +PREHOOK: Input: default@fact_daily_n2 +PREHOOK: Input: default@fact_daily_n2@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406' +POSTHOOK: query: SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and key= '406' POSTHOOK: type: QUERY -POSTHOOK: Input: default@fact_daily -POSTHOOK: Input: default@fact_daily@ds=1/hr=4 +POSTHOOK: Input: default@fact_daily_n2 +POSTHOOK: 
Input: default@fact_daily_n2@ds=1/hr=4 #### A masked pattern was here #### 406 val_406 406 val_406 406 val_406 406 val_406 -PREHOOK: query: explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) +PREHOOK: query: explain extended SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) PREHOOK: type: QUERY -POSTHOOK: query: explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) +POSTHOOK: query: explain extended SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -275,13 +275,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n2 numFiles 3 numRows 500 partition_columns ds/hr partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -298,19 +298,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n2 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.fact_daily - name: default.fact_daily + name: default.fact_daily_n2 + name: default.fact_daily_n2 Processor Tree: TableScan - alias: fact_daily + alias: fact_daily_n2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -323,24 +323,24 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) +PREHOOK: query: SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) PREHOOK: type: QUERY -PREHOOK: Input: default@fact_daily -PREHOOK: Input: default@fact_daily@ds=1/hr=4 +PREHOOK: Input: default@fact_daily_n2 +PREHOOK: Input: default@fact_daily_n2@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) +POSTHOOK: query: SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) POSTHOOK: type: QUERY -POSTHOOK: Input: default@fact_daily -POSTHOOK: Input: default@fact_daily@ds=1/hr=4 +POSTHOOK: Input: default@fact_daily_n2 +POSTHOOK: Input: default@fact_daily_n2@ds=1/hr=4 #### A masked pattern was here #### 238 val_238 238 val_238 484 val_484 -PREHOOK: query: drop table fact_daily +PREHOOK: query: drop 
table fact_daily_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@fact_daily -PREHOOK: Output: default@fact_daily -POSTHOOK: query: drop table fact_daily +PREHOOK: Input: default@fact_daily_n2 +PREHOOK: Output: default@fact_daily_n2 +POSTHOOK: query: drop table fact_daily_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@fact_daily -POSTHOOK: Output: default@fact_daily +POSTHOOK: Input: default@fact_daily_n2 +POSTHOOK: Output: default@fact_daily_n2 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out index cdf8339bd4..273f2d0b02 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out @@ -1,31 +1,31 @@ -PREHOOK: query: create table fact_daily (key String, value String) +PREHOOK: query: create table fact_daily_n3 (key String, value String) partitioned by (ds String, hr String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@fact_daily -POSTHOOK: query: create table fact_daily (key String, value String) +PREHOOK: Output: default@fact_daily_n3 +POSTHOOK: query: create table fact_daily_n3 (key String, value String) partitioned by (ds String, hr String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@fact_daily -PREHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '1') +POSTHOOK: Output: default@fact_daily_n3 +PREHOOK: query: insert overwrite table fact_daily_n3 partition (ds = '1', hr = '1') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@fact_daily@ds=1/hr=1 -POSTHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '1') +PREHOOK: Output: default@fact_daily_n3@ds=1/hr=1 +POSTHOOK: query: insert overwrite table fact_daily_n3 partition (ds = '1', hr = '1') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@fact_daily@ds=1/hr=1 -POSTHOOK: Lineage: fact_daily PARTITION(ds=1,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: fact_daily PARTITION(ds=1,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted fact_daily PARTITION (ds = '1', hr='1') +POSTHOOK: Output: default@fact_daily_n3@ds=1/hr=1 +POSTHOOK: Lineage: fact_daily_n3 PARTITION(ds=1,hr=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fact_daily_n3 PARTITION(ds=1,hr=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted fact_daily_n3 PARTITION (ds = '1', hr='1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@fact_daily -POSTHOOK: query: describe formatted fact_daily PARTITION (ds = '1', hr='1') +PREHOOK: Input: default@fact_daily_n3 +POSTHOOK: query: describe formatted fact_daily_n3 PARTITION (ds = '1', hr='1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@fact_daily +POSTHOOK: Input: default@fact_daily_n3 # col_name data_type comment key string value string @@ -38,7 +38,7 @@ hr string # Detailed Partition Information Partition Value: [1, 1] Database: default -Table: fact_daily +Table: fact_daily_n3 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ 
-58,32 +58,32 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table fact_daily skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES +PREHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@fact_daily -PREHOOK: Output: default@fact_daily -POSTHOOK: query: alter table fact_daily skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES +PREHOOK: Input: default@fact_daily_n3 +PREHOOK: Output: default@fact_daily_n3 +POSTHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@fact_daily -POSTHOOK: Output: default@fact_daily -PREHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '2') +POSTHOOK: Input: default@fact_daily_n3 +POSTHOOK: Output: default@fact_daily_n3 +PREHOOK: query: insert overwrite table fact_daily_n3 partition (ds = '1', hr = '2') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@fact_daily@ds=1/hr=2 -POSTHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '2') +PREHOOK: Output: default@fact_daily_n3@ds=1/hr=2 +POSTHOOK: query: insert overwrite table fact_daily_n3 partition (ds = '1', hr = '2') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@fact_daily@ds=1/hr=2 -POSTHOOK: Lineage: fact_daily PARTITION(ds=1,hr=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: fact_daily PARTITION(ds=1,hr=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted fact_daily PARTITION (ds = '1', hr='2') +POSTHOOK: Output: default@fact_daily_n3@ds=1/hr=2 +POSTHOOK: Lineage: fact_daily_n3 PARTITION(ds=1,hr=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fact_daily_n3 PARTITION(ds=1,hr=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted fact_daily_n3 PARTITION (ds = '1', hr='2') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@fact_daily -POSTHOOK: query: describe formatted fact_daily PARTITION (ds = '1', hr='2') +PREHOOK: Input: default@fact_daily_n3 +POSTHOOK: query: describe formatted fact_daily_n3 PARTITION (ds = '1', hr='2') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@fact_daily +POSTHOOK: Input: default@fact_daily_n3 # col_name data_type comment key string value string @@ -96,7 +96,7 @@ hr string # Detailed Partition Information Partition Value: [1, 2] Database: default -Table: fact_daily +Table: fact_daily_n3 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -118,35 +118,35 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[238, val_238], [484, val_484]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[238, val_238]=/fact_daily/ds=1/hr=2/key=238/value=val_238, [484, val_484]=/fact_daily/ds=1/hr=2/key=484/value=val_484} +Skewed Value to Truncated Path: {[238, val_238]=/fact_daily_n3/ds=1/hr=2/key=238/value=val_238, [484, val_484]=/fact_daily_n3/ds=1/hr=2/key=484/value=val_484} 
Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table fact_daily skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES +PREHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@fact_daily -PREHOOK: Output: default@fact_daily -POSTHOOK: query: alter table fact_daily skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES +PREHOOK: Input: default@fact_daily_n3 +PREHOOK: Output: default@fact_daily_n3 +POSTHOOK: query: alter table fact_daily_n3 skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@fact_daily -POSTHOOK: Output: default@fact_daily -PREHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '3') +POSTHOOK: Input: default@fact_daily_n3 +POSTHOOK: Output: default@fact_daily_n3 +PREHOOK: query: insert overwrite table fact_daily_n3 partition (ds = '1', hr = '3') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@fact_daily@ds=1/hr=3 -POSTHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '3') +PREHOOK: Output: default@fact_daily_n3@ds=1/hr=3 +POSTHOOK: query: insert overwrite table fact_daily_n3 partition (ds = '1', hr = '3') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@fact_daily@ds=1/hr=3 -POSTHOOK: Lineage: fact_daily PARTITION(ds=1,hr=3).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: fact_daily PARTITION(ds=1,hr=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted fact_daily PARTITION (ds = '1', hr='3') +POSTHOOK: Output: default@fact_daily_n3@ds=1/hr=3 +POSTHOOK: Lineage: fact_daily_n3 PARTITION(ds=1,hr=3).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: fact_daily_n3 PARTITION(ds=1,hr=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted fact_daily_n3 PARTITION (ds = '1', hr='3') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@fact_daily -POSTHOOK: query: describe formatted fact_daily PARTITION (ds = '1', hr='3') +PREHOOK: Input: default@fact_daily_n3 +POSTHOOK: query: describe formatted fact_daily_n3 PARTITION (ds = '1', hr='3') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@fact_daily +POSTHOOK: Input: default@fact_daily_n3 # col_name data_type comment key string value string @@ -159,7 +159,7 @@ hr string # Detailed Partition Information Partition Value: [1, 3] Database: default -Table: fact_daily +Table: fact_daily_n3 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -181,14 +181,14 @@ Stored As SubDirectories: Yes Skewed Columns: [key, value] Skewed Values: [[327, val_327]] #### A masked pattern was here #### -Skewed Value to Truncated Path: {[327, val_327]=/fact_daily/ds=1/hr=3/key=327/value=val_327} +Skewed Value to Truncated Path: {[327, val_327]=/fact_daily_n3/ds=1/hr=3/key=327/value=val_327} Storage Desc Params: serialization.format 1 PREHOOK: query: explain extended -select * from fact_daily where ds = '1' and hr='1' and key='145' +select * from fact_daily_n3 where ds = '1' and hr='1' and key='145' PREHOOK: type: QUERY POSTHOOK: query: explain extended 
-select * from fact_daily where ds = '1' and hr='1' and key='145' +select * from fact_daily_n3 where ds = '1' and hr='1' and key='145' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -212,13 +212,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n3 numFiles 1 numRows 500 partition_columns ds/hr partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -235,19 +235,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n3 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.fact_daily - name: default.fact_daily + name: default.fact_daily_n3 + name: default.fact_daily_n3 Processor Tree: TableScan - alias: fact_daily + alias: fact_daily_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -260,22 +260,22 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select * from fact_daily where ds = '1' and hr='1' and key='145' +PREHOOK: query: select * from fact_daily_n3 where ds = '1' and hr='1' and key='145' PREHOOK: type: QUERY -PREHOOK: Input: default@fact_daily -PREHOOK: Input: default@fact_daily@ds=1/hr=1 +PREHOOK: Input: default@fact_daily_n3 +PREHOOK: Input: default@fact_daily_n3@ds=1/hr=1 #### A masked pattern was here #### -POSTHOOK: query: select * from fact_daily where ds = '1' and hr='1' and key='145' +POSTHOOK: query: select * from fact_daily_n3 where ds = '1' and hr='1' and key='145' POSTHOOK: type: QUERY -POSTHOOK: Input: default@fact_daily -POSTHOOK: Input: default@fact_daily@ds=1/hr=1 +POSTHOOK: Input: default@fact_daily_n3 +POSTHOOK: Input: default@fact_daily_n3@ds=1/hr=1 #### A masked pattern was here #### 145 val_145 1 1 PREHOOK: query: explain extended -select count(*) from fact_daily where ds = '1' and hr='1' +select count(*) from fact_daily_n3 where ds = '1' and hr='1' PREHOOK: type: QUERY POSTHOOK: query: explain extended -select count(*) from fact_daily where ds = '1' and hr='1' +select count(*) from fact_daily_n3 where ds = '1' and hr='1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -287,20 +287,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from fact_daily where ds = '1' and hr='1' +PREHOOK: query: select count(*) from fact_daily_n3 where ds = '1' and hr='1' PREHOOK: type: QUERY -PREHOOK: Input: default@fact_daily +PREHOOK: Input: default@fact_daily_n3 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from fact_daily where ds = '1' and hr='1' +POSTHOOK: query: select count(*) from fact_daily_n3 where ds = '1' and hr='1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@fact_daily +POSTHOOK: Input: default@fact_daily_n3 #### A masked pattern was here #### 500 PREHOOK: 
query: explain extended -SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and value='val_484') +SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='2' and (key='484' and value='val_484') PREHOOK: type: QUERY POSTHOOK: query: explain extended -SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and value='val_484') +SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='2' and (key='484' and value='val_484') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -324,13 +324,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n3 numFiles 3 numRows 500 partition_columns ds/hr partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -347,19 +347,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n3 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.fact_daily - name: default.fact_daily + name: default.fact_daily_n3 + name: default.fact_daily_n3 Processor Tree: TableScan - alias: fact_daily + alias: fact_daily_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -372,22 +372,22 @@ STAGE PLANS: Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and value='val_484') +PREHOOK: query: SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='2' and (key='484' and value='val_484') PREHOOK: type: QUERY -PREHOOK: Input: default@fact_daily -PREHOOK: Input: default@fact_daily@ds=1/hr=2 +PREHOOK: Input: default@fact_daily_n3 +PREHOOK: Input: default@fact_daily_n3@ds=1/hr=2 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and value='val_484') +POSTHOOK: query: SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='2' and (key='484' and value='val_484') POSTHOOK: type: QUERY -POSTHOOK: Input: default@fact_daily -POSTHOOK: Input: default@fact_daily@ds=1/hr=2 +POSTHOOK: Input: default@fact_daily_n3 +POSTHOOK: Input: default@fact_daily_n3@ds=1/hr=2 #### A masked pattern was here #### 484 val_484 1 2 PREHOOK: query: explain extended -SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and value='val_327') +SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='3' and (key='327' and value='val_327') PREHOOK: type: QUERY POSTHOOK: query: explain extended -SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and value='val_327') +SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='3' and (key='327' and value='val_327') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -411,13 +411,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name 
default.fact_daily_n3 numFiles 2 numRows 500 partition_columns ds/hr partition_columns.types string:string rawDataSize 5312 - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -434,19 +434,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.fact_daily + name default.fact_daily_n3 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct fact_daily { string key, string value} + serialization.ddl struct fact_daily_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.fact_daily - name: default.fact_daily + name: default.fact_daily_n3 + name: default.fact_daily_n3 Processor Tree: TableScan - alias: fact_daily + alias: fact_daily_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -459,15 +459,15 @@ STAGE PLANS: Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and value='val_327') +PREHOOK: query: SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='3' and (key='327' and value='val_327') PREHOOK: type: QUERY -PREHOOK: Input: default@fact_daily -PREHOOK: Input: default@fact_daily@ds=1/hr=3 +PREHOOK: Input: default@fact_daily_n3 +PREHOOK: Input: default@fact_daily_n3@ds=1/hr=3 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and value='val_327') +POSTHOOK: query: SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='3' and (key='327' and value='val_327') POSTHOOK: type: QUERY -POSTHOOK: Input: default@fact_daily -POSTHOOK: Input: default@fact_daily@ds=1/hr=3 +POSTHOOK: Input: default@fact_daily_n3 +POSTHOOK: Input: default@fact_daily_n3@ds=1/hr=3 #### A masked pattern was here #### 327 val_327 1 3 327 val_327 1 3 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out index 1868691575..e2db72b5b3 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out @@ -1,89 +1,89 @@ -PREHOOK: query: CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE fact_daily_n4(x int) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@fact_daily -POSTHOOK: query: CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@fact_daily_n4 +POSTHOOK: query: CREATE TABLE fact_daily_n4(x int) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@fact_daily -PREHOOK: query: CREATE TABLE fact_tz(x int) PARTITIONED BY (ds STRING, hr STRING) +POSTHOOK: Output: default@fact_daily_n4 +PREHOOK: query: CREATE TABLE fact_tz_n1(x int) PARTITIONED BY (ds STRING, hr STRING) #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@fact_tz -POSTHOOK: query: 
CREATE TABLE fact_tz(x int) PARTITIONED BY (ds STRING, hr STRING) +PREHOOK: Output: default@fact_tz_n1 +POSTHOOK: query: CREATE TABLE fact_tz_n1(x int) PARTITIONED BY (ds STRING, hr STRING) #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@fact_tz -PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +POSTHOOK: Output: default@fact_tz_n1 +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz_n1 PARTITION (ds='1', hr='1') SELECT key FROM src WHERE key=484 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@fact_tz@ds=1/hr=1 -POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +PREHOOK: Output: default@fact_tz_n1@ds=1/hr=1 +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz_n1 PARTITION (ds='1', hr='1') SELECT key FROM src WHERE key=484 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@fact_tz@ds=1/hr=1 -POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +POSTHOOK: Output: default@fact_tz_n1@ds=1/hr=1 +POSTHOOK: Lineage: fact_tz_n1 PARTITION(ds=1,hr=1).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz_n1 PARTITION (ds='1', hr='2') SELECT key+11 FROM src WHERE key=484 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@fact_tz@ds=1/hr=2 -POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +PREHOOK: Output: default@fact_tz_n1@ds=1/hr=2 +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz_n1 PARTITION (ds='1', hr='2') SELECT key+11 FROM src WHERE key=484 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@fact_tz@ds=1/hr=2 -POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).x EXPRESSION [] +POSTHOOK: Output: default@fact_tz_n1@ds=1/hr=2 +POSTHOOK: Lineage: fact_tz_n1 PARTITION(ds=1,hr=2).x EXPRESSION [] #### A masked pattern was here #### -PREHOOK: query: alter table fact_daily skewed by (x) on (484) +PREHOOK: query: alter table fact_daily_n4 skewed by (x) on (484) PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@fact_daily -PREHOOK: Output: default@fact_daily -POSTHOOK: query: alter table fact_daily skewed by (x) on (484) +PREHOOK: Input: default@fact_daily_n4 +PREHOOK: Output: default@fact_daily_n4 +POSTHOOK: query: alter table fact_daily_n4 skewed by (x) on (484) POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@fact_daily -POSTHOOK: Output: default@fact_daily -PREHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE') +POSTHOOK: Input: default@fact_daily_n4 +POSTHOOK: Output: default@fact_daily_n4 +PREHOOK: query: ALTER TABLE fact_daily_n4 SET TBLPROPERTIES('EXTERNAL'='TRUE') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@fact_daily -PREHOOK: Output: default@fact_daily -POSTHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE') +PREHOOK: Input: default@fact_daily_n4 +PREHOOK: Output: default@fact_daily_n4 +POSTHOOK: query: ALTER TABLE fact_daily_n4 SET TBLPROPERTIES('EXTERNAL'='TRUE') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@fact_daily -POSTHOOK: Output: default@fact_daily -PREHOOK: query: ALTER TABLE fact_daily ADD PARTITION (ds='1') +POSTHOOK: Input: default@fact_daily_n4 +POSTHOOK: Output: default@fact_daily_n4 
+PREHOOK: query: ALTER TABLE fact_daily_n4 ADD PARTITION (ds='1')
 #### A masked pattern was here ####
 PREHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: ALTER TABLE fact_daily ADD PARTITION (ds='1')
+PREHOOK: Output: default@fact_daily_n4
+POSTHOOK: query: ALTER TABLE fact_daily_n4 ADD PARTITION (ds='1')
 #### A masked pattern was here ####
 POSTHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
-POSTHOOK: Output: default@fact_daily
-POSTHOOK: Output: default@fact_daily@ds=1
+POSTHOOK: Output: default@fact_daily_n4
+POSTHOOK: Output: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
 PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION
-PREHOOK: Input: default@fact_daily
-PREHOOK: Output: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n4
+PREHOOK: Output: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
 POSTHOOK: type: ALTERTBLPART_SKEWED_LOCATION
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
-POSTHOOK: Output: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n4
+POSTHOOK: Input: default@fact_daily_n4@ds=1
+POSTHOOK: Output: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
-PREHOOK: query: describe formatted fact_daily PARTITION (ds = '1')
+PREHOOK: query: describe formatted fact_daily_n4 PARTITION (ds = '1')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@fact_daily
-POSTHOOK: query: describe formatted fact_daily PARTITION (ds = '1')
+PREHOOK: Input: default@fact_daily_n4
+POSTHOOK: query: describe formatted fact_daily_n4 PARTITION (ds = '1')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@fact_daily
+POSTHOOK: Input: default@fact_daily_n4
 # col_name	data_type	comment
 x	int	
@@ -94,7 +94,7 @@ ds	string	
 # Detailed Partition Information
 Partition Value:	[1]
 Database:	default
-Table:	fact_daily
+Table:	fact_daily_n4
 #### A masked pattern was here ####
 Partition Parameters:
 #### A masked pattern was here ####
@@ -115,21 +115,21 @@ Skewed Values:	[[484]]
 #### A masked pattern was here ####
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: SELECT * FROM fact_daily WHERE ds='1'
+PREHOOK: query: SELECT * FROM fact_daily_n4 WHERE ds='1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n4
+PREHOOK: Input: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM fact_daily WHERE ds='1'
+POSTHOOK: query: SELECT * FROM fact_daily_n4 WHERE ds='1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n4
+POSTHOOK: Input: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
 484	1
 495	1
-PREHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=484
+PREHOOK: query: explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=484
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=484
+POSTHOOK: query: explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=484
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -151,11 +151,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n4
               numFiles 2
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x}
+              serialization.ddl struct fact_daily_n4 { i32 x}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 8
@@ -173,19 +173,19 @@ STAGE PLANS:
               columns.comments 
               columns.types int
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n4
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x}
+              serialization.ddl struct fact_daily_n4 { i32 x}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.fact_daily
-            name: default.fact_daily
+            name: default.fact_daily_n4
+            name: default.fact_daily_n4
       Processor Tree:
         TableScan
-          alias: fact_daily
+          alias: fact_daily_n4
           Statistics: Num rows: 1 Data size: 80 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Filter Operator
@@ -198,20 +198,20 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 80 Basic stats: PARTIAL Column stats: NONE
             ListSink
-PREHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=484
+PREHOOK: query: SELECT x FROM fact_daily_n4 WHERE ds='1' and x=484
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n4
+PREHOOK: Input: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=484
+POSTHOOK: query: SELECT x FROM fact_daily_n4 WHERE ds='1' and x=484
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n4
+POSTHOOK: Input: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
 484
-PREHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=495
+PREHOOK: query: explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=495
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=495
+POSTHOOK: query: explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=495
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -233,11 +233,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n4
               numFiles 2
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x}
+              serialization.ddl struct fact_daily_n4 { i32 x}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 8
@@ -255,19 +255,19 @@ STAGE PLANS:
               columns.comments 
               columns.types int
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n4
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x}
+              serialization.ddl struct fact_daily_n4 { i32 x}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.fact_daily
-            name: default.fact_daily
+            name: default.fact_daily_n4
+            name: default.fact_daily_n4
      Processor Tree:
         TableScan
-          alias: fact_daily
+          alias: fact_daily_n4
           Statistics: Num rows: 1 Data size: 80 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Filter Operator
@@ -280,20 +280,20 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 80 Basic stats: PARTIAL Column stats: NONE
             ListSink
-PREHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=495
+PREHOOK: query: SELECT x FROM fact_daily_n4 WHERE ds='1' and x=495
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n4
+PREHOOK: Input: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=495
+POSTHOOK: query: SELECT x FROM fact_daily_n4 WHERE ds='1' and x=495
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n4
+POSTHOOK: Input: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
 495
-PREHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=1
+PREHOOK: query: explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=1
+POSTHOOK: query: explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -315,11 +315,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n4
               numFiles 2
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x}
+              serialization.ddl struct fact_daily_n4 { i32 x}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 8
@@ -337,19 +337,19 @@ STAGE PLANS:
               columns.comments 
               columns.types int
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n4
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x}
+              serialization.ddl struct fact_daily_n4 { i32 x}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.fact_daily
-            name: default.fact_daily
+            name: default.fact_daily_n4
+            name: default.fact_daily_n4
      Processor Tree:
         TableScan
-          alias: fact_daily
+          alias: fact_daily_n4
           Statistics: Num rows: 1 Data size: 80 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Filter Operator
@@ -362,13 +362,13 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 80 Basic stats: PARTIAL Column stats: NONE
             ListSink
-PREHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=1
+PREHOOK: query: SELECT x FROM fact_daily_n4 WHERE ds='1' and x=1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n4
+PREHOOK: Input: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=1
+POSTHOOK: query: SELECT x FROM fact_daily_n4 WHERE ds='1' and x=1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n4
+POSTHOOK: Input: default@fact_daily_n4@ds=1
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
index d190c2d021..2537d94aa5 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
@@ -1,91 +1,91 @@
-PREHOOK: query: CREATE TABLE fact_daily(x int, y STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE fact_daily_n5(x int, y STRING) PARTITIONED BY (ds STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: CREATE TABLE fact_daily(x int, y STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@fact_daily_n5
+POSTHOOK: query: CREATE TABLE fact_daily_n5(x int, y STRING) PARTITIONED BY (ds STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@fact_daily
-PREHOOK: query: CREATE TABLE fact_tz(x int, y STRING) PARTITIONED BY (ds STRING, hr STRING)
+POSTHOOK: Output: default@fact_daily_n5
+PREHOOK: query: CREATE TABLE fact_tz_n2(x int, y STRING) PARTITIONED BY (ds STRING, hr STRING)
 #### A masked pattern was here ####
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@fact_tz
-POSTHOOK: query: CREATE TABLE fact_tz(x int, y STRING) PARTITIONED BY (ds STRING, hr STRING)
+PREHOOK: Output: default@fact_tz_n2
+POSTHOOK: query: CREATE TABLE fact_tz_n2(x int, y STRING) PARTITIONED BY (ds STRING, hr STRING)
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@fact_tz
-PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1')
+POSTHOOK: Output: default@fact_tz_n2
+PREHOOK: query: INSERT OVERWRITE TABLE fact_tz_n2 PARTITION (ds='1', hr='1')
 SELECT key, value FROM src WHERE key=484
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@fact_tz@ds=1/hr=1
-POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1')
+PREHOOK: Output: default@fact_tz_n2@ds=1/hr=1
+POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz_n2 PARTITION (ds='1', hr='1')
 SELECT key, value FROM src WHERE key=484
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@fact_tz@ds=1/hr=1
-POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2')
+POSTHOOK: Output: default@fact_tz_n2@ds=1/hr=1
+POSTHOOK: Lineage: fact_tz_n2 PARTITION(ds=1,hr=1).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fact_tz_n2 PARTITION(ds=1,hr=1).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE fact_tz_n2 PARTITION (ds='1', hr='2')
 SELECT key+11, value FROM src WHERE key=484
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@fact_tz@ds=1/hr=2
-POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2')
+PREHOOK: Output: default@fact_tz_n2@ds=1/hr=2
+POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz_n2 PARTITION (ds='1', hr='2')
 SELECT key+11, value FROM src WHERE key=484
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@fact_tz@ds=1/hr=2
-POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).x EXPRESSION []
-POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@fact_tz_n2@ds=1/hr=2
+POSTHOOK: Lineage: fact_tz_n2 PARTITION(ds=1,hr=2).x EXPRESSION []
+POSTHOOK: Lineage: fact_tz_n2 PARTITION(ds=1,hr=2).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 #### A masked pattern was here ####
-PREHOOK: query: alter table fact_daily skewed by (x) on (484)
+PREHOOK: query: alter table fact_daily_n5 skewed by (x) on (484)
 PREHOOK: type: ALTERTABLE_SKEWED
-PREHOOK: Input: default@fact_daily
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: alter table fact_daily skewed by (x) on (484)
+PREHOOK: Input: default@fact_daily_n5
+PREHOOK: Output: default@fact_daily_n5
+POSTHOOK: query: alter table fact_daily_n5 skewed by (x) on (484)
 POSTHOOK: type: ALTERTABLE_SKEWED
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Output: default@fact_daily
-PREHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE')
+POSTHOOK: Input: default@fact_daily_n5
+POSTHOOK: Output: default@fact_daily_n5
+PREHOOK: query: ALTER TABLE fact_daily_n5 SET TBLPROPERTIES('EXTERNAL'='TRUE')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@fact_daily
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE')
+PREHOOK: Input: default@fact_daily_n5
+PREHOOK: Output: default@fact_daily_n5
+POSTHOOK: query: ALTER TABLE fact_daily_n5 SET TBLPROPERTIES('EXTERNAL'='TRUE')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Output: default@fact_daily
-PREHOOK: query: ALTER TABLE fact_daily ADD PARTITION (ds='1')
+POSTHOOK: Input: default@fact_daily_n5
+POSTHOOK: Output: default@fact_daily_n5
+PREHOOK: query: ALTER TABLE fact_daily_n5 ADD PARTITION (ds='1')
 #### A masked pattern was here ####
 PREHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: ALTER TABLE fact_daily ADD PARTITION (ds='1')
+PREHOOK: Output: default@fact_daily_n5
+POSTHOOK: query: ALTER TABLE fact_daily_n5 ADD PARTITION (ds='1')
 #### A masked pattern was here ####
 POSTHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
-POSTHOOK: Output: default@fact_daily
-POSTHOOK: Output: default@fact_daily@ds=1
+POSTHOOK: Output: default@fact_daily_n5
+POSTHOOK: Output: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
 PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION
-PREHOOK: Input: default@fact_daily
-PREHOOK: Output: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n5
+PREHOOK: Output: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
 POSTHOOK: type: ALTERTBLPART_SKEWED_LOCATION
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
-POSTHOOK: Output: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n5
+POSTHOOK: Input: default@fact_daily_n5@ds=1
+POSTHOOK: Output: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
-PREHOOK: query: describe formatted fact_daily PARTITION (ds = '1')
+PREHOOK: query: describe formatted fact_daily_n5 PARTITION (ds = '1')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@fact_daily
-POSTHOOK: query: describe formatted fact_daily PARTITION (ds = '1')
+PREHOOK: Input: default@fact_daily_n5
+POSTHOOK: query: describe formatted fact_daily_n5 PARTITION (ds = '1')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@fact_daily
+POSTHOOK: Input: default@fact_daily_n5
 # col_name	data_type	comment
 x	int	
 y	string	
@@ -97,7 +97,7 @@ ds	string	
 # Detailed Partition Information
 Partition Value:	[1]
 Database:	default
-Table:	fact_daily
+Table:	fact_daily_n5
 #### A masked pattern was here ####
 Partition Parameters:
 #### A masked pattern was here ####
@@ -118,21 +118,21 @@ Skewed Values:	[[484]]
 #### A masked pattern was here ####
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: SELECT * FROM fact_daily WHERE ds='1'
+PREHOOK: query: SELECT * FROM fact_daily_n5 WHERE ds='1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n5
+PREHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM fact_daily WHERE ds='1'
+POSTHOOK: query: SELECT * FROM fact_daily_n5 WHERE ds='1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n5
+POSTHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
 484	val_484	1
 495	val_484	1
-PREHOOK: query: explain extended select x from (select x from fact_daily where ds = '1') subq where x = 484
+PREHOOK: query: explain extended select x from (select x from fact_daily_n5 where ds = '1') subq where x = 484
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select x from (select x from fact_daily where ds = '1') subq where x = 484
+POSTHOOK: query: explain extended select x from (select x from fact_daily_n5 where ds = '1') subq where x = 484
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -154,11 +154,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n5
               numFiles 2
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y}
+              serialization.ddl struct fact_daily_n5 { i32 x, string y}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 24
@@ -176,19 +176,19 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n5
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y}
+              serialization.ddl struct fact_daily_n5 { i32 x, string y}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.fact_daily
-            name: default.fact_daily
+            name: default.fact_daily_n5
+            name: default.fact_daily_n5
      Processor Tree:
         TableScan
-          alias: fact_daily
+          alias: fact_daily_n5
           Statistics: Num rows: 1 Data size: 240 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Filter Operator
@@ -201,20 +201,20 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 240 Basic stats: PARTIAL Column stats: NONE
             ListSink
-PREHOOK: query: select x from (select * from fact_daily where ds = '1') subq where x = 484
+PREHOOK: query: select x from (select * from fact_daily_n5 where ds = '1') subq where x = 484
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n5
+PREHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select x from (select * from fact_daily where ds = '1') subq where x = 484
+POSTHOOK: query: select x from (select * from fact_daily_n5 where ds = '1') subq where x = 484
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n5
+POSTHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
 484
-PREHOOK: query: explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484
+PREHOOK: query: explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily_n5 where ds ='1') subq where x1 = 484
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484
+POSTHOOK: query: explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily_n5 where ds ='1') subq where x1 = 484
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -236,11 +236,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n5
               numFiles 2
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y}
+              serialization.ddl struct fact_daily_n5 { i32 x, string y}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 24
@@ -258,19 +258,19 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n5
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y}
+              serialization.ddl struct fact_daily_n5 { i32 x, string y}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.fact_daily
-            name: default.fact_daily
+            name: default.fact_daily_n5
+            name: default.fact_daily_n5
      Processor Tree:
         TableScan
-          alias: fact_daily
+          alias: fact_daily_n5
           Statistics: Num rows: 1 Data size: 240 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Filter Operator
@@ -283,20 +283,20 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 240 Basic stats: PARTIAL Column stats: NONE
             ListSink
-PREHOOK: query: select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484
+PREHOOK: query: select x1, y1 from(select x as x1, y as y1 from fact_daily_n5 where ds ='1') subq where x1 = 484
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n5
+PREHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484
+POSTHOOK: query: select x1, y1 from(select x as x1, y as y1 from fact_daily_n5 where ds ='1') subq where x1 = 484
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n5
+POSTHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
 484	val_484
-PREHOOK: query: explain extended select y, count(1) from fact_daily where ds ='1' and x = 484 group by y
+PREHOOK: query: explain extended select y, count(1) from fact_daily_n5 where ds ='1' and x = 484 group by y
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select y, count(1) from fact_daily where ds ='1' and x = 484 group by y
+POSTHOOK: query: explain extended select y, count(1) from fact_daily_n5 where ds ='1' and x = 484 group by y
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -307,7 +307,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: fact_daily
+            alias: fact_daily_n5
            Statistics: Num rows: 1 Data size: 240 Basic stats: PARTIAL Column stats: NONE
            GatherStats: false
            Filter Operator
@@ -351,11 +351,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n5
               numFiles 2
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y}
+              serialization.ddl struct fact_daily_n5 { i32 x, string y}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 24
@@ -373,18 +373,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n5
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y}
+              serialization.ddl struct fact_daily_n5 { i32 x, string y}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.fact_daily
-            name: default.fact_daily
+            name: default.fact_daily_n5
+            name: default.fact_daily_n5
      Truncated Path -> Alias:
-        /fact_tz/ds=1/x=484 [fact_daily]
+        /fact_tz/ds=1/x=484 [fact_daily_n5]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -422,20 +422,20 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: select y, count(1) from fact_daily where ds ='1' and x = 484 group by y
+PREHOOK: query: select y, count(1) from fact_daily_n5 where ds ='1' and x = 484 group by y
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n5
+PREHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select y, count(1) from fact_daily where ds ='1' and x = 484 group by y
+POSTHOOK: query: select y, count(1) from fact_daily_n5 where ds ='1' and x = 484 group by y
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n5
+POSTHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
 val_484	1
-PREHOOK: query: explain extended select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484
+PREHOOK: query: explain extended select x, c from (select x, count(1) as c from fact_daily_n5 where ds = '1' group by x) subq where x = 484
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484
+POSTHOOK: query: explain extended select x, c from (select x, count(1) as c from fact_daily_n5 where ds = '1' group by x) subq where x = 484
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -446,7 +446,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: fact_daily
+            alias: fact_daily_n5
            Statistics: Num rows: 1 Data size: 240 Basic stats: PARTIAL Column stats: NONE
            GatherStats: false
            Filter Operator
@@ -488,11 +488,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n5
               numFiles 2
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y}
+              serialization.ddl struct fact_daily_n5 { i32 x, string y}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 24
@@ -510,18 +510,18 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n5
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y}
+              serialization.ddl struct fact_daily_n5 { i32 x, string y}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.fact_daily
-            name: default.fact_daily
+            name: default.fact_daily_n5
+            name: default.fact_daily_n5
      Truncated Path -> Alias:
-        /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
+        /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily_n5]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
@@ -563,14 +563,14 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484
+PREHOOK: query: select x, c from (select x, count(1) as c from fact_daily_n5 where ds = '1' group by x) subq where x = 484
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n5
+PREHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484
+POSTHOOK: query: select x, c from (select x, count(1) as c from fact_daily_n5 where ds = '1' group by x) subq where x = 484
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n5
+POSTHOOK: Input: default@fact_daily_n5@ds=1
 #### A masked pattern was here ####
 484	1
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
index 7b0e8d4a85..59aeda884c 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE fact_daily(x int, y STRING, z STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE fact_daily_n0(x int, y STRING, z STRING) PARTITIONED BY (ds STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: CREATE TABLE fact_daily(x int, y STRING, z STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@fact_daily_n0
+POSTHOOK: query: CREATE TABLE fact_daily_n0(x int, y STRING, z STRING) PARTITIONED BY (ds STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@fact_daily
+POSTHOOK: Output: default@fact_daily_n0
 PREHOOK: query: CREATE TABLE fact_tz(x int, y STRING, z STRING) PARTITIONED BY (ds STRING, hr STRING)
 #### A masked pattern was here ####
 PREHOOK: type: CREATETABLE
@@ -58,49 +58,49 @@ POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=3).x EXPRESSION [(src)src.FieldSche
 POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=3).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=3).z SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 #### A masked pattern was here ####
-PREHOOK: query: alter table fact_daily skewed by (x) on (484,238)
+PREHOOK: query: alter table fact_daily_n0 skewed by (x) on (484,238)
 PREHOOK: type: ALTERTABLE_SKEWED
-PREHOOK: Input: default@fact_daily
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: alter table fact_daily skewed by (x) on (484,238)
+PREHOOK: Input: default@fact_daily_n0
+PREHOOK: Output: default@fact_daily_n0
+POSTHOOK: query: alter table fact_daily_n0 skewed by (x) on (484,238)
 POSTHOOK: type: ALTERTABLE_SKEWED
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Output: default@fact_daily
-PREHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE')
+POSTHOOK: Input: default@fact_daily_n0
+POSTHOOK: Output: default@fact_daily_n0
+PREHOOK: query: ALTER TABLE fact_daily_n0 SET TBLPROPERTIES('EXTERNAL'='TRUE')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@fact_daily
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE')
+PREHOOK: Input: default@fact_daily_n0
+PREHOOK: Output: default@fact_daily_n0
+POSTHOOK: query: ALTER TABLE fact_daily_n0 SET TBLPROPERTIES('EXTERNAL'='TRUE')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Output: default@fact_daily
-PREHOOK: query: ALTER TABLE fact_daily ADD PARTITION (ds='1')
+POSTHOOK: Input: default@fact_daily_n0
+POSTHOOK: Output: default@fact_daily_n0
+PREHOOK: query: ALTER TABLE fact_daily_n0 ADD PARTITION (ds='1')
 #### A masked pattern was here ####
 PREHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: ALTER TABLE fact_daily ADD PARTITION (ds='1')
+PREHOOK: Output: default@fact_daily_n0
+POSTHOOK: query: ALTER TABLE fact_daily_n0 ADD PARTITION (ds='1')
 #### A masked pattern was here ####
 POSTHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
-POSTHOOK: Output: default@fact_daily
-POSTHOOK: Output: default@fact_daily@ds=1
+POSTHOOK: Output: default@fact_daily_n0
+POSTHOOK: Output: default@fact_daily_n0@ds=1
 #### A masked pattern was here ####
 PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION
-PREHOOK: Input: default@fact_daily
-PREHOOK: Output: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n0
+PREHOOK: Output: default@fact_daily_n0@ds=1
 #### A masked pattern was here ####
 POSTHOOK: type: ALTERTBLPART_SKEWED_LOCATION
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
-POSTHOOK: Output: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n0
+POSTHOOK: Input: default@fact_daily_n0@ds=1
+POSTHOOK: Output: default@fact_daily_n0@ds=1
 #### A masked pattern was here ####
-PREHOOK: query: describe formatted fact_daily PARTITION (ds = '1')
+PREHOOK: query: describe formatted fact_daily_n0 PARTITION (ds = '1')
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@fact_daily
-POSTHOOK: query: describe formatted fact_daily PARTITION (ds = '1')
+PREHOOK: Input: default@fact_daily_n0
+POSTHOOK: query: describe formatted fact_daily_n0 PARTITION (ds = '1')
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@fact_daily
+POSTHOOK: Input: default@fact_daily_n0
 # col_name	data_type	comment
 x	int	
 y	string	
@@ -113,7 +113,7 @@ ds	string	
 # Detailed Partition Information
 Partition Value:	[1]
 Database:	default
-Table:	fact_daily
+Table:	fact_daily_n0
 #### A masked pattern was here ####
 Partition Parameters:
 #### A masked pattern was here ####
@@ -134,15 +134,15 @@ Skewed Values:	[[238], [484]]
 #### A masked pattern was here ####
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: SELECT * FROM fact_daily WHERE ds='1'
+PREHOOK: query: SELECT * FROM fact_daily_n0 WHERE ds='1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n0
+PREHOOK: Input: default@fact_daily_n0@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM fact_daily WHERE ds='1'
+POSTHOOK: query: SELECT * FROM fact_daily_n0 WHERE ds='1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n0
+POSTHOOK: Input: default@fact_daily_n0@ds=1
 #### A masked pattern was here ####
 238	val_238	val_238	1
 238	val_238	val_238	1
@@ -150,9 +150,9 @@ POSTHOOK: Input: default@fact_daily@ds=1
 278	val_278	val_278	1
 484	val_484	val_484	1
 86	val_86	val_86	1
-PREHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86)
+PREHOOK: query: explain extended SELECT x FROM fact_daily_n0 WHERE ds='1' and not (x = 86)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86)
+POSTHOOK: query: explain extended SELECT x FROM fact_daily_n0 WHERE ds='1' and not (x = 86)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -174,11 +174,11 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n0
               numFiles 3
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y, string z}
+              serialization.ddl struct fact_daily_n0 { i32 x, string y, string z}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 117
@@ -196,19 +196,19 @@ STAGE PLANS:
               columns.comments 
               columns.types int:string:string
 #### A masked pattern was here ####
-              name default.fact_daily
+              name default.fact_daily_n0
               partition_columns ds
               partition_columns.types string
-              serialization.ddl struct fact_daily { i32 x, string y, string z}
+              serialization.ddl struct fact_daily_n0 { i32 x, string y, string z}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.fact_daily
-            name: default.fact_daily
+            name: default.fact_daily_n0
+            name: default.fact_daily_n0
      Processor Tree:
        TableScan
-          alias: fact_daily
+          alias: fact_daily_n0
          Statistics: Num rows: 2 Data size: 1170 Basic stats: COMPLETE Column stats: NONE
          GatherStats: false
          Filter Operator
@@ -221,15 +221,15 @@ STAGE PLANS:
            Statistics: Num rows: 2 Data size: 1170 Basic stats: COMPLETE Column stats: NONE
            ListSink
-PREHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and not (x = 86)
+PREHOOK: query: SELECT x FROM fact_daily_n0 WHERE ds='1' and not (x = 86)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n0
+PREHOOK: Input: default@fact_daily_n0@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and not (x = 86)
+POSTHOOK: query: SELECT x FROM fact_daily_n0 WHERE ds='1' and not (x = 86)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n0
+POSTHOOK: Input: default@fact_daily_n0@ds=1
 #### A masked pattern was here ####
 238
 238
diff --git a/ql/src/test/results/clientpositive/llap_reader.q.out b/ql/src/test/results/clientpositive/llap_reader.q.out
index 7561587e6c..ec07da97bf 100644
--- a/ql/src/test/results/clientpositive/llap_reader.q.out
+++ b/ql/src/test/results/clientpositive/llap_reader.q.out
@@ -1,34 +1,34 @@
-PREHOOK: query: CREATE TABLE test(f1 int, f2 int, f3 int) stored as orc
+PREHOOK: query: CREATE TABLE test_n7(f1 int, f2 int, f3 int) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test
-PREHOOK: query: INSERT INTO TABLE test VALUES (1,1,1), (2,2,2), (3,3,3), (4,4,4)
+PREHOOK: Output: default@test_n7
+PREHOOK: query: INSERT INTO TABLE test_n7 VALUES (1,1,1), (2,2,2), (3,3,3), (4,4,4)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test
-PREHOOK: query: ALTER TABLE test CHANGE f1 f1 bigint
+PREHOOK: Output: default@test_n7
+PREHOOK: query: ALTER TABLE test_n7 CHANGE f1 f1 bigint
 PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@test
-PREHOOK: Output: default@test
-PREHOOK: query: ALTER TABLE test CHANGE f2 f2 bigint
+PREHOOK: Input: default@test_n7
+PREHOOK: Output: default@test_n7
+PREHOOK: query: ALTER TABLE test_n7 CHANGE f2 f2 bigint
 PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@test
-PREHOOK: Output: default@test
-PREHOOK: query: ALTER TABLE test CHANGE f3 f3 bigint
+PREHOOK: Input: default@test_n7
+PREHOOK: Output: default@test_n7
+PREHOOK: query: ALTER TABLE test_n7 CHANGE f3 f3 bigint
 PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@test
-PREHOOK: Output: default@test
-PREHOOK: query: SELECT count(f1) FROM test GROUP BY f1
+PREHOOK: Input: default@test_n7
+PREHOOK: Output: default@test_n7
+PREHOOK: query: SELECT count(f1) FROM test_n7 GROUP BY f1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n7
 #### A masked pattern was here ####
 1
 1
 1
 1
-PREHOOK: query: SELECT count(f1) FROM test GROUP BY f1
+PREHOOK: query: SELECT count(f1) FROM test_n7 GROUP BY f1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n7
 #### A masked pattern was here ####
 1
 1
@@ -38,9 +38,9 @@ PREHOOK: query: CREATE TABLE test_bigint(f1 bigint, f2 bigint, f3 bigint) stored
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@test_bigint
-PREHOOK: query: INSERT OVERWRITE TABLE test_bigint select * from test
+PREHOOK: query: INSERT OVERWRITE TABLE test_bigint select * from test_n7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n7
 PREHOOK: Output: default@test_bigint
 PREHOOK: query: ALTER TABLE test_bigint CHANGE f1 f1 double
 PREHOOK: type: ALTERTABLE_RENAMECOL
@@ -70,17 +70,17 @@ PREHOOK: Input: default@test_bigint
 1
 1
 1
-PREHOOK: query: CREATE TABLE test_acid (f1 int, f2 int, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: query: CREATE TABLE test_acid_n0 (f1 int, f2 int, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_acid
-PREHOOK: query: INSERT INTO TABLE test_acid VALUES (1,1,'b1'), (2,2,'b2'), (3,3,'b3'), (4,4,'b4')
+PREHOOK: Output: default@test_acid_n0
+PREHOOK: query: INSERT INTO TABLE test_acid_n0 VALUES (1,1,'b1'), (2,2,'b2'), (3,3,'b3'), (4,4,'b4')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_acid
-PREHOOK: query: SELECT count(f1) FROM test_acid GROUP BY f1
+PREHOOK: Output: default@test_acid_n0
+PREHOOK: query: SELECT count(f1) FROM test_acid_n0 GROUP BY f1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_acid
+PREHOOK: Input: default@test_acid_n0
 #### A masked pattern was here ####
 1
 1
diff --git a/ql/src/test/results/clientpositive/llap_uncompressed.q.out b/ql/src/test/results/clientpositive/llap_uncompressed.q.out
index 99bc85b220..a11a30c612 100644
--- a/ql/src/test/results/clientpositive/llap_uncompressed.q.out
+++ b/ql/src/test/results/clientpositive/llap_uncompressed.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: DROP TABLE orc_llap
+PREHOOK: query: DROP TABLE orc_llap_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE orc_llap
+POSTHOOK: query: DROP TABLE orc_llap_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE orc_llap(
+PREHOOK: query: CREATE TABLE orc_llap_n0(
     ctinyint TINYINT,
    csmallint SMALLINT,
    cint INT,
@@ -18,8 +18,8 @@ PREHOOK: query: CREATE TABLE orc_llap(
 STORED AS ORC tblproperties ("orc.compress"="NONE")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_llap
-POSTHOOK: query: CREATE TABLE orc_llap(
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: CREATE TABLE orc_llap_n0(
    ctinyint TINYINT,
    csmallint SMALLINT,
    cint INT,
@@ -35,40 +35,40 @@ POSTHOOK: query: CREATE TABLE orc_llap(
 STORED AS ORC tblproperties ("orc.compress"="NONE")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_llap
-PREHOOK: query: insert into table orc_llap
+POSTHOOK: Output: default@orc_llap_n0
+PREHOOK: query: insert into table orc_llap_n0
 select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2
 from alltypesorc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
-PREHOOK: Output: default@orc_llap
-POSTHOOK: query: insert into table orc_llap
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: insert into table orc_llap_n0
 select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2
 from alltypesorc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
-POSTHOOK: Output: default@orc_llap
-POSTHOOK: Lineage: orc_llap.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_llap.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: orc_llap.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: orc_llap.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_llap.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: orc_llap.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: orc_llap.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: orc_llap.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_llap.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_llap.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_llap.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_llap.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Output: default@orc_llap_n0
+POSTHOOK: Lineage: orc_llap_n0.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: drop table llap_temp_table
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table llap_temp_table
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: explain
-select * from orc_llap where cint > 10 and cbigint is not null
+select * from orc_llap_n0 where cint > 10 and cbigint is not null
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select * from orc_llap where cint > 10 and cbigint is not null
+select * from orc_llap_n0 where cint > 10 and cbigint is not null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -79,7 +79,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: orc_llap
+            alias: orc_llap_n0
            filterExpr: ((cint > 10) and cbigint is not null) (type: boolean)
            Statistics: Num rows: 12288 Data size: 2907994 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
@@ -106,29 +106,29 @@ STAGE PLANS:
            ListSink
 PREHOOK: query: create table llap_temp_table as
-select * from orc_llap where cint > 10 and cbigint is not null
+select * from orc_llap_n0 where cint > 10 and cbigint is not null
 PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@orc_llap
+PREHOOK: Input: default@orc_llap_n0
 PREHOOK: Output: database:default
 PREHOOK: Output: default@llap_temp_table
 POSTHOOK: query: create table llap_temp_table as
-select * from orc_llap where cint > 10 and cbigint is not null
+select * from orc_llap_n0 where cint > 10 and cbigint is not null
 POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@orc_llap
+POSTHOOK: Input: default@orc_llap_n0
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
-POSTHOOK: Lineage: llap_temp_table.cbigint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.cboolean1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.cboolean2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.cdouble SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.cfloat SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.csmallint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.ctimestamp1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.ctimestamp2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: llap_temp_table.ctinyint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cbigint SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean1 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean2 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cdouble SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cfloat SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.csmallint SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp1 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp2 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctinyint SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -139,10 +139,10 @@ POSTHOOK: Input: default@llap_temp_table
 #### A masked pattern was here ####
 -42787391908
 PREHOOK: query: explain
-select * from orc_llap where cint > 10 and cint < 5000000
+select * from orc_llap_n0 where cint > 10 and cint < 5000000
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select * from orc_llap where cint > 10 and cint < 5000000
+select * from orc_llap_n0 where cint > 10 and cint < 5000000
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -153,7 +153,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: orc_llap
+            alias: orc_llap_n0
            filterExpr: ((cint > 10) and (cint < 5000000)) (type: boolean)
            Statistics: Num rows: 12288 Data size: 2907994 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
@@ -179,13 +179,13 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: select * from orc_llap where cint > 10 and cint < 5000000
+PREHOOK: query: select * from orc_llap_n0 where cint > 10 and cint < 5000000
 PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_llap
+PREHOOK: Input: default@orc_llap_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from orc_llap where cint > 10 and cint < 5000000
+POSTHOOK: query: select * from orc_llap_n0 where cint > 10 and cint < 5000000
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_llap
+POSTHOOK: Input: default@orc_llap_n0
 #### A masked pattern was here ####
 -51	NULL	6981	707684071	-51.0	NULL	YdG61y00526u5	G71l66F25	1969-12-31 16:00:08.451	NULL	false	true
 -51	NULL	762	1587111633	-51.0	NULL	q5y2Vy1	UbUx5	1969-12-31 16:00:08.451	NULL	true	false
@@ -222,14 +222,14 @@ NULL	359	762	-1645852809	NULL	9763215.5639	40ks5556SV	xH7445Rals48VOulSyR5F	NULL
 NULL	-75	6981	-1645852809	NULL	-863.257	o5mb0QP5Y48Qd4vdB0	xH7445Rals48VOulSyR5F	NULL	1969-12-31 15:59:44.062	true	false
 NULL	-75	6981	-1645852809	NULL	-863.257	1FNNhmiFLGw425NA13g	xH7445Rals48VOulSyR5F	NULL	1969-12-31 15:59:58.463	false	false
 NULL	-13036	1288927	-1645852809	NULL	-13036.0	yinBY725P7V2	xH7445Rals48VOulSyR5F	NULL	1969-12-31 16:00:00.763	true	false
-PREHOOK: query: DROP TABLE orc_llap
+PREHOOK: query: DROP TABLE orc_llap_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@orc_llap
-PREHOOK: Output: default@orc_llap
-POSTHOOK: query: DROP TABLE orc_llap
+PREHOOK: Input: default@orc_llap_n0
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: DROP TABLE orc_llap_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@orc_llap
-POSTHOOK: Output: default@orc_llap
+POSTHOOK: Input: default@orc_llap_n0
+POSTHOOK: Output: default@orc_llap_n0
 PREHOOK: query: drop table llap_temp_table
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@llap_temp_table
diff --git a/ql/src/test/results/clientpositive/load_binary_data.q.out b/ql/src/test/results/clientpositive/load_binary_data.q.out
index c6d4e61a9d..1211c44b41 100644
--- a/ql/src/test/results/clientpositive/load_binary_data.q.out
+++ b/ql/src/test/results/clientpositive/load_binary_data.q.out
@@ -1,50 +1,50 @@
-PREHOOK: query: CREATE TABLE mytable(key binary, value int)
+PREHOOK: query: CREATE TABLE mytable_n2(key binary, value int)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '9'
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@mytable
-POSTHOOK: query: CREATE TABLE mytable(key binary, value int)
+PREHOOK: Output: default@mytable_n2
+POSTHOOK: query: CREATE TABLE mytable_n2(key binary, value int)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '9'
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@mytable
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable
+POSTHOOK: Output: default@mytable_n2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable_n2
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@mytable
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable
+PREHOOK: Output: default@mytable_n2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable_n2
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@mytable
-PREHOOK: query: create table dest1 (key binary, value int)
+POSTHOOK: Output: default@mytable_n2
+PREHOOK: query: create table dest1_n130 (key binary, value int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: create table dest1 (key binary, value int)
+PREHOOK: Output: default@dest1_n130
+POSTHOOK: query: create table dest1_n130 (key binary, value int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: insert overwrite table dest1 select transform(*) using 'cat' as key binary, value int from mytable
+POSTHOOK: Output: default@dest1_n130
+PREHOOK: query: insert overwrite table dest1_n130 select transform(*) using 'cat' as key binary, value int from mytable_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@mytable
-PREHOOK: Output: default@dest1
-POSTHOOK: query: insert overwrite table dest1 select transform(*) using 'cat' as key binary, value int from mytable
+PREHOOK: Input: default@mytable_n2
+PREHOOK: Output: default@dest1_n130
+POSTHOOK: query: insert overwrite table dest1_n130 select transform(*) using 'cat' as key binary, value int from mytable_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@mytable
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SCRIPT [(mytable)mytable.FieldSchema(name:key, type:binary, comment:null), (mytable)mytable.FieldSchema(name:value, type:int, comment:null), ]
-POSTHOOK: Lineage: dest1.value SCRIPT [(mytable)mytable.FieldSchema(name:key, type:binary, comment:null), (mytable)mytable.FieldSchema(name:value, type:int, comment:null), ]
-PREHOOK: query: select key, value, length (key) from dest1
+POSTHOOK: Input: default@mytable_n2
+POSTHOOK: Output: default@dest1_n130
+POSTHOOK: Lineage: dest1_n130.key SCRIPT [(mytable_n2)mytable_n2.FieldSchema(name:key, type:binary, comment:null), (mytable_n2)mytable_n2.FieldSchema(name:value, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1_n130.value SCRIPT [(mytable_n2)mytable_n2.FieldSchema(name:key, type:binary, comment:null), (mytable_n2)mytable_n2.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: select key, value, length (key) from dest1_n130
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n130
 #### A masked pattern was here ####
-POSTHOOK: query: select key, value, length (key) from dest1
+POSTHOOK: query: select key, value, length (key) from dest1_n130
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n130
 #### A masked pattern was here ####
 abc	1	8
 test	2	6
diff --git a/ql/src/test/results/clientpositive/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/load_dyn_part1.q.out
index 706e9c77dd..eae65bb216 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part1.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part1.q.out
@@ -8,28 +8,28 @@ ds=2008-04-08/hr=11
 ds=2008-04-08/hr=12
 ds=2008-04-09/hr=11
 ds=2008-04-09/hr=12
-PREHOOK: query: create table if not exists nzhang_part1 like srcpart
+PREHOOK: query: create table if not exists nzhang_part1_n0 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@nzhang_part1
-POSTHOOK: query: create table if not exists nzhang_part1 like srcpart
+PREHOOK: Output: default@nzhang_part1_n0
+POSTHOOK: query: create table if not exists nzhang_part1_n0 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nzhang_part1
-PREHOOK: query: create table if not exists nzhang_part2 like srcpart
+POSTHOOK: Output: default@nzhang_part1_n0
+PREHOOK: query: create table if not exists nzhang_part2_n0 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@nzhang_part2
-POSTHOOK: query: create table if not exists nzhang_part2 like srcpart
+PREHOOK: Output: default@nzhang_part2_n0
+POSTHOOK: query: create table if not exists nzhang_part2_n0 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nzhang_part2
-PREHOOK: query: describe extended nzhang_part1
+POSTHOOK: Output: default@nzhang_part2_n0
+PREHOOK: query: describe extended nzhang_part1_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nzhang_part1
-POSTHOOK: query: describe extended nzhang_part1
+PREHOOK: Input: default@nzhang_part1_n0
+POSTHOOK: query: describe extended nzhang_part1_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nzhang_part1
+POSTHOOK: Input: default@nzhang_part1_n0
 key	string	default
 value	string	default
 ds	string	
@@ -43,13 +43,13 @@ hr	string	
 #### A masked pattern was here ####
 PREHOOK: query: explain
 from srcpart
-insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+insert overwrite table nzhang_part1_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 from srcpart
-insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+insert overwrite table nzhang_part1_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
@@ -85,7 +85,7 @@ STAGE PLANS:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.nzhang_part1
+                  name: default.nzhang_part1_n0
            Select Operator
              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
              outputColumnNames: key, value, ds, hr
@@ -116,7 +116,7 @@ STAGE PLANS:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.nzhang_part2
+                  name: default.nzhang_part2_n0
            Select Operator
              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
              outputColumnNames: key, value, hr
@@ -172,7 +172,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.nzhang_part1
+              name: default.nzhang_part1_n0
   Stage: Stage-3
     Stats Work
@@ -180,7 +180,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: string, string
-          Table: default.nzhang_part1
+          Table: default.nzhang_part1_n0
   Stage: Stage-4
     Map Reduce
@@ -192,7 +192,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.nzhang_part1
+              name: default.nzhang_part1_n0
   Stage: Stage-6
     Map Reduce
@@ -204,7 +204,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.nzhang_part1
+              name: default.nzhang_part1_n0
   Stage: Stage-7
     Move Operator
@@ -223,7 +223,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.nzhang_part2
+              name: default.nzhang_part2_n0
   Stage: Stage-9
     Stats Work
@@ -231,7 +231,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: string, string
-          Table: default.nzhang_part2
+          Table: default.nzhang_part2_n0
   Stage: Stage-10
     Map Reduce
@@ -264,64 +264,64 @@ STAGE PLANS:
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 PREHOOK: query: from srcpart
-insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+insert overwrite table nzhang_part1_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@nzhang_part1
-PREHOOK: Output: default@nzhang_part2@ds=2008-12-31
+PREHOOK: Output: default@nzhang_part1_n0
+PREHOOK: Output: default@nzhang_part2_n0@ds=2008-12-31
 POSTHOOK: query: from srcpart
-insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+insert overwrite table nzhang_part1_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@nzhang_part1@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@nzhang_part1@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@nzhang_part2@ds=2008-12-31/hr=11
-POSTHOOK: Output: default@nzhang_part2@ds=2008-12-31/hr=12
-POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: show partitions nzhang_part1
+POSTHOOK: Output: default@nzhang_part1_n0@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@nzhang_part1_n0@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@nzhang_part2_n0@ds=2008-12-31/hr=11
+POSTHOOK: Output: default@nzhang_part2_n0@ds=2008-12-31/hr=12
+POSTHOOK: Lineage: nzhang_part1_n0 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part1_n0 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part1_n0 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part1_n0 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part2_n0 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part2_n0 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part2_n0 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part2_n0 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show partitions nzhang_part1_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@nzhang_part1
-POSTHOOK: query: show partitions nzhang_part1
+PREHOOK: Input: default@nzhang_part1_n0
+POSTHOOK: query: show partitions nzhang_part1_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@nzhang_part1
+POSTHOOK: Input: default@nzhang_part1_n0
 ds=2008-04-08/hr=11
 ds=2008-04-08/hr=12
-PREHOOK: query: show partitions nzhang_part2
+PREHOOK: query: show partitions nzhang_part2_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@nzhang_part2
-POSTHOOK: query: show partitions nzhang_part2
+PREHOOK: Input: default@nzhang_part2_n0
+POSTHOOK: query: show partitions nzhang_part2_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@nzhang_part2
+POSTHOOK: Input: default@nzhang_part2_n0
 ds=2008-12-31/hr=11
 ds=2008-12-31/hr=12
-PREHOOK: query: select * from nzhang_part1 where ds is not null and hr is not null
+PREHOOK: query: select * from nzhang_part1_n0 where ds is not null and hr is not null
 PREHOOK: type: QUERY
-PREHOOK: Input: default@nzhang_part1
-PREHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=11
-PREHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12 +PREHOOK: Input: default@nzhang_part1_n0 +PREHOOK: Input: default@nzhang_part1_n0@ds=2008-04-08/hr=11 +PREHOOK: Input: default@nzhang_part1_n0@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select * from nzhang_part1 where ds is not null and hr is not null +POSTHOOK: query: select * from nzhang_part1_n0 where ds is not null and hr is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@nzhang_part1 -POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@nzhang_part1_n0 +POSTHOOK: Input: default@nzhang_part1_n0@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@nzhang_part1_n0@ds=2008-04-08/hr=12 #### A masked pattern was here #### 0 val_0 2008-04-08 11 0 val_0 2008-04-08 11 @@ -1323,17 +1323,17 @@ POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12 98 val_98 2008-04-08 11 98 val_98 2008-04-08 12 98 val_98 2008-04-08 12 -PREHOOK: query: select * from nzhang_part2 where ds is not null and hr is not null +PREHOOK: query: select * from nzhang_part2_n0 where ds is not null and hr is not null PREHOOK: type: QUERY -PREHOOK: Input: default@nzhang_part2 -PREHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=11 -PREHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=12 +PREHOOK: Input: default@nzhang_part2_n0 +PREHOOK: Input: default@nzhang_part2_n0@ds=2008-12-31/hr=11 +PREHOOK: Input: default@nzhang_part2_n0@ds=2008-12-31/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select * from nzhang_part2 where ds is not null and hr is not null +POSTHOOK: query: select * from nzhang_part2_n0 where ds is not null and hr is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@nzhang_part2 -POSTHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=11 -POSTHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=12 +POSTHOOK: Input: default@nzhang_part2_n0 +POSTHOOK: Input: default@nzhang_part2_n0@ds=2008-12-31/hr=11 +POSTHOOK: Input: default@nzhang_part2_n0@ds=2008-12-31/hr=12 #### A masked pattern was here #### 0 val_0 2008-12-31 11 0 val_0 2008-12-31 11 diff --git a/ql/src/test/results/clientpositive/load_dyn_part11.q.out b/ql/src/test/results/clientpositive/load_dyn_part11.q.out index 6ad41ec6a4..5fb5262363 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part11.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part11.q.out @@ -8,20 +8,20 @@ ds=2008-04-08/hr=11 ds=2008-04-08/hr=12 ds=2008-04-09/hr=11 ds=2008-04-09/hr=12 -PREHOOK: query: create table if not exists nzhang_part like srcpart +PREHOOK: query: create table if not exists nzhang_part_n0 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@nzhang_part -POSTHOOK: query: create table if not exists nzhang_part like srcpart +PREHOOK: Output: default@nzhang_part_n0 +POSTHOOK: query: create table if not exists nzhang_part_n0 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@nzhang_part -PREHOOK: query: describe extended nzhang_part +POSTHOOK: Output: default@nzhang_part_n0 +PREHOOK: query: describe extended nzhang_part_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part -POSTHOOK: query: describe extended nzhang_part +PREHOOK: Input: default@nzhang_part_n0 +POSTHOOK: query: describe extended nzhang_part_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part +POSTHOOK: Input: default@nzhang_part_n0 key string default value string default ds 
string @@ -33,36 +33,36 @@ ds string hr string #### A masked pattern was here #### -PREHOOK: query: insert overwrite table nzhang_part partition (ds="2010-03-03", hr) select key, value, hr from srcpart where ds is not null and hr is not null +PREHOOK: query: insert overwrite table nzhang_part_n0 partition (ds="2010-03-03", hr) select key, value, hr from srcpart where ds is not null and hr is not null PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@nzhang_part@ds=2010-03-03 -POSTHOOK: query: insert overwrite table nzhang_part partition (ds="2010-03-03", hr) select key, value, hr from srcpart where ds is not null and hr is not null +PREHOOK: Output: default@nzhang_part_n0@ds=2010-03-03 +POSTHOOK: query: insert overwrite table nzhang_part_n0 partition (ds="2010-03-03", hr) select key, value, hr from srcpart where ds is not null and hr is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@nzhang_part@ds=2010-03-03/hr=11 -POSTHOOK: Output: default@nzhang_part@ds=2010-03-03/hr=12 -POSTHOOK: Lineage: nzhang_part PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from nzhang_part where ds = '2010-03-03' and hr = '11' +POSTHOOK: Output: default@nzhang_part_n0@ds=2010-03-03/hr=11 +POSTHOOK: Output: default@nzhang_part_n0@ds=2010-03-03/hr=12 +POSTHOOK: Lineage: nzhang_part_n0 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_n0 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_n0 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part_n0 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from nzhang_part_n0 where ds = '2010-03-03' and hr = '11' PREHOOK: type: QUERY -PREHOOK: Input: default@nzhang_part -PREHOOK: Input: default@nzhang_part@ds=2010-03-03/hr=11 +PREHOOK: Input: default@nzhang_part_n0 +PREHOOK: Input: default@nzhang_part_n0@ds=2010-03-03/hr=11 #### A masked pattern was here #### -POSTHOOK: query: select * from nzhang_part where ds = '2010-03-03' and hr = '11' +POSTHOOK: query: select * from nzhang_part_n0 where ds = '2010-03-03' and hr = '11' POSTHOOK: type: QUERY -POSTHOOK: Input: default@nzhang_part -POSTHOOK: Input: default@nzhang_part@ds=2010-03-03/hr=11 +POSTHOOK: Input: default@nzhang_part_n0 
+POSTHOOK: Input: default@nzhang_part_n0@ds=2010-03-03/hr=11 #### A masked pattern was here #### 238 val_238 2010-03-03 11 86 val_86 2010-03-03 11 @@ -1064,15 +1064,15 @@ POSTHOOK: Input: default@nzhang_part@ds=2010-03-03/hr=11 400 val_400 2010-03-03 11 200 val_200 2010-03-03 11 97 val_97 2010-03-03 11 -PREHOOK: query: select * from nzhang_part where ds = '2010-03-03' and hr = '12' +PREHOOK: query: select * from nzhang_part_n0 where ds = '2010-03-03' and hr = '12' PREHOOK: type: QUERY -PREHOOK: Input: default@nzhang_part -PREHOOK: Input: default@nzhang_part@ds=2010-03-03/hr=12 +PREHOOK: Input: default@nzhang_part_n0 +PREHOOK: Input: default@nzhang_part_n0@ds=2010-03-03/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select * from nzhang_part where ds = '2010-03-03' and hr = '12' +POSTHOOK: query: select * from nzhang_part_n0 where ds = '2010-03-03' and hr = '12' POSTHOOK: type: QUERY -POSTHOOK: Input: default@nzhang_part -POSTHOOK: Input: default@nzhang_part@ds=2010-03-03/hr=12 +POSTHOOK: Input: default@nzhang_part_n0 +POSTHOOK: Input: default@nzhang_part_n0@ds=2010-03-03/hr=12 #### A masked pattern was here #### 238 val_238 2010-03-03 12 86 val_86 2010-03-03 12 diff --git a/ql/src/test/results/clientpositive/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/load_dyn_part14.q.out index f46f651a7b..ebc563e44d 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part14.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part14.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: create table if not exists nzhang_part14 (key string) +PREHOOK: query: create table if not exists nzhang_part14_n0 (key string) partitioned by (value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@nzhang_part14 -POSTHOOK: query: create table if not exists nzhang_part14 (key string) +PREHOOK: Output: default@nzhang_part14_n0 +POSTHOOK: query: create table if not exists nzhang_part14_n0 (key string) partitioned by (value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@nzhang_part14 -PREHOOK: query: describe extended nzhang_part14 +POSTHOOK: Output: default@nzhang_part14_n0 +PREHOOK: query: describe extended nzhang_part14_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part14 -POSTHOOK: query: describe extended nzhang_part14 +PREHOOK: Input: default@nzhang_part14_n0 +POSTHOOK: query: describe extended nzhang_part14_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14_n0 key string value string @@ -23,7 +23,7 @@ value string #### A masked pattern was here #### PREHOOK: query: explain -insert overwrite table nzhang_part14 partition(value) +insert overwrite table nzhang_part14_n0 partition(value) select key, value from ( select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a union all @@ -33,7 +33,7 @@ select key, value from ( ) T PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table nzhang_part14 partition(value) +insert overwrite table nzhang_part14_n0 partition(value) select key, value from ( select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a union all @@ -99,7 +99,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part14 + name: default.nzhang_part14_n0 Select Operator expressions: _col0 
(type: string), _col1 (type: string) outputColumnNames: key, value @@ -126,7 +126,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part14 + name: default.nzhang_part14_n0 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -153,7 +153,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part14 + name: default.nzhang_part14_n0 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -208,7 +208,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part14 + name: default.nzhang_part14_n0 Stage: Stage-3 Stats Work @@ -216,7 +216,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: string - Table: default.nzhang_part14 + Table: default.nzhang_part14_n0 Stage: Stage-4 Map Reduce @@ -228,7 +228,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part14 + name: default.nzhang_part14_n0 Stage: Stage-6 Map Reduce @@ -240,7 +240,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part14 + name: default.nzhang_part14_n0 Stage: Stage-7 Move Operator @@ -308,7 +308,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -PREHOOK: query: insert overwrite table nzhang_part14 partition(value) +PREHOOK: query: insert overwrite table nzhang_part14_n0 partition(value) select key, value from ( select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a union all @@ -318,8 +318,8 @@ select key, value from ( ) T PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@nzhang_part14 -POSTHOOK: query: insert overwrite table nzhang_part14 partition(value) +PREHOOK: Output: default@nzhang_part14_n0 +POSTHOOK: query: insert overwrite table nzhang_part14_n0 partition(value) select key, value from ( select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a union all @@ -329,29 +329,29 @@ select key, value from ( ) T POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@nzhang_part14@value= -POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ -POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION [] -POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION [] -PREHOOK: query: show partitions nzhang_part14 +POSTHOOK: Output: default@nzhang_part14_n0@value= +POSTHOOK: Output: default@nzhang_part14_n0@value=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Lineage: nzhang_part14_n0 PARTITION(value= ).key EXPRESSION [] +POSTHOOK: Lineage: nzhang_part14_n0 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key 
EXPRESSION [] +PREHOOK: query: show partitions nzhang_part14_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@nzhang_part14 -POSTHOOK: query: show partitions nzhang_part14 +PREHOOK: Input: default@nzhang_part14_n0 +POSTHOOK: query: show partitions nzhang_part14_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@nzhang_part14 +POSTHOOK: Input: default@nzhang_part14_n0 value= value=__HIVE_DEFAULT_PARTITION__ -PREHOOK: query: select * from nzhang_part14 where value <> 'a' +PREHOOK: query: select * from nzhang_part14_n0 where value <> 'a' PREHOOK: type: QUERY -PREHOOK: Input: default@nzhang_part14 -PREHOOK: Input: default@nzhang_part14@value= -PREHOOK: Input: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@nzhang_part14_n0 +PREHOOK: Input: default@nzhang_part14_n0@value= +PREHOOK: Input: default@nzhang_part14_n0@value=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from nzhang_part14 where value <> 'a' +POSTHOOK: query: select * from nzhang_part14_n0 where value <> 'a' POSTHOOK: type: QUERY -POSTHOOK: Input: default@nzhang_part14 -POSTHOOK: Input: default@nzhang_part14@value= -POSTHOOK: Input: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: default@nzhang_part14_n0 +POSTHOOK: Input: default@nzhang_part14_n0@value= +POSTHOOK: Input: default@nzhang_part14_n0@value=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### k1 __HIVE_DEFAULT_PARTITION__ k1 __HIVE_DEFAULT_PARTITION__ diff --git a/ql/src/test/results/clientpositive/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/load_dyn_part8.q.out index 8bdba66805..0bf2145afd 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part8.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part8.q.out @@ -8,20 +8,20 @@ ds=2008-04-08/hr=11 ds=2008-04-08/hr=12 ds=2008-04-09/hr=11 ds=2008-04-09/hr=12 -PREHOOK: query: create table if not exists nzhang_part8 like srcpart +PREHOOK: query: create table if not exists nzhang_part8_n0 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@nzhang_part8 -POSTHOOK: query: create table if not exists nzhang_part8 like srcpart +PREHOOK: Output: default@nzhang_part8_n0 +POSTHOOK: query: create table if not exists nzhang_part8_n0 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@nzhang_part8 -PREHOOK: query: describe extended nzhang_part8 +POSTHOOK: Output: default@nzhang_part8_n0 +PREHOOK: query: describe extended nzhang_part8_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@nzhang_part8 -POSTHOOK: query: describe extended nzhang_part8 +PREHOOK: Input: default@nzhang_part8_n0 +POSTHOOK: query: describe extended nzhang_part8_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@nzhang_part8 +POSTHOOK: Input: default@nzhang_part8_n0 key string default value string default ds string @@ -35,13 +35,13 @@ hr string #### A masked pattern was here #### PREHOOK: query: explain extended from srcpart -insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' -insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' +insert overwrite table nzhang_part8_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part8_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain 
extended from srcpart -insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' -insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' +insert overwrite table nzhang_part8_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part8_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -84,15 +84,15 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.nzhang_part8 + name default.nzhang_part8_n0 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct nzhang_part8 { string key, string value} + serialization.ddl struct nzhang_part8_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part8 + name: default.nzhang_part8_n0 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -141,15 +141,15 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.nzhang_part8 + name default.nzhang_part8_n0 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct nzhang_part8 { string key, string value} + serialization.ddl struct nzhang_part8_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part8 + name: default.nzhang_part8_n0 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -438,15 +438,15 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.nzhang_part8 + name default.nzhang_part8_n0 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct nzhang_part8 { string key, string value} + serialization.ddl struct nzhang_part8_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part8 + name: default.nzhang_part8_n0 Stage: Stage-3 Stats Work @@ -471,15 +471,15 @@ STAGE PLANS: columns.comments 'default','default' columns.types string:string #### A masked pattern was here #### - name default.nzhang_part8 + name default.nzhang_part8_n0 partition_columns ds/hr partition_columns.types string:string - serialization.ddl struct nzhang_part8 { string key, string value} + serialization.ddl struct nzhang_part8_n0 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part8 + name: default.nzhang_part8_n0 Stage: Stage-4 Stats Work @@ -488,7 +488,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.nzhang_part8 + Table: default.nzhang_part8_n0 Is Table Level Stats: false Stage: Stage-5 @@ -569,62 +569,62 @@ STAGE PLANS: MultiFileSpray: false PREHOOK: query: from 
srcpart -insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' -insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' +insert overwrite table nzhang_part8_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part8_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@nzhang_part8 -PREHOOK: Output: default@nzhang_part8@ds=2008-12-31 +PREHOOK: Output: default@nzhang_part8_n0 +PREHOOK: Output: default@nzhang_part8_n0@ds=2008-12-31 POSTHOOK: query: from srcpart -insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' -insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' +insert overwrite table nzhang_part8_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table nzhang_part8_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@nzhang_part8@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@nzhang_part8@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@nzhang_part8@ds=2008-12-31/hr=11 -POSTHOOK: Output: default@nzhang_part8@ds=2008-12-31/hr=12 -POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: nzhang_part8 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions nzhang_part8 +POSTHOOK: Output: default@nzhang_part8_n0@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@nzhang_part8_n0@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@nzhang_part8_n0@ds=2008-12-31/hr=11 +POSTHOOK: Output: default@nzhang_part8_n0@ds=2008-12-31/hr=12 +POSTHOOK: Lineage: nzhang_part8_n0 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: 
Lineage: nzhang_part8_n0 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8_n0 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8_n0 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8_n0 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8_n0 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8_n0 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: nzhang_part8_n0 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions nzhang_part8_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@nzhang_part8 -POSTHOOK: query: show partitions nzhang_part8 +PREHOOK: Input: default@nzhang_part8_n0 +POSTHOOK: query: show partitions nzhang_part8_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@nzhang_part8 +POSTHOOK: Input: default@nzhang_part8_n0 ds=2008-04-08/hr=11 ds=2008-04-08/hr=12 ds=2008-12-31/hr=11 ds=2008-12-31/hr=12 -PREHOOK: query: select * from nzhang_part8 where ds is not null and hr is not null +PREHOOK: query: select * from nzhang_part8_n0 where ds is not null and hr is not null PREHOOK: type: QUERY -PREHOOK: Input: default@nzhang_part8 -PREHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=11 -PREHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=12 -PREHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=11 -PREHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=12 +PREHOOK: Input: default@nzhang_part8_n0 +PREHOOK: Input: default@nzhang_part8_n0@ds=2008-04-08/hr=11 +PREHOOK: Input: default@nzhang_part8_n0@ds=2008-04-08/hr=12 +PREHOOK: Input: default@nzhang_part8_n0@ds=2008-12-31/hr=11 +PREHOOK: Input: default@nzhang_part8_n0@ds=2008-12-31/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select * from nzhang_part8 where ds is not null and hr is not null +POSTHOOK: query: select * from nzhang_part8_n0 where ds is not null and hr is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@nzhang_part8 -POSTHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@nzhang_part8@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=11 -POSTHOOK: Input: default@nzhang_part8@ds=2008-12-31/hr=12 +POSTHOOK: Input: default@nzhang_part8_n0 +POSTHOOK: Input: default@nzhang_part8_n0@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@nzhang_part8_n0@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@nzhang_part8_n0@ds=2008-12-31/hr=11 +POSTHOOK: Input: default@nzhang_part8_n0@ds=2008-12-31/hr=12 #### A masked pattern was here #### 0 val_0 2008-04-08 11 0 val_0 2008-04-08 11 diff --git a/ql/src/test/results/clientpositive/load_exist_part_authsuccess.q.out b/ql/src/test/results/clientpositive/load_exist_part_authsuccess.q.out index 8ec7e628ec..dc0da6c3f3 100644 --- a/ql/src/test/results/clientpositive/load_exist_part_authsuccess.q.out +++ b/ql/src/test/results/clientpositive/load_exist_part_authsuccess.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: create table hive_test_src ( col1 string ) 
partitioned by (pcol1 string) stored as textfile +PREHOOK: query: create table hive_test_src_n1 ( col1 string ) partitioned by (pcol1 string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@hive_test_src -POSTHOOK: query: create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile +PREHOOK: Output: default@hive_test_src_n1 +POSTHOOK: query: create table hive_test_src_n1 ( col1 string ) partitioned by (pcol1 string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@hive_test_src -PREHOOK: query: alter table hive_test_src add partition (pcol1 = 'test_part') +POSTHOOK: Output: default@hive_test_src_n1 +PREHOOK: query: alter table hive_test_src_n1 add partition (pcol1 = 'test_part') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@hive_test_src -POSTHOOK: query: alter table hive_test_src add partition (pcol1 = 'test_part') +PREHOOK: Output: default@hive_test_src_n1 +POSTHOOK: query: alter table hive_test_src_n1 add partition (pcol1 = 'test_part') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@hive_test_src -POSTHOOK: Output: default@hive_test_src@pcol1=test_part -PREHOOK: query: grant Update on table hive_test_src to user hive_test_user +POSTHOOK: Output: default@hive_test_src_n1 +POSTHOOK: Output: default@hive_test_src_n1@pcol1=test_part +PREHOOK: query: grant Update on table hive_test_src_n1 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@hive_test_src -POSTHOOK: query: grant Update on table hive_test_src to user hive_test_user +PREHOOK: Output: default@hive_test_src_n1 +POSTHOOK: query: grant Update on table hive_test_src_n1 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@hive_test_src -PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part') +POSTHOOK: Output: default@hive_test_src_n1 +PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n1 partition (pcol1 = 'test_part') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@hive_test_src@pcol1=test_part -POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part') +PREHOOK: Output: default@hive_test_src_n1@pcol1=test_part +POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n1 partition (pcol1 = 'test_part') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@hive_test_src@pcol1=test_part +POSTHOOK: Output: default@hive_test_src_n1@pcol1=test_part diff --git a/ql/src/test/results/clientpositive/load_file_with_space_in_the_name.q.out b/ql/src/test/results/clientpositive/load_file_with_space_in_the_name.q.out index 78dee617b5..e62f788e1b 100644 --- a/ql/src/test/results/clientpositive/load_file_with_space_in_the_name.q.out +++ b/ql/src/test/results/clientpositive/load_file_with_space_in_the_name.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT) +PREHOOK: query: CREATE TABLE load_file_with_space_in_the_name_n0(name STRING, age INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@load_file_with_space_in_the_name -POSTHOOK: query: CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT) 
+PREHOOK: Output: default@load_file_with_space_in_the_name_n0 +POSTHOOK: query: CREATE TABLE load_file_with_space_in_the_name_n0(name STRING, age INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@load_file_with_space_in_the_name -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name +POSTHOOK: Output: default@load_file_with_space_in_the_name_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@load_file_with_space_in_the_name -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name +PREHOOK: Output: default@load_file_with_space_in_the_name_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@load_file_with_space_in_the_name -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name +POSTHOOK: Output: default@load_file_with_space_in_the_name_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@load_file_with_space_in_the_name -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name +PREHOOK: Output: default@load_file_with_space_in_the_name_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@load_file_with_space_in_the_name +POSTHOOK: Output: default@load_file_with_space_in_the_name_n0 diff --git a/ql/src/test/results/clientpositive/load_fs_overwrite.q.out b/ql/src/test/results/clientpositive/load_fs_overwrite.q.out index 8213728d8d..2bbc8fe429 100644 --- a/ql/src/test/results/clientpositive/load_fs_overwrite.q.out +++ b/ql/src/test/results/clientpositive/load_fs_overwrite.q.out @@ -1,6 +1,6 @@ -PREHOOK: query: drop table target +PREHOOK: query: drop table target_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table target +POSTHOOK: query: drop table target_n0 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table temp PREHOOK: type: DROPTABLE @@ -10,12 +10,12 @@ POSTHOOK: type: DROPTABLE PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@target +PREHOOK: Output: default@target_n0 #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@target +POSTHOOK: Output: default@target_n0 #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### @@ -37,18 +37,18 @@ POSTHOOK: Output: default@temp #### A masked pattern was here #### PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@target +PREHOOK: Output: default@target_n0 #### A masked pattern was here #### POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@target -PREHOOK: query: select count(*) from target +POSTHOOK: Output: default@target_n0 +PREHOOK: query: select count(*) 
from target_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@target +PREHOOK: Input: default@target_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from target +POSTHOOK: query: select count(*) from target_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@target +POSTHOOK: Input: default@target_n0 #### A masked pattern was here #### 500 PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table temp @@ -62,28 +62,28 @@ POSTHOOK: Output: default@temp #### A masked pattern was here #### PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@target +PREHOOK: Output: default@target_n0 #### A masked pattern was here #### POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@target -PREHOOK: query: select count(*) from target +POSTHOOK: Output: default@target_n0 +PREHOOK: query: select count(*) from target_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@target +PREHOOK: Input: default@target_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from target +POSTHOOK: query: select count(*) from target_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@target +POSTHOOK: Input: default@target_n0 #### A masked pattern was here #### 500 -PREHOOK: query: drop table target +PREHOOK: query: drop table target_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@target -PREHOOK: Output: default@target -POSTHOOK: query: drop table target +PREHOOK: Input: default@target_n0 +PREHOOK: Output: default@target_n0 +POSTHOOK: query: drop table target_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@target -POSTHOOK: Output: default@target +POSTHOOK: Input: default@target_n0 +POSTHOOK: Output: default@target_n0 PREHOOK: query: drop table temp PREHOOK: type: DROPTABLE PREHOOK: Input: default@temp diff --git a/ql/src/test/results/clientpositive/load_non_hdfs_path.q.out b/ql/src/test/results/clientpositive/load_non_hdfs_path.q.out index 31c40d8daa..9ed4f54807 100644 --- a/ql/src/test/results/clientpositive/load_non_hdfs_path.q.out +++ b/ql/src/test/results/clientpositive/load_non_hdfs_path.q.out @@ -1,16 +1,16 @@ -PREHOOK: query: create table t1(i int) +PREHOOK: query: create table t1_n44(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1(i int) +PREHOOK: Output: default@t1_n44 +POSTHOOK: query: create table t1_n44(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n44 #### A masked pattern was here #### PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 +PREHOOK: Output: default@t1_n44 #### A masked pattern was here #### POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1_n44 diff --git a/ql/src/test/results/clientpositive/load_orc.q.out b/ql/src/test/results/clientpositive/load_orc.q.out index b0835de91d..f07206eea2 100644 --- a/ql/src/test/results/clientpositive/load_orc.q.out +++ b/ql/src/test/results/clientpositive/load_orc.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) +PREHOOK: query: create table orc_staging_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orc_staging -POSTHOOK: query: create table orc_staging 
(userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) +PREHOOK: Output: default@orc_staging_n0 +POSTHOOK: query: create table orc_staging_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orc_staging -PREHOOK: query: create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) +POSTHOOK: Output: default@orc_staging_n0 +PREHOOK: query: create table orc_test_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orc_test -POSTHOOK: query: create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) +PREHOOK: Output: default@orc_test_n1 +POSTHOOK: query: create table orc_test_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orc_test -PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging +POSTHOOK: Output: default@orc_test_n1 +PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@orc_staging -POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging +PREHOOK: Output: default@orc_staging_n0 +POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@orc_staging +POSTHOOK: Output: default@orc_staging_n0 Found 1 items #### A masked pattern was here #### PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@orc_test +PREHOOK: Output: default@orc_test_n1 #### A masked pattern was here #### POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@orc_test -PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test +POSTHOOK: Output: default@orc_test_n1 +PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@orc_test -POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test +PREHOOK: Output: default@orc_test_n1 +POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@orc_test +POSTHOOK: Output: default@orc_test_n1 Found 2 items #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/load_overwrite.q.out b/ql/src/test/results/clientpositive/load_overwrite.q.out index 6edd1b6d22..1dc98601ee 100644 --- a/ql/src/test/results/clientpositive/load_overwrite.q.out +++ b/ql/src/test/results/clientpositive/load_overwrite.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: create table load_overwrite like src +PREHOOK: query: create table load_overwrite_n0 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@load_overwrite -POSTHOOK: query: create table load_overwrite like src +PREHOOK: Output: default@load_overwrite_n0 +POSTHOOK: query: create table 
load_overwrite_n0 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@load_overwrite -PREHOOK: query: insert overwrite table load_overwrite select * from src +POSTHOOK: Output: default@load_overwrite_n0 +PREHOOK: query: insert overwrite table load_overwrite_n0 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@load_overwrite -POSTHOOK: query: insert overwrite table load_overwrite select * from src +PREHOOK: Output: default@load_overwrite_n0 +POSTHOOK: query: insert overwrite table load_overwrite_n0 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@load_overwrite -POSTHOOK: Lineage: load_overwrite.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: load_overwrite.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show table extended like load_overwrite +POSTHOOK: Output: default@load_overwrite_n0 +POSTHOOK: Lineage: load_overwrite_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: load_overwrite_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show table extended like load_overwrite_n0 PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like load_overwrite +POSTHOOK: query: show table extended like load_overwrite_n0 POSTHOOK: type: SHOW_TABLESTATUS -tableName:load_overwrite +tableName:load_overwrite_n0 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -33,28 +33,28 @@ maxFileSize:5812 minFileSize:5812 #### A masked pattern was here #### -PREHOOK: query: select count(*) from load_overwrite +PREHOOK: query: select count(*) from load_overwrite_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@load_overwrite +PREHOOK: Input: default@load_overwrite_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from load_overwrite +POSTHOOK: query: select count(*) from load_overwrite_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@load_overwrite +POSTHOOK: Input: default@load_overwrite_n0 #### A masked pattern was here #### 500 -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load_overwrite +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load_overwrite_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@load_overwrite -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load_overwrite +PREHOOK: Output: default@load_overwrite_n0 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load_overwrite_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@load_overwrite -PREHOOK: query: show table extended like load_overwrite +POSTHOOK: Output: default@load_overwrite_n0 +PREHOOK: query: show table extended like load_overwrite_n0 PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like load_overwrite +POSTHOOK: query: show table extended like load_overwrite_n0 POSTHOOK: type: SHOW_TABLESTATUS -tableName:load_overwrite +tableName:load_overwrite_n0 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -67,28 +67,28 @@ maxFileSize:5812 
minFileSize:5812 #### A masked pattern was here #### -PREHOOK: query: select count(*) from load_overwrite +PREHOOK: query: select count(*) from load_overwrite_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@load_overwrite +PREHOOK: Input: default@load_overwrite_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from load_overwrite +POSTHOOK: query: select count(*) from load_overwrite_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@load_overwrite +POSTHOOK: Input: default@load_overwrite_n0 #### A masked pattern was here #### 1000 -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@load_overwrite -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite +PREHOOK: Output: default@load_overwrite_n0 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@load_overwrite -PREHOOK: query: show table extended like load_overwrite +POSTHOOK: Output: default@load_overwrite_n0 +PREHOOK: query: show table extended like load_overwrite_n0 PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like load_overwrite +POSTHOOK: query: show table extended like load_overwrite_n0 POSTHOOK: type: SHOW_TABLESTATUS -tableName:load_overwrite +tableName:load_overwrite_n0 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -101,12 +101,12 @@ maxFileSize:5812 minFileSize:5812 #### A masked pattern was here #### -PREHOOK: query: select count(*) from load_overwrite +PREHOOK: query: select count(*) from load_overwrite_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@load_overwrite +PREHOOK: Input: default@load_overwrite_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from load_overwrite +POSTHOOK: query: select count(*) from load_overwrite_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@load_overwrite +POSTHOOK: Input: default@load_overwrite_n0 #### A masked pattern was here #### 500 diff --git a/ql/src/test/results/clientpositive/load_part_authsuccess.q.out b/ql/src/test/results/clientpositive/load_part_authsuccess.q.out index 8249dce82f..d33f85343b 100644 --- a/ql/src/test/results/clientpositive/load_part_authsuccess.q.out +++ b/ql/src/test/results/clientpositive/load_part_authsuccess.q.out @@ -1,23 +1,23 @@ -PREHOOK: query: create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile +PREHOOK: query: create table hive_test_src_n0 ( col1 string ) partitioned by (pcol1 string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@hive_test_src -POSTHOOK: query: create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile +PREHOOK: Output: default@hive_test_src_n0 +POSTHOOK: query: create table hive_test_src_n0 ( col1 string ) partitioned by (pcol1 string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@hive_test_src -PREHOOK: query: grant Update on table hive_test_src to user hive_test_user +POSTHOOK: Output: default@hive_test_src_n0 +PREHOOK: query: grant 
Update on table hive_test_src_n0 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE -PREHOOK: Output: default@hive_test_src -POSTHOOK: query: grant Update on table hive_test_src to user hive_test_user +PREHOOK: Output: default@hive_test_src_n0 +POSTHOOK: query: grant Update on table hive_test_src_n0 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE -POSTHOOK: Output: default@hive_test_src -PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part') +POSTHOOK: Output: default@hive_test_src_n0 +PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n0 partition (pcol1 = 'test_part') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@hive_test_src -POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part') +PREHOOK: Output: default@hive_test_src_n0 +POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n0 partition (pcol1 = 'test_part') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@hive_test_src -POSTHOOK: Output: default@hive_test_src@pcol1=test_part +POSTHOOK: Output: default@hive_test_src_n0 +POSTHOOK: Output: default@hive_test_src_n0@pcol1=test_part diff --git a/ql/src/test/results/clientpositive/loadpart1.q.out b/ql/src/test/results/clientpositive/loadpart1.q.out index 43a0840987..858dd99972 100644 --- a/ql/src/test/results/clientpositive/loadpart1.q.out +++ b/ql/src/test/results/clientpositive/loadpart1.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: create table hive_test_src ( col1 string ) stored as textfile +PREHOOK: query: create table hive_test_src_n2 ( col1 string ) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@hive_test_src -POSTHOOK: query: create table hive_test_src ( col1 string ) stored as textfile +PREHOOK: Output: default@hive_test_src_n2 +POSTHOOK: query: create table hive_test_src_n2 ( col1 string ) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@hive_test_src -PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src +POSTHOOK: Output: default@hive_test_src_n2 +PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@hive_test_src -POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src +PREHOOK: Output: default@hive_test_src_n2 +POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@hive_test_src +POSTHOOK: Output: default@hive_test_src_n2 PREHOOK: query: create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -22,15 +22,15 @@ POSTHOOK: query: create table hive_test_dst ( col1 string ) partitioned by ( pco POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@hive_test_dst -PREHOOK: query: insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src +PREHOOK: query: insert overwrite 
table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@hive_test_src +PREHOOK: Input: default@hive_test_src_n2 PREHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part -POSTHOOK: query: insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src +POSTHOOK: query: insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@hive_test_src +POSTHOOK: Input: default@hive_test_src_n2 POSTHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part -POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src_n2)hive_test_src_n2.FieldSchema(name:col1, type:string, comment:null), ] PREHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part' PREHOOK: type: QUERY PREHOOK: Input: default@hive_test_dst @@ -47,15 +47,15 @@ POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part 4 test_part test_Part 5 test_part test_Part 6 test_part test_Part -PREHOOK: query: insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src +PREHOOK: query: insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@hive_test_src +PREHOOK: Input: default@hive_test_src_n2 PREHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part -POSTHOOK: query: insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src +POSTHOOK: query: insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@hive_test_src +POSTHOOK: Input: default@hive_test_src_n2 POSTHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part -POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ] +POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src_n2)hive_test_src_n2.FieldSchema(name:col1, type:string, comment:null), ] PREHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part' PREHOOK: type: QUERY PREHOOK: Input: default@hive_test_dst diff --git a/ql/src/test/results/clientpositive/lock1.q.out b/ql/src/test/results/clientpositive/lock1.q.out index 27545ea584..f837a6f0c4 100644 --- a/ql/src/test/results/clientpositive/lock1.q.out +++ b/ql/src/test/results/clientpositive/lock1.q.out @@ -1,58 +1,58 @@ -PREHOOK: query: drop table tstsrc +PREHOOK: query: drop table tstsrc_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrc +POSTHOOK: query: drop table tstsrc_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tstsrc like src +PREHOOK: query: create table tstsrc_n1 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrc -POSTHOOK: query: create table tstsrc like src +PREHOOK: Output: default@tstsrc_n1 +POSTHOOK: query: create table 
tstsrc_n1 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrc -PREHOOK: query: insert overwrite table tstsrc select key, value from src +POSTHOOK: Output: default@tstsrc_n1 +PREHOOK: query: insert overwrite table tstsrc_n1 select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tstsrc -POSTHOOK: query: insert overwrite table tstsrc select key, value from src +PREHOOK: Output: default@tstsrc_n1 +POSTHOOK: query: insert overwrite table tstsrc_n1 select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tstsrc -POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tstsrc_n1 +POSTHOOK: Lineage: tstsrc_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrc_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -PREHOOK: query: SHOW LOCKS tstsrc +PREHOOK: query: SHOW LOCKS tstsrc_n1 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrc +POSTHOOK: query: SHOW LOCKS tstsrc_n1 POSTHOOK: type: SHOWLOCKS -PREHOOK: query: LOCK TABLE tstsrc shared +PREHOOK: query: LOCK TABLE tstsrc_n1 shared PREHOOK: type: LOCKTABLE -POSTHOOK: query: LOCK TABLE tstsrc shared +POSTHOOK: query: LOCK TABLE tstsrc_n1 shared POSTHOOK: type: LOCKTABLE PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED -PREHOOK: query: SHOW LOCKS tstsrc +default@tstsrc_n1 SHARED +PREHOOK: query: SHOW LOCKS tstsrc_n1 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrc +POSTHOOK: query: SHOW LOCKS tstsrc_n1 POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED -PREHOOK: query: SHOW LOCKS tstsrc extended +default@tstsrc_n1 SHARED +PREHOOK: query: SHOW LOCKS tstsrc_n1 extended PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrc extended +POSTHOOK: query: SHOW LOCKS tstsrc_n1 extended POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED +default@tstsrc_n1 SHARED #### A masked pattern was here #### LOCK_MODE:EXPLICIT -LOCK_QUERYSTRING:LOCK TABLE tstsrc shared -PREHOOK: query: UNLOCK TABLE tstsrc +LOCK_QUERYSTRING:LOCK TABLE tstsrc_n1 shared +PREHOOK: query: UNLOCK TABLE tstsrc_n1 PREHOOK: type: UNLOCKTABLE -POSTHOOK: query: UNLOCK TABLE tstsrc +POSTHOOK: query: UNLOCK TABLE tstsrc_n1 POSTHOOK: type: UNLOCKTABLE PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS @@ -62,63 +62,63 @@ PREHOOK: query: SHOW LOCKS extended PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS extended POSTHOOK: type: SHOWLOCKS -PREHOOK: query: SHOW LOCKS tstsrc +PREHOOK: query: SHOW LOCKS tstsrc_n1 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrc +POSTHOOK: query: SHOW LOCKS tstsrc_n1 POSTHOOK: type: SHOWLOCKS -PREHOOK: query: lock TABLE tstsrc SHARED +PREHOOK: query: lock TABLE tstsrc_n1 SHARED PREHOOK: type: LOCKTABLE -POSTHOOK: query: lock TABLE tstsrc SHARED +POSTHOOK: query: lock TABLE tstsrc_n1 SHARED POSTHOOK: type: LOCKTABLE PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED +default@tstsrc_n1 SHARED PREHOOK: query: SHOW LOCKS extended PREHOOK: type: SHOWLOCKS POSTHOOK: query: 
SHOW LOCKS extended POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED +default@tstsrc_n1 SHARED #### A masked pattern was here #### LOCK_MODE:EXPLICIT -LOCK_QUERYSTRING:lock TABLE tstsrc SHARED -PREHOOK: query: SHOW LOCKS tstsrc +LOCK_QUERYSTRING:lock TABLE tstsrc_n1 SHARED +PREHOOK: query: SHOW LOCKS tstsrc_n1 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrc +POSTHOOK: query: SHOW LOCKS tstsrc_n1 POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED -PREHOOK: query: LOCK TABLE tstsrc SHARED +default@tstsrc_n1 SHARED +PREHOOK: query: LOCK TABLE tstsrc_n1 SHARED PREHOOK: type: LOCKTABLE -POSTHOOK: query: LOCK TABLE tstsrc SHARED +POSTHOOK: query: LOCK TABLE tstsrc_n1 SHARED POSTHOOK: type: LOCKTABLE PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED -default@tstsrc SHARED +default@tstsrc_n1 SHARED +default@tstsrc_n1 SHARED PREHOOK: query: SHOW LOCKS extended PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS extended POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED +default@tstsrc_n1 SHARED #### A masked pattern was here #### LOCK_MODE:EXPLICIT -LOCK_QUERYSTRING:lock TABLE tstsrc SHARED -default@tstsrc SHARED +LOCK_QUERYSTRING:lock TABLE tstsrc_n1 SHARED +default@tstsrc_n1 SHARED #### A masked pattern was here #### LOCK_MODE:EXPLICIT -LOCK_QUERYSTRING:LOCK TABLE tstsrc SHARED -PREHOOK: query: SHOW LOCKS tstsrc +LOCK_QUERYSTRING:LOCK TABLE tstsrc_n1 SHARED +PREHOOK: query: SHOW LOCKS tstsrc_n1 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrc +POSTHOOK: query: SHOW LOCKS tstsrc_n1 POSTHOOK: type: SHOWLOCKS -default@tstsrc SHARED -default@tstsrc SHARED -PREHOOK: query: UNLOCK TABLE tstsrc +default@tstsrc_n1 SHARED +default@tstsrc_n1 SHARED +PREHOOK: query: UNLOCK TABLE tstsrc_n1 PREHOOK: type: UNLOCKTABLE -POSTHOOK: query: UNLOCK TABLE tstsrc +POSTHOOK: query: UNLOCK TABLE tstsrc_n1 POSTHOOK: type: UNLOCKTABLE PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS @@ -128,15 +128,15 @@ PREHOOK: query: SHOW LOCKS extended PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS extended POSTHOOK: type: SHOWLOCKS -PREHOOK: query: SHOW LOCKS tstsrc +PREHOOK: query: SHOW LOCKS tstsrc_n1 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrc +POSTHOOK: query: SHOW LOCKS tstsrc_n1 POSTHOOK: type: SHOWLOCKS -PREHOOK: query: drop table tstsrc +PREHOOK: query: drop table tstsrc_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tstsrc -PREHOOK: Output: default@tstsrc -POSTHOOK: query: drop table tstsrc +PREHOOK: Input: default@tstsrc_n1 +PREHOOK: Output: default@tstsrc_n1 +POSTHOOK: query: drop table tstsrc_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tstsrc -POSTHOOK: Output: default@tstsrc +POSTHOOK: Input: default@tstsrc_n1 +POSTHOOK: Output: default@tstsrc_n1 diff --git a/ql/src/test/results/clientpositive/lock2.q.out b/ql/src/test/results/clientpositive/lock2.q.out index e699a8dc4b..0dbb25df2f 100644 --- a/ql/src/test/results/clientpositive/lock2.q.out +++ b/ql/src/test/results/clientpositive/lock2.q.out @@ -20,69 +20,69 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tstsrc POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: drop table tstsrcpart +PREHOOK: query: drop table tstsrcpart_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrcpart +POSTHOOK: query: drop table tstsrcpart_n0 POSTHOOK: type: 
DROPTABLE -PREHOOK: query: create table tstsrcpart like srcpart +PREHOOK: query: create table tstsrcpart_n0 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: create table tstsrcpart like srcpart +PREHOOK: Output: default@tstsrcpart_n0 +POSTHOOK: query: create table tstsrcpart_n0 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrcpart -PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') +POSTHOOK: Output: default@tstsrcpart_n0 +PREHOOK: query: insert overwrite table tstsrcpart_n0 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' and hr='11' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') +PREHOOK: Output: default@tstsrcpart_n0@ds=2008-04-08/hr=11 +POSTHOOK: query: insert overwrite table tstsrcpart_n0 partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' and hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tstsrcpart_n0@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: tstsrcpart_n0 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n0 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: LOCK TABLE tstsrc SHARED PREHOOK: type: LOCKTABLE POSTHOOK: query: LOCK TABLE tstsrc SHARED POSTHOOK: type: LOCKTABLE -PREHOOK: query: LOCK TABLE tstsrcpart SHARED +PREHOOK: query: LOCK TABLE tstsrcpart_n0 SHARED PREHOOK: type: LOCKTABLE -POSTHOOK: query: LOCK TABLE tstsrcpart SHARED +POSTHOOK: query: LOCK TABLE tstsrcpart_n0 SHARED POSTHOOK: type: LOCKTABLE -PREHOOK: query: LOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE +PREHOOK: query: LOCK TABLE tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE PREHOOK: type: LOCKTABLE -POSTHOOK: query: LOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE +POSTHOOK: query: LOCK TABLE tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE POSTHOOK: type: LOCKTABLE PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS default@tstsrc SHARED -default@tstsrcpart SHARED -default@tstsrcpart@ds=2008-04-08/hr=11 EXCLUSIVE -PREHOOK: query: SHOW LOCKS tstsrcpart +default@tstsrcpart_n0 SHARED +default@tstsrcpart_n0@ds=2008-04-08/hr=11 EXCLUSIVE +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 POSTHOOK: type: SHOWLOCKS -default@tstsrcpart SHARED -PREHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') +default@tstsrcpart_n0 SHARED +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') PREHOOK: type: 
SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') POSTHOOK: type: SHOWLOCKS -default@tstsrcpart@ds=2008-04-08/hr=11 EXCLUSIVE -PREHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') extended +default@tstsrcpart_n0@ds=2008-04-08/hr=11 EXCLUSIVE +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') extended PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') extended +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') extended POSTHOOK: type: SHOWLOCKS -default@tstsrcpart@ds=2008-04-08/hr=11 EXCLUSIVE +default@tstsrcpart_n0@ds=2008-04-08/hr=11 EXCLUSIVE #### A masked pattern was here #### LOCK_MODE:EXPLICIT -LOCK_QUERYSTRING:LOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE +LOCK_QUERYSTRING:LOCK TABLE tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE PREHOOK: query: UNLOCK TABLE tstsrc PREHOOK: type: UNLOCKTABLE POSTHOOK: query: UNLOCK TABLE tstsrc @@ -91,51 +91,51 @@ PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -default@tstsrcpart SHARED -default@tstsrcpart@ds=2008-04-08/hr=11 EXCLUSIVE -PREHOOK: query: SHOW LOCKS tstsrcpart +default@tstsrcpart_n0 SHARED +default@tstsrcpart_n0@ds=2008-04-08/hr=11 EXCLUSIVE +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 POSTHOOK: type: SHOWLOCKS -default@tstsrcpart SHARED -PREHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') +default@tstsrcpart_n0 SHARED +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') POSTHOOK: type: SHOWLOCKS -default@tstsrcpart@ds=2008-04-08/hr=11 EXCLUSIVE -PREHOOK: query: UNLOCK TABLE tstsrcpart +default@tstsrcpart_n0@ds=2008-04-08/hr=11 EXCLUSIVE +PREHOOK: query: UNLOCK TABLE tstsrcpart_n0 PREHOOK: type: UNLOCKTABLE -POSTHOOK: query: UNLOCK TABLE tstsrcpart +POSTHOOK: query: UNLOCK TABLE tstsrcpart_n0 POSTHOOK: type: UNLOCKTABLE PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -default@tstsrcpart@ds=2008-04-08/hr=11 EXCLUSIVE -PREHOOK: query: SHOW LOCKS tstsrcpart +default@tstsrcpart_n0@ds=2008-04-08/hr=11 EXCLUSIVE +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 POSTHOOK: type: SHOWLOCKS -PREHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') POSTHOOK: type: SHOWLOCKS -default@tstsrcpart@ds=2008-04-08/hr=11 EXCLUSIVE -PREHOOK: query: UNLOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') +default@tstsrcpart_n0@ds=2008-04-08/hr=11 EXCLUSIVE +PREHOOK: query: UNLOCK TABLE tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') PREHOOK: type: UNLOCKTABLE -POSTHOOK: query: UNLOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') +POSTHOOK: query: UNLOCK TABLE 
tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') POSTHOOK: type: UNLOCKTABLE PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -PREHOOK: query: SHOW LOCKS tstsrcpart +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 POSTHOOK: type: SHOWLOCKS -PREHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') +PREHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') +POSTHOOK: query: SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') POSTHOOK: type: SHOWLOCKS PREHOOK: query: drop table tstsrc PREHOOK: type: DROPTABLE @@ -145,11 +145,11 @@ POSTHOOK: query: drop table tstsrc POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@tstsrc POSTHOOK: Output: default@tstsrc -PREHOOK: query: drop table tstsrcpart +PREHOOK: query: drop table tstsrcpart_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: drop table tstsrcpart +PREHOOK: Input: default@tstsrcpart_n0 +PREHOOK: Output: default@tstsrcpart_n0 +POSTHOOK: query: drop table tstsrcpart_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Output: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n0 +POSTHOOK: Output: default@tstsrcpart_n0 diff --git a/ql/src/test/results/clientpositive/lock3.q.out b/ql/src/test/results/clientpositive/lock3.q.out index ec9f09d580..5d5d4badad 100644 --- a/ql/src/test/results/clientpositive/lock3.q.out +++ b/ql/src/test/results/clientpositive/lock3.q.out @@ -1,121 +1,121 @@ -PREHOOK: query: drop table tstsrcpart +PREHOOK: query: drop table tstsrcpart_n4 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrcpart +POSTHOOK: query: drop table tstsrcpart_n4 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tstsrcpart like srcpart +PREHOOK: query: create table tstsrcpart_n4 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: create table tstsrcpart like srcpart +PREHOOK: Output: default@tstsrcpart_n4 +POSTHOOK: query: create table tstsrcpart_n4 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrcpart +POSTHOOK: Output: default@tstsrcpart_n4 PREHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds='2008-04-08',hr='11') +insert overwrite table tstsrcpart_n4 partition (ds='2008-04-08',hr='11') select key, value where ds='2008-04-08' and hr='11' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 +PREHOOK: Output: default@tstsrcpart_n4@ds=2008-04-08/hr=11 POSTHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds='2008-04-08',hr='11') +insert overwrite table tstsrcpart_n4 partition (ds='2008-04-08',hr='11') select key, value where ds='2008-04-08' and hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE 
[(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tstsrcpart_n4@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table tstsrcpart_n4 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tstsrcpart +PREHOOK: Output: default@tstsrcpart_n4 POSTHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table tstsrcpart_n4 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tstsrcpart_n4@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@tstsrcpart_n4@ds=2008-04-08/hr=12 +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08' +insert overwrite table tstsrcpart_n4 partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08 +PREHOOK: Output: default@tstsrcpart_n4@ds=2008-04-08 POSTHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08' +insert overwrite table tstsrcpart_n4 partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08' POSTHOOK: type: 
QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tstsrcpart_n4@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@tstsrcpart_n4@ds=2008-04-08/hr=12 +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n4 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -PREHOOK: query: SHOW LOCKS tstsrcpart +PREHOOK: query: SHOW LOCKS tstsrcpart_n4 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart +POSTHOOK: query: SHOW LOCKS tstsrcpart_n4 POSTHOOK: type: SHOWLOCKS -PREHOOK: query: drop table tstsrcpart +PREHOOK: query: drop table tstsrcpart_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: drop table tstsrcpart +PREHOOK: Input: default@tstsrcpart_n4 +PREHOOK: Output: default@tstsrcpart_n4 +POSTHOOK: query: drop table tstsrcpart_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Output: default@tstsrcpart -PREHOOK: query: drop table tst1 +POSTHOOK: Input: default@tstsrcpart_n4 +POSTHOOK: Output: default@tstsrcpart_n4 +PREHOOK: query: drop table tst1_n4 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tst1 +POSTHOOK: query: drop table tst1_n4 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tst1 (key string, value string) partitioned by (a string, b string, c string, d string) +PREHOOK: query: create table tst1_n4 (key string, value string) partitioned by (a string, b string, c string, d string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tst1 -POSTHOOK: query: create table tst1 (key string, value string) partitioned by (a string, b string, c string, d string) +PREHOOK: Output: default@tst1_n4 +POSTHOOK: query: create table tst1_n4 (key string, value string) partitioned by (a string, b string, c string, d string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tst1 +POSTHOOK: Output: default@tst1_n4 PREHOOK: query: from srcpart -insert overwrite table tst1 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08' +insert overwrite table tst1_n4 partition (a='1', b='2', c, d) 
select key, value, ds, hr where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tst1@a=1/b=2 +PREHOOK: Output: default@tst1_n4@a=1/b=2 POSTHOOK: query: from srcpart -insert overwrite table tst1 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08' +insert overwrite table tst1_n4 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tst1@a=1/b=2/c=2008-04-08/d=11 -POSTHOOK: Output: default@tst1@a=1/b=2/c=2008-04-08/d=12 -POSTHOOK: Lineage: tst1 PARTITION(a=1,b=2,c=2008-04-08,d=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(a=1,b=2,c=2008-04-08,d=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(a=1,b=2,c=2008-04-08,d=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(a=1,b=2,c=2008-04-08,d=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: drop table tst1 +POSTHOOK: Output: default@tst1_n4@a=1/b=2/c=2008-04-08/d=11 +POSTHOOK: Output: default@tst1_n4@a=1/b=2/c=2008-04-08/d=12 +POSTHOOK: Lineage: tst1_n4 PARTITION(a=1,b=2,c=2008-04-08,d=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n4 PARTITION(a=1,b=2,c=2008-04-08,d=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n4 PARTITION(a=1,b=2,c=2008-04-08,d=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n4 PARTITION(a=1,b=2,c=2008-04-08,d=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: drop table tst1_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: drop table tst1 +PREHOOK: Input: default@tst1_n4 +PREHOOK: Output: default@tst1_n4 +POSTHOOK: query: drop table tst1_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tst1 -POSTHOOK: Output: default@tst1 +POSTHOOK: Input: default@tst1_n4 +POSTHOOK: Output: default@tst1_n4 diff --git a/ql/src/test/results/clientpositive/lock4.q.out b/ql/src/test/results/clientpositive/lock4.q.out index ec9f09d580..7deb6ee0da 100644 --- a/ql/src/test/results/clientpositive/lock4.q.out +++ b/ql/src/test/results/clientpositive/lock4.q.out @@ -1,121 +1,121 @@ -PREHOOK: query: drop table tstsrcpart +PREHOOK: query: drop table tstsrcpart_n3 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrcpart +POSTHOOK: query: drop table tstsrcpart_n3 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tstsrcpart like srcpart +PREHOOK: query: create table tstsrcpart_n3 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: create table tstsrcpart like srcpart +PREHOOK: Output: default@tstsrcpart_n3 +POSTHOOK: query: create table tstsrcpart_n3 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrcpart +POSTHOOK: Output: 
default@tstsrcpart_n3 PREHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds='2008-04-08',hr='11') +insert overwrite table tstsrcpart_n3 partition (ds='2008-04-08',hr='11') select key, value where ds='2008-04-08' and hr='11' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 +PREHOOK: Output: default@tstsrcpart_n3@ds=2008-04-08/hr=11 POSTHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds='2008-04-08',hr='11') +insert overwrite table tstsrcpart_n3 partition (ds='2008-04-08',hr='11') select key, value where ds='2008-04-08' and hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tstsrcpart_n3@ds=2008-04-08/hr=11 +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table tstsrcpart_n3 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tstsrcpart +PREHOOK: Output: default@tstsrcpart_n3 POSTHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' +insert overwrite table tstsrcpart_n3 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tstsrcpart_n3@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@tstsrcpart_n3@ds=2008-04-08/hr=12 +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: 
tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08' +insert overwrite table tstsrcpart_n3 partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08 +PREHOOK: Output: default@tstsrcpart_n3@ds=2008-04-08 POSTHOOK: query: from srcpart -insert overwrite table tstsrcpart partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08' +insert overwrite table tstsrcpart_n3 partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@tstsrcpart_n3@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@tstsrcpart_n3@ds=2008-04-08/hr=12 +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n3 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: SHOW LOCKS PREHOOK: type: SHOWLOCKS POSTHOOK: query: SHOW LOCKS POSTHOOK: type: SHOWLOCKS -PREHOOK: query: SHOW LOCKS tstsrcpart +PREHOOK: query: SHOW LOCKS tstsrcpart_n3 PREHOOK: type: SHOWLOCKS -POSTHOOK: query: SHOW LOCKS tstsrcpart +POSTHOOK: query: SHOW LOCKS tstsrcpart_n3 POSTHOOK: type: SHOWLOCKS -PREHOOK: query: drop table tstsrcpart +PREHOOK: query: drop table tstsrcpart_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: drop table tstsrcpart +PREHOOK: Input: default@tstsrcpart_n3 +PREHOOK: Output: default@tstsrcpart_n3 +POSTHOOK: query: drop table tstsrcpart_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Output: default@tstsrcpart -PREHOOK: query: drop table tst1 +POSTHOOK: Input: default@tstsrcpart_n3 +POSTHOOK: Output: default@tstsrcpart_n3 +PREHOOK: query: 
drop table tst1_n3 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tst1 +POSTHOOK: query: drop table tst1_n3 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tst1 (key string, value string) partitioned by (a string, b string, c string, d string) +PREHOOK: query: create table tst1_n3 (key string, value string) partitioned by (a string, b string, c string, d string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tst1 -POSTHOOK: query: create table tst1 (key string, value string) partitioned by (a string, b string, c string, d string) +PREHOOK: Output: default@tst1_n3 +POSTHOOK: query: create table tst1_n3 (key string, value string) partitioned by (a string, b string, c string, d string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tst1 +POSTHOOK: Output: default@tst1_n3 PREHOOK: query: from srcpart -insert overwrite table tst1 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08' +insert overwrite table tst1_n3 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tst1@a=1/b=2 +PREHOOK: Output: default@tst1_n3@a=1/b=2 POSTHOOK: query: from srcpart -insert overwrite table tst1 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08' +insert overwrite table tst1_n3 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tst1@a=1/b=2/c=2008-04-08/d=11 -POSTHOOK: Output: default@tst1@a=1/b=2/c=2008-04-08/d=12 -POSTHOOK: Lineage: tst1 PARTITION(a=1,b=2,c=2008-04-08,d=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(a=1,b=2,c=2008-04-08,d=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(a=1,b=2,c=2008-04-08,d=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tst1 PARTITION(a=1,b=2,c=2008-04-08,d=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: drop table tst1 +POSTHOOK: Output: default@tst1_n3@a=1/b=2/c=2008-04-08/d=11 +POSTHOOK: Output: default@tst1_n3@a=1/b=2/c=2008-04-08/d=12 +POSTHOOK: Lineage: tst1_n3 PARTITION(a=1,b=2,c=2008-04-08,d=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n3 PARTITION(a=1,b=2,c=2008-04-08,d=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n3 PARTITION(a=1,b=2,c=2008-04-08,d=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tst1_n3 PARTITION(a=1,b=2,c=2008-04-08,d=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: drop table tst1_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tst1 -PREHOOK: Output: default@tst1 -POSTHOOK: query: drop table tst1 +PREHOOK: Input: default@tst1_n3 +PREHOOK: Output: default@tst1_n3 +POSTHOOK: query: drop table tst1_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: 
default@tst1 -POSTHOOK: Output: default@tst1 +POSTHOOK: Input: default@tst1_n3 +POSTHOOK: Output: default@tst1_n3 diff --git a/ql/src/test/results/clientpositive/mapjoin2.q.out b/ql/src/test/results/clientpositive/mapjoin2.q.out index f1787427e4..2e54591bd0 100644 --- a/ql/src/test/results/clientpositive/mapjoin2.q.out +++ b/ql/src/test/results/clientpositive/mapjoin2.q.out @@ -1,59 +1,59 @@ -PREHOOK: query: create table tbl (n bigint, t string) +PREHOOK: query: create table tbl_n0 (n bigint, t string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tbl -POSTHOOK: query: create table tbl (n bigint, t string) +PREHOOK: Output: default@tbl_n0 +POSTHOOK: query: create table tbl_n0 (n bigint, t string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tbl -PREHOOK: query: insert into tbl values (1, 'one') +POSTHOOK: Output: default@tbl_n0 +PREHOOK: query: insert into tbl_n0 values (1, 'one') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@tbl -POSTHOOK: query: insert into tbl values (1, 'one') +PREHOOK: Output: default@tbl_n0 +POSTHOOK: query: insert into tbl_n0 values (1, 'one') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@tbl -POSTHOOK: Lineage: tbl.n SCRIPT [] -POSTHOOK: Lineage: tbl.t SCRIPT [] -PREHOOK: query: insert into tbl values(2, 'two') +POSTHOOK: Output: default@tbl_n0 +POSTHOOK: Lineage: tbl_n0.n SCRIPT [] +POSTHOOK: Lineage: tbl_n0.t SCRIPT [] +PREHOOK: query: insert into tbl_n0 values(2, 'two') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@tbl -POSTHOOK: query: insert into tbl values(2, 'two') +PREHOOK: Output: default@tbl_n0 +POSTHOOK: query: insert into tbl_n0 values(2, 'two') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@tbl -POSTHOOK: Lineage: tbl.n SCRIPT [] -POSTHOOK: Lineage: tbl.t SCRIPT [] +POSTHOOK: Output: default@tbl_n0 +POSTHOOK: Lineage: tbl_n0.n SCRIPT [] +POSTHOOK: Lineage: tbl_n0.t SCRIPT [] Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Stage-3:MAPRED' is a cross product -PREHOOK: query: select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a left outer join (select * from tbl where 1 = 2) b on a.n = b.n +PREHOOK: query: select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl_n0 where n = 1) a left outer join (select * from tbl_n0 where 1 = 2) b on a.n = b.n PREHOOK: type: QUERY -PREHOOK: Input: default@tbl +PREHOOK: Input: default@tbl_n0 #### A masked pattern was here #### -POSTHOOK: query: select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a left outer join (select * from tbl where 1 = 2) b on a.n = b.n +POSTHOOK: query: select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl_n0 where n = 1) a left outer join (select * from tbl_n0 where 1 = 2) b on a.n = b.n POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl +POSTHOOK: Input: default@tbl_n0 #### A masked pattern was here #### 1 one true true Warning: Map Join MAPJOIN[13][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product -PREHOOK: query: select isnull(a.n), isnull(a.t), b.n, b.t from (select * from tbl where 2 = 1) a right outer join (select * from tbl where n = 2) b on a.n = b.n +PREHOOK: query: select isnull(a.n), isnull(a.t), b.n, b.t from (select * from tbl_n0 where 2 = 1) a right outer join (select * from tbl_n0 where n = 2) b on a.n = b.n PREHOOK: type: QUERY -PREHOOK: Input: default@tbl +PREHOOK: Input: default@tbl_n0 #### A masked pattern was here #### -POSTHOOK: query: select isnull(a.n), isnull(a.t), b.n, b.t from (select * from tbl where 2 = 1) a right outer join (select * from tbl where n = 2) b on a.n = b.n +POSTHOOK: query: select isnull(a.n), isnull(a.t), b.n, b.t from (select * from tbl_n0 where 2 = 1) a right outer join (select * from tbl_n0 where n = 2) b on a.n = b.n POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl +POSTHOOK: Input: default@tbl_n0 #### A masked pattern was here #### true true 2 two Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: select isnull(a.n), isnull(a.t), isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a full outer join (select * from tbl where n = 2) b on a.n = b.n +PREHOOK: query: select isnull(a.n), isnull(a.t), isnull(b.n), isnull(b.t) from (select * from tbl_n0 where n = 1) a full outer join (select * from tbl_n0 where n = 2) b on a.n = b.n PREHOOK: type: QUERY -PREHOOK: Input: default@tbl +PREHOOK: Input: default@tbl_n0 #### A masked pattern was here #### -POSTHOOK: query: select isnull(a.n), isnull(a.t), isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a full outer join (select * from tbl where n = 2) b on a.n = b.n +POSTHOOK: query: select isnull(a.n), isnull(a.t), isnull(b.n), isnull(b.t) from (select * from tbl_n0 where n = 1) a full outer join (select * from tbl_n0 where n = 2) b on a.n = b.n POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl +POSTHOOK: Input: default@tbl_n0 #### A masked pattern was here #### false false true true true true false false diff --git a/ql/src/test/results/clientpositive/mapjoin46.q.out b/ql/src/test/results/clientpositive/mapjoin46.q.out index 61b579a305..b3d418c7e9 100644 --- a/ql/src/test/results/clientpositive/mapjoin46.q.out +++ b/ql/src/test/results/clientpositive/mapjoin46.q.out @@ -1,54 +1,54 @@ -PREHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING) +PREHOOK: query: CREATE TABLE test1_n4 (key INT, value INT, col_1 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test1 -POSTHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING) +PREHOOK: Output: default@test1_n4 +POSTHOOK: query: CREATE TABLE test1_n4 (key INT, value INT, col_1 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test1 -PREHOOK: query: INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +POSTHOOK: Output: default@test1_n4 +PREHOOK: query: INSERT INTO test1_n4 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test1 -POSTHOOK: query: INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +PREHOOK: Output: default@test1_n4 +POSTHOOK: query: INSERT INTO test1_n4 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car') POSTHOOK: type: QUERY POSTHOOK: 
Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test1 -POSTHOOK: Lineage: test1.col_1 SCRIPT [] -POSTHOOK: Lineage: test1.key SCRIPT [] -POSTHOOK: Lineage: test1.value SCRIPT [] -PREHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING) +POSTHOOK: Output: default@test1_n4 +POSTHOOK: Lineage: test1_n4.col_1 SCRIPT [] +POSTHOOK: Lineage: test1_n4.key SCRIPT [] +POSTHOOK: Lineage: test1_n4.value SCRIPT [] +PREHOOK: query: CREATE TABLE test2_n2 (key INT, value INT, col_2 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test2 -POSTHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING) +PREHOOK: Output: default@test2_n2 +POSTHOOK: query: CREATE TABLE test2_n2 (key INT, value INT, col_2 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test2 -PREHOOK: query: INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), +POSTHOOK: Output: default@test2_n2 +PREHOOK: query: INSERT INTO test2_n2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test2 -POSTHOOK: query: INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), +PREHOOK: Output: default@test2_n2 +POSTHOOK: query: INSERT INTO test2_n2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test2 -POSTHOOK: Lineage: test2.col_2 SCRIPT [] -POSTHOOK: Lineage: test2.key SCRIPT [] -POSTHOOK: Lineage: test2.value SCRIPT [] +POSTHOOK: Output: default@test2_n2 +POSTHOOK: Lineage: test2_n2.col_2 SCRIPT [] +POSTHOOK: Lineage: test2_n2.key SCRIPT [] +POSTHOOK: Lineage: test2_n2.value SCRIPT [] PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -59,13 +59,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -80,7 +80,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -111,18 +111,18 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value) POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None NULL NULL NULL 98 NULL None NULL NULL NULL @@ -134,17 +134,17 @@ NULL NULL None NULL NULL NULL 101 2 Car 103 2 Ema PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -155,13 +155,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key BETWEEN 100 AND 102 (type: boolean) @@ -182,7 +182,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -216,22 +216,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None NULL NULL NULL 98 NULL None NULL NULL NULL @@ -242,15 +242,15 @@ NULL NULL None NULL NULL NULL Warning: Map Join MAPJOIN[11][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -261,13 +261,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key BETWEEN 100 AND 102 (type: boolean) @@ -288,7 +288,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -323,20 +323,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102 + AND test2_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None NULL NULL NULL 98 NULL None NULL NULL NULL @@ -346,13 +346,13 @@ NULL NULL None NULL NULL NULL 101 2 Car 102 2 Del PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value AND true) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value AND true) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -363,13 +363,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n4 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n4 TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -384,7 +384,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: 
NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -415,18 +415,18 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value AND true) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value AND true) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### 99 2 Mat 102 2 Del 101 2 Car 102 2 Del @@ -437,13 +437,13 @@ NULL NULL NULL 105 NULL None Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -454,13 +454,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -478,7 +478,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -513,18 +513,18 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None NULL NULL NULL 98 NULL None NULL NULL NULL @@ -541,17 +541,17 @@ NULL NULL None NULL NULL NULL Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -562,13 +562,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -583,7 +583,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -616,22 +616,22 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None 102 2 Del 98 NULL None 102 2 Del @@ -649,15 +649,15 @@ NULL NULL None 102 2 Del Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -668,13 +668,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -689,7 +689,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -722,20 +722,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None NULL NULL NULL 98 NULL None NULL NULL NULL @@ -753,15 +753,15 @@ NULL NULL None NULL NULL NULL Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -772,13 +772,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -793,7 +793,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -826,20 +826,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None 102 2 Del 98 NULL None 102 2 Del @@ -851,17 +851,17 @@ NULL NULL None 102 2 Del 101 2 Car 103 2 Ema PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -872,13 +872,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: 
int), value (type: int), col_2 (type: string) @@ -893,7 +893,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -925,22 +925,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 LEFT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None NULL NULL NULL 98 NULL None NULL NULL NULL @@ -952,17 +952,17 @@ NULL NULL None NULL NULL NULL Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -973,13 +973,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n4 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n4 TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -994,7 +994,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1027,22 +1027,22 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None 102 2 Del 98 NULL None 102 2 Del @@ -1060,15 +1060,15 @@ NULL NULL None 102 2 Del Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -1079,13 +1079,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n4 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n4 TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1100,7 +1100,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1133,20 +1133,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### 99 2 Mat 102 2 Del 100 1 Bob 102 2 Del @@ -1161,15 +1161,15 @@ POSTHOOK: Input: default@test2 Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -1180,13 +1180,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n4 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n4 TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1201,7 +1201,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1234,20 +1234,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL None 102 2 Del 98 NULL None 102 2 Del @@ -1261,17 +1261,17 @@ NULL NULL NULL 104 3 Fli NULL NULL NULL 105 NULL None PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -1282,13 +1282,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n4 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n4 TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1303,7 +1303,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1335,22 +1335,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 RIGHT OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### 99 2 
Mat 102 2 Del 101 2 Car 102 2 Del @@ -1360,17 +1360,17 @@ NULL NULL NULL 105 NULL None Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1381,7 +1381,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1392,7 +1392,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1428,22 +1428,22 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### 101 2 Car 105 NULL None 101 2 Car 104 3 Fli @@ -1461,15 +1461,15 @@ NULL NULL None 102 2 Del Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1480,7 +1480,7 @@ STAGE PLANS: Map 
Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1491,7 +1491,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1527,20 +1527,20 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test1_n4.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### 101 2 Car 105 NULL None 101 2 Car 104 3 Fli @@ -1558,15 +1558,15 @@ NULL NULL None NULL NULL NULL Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1577,7 +1577,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1588,7 +1588,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1624,20 +1624,20 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 
+PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + OR test2_n2.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### 101 2 Car 103 2 Ema 101 2 Car 102 2 Del @@ -1651,17 +1651,17 @@ NULL NULL NULL 105 NULL None NULL NULL NULL 104 3 Fli PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1672,7 +1672,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1685,7 +1685,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1722,22 +1722,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n4 FULL OUTER JOIN test2_n2 +ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 +POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### 98 NULL None NULL NULL NULL NULL NULL None NULL NULL NULL @@ -1752,40 +1752,40 @@ Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'S PREHOOK: query: EXPLAIN SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR 
test2.key between 100 and 102)) + SELECT test1_n4.key AS key1, test1_n4.value AS value1, test1_n4.col_1 AS col_1, + test2_n2.key AS key2, test2_n2.value AS value2, test2_n2.col_2 AS col_2 + FROM test1_n4 RIGHT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key3, test1_n4.value AS value3, test1_n4.col_1 AS col_3, + test2_n2.key AS key4, test2_n2.value AS value4, test2_n2.col_2 AS col_4 + FROM test1_n4 LEFT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key1, test1_n4.value AS value1, test1_n4.col_1 AS col_1, + test2_n2.key AS key2, test2_n2.value AS value2, test2_n2.col_2 AS col_2 + FROM test1_n4 RIGHT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key3, test1_n4.value AS value3, test1_n4.col_1 AS col_3, + test2_n2.key AS key4, test2_n2.value AS value4, test2_n2.col_2 AS col_4 + FROM test1_n4 LEFT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2) POSTHOOK: type: QUERY @@ -1798,16 +1798,16 @@ STAGE PLANS: Stage: Stage-8 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n4 Fetch Operator limit: -1 - $hdt$_2:$hdt$_3:test2 + $hdt$_2:$hdt$_3:test2_n2 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n4 TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1817,9 +1817,9 @@ STAGE PLANS: keys: 0 _col1 (type: int) 1 _col1 (type: int) - $hdt$_2:$hdt$_3:test2 + $hdt$_2:$hdt$_3:test2_n2 TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1834,7 +1834,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n2 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1854,7 
+1854,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 61 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string) TableScan - alias: test1 + alias: test1_n4 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1902,47 +1902,47 @@ STAGE PLANS: Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key1, test1_n4.value AS value1, test1_n4.col_1 AS col_1, + test2_n2.key AS key2, test2_n2.value AS value2, test2_n2.col_2 AS col_2 + FROM test1_n4 RIGHT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key3, test1_n4.value AS value3, test1_n4.col_1 AS col_3, + test2_n2.key AS key4, test2_n2.value AS value4, test2_n2.col_2 AS col_4 + FROM test1_n4 LEFT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n4 +PREHOOK: Input: default@test2_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM ( - SELECT test1.key AS key1, test1.value AS value1, test1.col_1 AS col_1, - test2.key AS key2, test2.value AS value2, test2.col_2 AS col_2 - FROM test1 RIGHT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key1, test1_n4.value AS value1, test1_n4.col_1 AS col_1, + test2_n2.key AS key2, test2_n2.value AS value2, test2_n2.col_2 AS col_2 + FROM test1_n4 RIGHT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq1 FULL OUTER JOIN ( - SELECT test1.key AS key3, test1.value AS value3, test1.col_1 AS col_3, - test2.key AS key4, test2.value AS value4, test2.col_2 AS col_4 - FROM test1 LEFT OUTER JOIN test2 - ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) + SELECT test1_n4.key AS key3, test1_n4.value AS value3, test1_n4.col_1 AS col_3, + test2_n2.key AS key4, test2_n2.value AS value4, test2_n2.col_2 AS col_4 + FROM test1_n4 LEFT OUTER JOIN test2_n2 + ON (test1_n4.value=test2_n2.value + AND (test1_n4.key between 100 and 102 + OR test2_n2.key between 100 and 102)) ) sq2 ON (sq1.value1 is null or sq2.value4 is null and sq2.value3 != sq1.value2) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n4 
+POSTHOOK: Input: default@test2_n2 #### A masked pattern was here #### NULL NULL NULL 105 NULL None 101 2 Car 103 2 Ema NULL NULL NULL 105 NULL None 101 2 Car 102 2 Del diff --git a/ql/src/test/results/clientpositive/mapjoin_addjar.q.out b/ql/src/test/results/clientpositive/mapjoin_addjar.q.out index c73b9857ea..fc1ff5ea14 100644 --- a/ql/src/test/results/clientpositive/mapjoin_addjar.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_addjar.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: CREATE TABLE t1(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE t1_n24(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: CREATE TABLE t1(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE +PREHOOK: Output: default@t1_n24 +POSTHOOK: query: CREATE TABLE t1_n24(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1_cb.txt' INTO TABLE t1 +POSTHOOK: Output: default@t1_n24 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1_cb.txt' INTO TABLE t1_n24 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1_cb.txt' INTO TABLE t1 +PREHOOK: Output: default@t1_n24 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1_cb.txt' INTO TABLE t1_n24 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: select * from t1 l join t1 r on l.key =r.key +POSTHOOK: Output: default@t1_n24 +PREHOOK: query: select * from t1_n24 l join t1_n24 r on l.key =r.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n24 #### A masked pattern was here #### -POSTHOOK: query: select * from t1 l join t1 r on l.key =r.key +POSTHOOK: query: select * from t1_n24 l join t1_n24 r on l.key =r.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n24 #### A masked pattern was here #### 238 val_238 238 val_238 238 val_238 238 val_238 @@ -1050,11 +1050,11 @@ POSTHOOK: Input: default@t1 200 val_200 200 val_200 97 val_97 97 val_97 97 val_97 97 val_97 -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n24 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n24 +PREHOOK: Output: default@t1_n24 +POSTHOOK: query: drop table t1_n24 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n24 +POSTHOOK: Output: default@t1_n24 diff --git a/ql/src/test/results/clientpositive/mapjoin_hook.q.out b/ql/src/test/results/clientpositive/mapjoin_hook.q.out index f80a26a252..3b75326920 100644 --- a/ql/src/test/results/clientpositive/mapjoin_hook.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_hook.q.out @@ -1,34 +1,34 @@ -PREHOOK: query: drop table dest1 +PREHOOK: query: drop table dest1_n144 PREHOOK: type: DROPTABLE RUN: Stage-0:DDL -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n144(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: 
CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n144 RUN: Stage-0:DDL -PREHOOK: query: INSERT OVERWRITE TABLE dest1 +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n144 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n144 [MapJoinCounter PostHook] COMMON_JOIN: 0 HINTED_MAPJOIN: 1 HINTED_MAPJOIN_LOCAL: 1 CONVERTED_MAPJOIN: 0 CONVERTED_MAPJOIN_LOCAL: 0 BACKUP_COMMON_JOIN: 0 RUN: Stage-6:MAPREDLOCAL RUN: Stage-2:MAPRED RUN: Stage-0:MOVE RUN: Stage-3:STATS PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n144 SELECT src1.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n144 [MapJoinCounter PostHook] COMMON_JOIN: 0 HINTED_MAPJOIN: 1 HINTED_MAPJOIN_LOCAL: 1 CONVERTED_MAPJOIN: 0 CONVERTED_MAPJOIN_LOCAL: 0 BACKUP_COMMON_JOIN: 0 RUN: Stage-6:MAPREDLOCAL RUN: Stage-5:MAPRED RUN: Stage-0:MOVE RUN: Stage-2:STATS PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest1_n144 SELECT src1.key, src2.value where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11') PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -37,7 +37,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n144 Hive Runtime Error: Map local work exhausted memory FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask @@ -48,10 +48,10 @@ RUN: Stage-1:MAPRED RUN: Stage-0:MOVE RUN: Stage-2:STATS PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) -INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value +INSERT OVERWRITE TABLE dest1_n144 SELECT src1.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n144 Hive Runtime Error: Map local work exhausted memory FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask diff --git a/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out b/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out index 4df5f939ec..dfa9387c15 100644 --- a/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out @@ -26,40 +26,40 @@ POSTHOOK: Input: default@src1 POSTHOOK: Output: default@dest_1 POSTHOOK: Lineage: dest_1.key SIMPLE [] POSTHOOK: Lineage: dest_1.value SIMPLE [] -PREHOOK: query: create table dest_2 (key STRING, value STRING) stored as textfile +PREHOOK: query: create table dest_2_n0 (key STRING, value STRING) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_2 -POSTHOOK: query: 
create table dest_2 (key STRING, value STRING) stored as textfile +PREHOOK: Output: default@dest_2_n0 +POSTHOOK: query: create table dest_2_n0 (key STRING, value STRING) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_2 -PREHOOK: query: insert into table dest_2 select * from dest_1 +POSTHOOK: Output: default@dest_2_n0 +PREHOOK: query: insert into table dest_2_n0 select * from dest_1 PREHOOK: type: QUERY PREHOOK: Input: default@dest_1 -PREHOOK: Output: default@dest_2 -POSTHOOK: query: insert into table dest_2 select * from dest_1 +PREHOOK: Output: default@dest_2_n0 +POSTHOOK: query: insert into table dest_2_n0 select * from dest_1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_1 -POSTHOOK: Output: default@dest_2 -POSTHOOK: Lineage: dest_2.key SIMPLE [(dest_1)dest_1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest_2.value SIMPLE [(dest_1)dest_1.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Output: default@dest_2_n0 +POSTHOOK: Lineage: dest_2_n0.key SIMPLE [(dest_1)dest_1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest_2_n0.value SIMPLE [(dest_1)dest_1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: SELECT * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@dest_1 -PREHOOK: Input: default@dest_2 +PREHOOK: Input: default@dest_2_n0 PREHOOK: Input: default@src1 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_1 -POSTHOOK: Input: default@dest_2 +POSTHOOK: Input: default@dest_2_n0 POSTHOOK: Input: default@src1 #### A masked pattern was here #### @@ -230,13 +230,13 @@ NULL NULL 333444 555666 333444 555666 PREHOOK: query: explain SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY POSTHOOK: query: explain SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -322,20 +322,20 @@ STAGE PLANS: PREHOOK: query: SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@dest_1 -PREHOOK: Input: default@dest_2 +PREHOOK: Input: default@dest_2_n0 PREHOOK: Input: default@src1 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) 
SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_1 -POSTHOOK: Input: default@dest_2 +POSTHOOK: Input: default@dest_2_n0 POSTHOOK: Input: default@src1 #### A masked pattern was here #### @@ -505,20 +505,20 @@ POSTHOOK: Input: default@src1 NULL NULL 333444 555666 333444 555666 PREHOOK: query: SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src1.key = src3.key) + JOIN dest_2_n0 src3 ON (src1.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@dest_1 -PREHOOK: Input: default@dest_2 +PREHOOK: Input: default@dest_2_n0 PREHOOK: Input: default@src1 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ mapjoin(src1, src2) */ * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src1.key = src3.key) + JOIN dest_2_n0 src3 ON (src1.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_1 -POSTHOOK: Input: default@dest_2 +POSTHOOK: Input: default@dest_2_n0 POSTHOOK: Input: default@src1 #### A masked pattern was here #### @@ -687,20 +687,20 @@ POSTHOOK: Input: default@src1 369 369 369 PREHOOK: query: SELECT * FROM src1 LEFT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src1.key = src3.key) + JOIN dest_2_n0 src3 ON (src1.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@dest_1 -PREHOOK: Input: default@dest_2 +PREHOOK: Input: default@dest_2_n0 PREHOOK: Input: default@src1 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM src1 LEFT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src1.key = src3.key) + JOIN dest_2_n0 src3 ON (src1.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_1 -POSTHOOK: Input: default@dest_2 +POSTHOOK: Input: default@dest_2_n0 POSTHOOK: Input: default@src1 #### A masked pattern was here #### @@ -869,20 +869,20 @@ POSTHOOK: Input: default@src1 369 369 369 PREHOOK: query: SELECT * FROM src1 LEFT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@dest_1 -PREHOOK: Input: default@dest_2 +PREHOOK: Input: default@dest_2_n0 PREHOOK: Input: default@src1 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM src1 LEFT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_1 -POSTHOOK: Input: default@dest_2 +POSTHOOK: Input: default@dest_2_n0 POSTHOOK: Input: default@src1 #### A masked pattern was here #### @@ -1052,13 +1052,13 @@ POSTHOOK: Input: default@src1 PREHOOK: query: explain SELECT * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY POSTHOOK: query: explain SELECT * FROM src1 
RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1144,20 +1144,20 @@ STAGE PLANS: PREHOOK: query: SELECT * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY PREHOOK: Input: default@dest_1 -PREHOOK: Input: default@dest_2 +PREHOOK: Input: default@dest_2_n0 PREHOOK: Input: default@src1 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM src1 RIGHT OUTER JOIN dest_1 src2 ON (src1.key = src2.key) - JOIN dest_2 src3 ON (src2.key = src3.key) + JOIN dest_2_n0 src3 ON (src2.key = src3.key) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_1 -POSTHOOK: Input: default@dest_2 +POSTHOOK: Input: default@dest_2_n0 POSTHOOK: Input: default@src1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/mapreduce1.q.out b/ql/src/test/results/clientpositive/mapreduce1.q.out index ca27365287..d4c3544689 100644 --- a/ql/src/test/results/clientpositive/mapreduce1.q.out +++ b/ql/src/test/results/clientpositive/mapreduce1.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n100(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n100 +POSTHOOK: query: CREATE TABLE dest1_n100(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n100 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n100 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey @@ -16,7 +16,7 @@ SORT BY ten, one PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n100 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey @@ -64,7 +64,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n100 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string) outputColumnNames: key, ten, one, value @@ -89,7 +89,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n100 Stage: Stage-2 Stats Work @@ -97,7 +97,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, ten, one, value Column Types: int, int, int, string - Table: default.dest1 + Table: default.dest1_n100 Stage: 
Stage-3 Map Reduce @@ -123,34 +123,34 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n100 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey SORT BY ten, one PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n100 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n100 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey SORT BY ten, one POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n100 +POSTHOOK: Lineage: dest1_n100.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n100.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n100.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n100.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n100.* FROM dest1_n100 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n100 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n100.* FROM dest1_n100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n100 #### A masked pattern was here #### 0 0 0 val_0 0 0 0 val_0 diff --git a/ql/src/test/results/clientpositive/mapreduce2.q.out b/ql/src/test/results/clientpositive/mapreduce2.q.out index 1022027065..0260c3ae6e 100644 --- a/ql/src/test/results/clientpositive/mapreduce2.q.out +++ b/ql/src/test/results/clientpositive/mapreduce2.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n137(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n137 +POSTHOOK: query: CREATE TABLE dest1_n137(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
-POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n137 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n137 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n137 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey @@ -61,7 +61,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n137 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string) outputColumnNames: key, ten, one, value @@ -86,7 +86,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n137 Stage: Stage-2 Stats Work @@ -94,7 +94,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, ten, one, value Column Types: int, int, int, string - Table: default.dest1 + Table: default.dest1_n137 Stage: Stage-3 Map Reduce @@ -120,32 +120,32 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n137 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n137 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n137 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT * FROM (SELECT dest1.* FROM dest1 DISTRIBUTE BY key SORT BY key, ten, one, value) T ORDER BY key +POSTHOOK: Output: default@dest1_n137 +POSTHOOK: Lineage: dest1_n137.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n137.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n137.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, 
type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n137.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT * FROM (SELECT dest1_n137.* FROM dest1_n137 DISTRIBUTE BY key SORT BY key, ten, one, value) T ORDER BY key PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n137 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM (SELECT dest1.* FROM dest1 DISTRIBUTE BY key SORT BY key, ten, one, value) T ORDER BY key +POSTHOOK: query: SELECT * FROM (SELECT dest1_n137.* FROM dest1_n137 DISTRIBUTE BY key SORT BY key, ten, one, value) T ORDER BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n137 #### A masked pattern was here #### 0 0 0 val_0 0 0 0 val_0 diff --git a/ql/src/test/results/clientpositive/mapreduce3.q.out b/ql/src/test/results/clientpositive/mapreduce3.q.out index dbc0bd835f..4001e5f9db 100644 --- a/ql/src/test/results/clientpositive/mapreduce3.q.out +++ b/ql/src/test/results/clientpositive/mapreduce3.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n20(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n20 +POSTHOOK: query: CREATE TABLE dest1_n20(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n20 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n20 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) SORT BY tvalue, tkey PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n20 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) SORT BY tvalue, tkey @@ -61,7 +61,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n20 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string) outputColumnNames: key, ten, one, value @@ -86,7 +86,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n20 Stage: Stage-2 Stats Work @@ -94,7 +94,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, ten, one, value Column Types: int, int, int, string - Table: default.dest1 + Table: default.dest1_n20 Stage: Stage-3 Map Reduce @@ -120,32 +120,32 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n20 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) SORT BY tvalue, tkey PREHOOK: type: QUERY PREHOOK: 
Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n20 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n20 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) SORT BY tvalue, tkey POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n20 +POSTHOOK: Lineage: dest1_n20.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n20.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n20.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n20.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n20.* FROM dest1_n20 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n20 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n20.* FROM dest1_n20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n20 #### A masked pattern was here #### 0 0 0 val_0 0 0 0 val_0 diff --git a/ql/src/test/results/clientpositive/mapreduce4.q.out b/ql/src/test/results/clientpositive/mapreduce4.q.out index 2dfa51c8af..097f28e5d5 100644 --- a/ql/src/test/results/clientpositive/mapreduce4.q.out +++ b/ql/src/test/results/clientpositive/mapreduce4.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n78(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n78 +POSTHOOK: query: CREATE TABLE dest1_n78(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n78 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n78 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey @@ -16,7 +16,7 @@ SORT BY ten DESC, one ASC PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE 
TABLE dest1 +INSERT OVERWRITE TABLE dest1_n78 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey @@ -64,7 +64,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n78 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string) outputColumnNames: key, ten, one, value @@ -89,7 +89,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n78 Stage: Stage-2 Stats Work @@ -97,7 +97,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, ten, one, value Column Types: int, int, int, string - Table: default.dest1 + Table: default.dest1_n78 Stage: Stage-3 Map Reduce @@ -123,34 +123,34 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n78 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey SORT BY ten DESC, one ASC PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n78 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n78 MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey SORT BY ten DESC, one ASC POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n78 +POSTHOOK: Lineage: dest1_n78.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n78.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n78.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n78.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n78.* FROM dest1_n78 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n78 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 
+POSTHOOK: query: SELECT dest1_n78.* FROM dest1_n78 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n78 #### A masked pattern was here #### 90 9 0 val_90 90 9 0 val_90 diff --git a/ql/src/test/results/clientpositive/mapreduce5.q.out b/ql/src/test/results/clientpositive/mapreduce5.q.out index 06db053cd1..9409818fdc 100644 --- a/ql/src/test/results/clientpositive/mapreduce5.q.out +++ b/ql/src/test/results/clientpositive/mapreduce5.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n110(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n110 +POSTHOOK: query: CREATE TABLE dest1_n110(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n110 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n110 SELECT src.key as c1, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value as c4 DISTRIBUTE BY c4, c1 SORT BY c2 DESC, c3 ASC PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n110 SELECT src.key as c1, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value as c4 DISTRIBUTE BY c4, c1 SORT BY c2 DESC, c3 ASC @@ -56,7 +56,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n110 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string) outputColumnNames: key, ten, one, value @@ -81,7 +81,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n110 Stage: Stage-2 Stats Work @@ -89,7 +89,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, ten, one, value Column Types: int, int, int, string - Table: default.dest1 + Table: default.dest1_n110 Stage: Stage-3 Map Reduce @@ -115,32 +115,32 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n110 SELECT src.key as c1, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value as c4 DISTRIBUTE BY c4, c1 SORT BY c2 DESC, c3 ASC PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n110 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n110 SELECT src.key as c1, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value as c4 DISTRIBUTE BY c4, c1 SORT BY c2 DESC, c3 ASC POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.one EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), 
] -POSTHOOK: Lineage: dest1.ten EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n110 +POSTHOOK: Lineage: dest1_n110.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n110.one EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n110.ten EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n110.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n110.* FROM dest1_n110 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n110 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n110.* FROM dest1_n110 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n110 #### A masked pattern was here #### 490 49 0 val_490 491 49 1 val_491 diff --git a/ql/src/test/results/clientpositive/mapreduce6.q.out b/ql/src/test/results/clientpositive/mapreduce6.q.out index a0780b87e3..9f64e33078 100644 --- a/ql/src/test/results/clientpositive/mapreduce6.q.out +++ b/ql/src/test/results/clientpositive/mapreduce6.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n143(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n143 +POSTHOOK: query: CREATE TABLE dest1_n143(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n143 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n143 SELECT src.key, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value DISTRIBUTE BY value, key SORT BY c2 DESC, c3 ASC PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n143 SELECT src.key, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value DISTRIBUTE BY value, key SORT BY c2 DESC, c3 ASC @@ -56,7 +56,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n143 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string) outputColumnNames: key, ten, one, value @@ -81,7 +81,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n143 Stage: Stage-2 Stats Work @@ -89,7 +89,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, ten, one, value Column Types: int, int, int, string - Table: default.dest1 + Table: default.dest1_n143 Stage: Stage-3 Map Reduce @@ -115,32 +115,32 @@ STAGE 
PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n143 SELECT src.key, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value DISTRIBUTE BY value, key SORT BY c2 DESC, c3 ASC PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n143 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n143 SELECT src.key, CAST(src.key / 10 AS INT) as c2, CAST(src.key % 10 AS INT) as c3, src.value DISTRIBUTE BY value, key SORT BY c2 DESC, c3 ASC POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.one EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.ten EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n143 +POSTHOOK: Lineage: dest1_n143.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n143.one EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n143.ten EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n143.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n143.* FROM dest1_n143 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n143 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n143.* FROM dest1_n143 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n143 #### A masked pattern was here #### 490 49 0 val_490 491 49 1 val_491 diff --git a/ql/src/test/results/clientpositive/mapreduce7.q.out b/ql/src/test/results/clientpositive/mapreduce7.q.out index 0c7e353b02..d1d3d513f5 100644 --- a/ql/src/test/results/clientpositive/mapreduce7.q.out +++ b/ql/src/test/results/clientpositive/mapreduce7.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE TABLE dest1(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n33(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n33 +POSTHOOK: query: CREATE TABLE dest1_n33(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n33 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n33 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) SORT BY tvalue, tkey PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n33 MAP src.*, src.key, CAST(src.key / 10 AS INT), 
CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) SORT BY tvalue, tkey @@ -61,7 +61,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n33 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: string) outputColumnNames: k, v, key, ten, one, value @@ -86,7 +86,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n33 Stage: Stage-2 Stats Work @@ -94,7 +94,7 @@ STAGE PLANS: Column Stats Desc: Columns: k, v, key, ten, one, value Column Types: string, string, int, int, int, string - Table: default.dest1 + Table: default.dest1_n33 Stage: Stage-3 Map Reduce @@ -120,34 +120,34 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n33 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) SORT BY tvalue, tkey PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n33 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n33 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) SORT BY tvalue, tkey POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.k SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.v SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n33 +POSTHOOK: Lineage: dest1_n33.k SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n33.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n33.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n33.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), 
(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n33.v SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n33.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n33.* FROM dest1_n33 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n33 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n33.* FROM dest1_n33 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n33 #### A masked pattern was here #### 0 val_0 0 0 0 val_0 0 val_0 0 0 0 val_0 diff --git a/ql/src/test/results/clientpositive/mapreduce8.q.out b/ql/src/test/results/clientpositive/mapreduce8.q.out index a20063b624..d1859e1143 100644 --- a/ql/src/test/results/clientpositive/mapreduce8.q.out +++ b/ql/src/test/results/clientpositive/mapreduce8.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: CREATE TABLE dest1(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n133(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n133 +POSTHOOK: query: CREATE TABLE dest1_n133(k STRING, v STRING, key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n133 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n133 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) DISTRIBUTE BY rand(3) @@ -16,7 +16,7 @@ SORT BY tvalue, tkey PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n133 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) DISTRIBUTE BY rand(3) @@ -64,7 +64,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n133 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: string) outputColumnNames: k, v, key, ten, one, value @@ -89,7 +89,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n133 Stage: Stage-2 Stats Work @@ -97,7 +97,7 @@ STAGE PLANS: Column Stats Desc: Columns: k, v, key, ten, one, value Column Types: string, string, int, int, int, string - Table: default.dest1 + Table: default.dest1_n133 Stage: Stage-3 Map Reduce @@ -123,36 +123,36 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 
+INSERT OVERWRITE TABLE dest1_n133 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) DISTRIBUTE BY rand(3) SORT BY tvalue, tkey PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n133 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 +INSERT OVERWRITE TABLE dest1_n133 MAP src.*, src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value USING 'cat' AS (k, v, tkey, ten, one, tvalue) DISTRIBUTE BY rand(3) SORT BY tvalue, tkey POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.k SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.v SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n133 +POSTHOOK: Lineage: dest1_n133.k SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n133.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n133.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n133.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n133.v SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n133.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n133.* FROM dest1_n133 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n133 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n133.* FROM dest1_n133 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n133 #### A masked pattern was here #### 0 val_0 0 0 0 val_0 0 val_0 0 0 0 val_0 diff --git a/ql/src/test/results/clientpositive/masking_1.q.out b/ql/src/test/results/clientpositive/masking_1.q.out index cb7b7a0a7c..bc0a42ab7a 100644 --- a/ql/src/test/results/clientpositive/masking_1.q.out +++ b/ql/src/test/results/clientpositive/masking_1.q.out @@ -1,69 +1,547 @@ -PREHOOK: query: create table masking_test as select 
cast(key as int) as key, value from src +PREHOOK: query: create table masking_test_n8 as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_n8 +POSTHOOK: query: create table masking_test_n8 as select cast(key as int) as key, value from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain select * from masking_test +POSTHOOK: Output: default@masking_test_n8 +POSTHOOK: Lineage: masking_test_n8.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n8.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select * from masking_test_n8 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test +POSTHOOK: query: explain select * from masking_test_n8 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_n8 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select * from masking_test +PREHOOK: query: select * from masking_test_n8 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test +POSTHOOK: query: select * from masking_test_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n8 #### A masked pattern was here #### -0 0_lav -4 4_lav -8 8_lav -0 0_lav -0 0_lav -2 2_lav -PREHOOK: query: explain select * from masking_test where key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 
val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 
val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from masking_test_n8 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test where key > 0 +POSTHOOK: query: explain select * from masking_test_n8 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -74,18 +552,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n8 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), reverse(value) (type: string) + expressions: key (type: int), value (type: string) outputColumnNames: 
_col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -98,20 +576,514 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test where key > 0 +PREHOOK: query: select * from masking_test_n8 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test where key > 0 +POSTHOOK: query: select * from masking_test_n8 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n8 #### A masked pattern was here #### -4 4_lav -8 8_lav -2 2_lav -PREHOOK: query: explain select key from masking_test where key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 
val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 
+186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select key from masking_test_n8 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select key from masking_test where key > 0 +POSTHOOK: query: explain select key from masking_test_n8 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -122,18 +1094,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n8 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) outputColumnNames: _col0 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -146,20 +1118,514 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select key from masking_test where key > 0 +PREHOOK: query: select key from masking_test_n8 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n8 #### A masked pattern was here #### -POSTHOOK: query: select key from masking_test where key > 0 +POSTHOOK: query: select key from masking_test_n8 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n8 #### A masked pattern was here #### +238 +86 +311 +27 +165 +409 +255 +278 +98 +484 +265 +193 +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 +429 +374 +152 +469 +145 +495 +37 +327 +281 +277 +209 +15 +82 +403 +166 +417 +430 +252 +292 +219 +287 +153 +193 +338 +446 +459 +394 +237 +482 +174 +413 +494 +207 +199 +466 +208 +174 +399 +396 +247 +417 +489 +162 +377 +397 +309 +365 +266 +439 +342 +367 +325 +167 +195 +475 +17 +113 +155 +203 +339 +455 +128 +311 +316 +57 +302 +205 +149 +438 +345 +129 +170 +20 +489 +157 +378 +221 +92 +111 +47 +72 4 +280 +35 +427 +277 +208 +356 +399 +169 +382 +498 +125 +386 +437 +469 +192 +286 +187 +176 +54 +459 +51 +138 +103 +239 +213 +216 +430 +278 +176 +289 +221 +65 +318 +332 +311 +275 +137 +241 +83 +333 +180 +284 +12 +230 +181 +67 +260 +404 +384 +489 +353 +373 +272 +138 +217 +84 +348 +466 +58 8 +411 +230 +208 +348 +24 +463 +431 +179 +172 +42 +129 +158 +119 +496 +322 +197 +468 +393 +454 +100 +298 +199 +191 +418 +96 +26 +165 +327 +230 +205 +120 +131 +51 +404 +43 +436 +156 +469 +468 +308 +95 +196 +288 +481 +457 +98 +282 +197 +187 +318 +318 +409 +470 +137 +369 +316 +169 +413 +85 +77 +490 +87 +364 +179 +118 +134 +395 +282 +138 +238 +419 +15 +118 +72 +90 +307 +19 +435 +10 +277 +273 +306 +224 +309 +389 
+327 +242 +369 +392 +272 +331 +401 +242 +452 +177 +226 +5 +497 +402 +396 +317 +395 +58 +35 +336 +95 +11 +168 +34 +229 +233 +143 +472 +322 +498 +160 +195 +42 +321 +430 +119 +489 +458 +78 +76 +41 +223 +492 +149 +449 +218 +228 +138 +453 +30 +209 +64 +468 +76 +74 +342 +69 +230 +33 +368 +103 +296 +113 +216 +367 +344 +167 +274 +219 +239 +485 +116 +223 +256 +263 +70 +487 +480 +401 +288 +191 +5 +244 +438 +128 +467 +432 +202 +316 +229 +469 +463 +280 2 -PREHOOK: query: explain select value from masking_test where key > 0 +35 +283 +331 +235 +80 +44 +193 +321 +335 +104 +466 +366 +175 +403 +483 +53 +105 +257 +406 +409 +190 +406 +401 +114 +258 +90 +203 +262 +348 +424 +12 +396 +201 +217 +164 +431 +454 +478 +298 +125 +431 +164 +424 +187 +382 +5 +70 +397 +480 +291 +24 +351 +255 +104 +70 +163 +438 +119 +414 +200 +491 +237 +439 +360 +248 +479 +305 +417 +199 +444 +120 +429 +169 +443 +323 +325 +277 +230 +478 +178 +468 +310 +317 +333 +493 +460 +207 +249 +265 +480 +83 +136 +353 +172 +214 +462 +233 +406 +133 +175 +189 +454 +375 +401 +421 +407 +384 +256 +26 +134 +67 +384 +379 +18 +462 +492 +100 +298 +9 +341 +498 +146 +458 +362 +186 +285 +348 +167 +18 +273 +183 +281 +344 +97 +469 +315 +84 +28 +37 +448 +152 +348 +307 +194 +414 +477 +222 +126 +90 +169 +403 +400 +200 +97 +PREHOOK: query: explain select value from masking_test_n8 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select value from masking_test where key > 0 +POSTHOOK: query: explain select value from masking_test_n8 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -170,18 +1636,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n8 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: reverse(value) (type: string) + expressions: value (type: string) outputColumnNames: _col0 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -194,20 +1660,514 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select value from masking_test where key > 0 +PREHOOK: query: select value from masking_test_n8 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n8 #### A masked pattern was here #### -POSTHOOK: query: select value from masking_test where key > 0 +POSTHOOK: query: select value from masking_test_n8 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n8 #### A masked pattern was here #### -4_lav -8_lav -2_lav -PREHOOK: query: explain select * from masking_test join srcpart on (masking_test.key = srcpart.key) +val_238 +val_86 +val_311 +val_27 +val_165 +val_409 +val_255 +val_278 +val_98 +val_484 +val_265 +val_193 +val_401 +val_150 
+val_273 +val_224 +val_369 +val_66 +val_128 +val_213 +val_146 +val_406 +val_429 +val_374 +val_152 +val_469 +val_145 +val_495 +val_37 +val_327 +val_281 +val_277 +val_209 +val_15 +val_82 +val_403 +val_166 +val_417 +val_430 +val_252 +val_292 +val_219 +val_287 +val_153 +val_193 +val_338 +val_446 +val_459 +val_394 +val_237 +val_482 +val_174 +val_413 +val_494 +val_207 +val_199 +val_466 +val_208 +val_174 +val_399 +val_396 +val_247 +val_417 +val_489 +val_162 +val_377 +val_397 +val_309 +val_365 +val_266 +val_439 +val_342 +val_367 +val_325 +val_167 +val_195 +val_475 +val_17 +val_113 +val_155 +val_203 +val_339 +val_455 +val_128 +val_311 +val_316 +val_57 +val_302 +val_205 +val_149 +val_438 +val_345 +val_129 +val_170 +val_20 +val_489 +val_157 +val_378 +val_221 +val_92 +val_111 +val_47 +val_72 +val_4 +val_280 +val_35 +val_427 +val_277 +val_208 +val_356 +val_399 +val_169 +val_382 +val_498 +val_125 +val_386 +val_437 +val_469 +val_192 +val_286 +val_187 +val_176 +val_54 +val_459 +val_51 +val_138 +val_103 +val_239 +val_213 +val_216 +val_430 +val_278 +val_176 +val_289 +val_221 +val_65 +val_318 +val_332 +val_311 +val_275 +val_137 +val_241 +val_83 +val_333 +val_180 +val_284 +val_12 +val_230 +val_181 +val_67 +val_260 +val_404 +val_384 +val_489 +val_353 +val_373 +val_272 +val_138 +val_217 +val_84 +val_348 +val_466 +val_58 +val_8 +val_411 +val_230 +val_208 +val_348 +val_24 +val_463 +val_431 +val_179 +val_172 +val_42 +val_129 +val_158 +val_119 +val_496 +val_322 +val_197 +val_468 +val_393 +val_454 +val_100 +val_298 +val_199 +val_191 +val_418 +val_96 +val_26 +val_165 +val_327 +val_230 +val_205 +val_120 +val_131 +val_51 +val_404 +val_43 +val_436 +val_156 +val_469 +val_468 +val_308 +val_95 +val_196 +val_288 +val_481 +val_457 +val_98 +val_282 +val_197 +val_187 +val_318 +val_318 +val_409 +val_470 +val_137 +val_369 +val_316 +val_169 +val_413 +val_85 +val_77 +val_490 +val_87 +val_364 +val_179 +val_118 +val_134 +val_395 +val_282 +val_138 +val_238 +val_419 +val_15 +val_118 +val_72 +val_90 +val_307 +val_19 +val_435 +val_10 +val_277 +val_273 +val_306 +val_224 +val_309 +val_389 +val_327 +val_242 +val_369 +val_392 +val_272 +val_331 +val_401 +val_242 +val_452 +val_177 +val_226 +val_5 +val_497 +val_402 +val_396 +val_317 +val_395 +val_58 +val_35 +val_336 +val_95 +val_11 +val_168 +val_34 +val_229 +val_233 +val_143 +val_472 +val_322 +val_498 +val_160 +val_195 +val_42 +val_321 +val_430 +val_119 +val_489 +val_458 +val_78 +val_76 +val_41 +val_223 +val_492 +val_149 +val_449 +val_218 +val_228 +val_138 +val_453 +val_30 +val_209 +val_64 +val_468 +val_76 +val_74 +val_342 +val_69 +val_230 +val_33 +val_368 +val_103 +val_296 +val_113 +val_216 +val_367 +val_344 +val_167 +val_274 +val_219 +val_239 +val_485 +val_116 +val_223 +val_256 +val_263 +val_70 +val_487 +val_480 +val_401 +val_288 +val_191 +val_5 +val_244 +val_438 +val_128 +val_467 +val_432 +val_202 +val_316 +val_229 +val_469 +val_463 +val_280 +val_2 +val_35 +val_283 +val_331 +val_235 +val_80 +val_44 +val_193 +val_321 +val_335 +val_104 +val_466 +val_366 +val_175 +val_403 +val_483 +val_53 +val_105 +val_257 +val_406 +val_409 +val_190 +val_406 +val_401 +val_114 +val_258 +val_90 +val_203 +val_262 +val_348 +val_424 +val_12 +val_396 +val_201 +val_217 +val_164 +val_431 +val_454 +val_478 +val_298 +val_125 +val_431 +val_164 +val_424 +val_187 +val_382 +val_5 +val_70 +val_397 +val_480 +val_291 +val_24 +val_351 +val_255 +val_104 +val_70 +val_163 +val_438 +val_119 +val_414 +val_200 +val_491 +val_237 +val_439 +val_360 +val_248 +val_479 +val_305 +val_417 +val_199 +val_444 +val_120 +val_429 +val_169 +val_443 
+val_323 +val_325 +val_277 +val_230 +val_478 +val_178 +val_468 +val_310 +val_317 +val_333 +val_493 +val_460 +val_207 +val_249 +val_265 +val_480 +val_83 +val_136 +val_353 +val_172 +val_214 +val_462 +val_233 +val_406 +val_133 +val_175 +val_189 +val_454 +val_375 +val_401 +val_421 +val_407 +val_384 +val_256 +val_26 +val_134 +val_67 +val_384 +val_379 +val_18 +val_462 +val_492 +val_100 +val_298 +val_9 +val_341 +val_498 +val_146 +val_458 +val_362 +val_186 +val_285 +val_348 +val_167 +val_18 +val_273 +val_183 +val_281 +val_344 +val_97 +val_469 +val_315 +val_84 +val_28 +val_37 +val_448 +val_152 +val_348 +val_307 +val_194 +val_414 +val_477 +val_222 +val_126 +val_90 +val_169 +val_403 +val_400 +val_200 +val_97 +PREHOOK: query: explain select * from masking_test_n8 join srcpart on (masking_test_n8.key = srcpart.key) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test join srcpart on (masking_test.key = srcpart.key) +POSTHOOK: query: explain select * from masking_test_n8 join srcpart on (masking_test_n8.key = srcpart.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -218,20 +2178,20 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n8 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + predicate: key is not null (type: boolean) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), reverse(value) (type: string) + expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: UDFToDouble(_col0) (type: double) sort order: + Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string) TableScan alias: srcpart @@ -272,75 +2232,4139 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test join srcpart on (masking_test.key = srcpart.key) +PREHOOK: query: select * from masking_test_n8 join srcpart on (masking_test_n8.key = srcpart.key) PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n8 PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test join srcpart on (masking_test.key = srcpart.key) +POSTHOOK: query: select * from masking_test_n8 join srcpart on (masking_test_n8.key = srcpart.key) POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n8 POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### -0 
0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-09 12 -2 2_lav 2 val_2 2008-04-09 11 -2 2_lav 2 val_2 2008-04-08 11 -2 2_lav 2 val_2 2008-04-09 12 -2 2_lav 2 val_2 2008-04-08 12 -4 4_lav 4 val_4 2008-04-08 12 -4 4_lav 4 val_4 2008-04-09 12 -4 4_lav 4 val_4 2008-04-08 11 -4 4_lav 4 val_4 2008-04-09 11 -8 8_lav 8 val_8 2008-04-08 11 -8 8_lav 8 val_8 2008-04-09 11 -8 8_lav 8 val_8 2008-04-08 12 -8 8_lav 8 val_8 2008-04-09 12 -PREHOOK: query: explain select * from default.masking_test where key > 0 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-09 12 +2 val_2 2 val_2 2008-04-09 11 +2 val_2 2 val_2 2008-04-08 11 +2 val_2 2 val_2 2008-04-09 12 +2 val_2 2 val_2 2008-04-08 12 +4 val_4 4 val_4 2008-04-08 12 +4 val_4 4 val_4 2008-04-09 12 +4 val_4 4 val_4 2008-04-08 11 +4 val_4 4 val_4 2008-04-09 11 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 
val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 12 +8 val_8 8 val_8 2008-04-08 11 +8 val_8 8 val_8 2008-04-09 11 +8 val_8 8 val_8 2008-04-08 12 +8 val_8 8 val_8 2008-04-09 12 +9 val_9 9 val_9 2008-04-08 12 +9 val_9 9 val_9 2008-04-09 12 +9 val_9 9 val_9 2008-04-09 11 +9 val_9 9 val_9 2008-04-08 11 +10 val_10 10 val_10 2008-04-09 11 +10 val_10 10 val_10 2008-04-09 12 +10 val_10 10 val_10 2008-04-08 12 +10 val_10 10 val_10 2008-04-08 11 +11 val_11 11 val_11 2008-04-09 12 +11 val_11 11 val_11 2008-04-09 11 +11 val_11 11 val_11 2008-04-08 12 +11 val_11 11 val_11 2008-04-08 11 +12 val_12 12 val_12 2008-04-09 12 +12 val_12 12 val_12 2008-04-08 11 +12 val_12 12 val_12 2008-04-09 11 +12 val_12 12 val_12 2008-04-08 12 +12 val_12 12 val_12 2008-04-08 11 +12 val_12 12 val_12 2008-04-08 12 +12 val_12 12 val_12 2008-04-09 11 +12 val_12 12 val_12 2008-04-09 12 +12 val_12 12 val_12 2008-04-09 12 +12 val_12 12 val_12 2008-04-08 11 +12 val_12 12 val_12 2008-04-09 11 +12 val_12 12 val_12 2008-04-08 12 +12 val_12 12 val_12 2008-04-08 11 +12 val_12 12 val_12 2008-04-08 12 +12 val_12 12 val_12 2008-04-09 11 +12 val_12 12 val_12 2008-04-09 12 +15 val_15 15 val_15 2008-04-08 12 +15 val_15 15 val_15 2008-04-08 11 +15 val_15 15 val_15 2008-04-09 12 +15 val_15 15 val_15 2008-04-09 12 +15 val_15 15 val_15 2008-04-09 11 +15 val_15 15 val_15 2008-04-08 11 +15 val_15 15 val_15 2008-04-09 11 +15 val_15 15 val_15 2008-04-08 12 +15 val_15 15 val_15 2008-04-08 12 +15 val_15 15 val_15 2008-04-08 11 +15 val_15 15 val_15 2008-04-09 12 +15 val_15 15 val_15 2008-04-09 12 +15 val_15 15 val_15 2008-04-09 11 +15 val_15 15 val_15 2008-04-08 11 +15 val_15 15 val_15 2008-04-09 11 +15 val_15 15 val_15 2008-04-08 12 +17 val_17 17 val_17 2008-04-09 11 +17 val_17 17 val_17 2008-04-09 12 +17 val_17 17 val_17 2008-04-08 12 +17 val_17 17 val_17 2008-04-08 11 +18 val_18 18 val_18 2008-04-08 12 +18 val_18 18 val_18 2008-04-08 12 +18 val_18 18 val_18 2008-04-08 11 +18 val_18 18 val_18 2008-04-09 12 +18 val_18 18 val_18 2008-04-09 12 +18 val_18 18 val_18 2008-04-09 11 +18 val_18 18 val_18 2008-04-09 11 +18 val_18 18 val_18 2008-04-08 11 +18 val_18 18 val_18 2008-04-08 12 +18 val_18 18 val_18 2008-04-08 12 +18 val_18 18 val_18 2008-04-08 11 +18 val_18 18 val_18 2008-04-09 12 +18 val_18 18 val_18 2008-04-09 12 +18 val_18 18 val_18 2008-04-09 11 +18 val_18 18 val_18 2008-04-09 11 +18 val_18 18 val_18 2008-04-08 11 +19 val_19 19 val_19 2008-04-09 11 +19 val_19 19 val_19 2008-04-09 12 +19 val_19 19 val_19 2008-04-08 11 +19 val_19 19 val_19 2008-04-08 12 +20 val_20 20 val_20 2008-04-08 11 +20 val_20 20 val_20 2008-04-09 11 +20 val_20 20 val_20 2008-04-08 12 +20 val_20 20 val_20 2008-04-09 12 +24 val_24 24 val_24 2008-04-09 12 +24 val_24 24 val_24 2008-04-08 11 +24 val_24 24 val_24 2008-04-08 11 +24 val_24 24 val_24 2008-04-09 12 +24 val_24 24 val_24 2008-04-08 12 +24 val_24 24 val_24 2008-04-08 12 +24 val_24 24 val_24 2008-04-09 11 +24 val_24 24 val_24 2008-04-09 11 +24 val_24 24 val_24 2008-04-09 12 +24 val_24 24 val_24 2008-04-08 11 +24 val_24 24 val_24 2008-04-08 11 +24 val_24 24 val_24 2008-04-09 12 +24 val_24 24 val_24 
2008-04-08 12 +24 val_24 24 val_24 2008-04-08 12 +24 val_24 24 val_24 2008-04-09 11 +24 val_24 24 val_24 2008-04-09 11 +26 val_26 26 val_26 2008-04-09 12 +26 val_26 26 val_26 2008-04-08 11 +26 val_26 26 val_26 2008-04-09 11 +26 val_26 26 val_26 2008-04-09 12 +26 val_26 26 val_26 2008-04-09 11 +26 val_26 26 val_26 2008-04-08 12 +26 val_26 26 val_26 2008-04-08 12 +26 val_26 26 val_26 2008-04-08 11 +26 val_26 26 val_26 2008-04-09 12 +26 val_26 26 val_26 2008-04-08 11 +26 val_26 26 val_26 2008-04-09 11 +26 val_26 26 val_26 2008-04-09 12 +26 val_26 26 val_26 2008-04-09 11 +26 val_26 26 val_26 2008-04-08 12 +26 val_26 26 val_26 2008-04-08 12 +26 val_26 26 val_26 2008-04-08 11 +27 val_27 27 val_27 2008-04-09 12 +27 val_27 27 val_27 2008-04-09 11 +27 val_27 27 val_27 2008-04-08 12 +27 val_27 27 val_27 2008-04-08 11 +28 val_28 28 val_28 2008-04-08 12 +28 val_28 28 val_28 2008-04-09 11 +28 val_28 28 val_28 2008-04-09 12 +28 val_28 28 val_28 2008-04-08 11 +30 val_30 30 val_30 2008-04-09 12 +30 val_30 30 val_30 2008-04-09 11 +30 val_30 30 val_30 2008-04-08 11 +30 val_30 30 val_30 2008-04-08 12 +33 val_33 33 val_33 2008-04-08 11 +33 val_33 33 val_33 2008-04-08 12 +33 val_33 33 val_33 2008-04-09 11 +33 val_33 33 val_33 2008-04-09 12 +34 val_34 34 val_34 2008-04-09 11 +34 val_34 34 val_34 2008-04-09 12 +34 val_34 34 val_34 2008-04-08 11 +34 val_34 34 val_34 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-09 11 +37 val_37 37 val_37 2008-04-09 12 +37 val_37 37 val_37 2008-04-08 12 +37 val_37 37 val_37 2008-04-08 11 +37 val_37 37 val_37 2008-04-09 11 +37 val_37 37 val_37 2008-04-09 11 +37 val_37 37 val_37 2008-04-09 12 +37 val_37 37 val_37 2008-04-08 12 +37 val_37 37 val_37 2008-04-08 11 +37 val_37 37 val_37 2008-04-09 12 +37 val_37 37 val_37 2008-04-08 12 +37 val_37 37 val_37 2008-04-08 11 +37 val_37 37 val_37 2008-04-09 11 +37 val_37 37 val_37 2008-04-09 11 +37 val_37 37 val_37 2008-04-09 12 +37 val_37 37 val_37 2008-04-08 12 +37 val_37 37 val_37 2008-04-08 11 +41 val_41 41 val_41 2008-04-08 11 +41 val_41 41 val_41 2008-04-09 11 +41 val_41 41 val_41 2008-04-09 12 +41 val_41 41 val_41 2008-04-08 12 +42 val_42 42 val_42 2008-04-09 12 +42 val_42 42 val_42 2008-04-09 11 +42 val_42 42 val_42 2008-04-08 12 +42 val_42 42 val_42 2008-04-08 12 +42 val_42 42 val_42 2008-04-09 12 +42 val_42 42 val_42 2008-04-08 11 +42 
val_42 42 val_42 2008-04-09 11 +42 val_42 42 val_42 2008-04-08 11 +42 val_42 42 val_42 2008-04-09 12 +42 val_42 42 val_42 2008-04-09 11 +42 val_42 42 val_42 2008-04-08 12 +42 val_42 42 val_42 2008-04-08 12 +42 val_42 42 val_42 2008-04-09 12 +42 val_42 42 val_42 2008-04-08 11 +42 val_42 42 val_42 2008-04-09 11 +42 val_42 42 val_42 2008-04-08 11 +43 val_43 43 val_43 2008-04-09 12 +43 val_43 43 val_43 2008-04-08 12 +43 val_43 43 val_43 2008-04-09 11 +43 val_43 43 val_43 2008-04-08 11 +44 val_44 44 val_44 2008-04-09 11 +44 val_44 44 val_44 2008-04-08 11 +44 val_44 44 val_44 2008-04-09 12 +44 val_44 44 val_44 2008-04-08 12 +47 val_47 47 val_47 2008-04-08 11 +47 val_47 47 val_47 2008-04-08 12 +47 val_47 47 val_47 2008-04-09 12 +47 val_47 47 val_47 2008-04-09 11 +51 val_51 51 val_51 2008-04-09 12 +51 val_51 51 val_51 2008-04-08 12 +51 val_51 51 val_51 2008-04-08 11 +51 val_51 51 val_51 2008-04-08 11 +51 val_51 51 val_51 2008-04-09 11 +51 val_51 51 val_51 2008-04-09 12 +51 val_51 51 val_51 2008-04-08 12 +51 val_51 51 val_51 2008-04-09 11 +51 val_51 51 val_51 2008-04-09 12 +51 val_51 51 val_51 2008-04-08 12 +51 val_51 51 val_51 2008-04-08 11 +51 val_51 51 val_51 2008-04-08 11 +51 val_51 51 val_51 2008-04-09 11 +51 val_51 51 val_51 2008-04-09 12 +51 val_51 51 val_51 2008-04-08 12 +51 val_51 51 val_51 2008-04-09 11 +53 val_53 53 val_53 2008-04-08 11 +53 val_53 53 val_53 2008-04-09 11 +53 val_53 53 val_53 2008-04-09 12 +53 val_53 53 val_53 2008-04-08 12 +54 val_54 54 val_54 2008-04-08 11 +54 val_54 54 val_54 2008-04-09 12 +54 val_54 54 val_54 2008-04-09 11 +54 val_54 54 val_54 2008-04-08 12 +57 val_57 57 val_57 2008-04-08 12 +57 val_57 57 val_57 2008-04-08 11 +57 val_57 57 val_57 2008-04-09 11 +57 val_57 57 val_57 2008-04-09 12 +58 val_58 58 val_58 2008-04-08 11 +58 val_58 58 val_58 2008-04-09 11 +58 val_58 58 val_58 2008-04-09 12 +58 val_58 58 val_58 2008-04-08 11 +58 val_58 58 val_58 2008-04-08 12 +58 val_58 58 val_58 2008-04-09 11 +58 val_58 58 val_58 2008-04-09 12 +58 val_58 58 val_58 2008-04-08 12 +58 val_58 58 val_58 2008-04-08 11 +58 val_58 58 val_58 2008-04-09 11 +58 val_58 58 val_58 2008-04-09 12 +58 val_58 58 val_58 2008-04-08 11 +58 val_58 58 val_58 2008-04-08 12 +58 val_58 58 val_58 2008-04-09 11 +58 val_58 58 val_58 2008-04-09 12 +58 val_58 58 val_58 2008-04-08 12 +64 val_64 64 val_64 2008-04-09 12 +64 val_64 64 val_64 2008-04-08 11 +64 val_64 64 val_64 2008-04-08 12 +64 val_64 64 val_64 2008-04-09 11 +65 val_65 65 val_65 2008-04-09 11 +65 val_65 65 val_65 2008-04-09 12 +65 val_65 65 val_65 2008-04-08 11 +65 val_65 65 val_65 2008-04-08 12 +66 val_66 66 val_66 2008-04-09 12 +66 val_66 66 val_66 2008-04-09 11 +66 val_66 66 val_66 2008-04-08 11 +66 val_66 66 val_66 2008-04-08 12 +67 val_67 67 val_67 2008-04-09 12 +67 val_67 67 val_67 2008-04-09 11 +67 val_67 67 val_67 2008-04-09 12 +67 val_67 67 val_67 2008-04-09 11 +67 val_67 67 val_67 2008-04-08 11 +67 val_67 67 val_67 2008-04-08 12 +67 val_67 67 val_67 2008-04-08 11 +67 val_67 67 val_67 2008-04-08 12 +67 val_67 67 val_67 2008-04-09 12 +67 val_67 67 val_67 2008-04-09 11 +67 val_67 67 val_67 2008-04-09 12 +67 val_67 67 val_67 2008-04-09 11 +67 val_67 67 val_67 2008-04-08 11 +67 val_67 67 val_67 2008-04-08 12 +67 val_67 67 val_67 2008-04-08 11 +67 val_67 67 val_67 2008-04-08 12 +69 val_69 69 val_69 2008-04-09 12 +69 val_69 69 val_69 2008-04-08 11 +69 val_69 69 val_69 2008-04-08 12 +69 val_69 69 val_69 2008-04-09 11 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 2008-04-08 12 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 
2008-04-08 12 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +70 val_70 70 val_70 2008-04-08 12 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 2008-04-08 12 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 2008-04-08 12 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +70 val_70 70 val_70 2008-04-08 12 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 2008-04-08 12 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 2008-04-08 12 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +70 val_70 70 val_70 2008-04-08 12 +70 val_70 70 val_70 2008-04-09 11 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +70 val_70 70 val_70 2008-04-08 11 +70 val_70 70 val_70 2008-04-09 12 +72 val_72 72 val_72 2008-04-09 12 +72 val_72 72 val_72 2008-04-08 12 +72 val_72 72 val_72 2008-04-08 12 +72 val_72 72 val_72 2008-04-09 11 +72 val_72 72 val_72 2008-04-08 11 +72 val_72 72 val_72 2008-04-09 12 +72 val_72 72 val_72 2008-04-08 11 +72 val_72 72 val_72 2008-04-09 11 +72 val_72 72 val_72 2008-04-09 12 +72 val_72 72 val_72 2008-04-08 12 +72 val_72 72 val_72 2008-04-08 12 +72 val_72 72 val_72 2008-04-09 11 +72 val_72 72 val_72 2008-04-08 11 +72 val_72 72 val_72 2008-04-09 12 +72 val_72 72 val_72 2008-04-08 11 +72 val_72 72 val_72 2008-04-09 11 +74 val_74 74 val_74 2008-04-09 11 +74 val_74 74 val_74 2008-04-08 12 +74 val_74 74 val_74 2008-04-09 12 +74 val_74 74 val_74 2008-04-08 11 +76 val_76 76 val_76 2008-04-08 11 +76 val_76 76 val_76 2008-04-08 12 +76 val_76 76 val_76 2008-04-08 12 +76 val_76 76 val_76 2008-04-08 11 +76 val_76 76 val_76 2008-04-09 12 +76 val_76 76 val_76 2008-04-09 12 +76 val_76 76 val_76 2008-04-09 11 +76 val_76 76 val_76 2008-04-09 11 +76 val_76 76 val_76 2008-04-08 11 +76 val_76 76 val_76 2008-04-08 12 +76 val_76 76 val_76 2008-04-08 12 +76 val_76 76 val_76 2008-04-08 11 +76 val_76 76 val_76 2008-04-09 12 +76 val_76 76 val_76 2008-04-09 12 +76 val_76 76 val_76 2008-04-09 11 +76 val_76 76 val_76 2008-04-09 11 +77 val_77 77 val_77 2008-04-08 12 +77 val_77 77 val_77 2008-04-09 12 +77 val_77 77 val_77 2008-04-09 11 +77 val_77 77 val_77 2008-04-08 11 +78 val_78 78 val_78 2008-04-09 11 +78 val_78 78 val_78 2008-04-08 11 +78 val_78 78 val_78 2008-04-09 12 +78 val_78 78 val_78 2008-04-08 12 +80 val_80 80 val_80 2008-04-09 11 +80 val_80 80 val_80 2008-04-08 11 +80 val_80 80 val_80 2008-04-09 12 +80 val_80 80 val_80 2008-04-08 12 +82 val_82 82 val_82 2008-04-08 12 +82 val_82 82 val_82 2008-04-09 11 +82 val_82 82 val_82 2008-04-08 11 +82 val_82 82 val_82 2008-04-09 12 +83 val_83 83 val_83 2008-04-08 12 +83 val_83 83 val_83 2008-04-09 12 +83 val_83 83 val_83 2008-04-09 11 +83 val_83 83 val_83 2008-04-09 12 +83 val_83 83 val_83 2008-04-08 11 +83 val_83 83 val_83 2008-04-08 12 +83 val_83 83 val_83 2008-04-09 11 +83 val_83 83 val_83 2008-04-08 11 +83 val_83 83 val_83 2008-04-08 12 +83 val_83 83 val_83 2008-04-09 12 +83 val_83 83 val_83 2008-04-09 11 +83 val_83 83 val_83 2008-04-09 12 +83 val_83 83 val_83 2008-04-08 11 +83 val_83 83 val_83 2008-04-08 12 +83 val_83 83 val_83 2008-04-09 11 +83 val_83 83 val_83 2008-04-08 11 +84 val_84 84 val_84 2008-04-09 12 +84 
val_84 84 val_84 2008-04-08 11 +84 val_84 84 val_84 2008-04-08 11 +84 val_84 84 val_84 2008-04-09 12 +84 val_84 84 val_84 2008-04-08 12 +84 val_84 84 val_84 2008-04-09 11 +84 val_84 84 val_84 2008-04-09 11 +84 val_84 84 val_84 2008-04-08 12 +84 val_84 84 val_84 2008-04-09 12 +84 val_84 84 val_84 2008-04-08 11 +84 val_84 84 val_84 2008-04-08 11 +84 val_84 84 val_84 2008-04-09 12 +84 val_84 84 val_84 2008-04-08 12 +84 val_84 84 val_84 2008-04-09 11 +84 val_84 84 val_84 2008-04-09 11 +84 val_84 84 val_84 2008-04-08 12 +85 val_85 85 val_85 2008-04-09 12 +85 val_85 85 val_85 2008-04-09 11 +85 val_85 85 val_85 2008-04-08 11 +85 val_85 85 val_85 2008-04-08 12 +86 val_86 86 val_86 2008-04-08 12 +86 val_86 86 val_86 2008-04-09 11 +86 val_86 86 val_86 2008-04-08 11 +86 val_86 86 val_86 2008-04-09 12 +87 val_87 87 val_87 2008-04-09 12 +87 val_87 87 val_87 2008-04-08 11 +87 val_87 87 val_87 2008-04-08 12 +87 val_87 87 val_87 2008-04-09 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-09 11 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-09 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-09 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-09 11 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-09 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-09 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-09 11 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-09 11 +90 val_90 90 val_90 2008-04-09 12 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-08 12 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-08 11 +90 val_90 90 val_90 2008-04-09 11 +92 val_92 92 val_92 2008-04-09 11 +92 val_92 92 val_92 2008-04-09 12 +92 val_92 92 val_92 2008-04-08 12 +92 val_92 92 val_92 2008-04-08 11 +95 val_95 95 val_95 2008-04-08 11 +95 val_95 95 val_95 2008-04-09 12 +95 val_95 95 val_95 2008-04-08 12 +95 val_95 95 val_95 2008-04-09 11 +95 val_95 95 val_95 2008-04-08 12 +95 val_95 95 val_95 2008-04-09 11 +95 val_95 95 val_95 2008-04-09 12 +95 val_95 95 val_95 2008-04-08 11 +95 val_95 95 val_95 2008-04-08 11 +95 val_95 95 val_95 2008-04-09 12 +95 val_95 95 val_95 2008-04-08 12 +95 val_95 95 val_95 2008-04-09 11 +95 val_95 95 val_95 2008-04-08 12 +95 val_95 95 val_95 2008-04-09 11 +95 val_95 95 val_95 2008-04-09 12 +95 val_95 95 val_95 2008-04-08 11 +96 val_96 96 val_96 2008-04-09 12 +96 val_96 96 val_96 2008-04-08 11 +96 val_96 96 val_96 2008-04-08 12 +96 val_96 96 val_96 2008-04-09 11 +97 val_97 97 val_97 2008-04-09 12 +97 val_97 97 val_97 2008-04-09 12 +97 val_97 97 val_97 2008-04-09 11 +97 val_97 97 val_97 2008-04-09 11 +97 val_97 97 val_97 2008-04-08 12 +97 val_97 97 val_97 2008-04-08 12 +97 val_97 97 val_97 2008-04-08 11 +97 val_97 97 val_97 2008-04-08 11 +97 val_97 97 val_97 2008-04-09 12 +97 val_97 97 val_97 2008-04-09 12 +97 val_97 97 val_97 2008-04-09 11 +97 val_97 97 val_97 2008-04-09 11 +97 val_97 97 val_97 2008-04-08 12 +97 val_97 97 val_97 2008-04-08 12 +97 val_97 97 val_97 
2008-04-08 11 +97 val_97 97 val_97 2008-04-08 11 +98 val_98 98 val_98 2008-04-08 11 +98 val_98 98 val_98 2008-04-08 11 +98 val_98 98 val_98 2008-04-08 12 +98 val_98 98 val_98 2008-04-09 12 +98 val_98 98 val_98 2008-04-09 11 +98 val_98 98 val_98 2008-04-09 11 +98 val_98 98 val_98 2008-04-08 12 +98 val_98 98 val_98 2008-04-09 12 +98 val_98 98 val_98 2008-04-08 11 +98 val_98 98 val_98 2008-04-08 11 +98 val_98 98 val_98 2008-04-08 12 +98 val_98 98 val_98 2008-04-09 12 +98 val_98 98 val_98 2008-04-09 11 +98 val_98 98 val_98 2008-04-09 11 +98 val_98 98 val_98 2008-04-08 12 +98 val_98 98 val_98 2008-04-09 12 +100 val_100 100 val_100 2008-04-09 11 +100 val_100 100 val_100 2008-04-08 11 +100 val_100 100 val_100 2008-04-09 12 +100 val_100 100 val_100 2008-04-08 12 +100 val_100 100 val_100 2008-04-08 12 +100 val_100 100 val_100 2008-04-09 12 +100 val_100 100 val_100 2008-04-08 11 +100 val_100 100 val_100 2008-04-09 11 +100 val_100 100 val_100 2008-04-09 11 +100 val_100 100 val_100 2008-04-08 11 +100 val_100 100 val_100 2008-04-09 12 +100 val_100 100 val_100 2008-04-08 12 +100 val_100 100 val_100 2008-04-08 12 +100 val_100 100 val_100 2008-04-09 12 +100 val_100 100 val_100 2008-04-08 11 +100 val_100 100 val_100 2008-04-09 11 +103 val_103 103 val_103 2008-04-08 11 +103 val_103 103 val_103 2008-04-09 12 +103 val_103 103 val_103 2008-04-09 11 +103 val_103 103 val_103 2008-04-08 11 +103 val_103 103 val_103 2008-04-09 11 +103 val_103 103 val_103 2008-04-08 12 +103 val_103 103 val_103 2008-04-08 12 +103 val_103 103 val_103 2008-04-09 12 +103 val_103 103 val_103 2008-04-08 11 +103 val_103 103 val_103 2008-04-09 12 +103 val_103 103 val_103 2008-04-09 11 +103 val_103 103 val_103 2008-04-08 11 +103 val_103 103 val_103 2008-04-09 11 +103 val_103 103 val_103 2008-04-08 12 +103 val_103 103 val_103 2008-04-08 12 +103 val_103 103 val_103 2008-04-09 12 +104 val_104 104 val_104 2008-04-08 11 +104 val_104 104 val_104 2008-04-09 12 +104 val_104 104 val_104 2008-04-09 11 +104 val_104 104 val_104 2008-04-08 12 +104 val_104 104 val_104 2008-04-08 11 +104 val_104 104 val_104 2008-04-09 12 +104 val_104 104 val_104 2008-04-08 12 +104 val_104 104 val_104 2008-04-09 11 +104 val_104 104 val_104 2008-04-08 11 +104 val_104 104 val_104 2008-04-09 12 +104 val_104 104 val_104 2008-04-09 11 +104 val_104 104 val_104 2008-04-08 12 +104 val_104 104 val_104 2008-04-08 11 +104 val_104 104 val_104 2008-04-09 12 +104 val_104 104 val_104 2008-04-08 12 +104 val_104 104 val_104 2008-04-09 11 +105 val_105 105 val_105 2008-04-09 12 +105 val_105 105 val_105 2008-04-09 11 +105 val_105 105 val_105 2008-04-08 11 +105 val_105 105 val_105 2008-04-08 12 +111 val_111 111 val_111 2008-04-08 12 +111 val_111 111 val_111 2008-04-08 11 +111 val_111 111 val_111 2008-04-09 12 +111 val_111 111 val_111 2008-04-09 11 +113 val_113 113 val_113 2008-04-09 12 +113 val_113 113 val_113 2008-04-08 11 +113 val_113 113 val_113 2008-04-08 12 +113 val_113 113 val_113 2008-04-08 12 +113 val_113 113 val_113 2008-04-09 11 +113 val_113 113 val_113 2008-04-09 11 +113 val_113 113 val_113 2008-04-08 11 +113 val_113 113 val_113 2008-04-09 12 +113 val_113 113 val_113 2008-04-09 12 +113 val_113 113 val_113 2008-04-08 11 +113 val_113 113 val_113 2008-04-08 12 +113 val_113 113 val_113 2008-04-08 12 +113 val_113 113 val_113 2008-04-09 11 +113 val_113 113 val_113 2008-04-09 11 +113 val_113 113 val_113 2008-04-08 11 +113 val_113 113 val_113 2008-04-09 12 +114 val_114 114 val_114 2008-04-09 11 +114 val_114 114 val_114 2008-04-09 12 +114 val_114 114 val_114 2008-04-08 12 +114 val_114 114 
val_114 2008-04-08 11 +116 val_116 116 val_116 2008-04-08 12 +116 val_116 116 val_116 2008-04-09 11 +116 val_116 116 val_116 2008-04-08 11 +116 val_116 116 val_116 2008-04-09 12 +118 val_118 118 val_118 2008-04-09 12 +118 val_118 118 val_118 2008-04-08 11 +118 val_118 118 val_118 2008-04-09 11 +118 val_118 118 val_118 2008-04-09 12 +118 val_118 118 val_118 2008-04-08 11 +118 val_118 118 val_118 2008-04-08 12 +118 val_118 118 val_118 2008-04-08 12 +118 val_118 118 val_118 2008-04-09 11 +118 val_118 118 val_118 2008-04-09 12 +118 val_118 118 val_118 2008-04-08 11 +118 val_118 118 val_118 2008-04-09 11 +118 val_118 118 val_118 2008-04-09 12 +118 val_118 118 val_118 2008-04-08 11 +118 val_118 118 val_118 2008-04-08 12 +118 val_118 118 val_118 2008-04-08 12 +118 val_118 118 val_118 2008-04-09 11 +119 val_119 119 val_119 2008-04-09 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-09 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 12 +119 val_119 119 val_119 2008-04-09 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-09 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 12 +119 val_119 119 val_119 2008-04-09 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-09 11 +119 val_119 119 val_119 2008-04-09 12 +119 val_119 119 val_119 2008-04-08 11 +119 val_119 119 val_119 2008-04-09 12 +120 val_120 120 val_120 2008-04-09 11 +120 val_120 120 val_120 2008-04-08 11 +120 val_120 120 val_120 2008-04-09 12 +120 val_120 120 val_120 2008-04-08 12 +120 val_120 120 val_120 2008-04-09 12 +120 val_120 120 val_120 2008-04-08 12 +120 val_120 120 val_120 2008-04-08 11 +120 val_120 120 val_120 2008-04-09 11 +120 val_120 120 val_120 2008-04-09 11 +120 val_120 120 val_120 2008-04-08 11 +120 val_120 120 val_120 2008-04-09 12 +120 val_120 120 val_120 2008-04-08 12 +120 val_120 120 val_120 2008-04-09 12 +120 val_120 120 val_120 2008-04-08 12 +120 val_120 120 val_120 2008-04-08 11 +120 val_120 120 val_120 2008-04-09 11 +125 val_125 125 val_125 2008-04-09 12 +125 val_125 125 val_125 2008-04-08 11 +125 val_125 125 val_125 2008-04-09 11 +125 val_125 125 val_125 2008-04-08 11 +125 val_125 125 val_125 2008-04-09 12 +125 val_125 125 val_125 2008-04-09 11 +125 val_125 125 val_125 2008-04-08 12 +125 val_125 125 val_125 2008-04-08 12 +125 val_125 125 val_125 2008-04-09 12 +125 val_125 125 val_125 2008-04-08 11 +125 val_125 125 val_125 2008-04-09 11 +125 val_125 125 val_125 2008-04-08 11 +125 val_125 125 val_125 2008-04-09 12 +125 val_125 125 val_125 2008-04-09 11 +125 val_125 125 val_125 2008-04-08 12 +125 val_125 125 val_125 2008-04-08 12 +126 val_126 126 val_126 2008-04-08 11 +126 val_126 126 val_126 2008-04-08 12 +126 val_126 126 
val_126 2008-04-09 12 +126 val_126 126 val_126 2008-04-09 11 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-08 12 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-08 12 +128 val_128 128 val_128 2008-04-08 12 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-08 12 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-08 12 +128 val_128 128 val_128 2008-04-08 12 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-08 12 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-08 11 +128 val_128 128 val_128 2008-04-09 11 +128 val_128 128 val_128 2008-04-09 12 +128 val_128 128 val_128 2008-04-08 12 +128 val_128 128 val_128 2008-04-08 12 +129 val_129 129 val_129 2008-04-09 12 +129 val_129 129 val_129 2008-04-08 12 +129 val_129 129 val_129 2008-04-09 12 +129 val_129 129 val_129 2008-04-08 12 +129 val_129 129 val_129 2008-04-09 11 +129 val_129 129 val_129 2008-04-08 11 +129 val_129 129 val_129 2008-04-09 11 +129 val_129 129 val_129 2008-04-08 11 +129 val_129 129 val_129 2008-04-09 12 +129 val_129 129 val_129 2008-04-08 12 +129 val_129 129 val_129 2008-04-09 12 +129 val_129 129 val_129 2008-04-08 12 +129 val_129 129 val_129 2008-04-09 11 +129 val_129 129 val_129 2008-04-08 11 +129 val_129 129 val_129 2008-04-09 11 +129 val_129 129 val_129 2008-04-08 11 +131 val_131 131 val_131 2008-04-08 12 +131 val_131 131 val_131 2008-04-09 12 +131 val_131 131 val_131 2008-04-09 11 +131 val_131 131 val_131 2008-04-08 11 +133 val_133 133 val_133 2008-04-08 11 +133 val_133 133 val_133 2008-04-08 12 +133 val_133 133 val_133 2008-04-09 11 +133 val_133 133 val_133 2008-04-09 12 +134 val_134 134 val_134 2008-04-08 12 +134 val_134 134 val_134 2008-04-09 11 +134 val_134 134 val_134 2008-04-08 11 +134 val_134 134 val_134 2008-04-09 11 +134 val_134 134 val_134 2008-04-09 12 +134 val_134 134 val_134 2008-04-08 12 +134 val_134 134 val_134 2008-04-09 12 +134 val_134 134 val_134 2008-04-08 11 +134 val_134 134 val_134 2008-04-08 12 +134 val_134 134 val_134 2008-04-09 11 +134 val_134 134 val_134 2008-04-08 11 +134 val_134 134 val_134 2008-04-09 11 +134 val_134 134 val_134 2008-04-09 12 +134 val_134 134 val_134 2008-04-08 12 +134 val_134 134 val_134 2008-04-09 12 +134 val_134 134 val_134 2008-04-08 11 +136 val_136 136 val_136 2008-04-09 12 +136 val_136 136 val_136 2008-04-08 12 +136 val_136 136 val_136 2008-04-08 11 +136 val_136 136 val_136 2008-04-09 11 +137 val_137 137 val_137 2008-04-08 11 +137 val_137 137 val_137 2008-04-09 12 +137 val_137 137 val_137 2008-04-09 11 +137 val_137 137 val_137 2008-04-09 11 +137 val_137 137 val_137 2008-04-09 12 +137 val_137 137 val_137 2008-04-08 12 +137 val_137 137 val_137 2008-04-08 11 +137 val_137 137 val_137 2008-04-08 12 +137 val_137 137 val_137 2008-04-08 11 +137 val_137 137 
val_137 2008-04-09 12 +137 val_137 137 val_137 2008-04-09 11 +137 val_137 137 val_137 2008-04-09 11 +137 val_137 137 val_137 2008-04-09 12 +137 val_137 137 val_137 2008-04-08 12 +137 val_137 137 val_137 2008-04-08 11 +137 val_137 137 val_137 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-09 12 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 12 +138 val_138 138 val_138 2008-04-09 11 +138 val_138 138 val_138 2008-04-08 11 +138 val_138 138 val_138 2008-04-08 12 +143 val_143 143 val_143 2008-04-09 12 +143 val_143 143 val_143 2008-04-08 11 +143 val_143 143 val_143 2008-04-09 11 +143 val_143 143 val_143 2008-04-08 12 +145 val_145 145 val_145 2008-04-09 11 +145 val_145 145 val_145 2008-04-08 11 +145 val_145 145 val_145 2008-04-08 12 +145 val_145 145 val_145 2008-04-09 12 +146 val_146 146 val_146 2008-04-08 11 +146 val_146 146 val_146 2008-04-09 11 +146 val_146 146 val_146 2008-04-08 12 +146 val_146 146 val_146 2008-04-08 11 +146 val_146 146 val_146 2008-04-09 12 +146 val_146 146 val_146 2008-04-09 12 +146 val_146 146 val_146 2008-04-09 11 +146 val_146 146 val_146 2008-04-08 12 +146 val_146 146 val_146 2008-04-08 11 +146 val_146 146 val_146 2008-04-09 11 +146 val_146 146 val_146 2008-04-08 12 +146 val_146 146 val_146 2008-04-08 11 +146 val_146 146 
val_146 2008-04-09 12 +146 val_146 146 val_146 2008-04-09 12 +146 val_146 146 val_146 2008-04-09 11 +146 val_146 146 val_146 2008-04-08 12 +149 val_149 149 val_149 2008-04-08 12 +149 val_149 149 val_149 2008-04-09 12 +149 val_149 149 val_149 2008-04-09 12 +149 val_149 149 val_149 2008-04-09 11 +149 val_149 149 val_149 2008-04-09 11 +149 val_149 149 val_149 2008-04-08 11 +149 val_149 149 val_149 2008-04-08 11 +149 val_149 149 val_149 2008-04-08 12 +149 val_149 149 val_149 2008-04-08 12 +149 val_149 149 val_149 2008-04-09 12 +149 val_149 149 val_149 2008-04-09 12 +149 val_149 149 val_149 2008-04-09 11 +149 val_149 149 val_149 2008-04-09 11 +149 val_149 149 val_149 2008-04-08 11 +149 val_149 149 val_149 2008-04-08 11 +149 val_149 149 val_149 2008-04-08 12 +150 val_150 150 val_150 2008-04-09 11 +150 val_150 150 val_150 2008-04-09 12 +150 val_150 150 val_150 2008-04-08 11 +150 val_150 150 val_150 2008-04-08 12 +152 val_152 152 val_152 2008-04-08 11 +152 val_152 152 val_152 2008-04-08 12 +152 val_152 152 val_152 2008-04-09 11 +152 val_152 152 val_152 2008-04-08 11 +152 val_152 152 val_152 2008-04-09 12 +152 val_152 152 val_152 2008-04-09 11 +152 val_152 152 val_152 2008-04-09 12 +152 val_152 152 val_152 2008-04-08 12 +152 val_152 152 val_152 2008-04-08 11 +152 val_152 152 val_152 2008-04-08 12 +152 val_152 152 val_152 2008-04-09 11 +152 val_152 152 val_152 2008-04-08 11 +152 val_152 152 val_152 2008-04-09 12 +152 val_152 152 val_152 2008-04-09 11 +152 val_152 152 val_152 2008-04-09 12 +152 val_152 152 val_152 2008-04-08 12 +153 val_153 153 val_153 2008-04-09 11 +153 val_153 153 val_153 2008-04-08 11 +153 val_153 153 val_153 2008-04-08 12 +153 val_153 153 val_153 2008-04-09 12 +155 val_155 155 val_155 2008-04-08 12 +155 val_155 155 val_155 2008-04-08 11 +155 val_155 155 val_155 2008-04-09 12 +155 val_155 155 val_155 2008-04-09 11 +156 val_156 156 val_156 2008-04-08 12 +156 val_156 156 val_156 2008-04-09 12 +156 val_156 156 val_156 2008-04-09 11 +156 val_156 156 val_156 2008-04-08 11 +157 val_157 157 val_157 2008-04-09 11 +157 val_157 157 val_157 2008-04-08 12 +157 val_157 157 val_157 2008-04-08 11 +157 val_157 157 val_157 2008-04-09 12 +158 val_158 158 val_158 2008-04-08 11 +158 val_158 158 val_158 2008-04-09 11 +158 val_158 158 val_158 2008-04-09 12 +158 val_158 158 val_158 2008-04-08 12 +160 val_160 160 val_160 2008-04-09 11 +160 val_160 160 val_160 2008-04-09 12 +160 val_160 160 val_160 2008-04-08 11 +160 val_160 160 val_160 2008-04-08 12 +162 val_162 162 val_162 2008-04-09 11 +162 val_162 162 val_162 2008-04-08 12 +162 val_162 162 val_162 2008-04-08 11 +162 val_162 162 val_162 2008-04-09 12 +163 val_163 163 val_163 2008-04-09 11 +163 val_163 163 val_163 2008-04-09 12 +163 val_163 163 val_163 2008-04-08 11 +163 val_163 163 val_163 2008-04-08 12 +164 val_164 164 val_164 2008-04-08 11 +164 val_164 164 val_164 2008-04-09 11 +164 val_164 164 val_164 2008-04-08 11 +164 val_164 164 val_164 2008-04-09 12 +164 val_164 164 val_164 2008-04-09 11 +164 val_164 164 val_164 2008-04-09 12 +164 val_164 164 val_164 2008-04-08 12 +164 val_164 164 val_164 2008-04-08 12 +164 val_164 164 val_164 2008-04-08 11 +164 val_164 164 val_164 2008-04-09 11 +164 val_164 164 val_164 2008-04-08 11 +164 val_164 164 val_164 2008-04-09 12 +164 val_164 164 val_164 2008-04-09 11 +164 val_164 164 val_164 2008-04-09 12 +164 val_164 164 val_164 2008-04-08 12 +164 val_164 164 val_164 2008-04-08 12 +165 val_165 165 val_165 2008-04-08 11 +165 val_165 165 val_165 2008-04-09 11 +165 val_165 165 val_165 2008-04-09 12 +165 val_165 165 
val_165 2008-04-08 12 +165 val_165 165 val_165 2008-04-09 12 +165 val_165 165 val_165 2008-04-08 12 +165 val_165 165 val_165 2008-04-08 11 +165 val_165 165 val_165 2008-04-09 11 +165 val_165 165 val_165 2008-04-08 11 +165 val_165 165 val_165 2008-04-09 11 +165 val_165 165 val_165 2008-04-09 12 +165 val_165 165 val_165 2008-04-08 12 +165 val_165 165 val_165 2008-04-09 12 +165 val_165 165 val_165 2008-04-08 12 +165 val_165 165 val_165 2008-04-08 11 +165 val_165 165 val_165 2008-04-09 11 +166 val_166 166 val_166 2008-04-08 12 +166 val_166 166 val_166 2008-04-08 11 +166 val_166 166 val_166 2008-04-09 12 +166 val_166 166 val_166 2008-04-09 11 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-08 12 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-08 12 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-08 12 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-08 12 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-08 12 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-08 12 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-08 12 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-08 12 +167 val_167 167 val_167 2008-04-09 11 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-08 11 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-09 12 +167 val_167 167 val_167 2008-04-08 12 +168 val_168 168 val_168 2008-04-08 11 +168 val_168 168 val_168 2008-04-08 12 +168 val_168 168 val_168 2008-04-09 11 +168 val_168 168 val_168 2008-04-09 12 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 val_169 2008-04-08 12 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 val_169 2008-04-08 11 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 val_169 2008-04-08 11 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-08 12 +169 val_169 169 val_169 2008-04-08 12 +169 val_169 169 val_169 2008-04-08 12 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-08 11 +169 val_169 169 val_169 2008-04-08 11 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 val_169 2008-04-08 12 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 val_169 2008-04-08 11 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 val_169 2008-04-08 11 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-08 12 +169 val_169 169 val_169 2008-04-08 12 +169 val_169 169 val_169 2008-04-08 12 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-08 11 +169 val_169 169 val_169 2008-04-08 11 +169 val_169 169 val_169 2008-04-09 12 +169 val_169 169 val_169 2008-04-09 11 +169 val_169 169 
[added golden-output rows elided: one `+<key>	val_<key>	<key>	val_<key>	<ds>	<hr>` line per joined row, keys 169–401, over partitions ds ∈ {2008-04-08, 2008-04-09} × hr ∈ {11, 12}]
val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +402 val_402 402 val_402 2008-04-09 11 +402 val_402 402 val_402 2008-04-08 11 +402 val_402 402 val_402 2008-04-09 12 +402 val_402 402 val_402 2008-04-08 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 
val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +404 val_404 404 val_404 2008-04-09 12 +404 val_404 404 val_404 2008-04-09 11 +404 val_404 404 val_404 2008-04-08 11 +404 val_404 404 val_404 2008-04-08 11 +404 val_404 404 val_404 2008-04-08 12 +404 val_404 404 val_404 2008-04-09 11 +404 val_404 404 val_404 2008-04-09 12 +404 val_404 404 val_404 2008-04-08 12 +404 val_404 404 val_404 2008-04-09 12 +404 val_404 404 val_404 2008-04-09 11 +404 val_404 404 val_404 2008-04-08 11 +404 val_404 404 val_404 2008-04-08 11 +404 val_404 404 val_404 2008-04-08 12 +404 val_404 404 val_404 2008-04-09 11 +404 val_404 404 val_404 2008-04-09 12 +404 val_404 404 val_404 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 
val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +407 val_407 407 val_407 2008-04-08 11 +407 val_407 407 val_407 2008-04-08 12 +407 val_407 407 val_407 2008-04-09 12 +407 val_407 407 val_407 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-09 11 +411 val_411 411 val_411 2008-04-08 11 +411 val_411 411 val_411 2008-04-08 12 +411 val_411 411 val_411 2008-04-09 12 +411 val_411 411 val_411 2008-04-09 11 +413 val_413 413 val_413 2008-04-09 11 +413 val_413 413 val_413 2008-04-08 12 +413 val_413 413 val_413 2008-04-09 11 +413 val_413 413 val_413 2008-04-09 12 +413 val_413 413 val_413 2008-04-08 12 +413 val_413 413 val_413 2008-04-08 11 +413 val_413 413 val_413 2008-04-09 12 +413 val_413 413 val_413 2008-04-08 11 +413 val_413 413 val_413 2008-04-09 11 +413 val_413 413 val_413 2008-04-08 12 +413 val_413 413 val_413 2008-04-09 11 +413 val_413 413 val_413 2008-04-09 12 +413 val_413 413 val_413 2008-04-08 12 +413 val_413 413 val_413 2008-04-08 11 +413 val_413 413 val_413 2008-04-09 12 +413 val_413 413 val_413 2008-04-08 11 +414 val_414 414 val_414 2008-04-08 11 +414 val_414 414 val_414 2008-04-08 12 +414 val_414 414 val_414 2008-04-09 11 +414 val_414 414 val_414 2008-04-08 11 +414 val_414 414 val_414 2008-04-09 12 +414 val_414 414 val_414 2008-04-08 12 +414 val_414 414 val_414 2008-04-09 11 +414 val_414 414 val_414 2008-04-09 12 +414 val_414 414 val_414 2008-04-08 11 +414 val_414 414 val_414 2008-04-08 12 +414 val_414 414 val_414 2008-04-09 11 +414 val_414 414 val_414 2008-04-08 11 +414 val_414 414 val_414 2008-04-09 12 +414 val_414 414 val_414 2008-04-08 12 +414 val_414 414 val_414 2008-04-09 11 +414 val_414 414 val_414 2008-04-09 12 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 
val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 11 +418 val_418 418 val_418 2008-04-08 12 +418 val_418 418 val_418 2008-04-09 12 +418 val_418 418 val_418 2008-04-09 11 +418 val_418 418 val_418 2008-04-08 11 +419 val_419 419 val_419 2008-04-08 12 +419 val_419 419 val_419 2008-04-08 11 +419 val_419 419 val_419 2008-04-09 12 +419 val_419 419 val_419 2008-04-09 11 +421 val_421 421 val_421 2008-04-08 11 +421 val_421 421 val_421 2008-04-08 12 +421 val_421 421 val_421 2008-04-09 11 +421 val_421 421 val_421 2008-04-09 12 +424 val_424 424 val_424 2008-04-08 11 +424 val_424 424 val_424 2008-04-09 11 +424 val_424 424 val_424 2008-04-09 12 +424 val_424 424 val_424 2008-04-09 11 +424 val_424 424 val_424 2008-04-08 12 +424 val_424 424 val_424 2008-04-08 12 +424 val_424 424 val_424 2008-04-09 12 +424 val_424 424 val_424 2008-04-08 11 +424 val_424 424 val_424 2008-04-08 11 +424 val_424 424 val_424 2008-04-09 11 +424 val_424 424 val_424 2008-04-09 12 +424 val_424 424 val_424 2008-04-09 11 +424 val_424 424 val_424 2008-04-08 12 +424 val_424 424 val_424 2008-04-08 12 +424 val_424 424 val_424 2008-04-09 12 +424 val_424 424 val_424 2008-04-08 11 +427 val_427 427 val_427 2008-04-09 12 +427 val_427 427 val_427 2008-04-08 12 +427 val_427 427 val_427 2008-04-09 11 +427 val_427 427 val_427 2008-04-08 11 +429 val_429 429 val_429 2008-04-09 11 +429 val_429 429 val_429 2008-04-08 11 +429 val_429 429 val_429 2008-04-08 12 +429 val_429 429 val_429 2008-04-09 12 +429 val_429 429 val_429 2008-04-08 11 +429 val_429 429 val_429 2008-04-09 12 +429 val_429 429 val_429 2008-04-08 12 +429 val_429 429 val_429 2008-04-09 11 +429 val_429 429 val_429 2008-04-09 11 +429 val_429 429 val_429 2008-04-08 11 +429 val_429 429 val_429 2008-04-08 12 +429 val_429 429 val_429 2008-04-09 12 +429 val_429 429 val_429 2008-04-08 11 +429 val_429 429 val_429 2008-04-09 12 +429 val_429 429 val_429 2008-04-08 12 +429 val_429 429 val_429 2008-04-09 11 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 
val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +432 val_432 432 val_432 2008-04-09 12 +432 val_432 432 val_432 2008-04-08 11 +432 val_432 432 val_432 2008-04-09 11 +432 val_432 432 val_432 2008-04-08 12 +435 val_435 435 val_435 2008-04-09 11 +435 val_435 435 val_435 2008-04-08 11 +435 val_435 435 val_435 2008-04-08 12 +435 val_435 435 val_435 2008-04-09 12 +436 val_436 436 val_436 2008-04-08 12 +436 val_436 436 val_436 2008-04-08 11 +436 val_436 436 val_436 2008-04-09 11 +436 val_436 436 val_436 2008-04-09 12 +437 val_437 437 val_437 2008-04-09 12 +437 val_437 437 val_437 2008-04-08 11 +437 val_437 437 val_437 2008-04-09 11 +437 val_437 437 val_437 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 
val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 11 +439 val_439 439 val_439 2008-04-09 11 +439 val_439 439 val_439 2008-04-08 12 +439 val_439 439 val_439 2008-04-08 12 +439 val_439 439 val_439 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 12 +439 val_439 439 val_439 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 12 +439 val_439 439 val_439 2008-04-09 11 +439 val_439 439 val_439 2008-04-09 11 +439 val_439 439 val_439 2008-04-08 12 +439 val_439 439 val_439 2008-04-08 12 +439 val_439 439 val_439 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 12 +439 val_439 439 val_439 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 12 +443 val_443 443 val_443 2008-04-09 11 +443 val_443 443 val_443 2008-04-09 12 +443 val_443 443 val_443 2008-04-08 11 +443 val_443 443 val_443 2008-04-08 12 +444 val_444 444 val_444 2008-04-09 11 +444 val_444 444 val_444 2008-04-08 11 +444 val_444 444 val_444 2008-04-08 12 +444 val_444 444 val_444 2008-04-09 12 +446 val_446 446 val_446 2008-04-08 12 +446 val_446 446 val_446 2008-04-09 12 +446 val_446 446 val_446 2008-04-08 11 +446 val_446 446 val_446 2008-04-09 11 +448 val_448 448 val_448 2008-04-09 11 +448 val_448 448 val_448 2008-04-09 12 +448 val_448 448 val_448 2008-04-08 11 +448 val_448 448 val_448 2008-04-08 12 +449 val_449 449 val_449 2008-04-08 12 +449 val_449 449 val_449 2008-04-08 11 +449 val_449 449 val_449 2008-04-09 12 +449 val_449 449 val_449 2008-04-09 11 +452 val_452 452 val_452 2008-04-08 11 +452 val_452 452 val_452 2008-04-09 11 +452 val_452 452 val_452 2008-04-09 12 +452 val_452 452 val_452 2008-04-08 12 +453 val_453 453 val_453 2008-04-08 12 +453 val_453 453 val_453 2008-04-09 11 +453 val_453 453 val_453 2008-04-09 12 +453 val_453 453 val_453 2008-04-08 11 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 
val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 12 +455 val_455 455 val_455 2008-04-08 11 +455 val_455 455 val_455 2008-04-09 11 +455 val_455 455 val_455 2008-04-08 12 +455 val_455 455 val_455 2008-04-09 12 +457 val_457 457 val_457 2008-04-08 12 +457 val_457 457 val_457 2008-04-09 12 +457 val_457 457 val_457 2008-04-08 11 +457 val_457 457 val_457 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 12 +458 val_458 458 val_458 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 11 +458 val_458 458 val_458 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 11 +458 val_458 458 val_458 2008-04-09 12 +458 val_458 458 val_458 2008-04-09 12 +458 val_458 458 val_458 2008-04-08 12 +458 val_458 458 val_458 2008-04-08 12 +458 val_458 458 val_458 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 11 +458 val_458 458 val_458 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 11 +458 val_458 458 val_458 2008-04-09 12 +458 val_458 458 val_458 2008-04-09 12 +458 val_458 458 val_458 2008-04-08 12 +459 val_459 459 val_459 2008-04-08 12 +459 val_459 459 val_459 2008-04-09 12 +459 val_459 459 val_459 2008-04-09 11 +459 val_459 459 val_459 2008-04-09 11 +459 val_459 459 val_459 2008-04-08 11 +459 val_459 459 val_459 2008-04-08 12 +459 val_459 459 val_459 2008-04-09 12 +459 val_459 459 val_459 2008-04-08 11 +459 val_459 459 val_459 2008-04-08 12 +459 val_459 459 val_459 2008-04-09 12 +459 val_459 459 val_459 2008-04-09 11 +459 val_459 459 val_459 2008-04-09 11 +459 val_459 459 val_459 2008-04-08 11 +459 val_459 459 val_459 2008-04-08 12 +459 val_459 459 val_459 2008-04-09 12 +459 val_459 459 val_459 2008-04-08 11 +460 val_460 460 val_460 2008-04-08 11 +460 val_460 460 val_460 2008-04-09 11 +460 val_460 460 val_460 2008-04-08 12 +460 val_460 460 val_460 2008-04-09 12 +462 val_462 462 val_462 2008-04-09 11 +462 val_462 462 val_462 2008-04-09 12 +462 val_462 462 val_462 2008-04-08 11 +462 val_462 462 val_462 2008-04-08 11 +462 val_462 462 val_462 2008-04-09 11 +462 val_462 462 val_462 2008-04-09 12 +462 val_462 462 val_462 2008-04-08 12 +462 val_462 462 val_462 2008-04-08 12 +462 val_462 462 val_462 2008-04-09 11 +462 val_462 462 val_462 2008-04-09 12 +462 val_462 462 val_462 2008-04-08 11 +462 val_462 462 val_462 2008-04-08 11 +462 val_462 462 val_462 2008-04-09 11 +462 val_462 462 val_462 2008-04-09 12 +462 val_462 462 val_462 2008-04-08 12 +462 val_462 462 val_462 2008-04-08 12 +463 val_463 463 val_463 2008-04-09 11 +463 val_463 463 val_463 2008-04-09 12 +463 val_463 463 val_463 2008-04-09 12 +463 val_463 463 val_463 2008-04-08 12 +463 val_463 463 val_463 2008-04-08 11 +463 val_463 463 val_463 2008-04-08 12 +463 val_463 463 val_463 2008-04-09 11 +463 val_463 463 val_463 2008-04-08 11 +463 val_463 463 val_463 2008-04-09 11 +463 val_463 463 val_463 2008-04-09 12 +463 val_463 463 val_463 2008-04-09 12 +463 val_463 463 val_463 2008-04-08 12 +463 val_463 463 val_463 2008-04-08 11 +463 val_463 463 val_463 2008-04-08 12 +463 val_463 463 val_463 2008-04-09 11 +463 val_463 463 val_463 2008-04-08 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 
val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 12 +467 val_467 467 val_467 2008-04-09 12 +467 val_467 467 val_467 2008-04-09 11 +467 val_467 467 val_467 2008-04-08 12 +467 val_467 467 val_467 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 
val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 
val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +470 val_470 470 val_470 2008-04-08 11 +470 val_470 470 val_470 2008-04-09 12 +470 val_470 470 val_470 2008-04-08 12 +470 val_470 470 val_470 2008-04-09 11 +472 val_472 472 val_472 2008-04-09 11 +472 val_472 472 val_472 2008-04-08 12 +472 val_472 472 val_472 2008-04-09 12 +472 val_472 472 val_472 2008-04-08 11 +475 val_475 475 val_475 2008-04-09 11 +475 val_475 475 val_475 2008-04-08 11 +475 val_475 475 val_475 2008-04-08 12 +475 val_475 475 val_475 2008-04-09 12 +477 val_477 477 val_477 2008-04-09 12 +477 val_477 477 val_477 2008-04-08 11 +477 val_477 477 val_477 2008-04-08 12 +477 val_477 477 val_477 2008-04-09 11 +478 val_478 478 val_478 2008-04-08 11 +478 val_478 478 val_478 2008-04-08 12 +478 val_478 478 val_478 2008-04-09 11 +478 val_478 478 val_478 2008-04-09 12 +478 val_478 478 val_478 2008-04-09 12 +478 val_478 478 val_478 2008-04-08 11 +478 val_478 478 val_478 2008-04-09 11 +478 val_478 478 val_478 2008-04-08 12 +478 val_478 478 val_478 2008-04-08 11 +478 val_478 478 val_478 2008-04-08 12 +478 val_478 478 val_478 2008-04-09 11 +478 val_478 478 val_478 2008-04-09 12 +478 val_478 478 val_478 2008-04-09 12 +478 val_478 478 val_478 2008-04-08 11 +478 val_478 478 val_478 2008-04-09 11 +478 val_478 478 val_478 2008-04-08 12 +479 val_479 479 val_479 2008-04-08 12 +479 val_479 479 val_479 2008-04-08 11 +479 val_479 479 val_479 2008-04-09 11 +479 val_479 479 val_479 2008-04-09 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 
val_480 2008-04-08 12 +481 val_481 481 val_481 2008-04-08 12 +481 val_481 481 val_481 2008-04-08 11 +481 val_481 481 val_481 2008-04-09 11 +481 val_481 481 val_481 2008-04-09 12 +482 val_482 482 val_482 2008-04-09 11 +482 val_482 482 val_482 2008-04-09 12 +482 val_482 482 val_482 2008-04-08 11 +482 val_482 482 val_482 2008-04-08 12 +483 val_483 483 val_483 2008-04-09 11 +483 val_483 483 val_483 2008-04-08 11 +483 val_483 483 val_483 2008-04-09 12 +483 val_483 483 val_483 2008-04-08 12 +484 val_484 484 val_484 2008-04-08 12 +484 val_484 484 val_484 2008-04-08 11 +484 val_484 484 val_484 2008-04-09 11 +484 val_484 484 val_484 2008-04-09 12 +485 val_485 485 val_485 2008-04-08 11 +485 val_485 485 val_485 2008-04-09 11 +485 val_485 485 val_485 2008-04-09 12 +485 val_485 485 val_485 2008-04-08 12 +487 val_487 487 val_487 2008-04-09 12 +487 val_487 487 val_487 2008-04-08 12 +487 val_487 487 val_487 2008-04-09 11 +487 val_487 487 val_487 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 11 +490 val_490 490 val_490 2008-04-09 11 +490 val_490 490 val_490 2008-04-08 11 +490 val_490 490 
+PREHOOK: query: explain select * from default.masking_test_n8 where key > 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from default.masking_test where key > 0
+POSTHOOK: query: explain select * from default.masking_test_n8 where key > 0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -351,18 +6375,18 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: masking_test
+            alias: masking_test_n8
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
-              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              predicate: (key > 0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: int), reverse(value) (type: string)
+                expressions: key (type: int), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -375,20 +6399,514 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select * from default.masking_test where key > 0
+PREHOOK: query: select * from default.masking_test_n8 where key > 0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n8
 #### A masked pattern was here ####
-POSTHOOK: query: select * from default.masking_test where key > 0
+POSTHOOK: query: select * from default.masking_test_n8 where key > 0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input: default@masking_test_n8
 #### A masked pattern was here ####
-4 4_lav
-8 8_lav
-2 2_lav
-PREHOOK: query: explain select * from masking_test where masking_test.key > 0
+238 val_238
+86 val_86
[... ~490 further added rows elided: the unmasked "key val_key" output of src where key > 0 ...]
+97 val_97
+PREHOOK: query: explain select * from masking_test_n8 where masking_test_n8.key > 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from masking_test where masking_test.key > 0
+POSTHOOK: query: explain select * from masking_test_n8 where masking_test_n8.key > 0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -399,18 +6917,18 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: masking_test
+            alias: masking_test_n8
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
-              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              predicate: (key > 0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
-                expressions: key (type: int), reverse(value) (type: string)
+                expressions: key (type: int), value (type: string)
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -423,17 +6941,511 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select * from masking_test where masking_test.key > 0
+PREHOOK: query: select * from masking_test_n8 where masking_test_n8.key > 0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n8
 #### A masked pattern was here ####
-POSTHOOK: query: select * from masking_test where masking_test.key > 0
+POSTHOOK: query: select * from masking_test_n8 where masking_test_n8.key > 0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input: default@masking_test_n8
 #### A masked pattern was here ####
-4 4_lav
-8 8_lav
-2 2_lav
+238 val_238
+86 val_86
[... ~490 further added rows elided: same unmasked "key val_key" output of src where key > 0 ...]
+97 val_97
 PREHOOK: query: explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0
 POSTHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/masking_12.q.out b/ql/src/test/results/clientpositive/masking_12.q.out
index dea720164b..b023d0f37c 100644
--- a/ql/src/test/results/clientpositive/masking_12.q.out
+++ b/ql/src/test/results/clientpositive/masking_12.q.out
@@ -1,27 +1,27 @@
-PREHOOK: query: create table `masking_test` as select cast(key as int) as key, value from src
+PREHOOK: query: create table `masking_test_n5` as select cast(key as int) as key, value from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: create table `masking_test` as select cast(key as int) as key, value from src
+PREHOOK: Output: default@masking_test_n5
+POSTHOOK: query: create table `masking_test_n5` as select cast(key as int) as key, value from src
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output:
database:default
-POSTHOOK: Output: default@masking_test
-POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create view `v0` as select * from `masking_test`
+POSTHOOK: Output: default@masking_test_n5
+POSTHOOK: Lineage: masking_test_n5.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: masking_test_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view `v0` as select * from `masking_test_n5`
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n5
 PREHOOK: Output: database:default
 PREHOOK: Output: default@v0
-POSTHOOK: query: create view `v0` as select * from `masking_test`
+POSTHOOK: query: create view `v0` as select * from `masking_test_n5`
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input: default@masking_test_n5
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@v0
-POSTHOOK: Lineage: v0.key SIMPLE [(masking_test)masking_test.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: v0.value SIMPLE [(masking_test)masking_test.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: v0.key SIMPLE [(masking_test_n5)masking_test_n5.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: v0.value SIMPLE [(masking_test_n5)masking_test_n5.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain
 select * from `v0`
 PREHOOK: type: QUERY
@@ -29,277 +29,633 @@ POSTHOOK: query: explain
 select * from `v0`
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: masking_test
-            properties:
-              insideView TRUE
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (((key % 2) = 0) and (key < 10)) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: int), reverse(value) (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: masking_test_n5
+          properties:
+            insideView TRUE
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: int), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: select * from `v0`
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n5
 PREHOOK: Input: default@v0
 #### A masked pattern was here ####
 POSTHOOK: query: select * from `v0`
 POSTHOOK: type: QUERY
-POSTHOOK: Input:
default@masking_test +POSTHOOK: Input: default@masking_test_n5 POSTHOOK: Input: default@v0 #### A masked pattern was here #### -0 0_lav -4 4_lav -8 8_lav -0 0_lav -0 0_lav -2 2_lav -PREHOOK: query: create table `masking_test_subq` as select cast(key as int) as key, value from src +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 
+242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: create table `masking_test_subq_n1` as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test_subq -POSTHOOK: query: create table `masking_test_subq` as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_subq_n1 +POSTHOOK: query: create table `masking_test_subq_n1` as select cast(key as int) as key, value from src POSTHOOK: type: 
CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@masking_test_subq
-POSTHOOK: Lineage: masking_test_subq.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: masking_test_subq.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create view `v1` as select * from `masking_test_subq`
+POSTHOOK: Output: default@masking_test_subq_n1
+POSTHOOK: Lineage: masking_test_subq_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: masking_test_subq_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view `v1_n4` as select * from `masking_test_subq_n1`
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@masking_test_subq
+PREHOOK: Input: default@masking_test_subq_n1
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v1
-POSTHOOK: query: create view `v1` as select * from `masking_test_subq`
+PREHOOK: Output: default@v1_n4
+POSTHOOK: query: create view `v1_n4` as select * from `masking_test_subq_n1`
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@masking_test_subq
+POSTHOOK: Input: default@masking_test_subq_n1
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v1
-POSTHOOK: Lineage: v1.key SIMPLE [(masking_test_subq)masking_test_subq.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: v1.value SIMPLE [(masking_test_subq)masking_test_subq.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Output: default@v1_n4
+POSTHOOK: Lineage: v1_n4.key SIMPLE [(masking_test_subq_n1)masking_test_subq_n1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: v1_n4.value SIMPLE [(masking_test_subq_n1)masking_test_subq_n1.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain
-select * from `v1`
+select * from `v1_n4` limit 20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select * from `v1`
+select * from `v1_n4` limit 20
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-3 is a root stage
-  Stage-2 depends on stages: Stage-3
-  Stage-1 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: masking_test_subq
-            properties:
-              insideView TRUE
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: UDFToDouble(_col0) (type: double)
-                  sort order: +
-                  Map-reduce partition columns: UDFToDouble(_col0) (type: double)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: UDFToDouble(_col0) (type: double)
-              sort order: +
-              Map-reduce partition columns: UDFToDouble(_col0) (type: double)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 UDFToDouble(_col0) (type: double)
-            1 UDFToDouble(_col0) (type: double)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            keys: _col0 (type: string), _col1 (type: int)
-            mode: hash
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: masking_test_subq
-            properties:
-              insideView TRUE
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: int), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int)
-                  sort order: ++
-                  Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int)
-              sort order: ++
-              Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int)
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Semi Join 0 to 1
-          keys:
-            0 UDFToDouble(_col0) (type: double), _col0 (type: int)
-            1 UDFToDouble(_col0) (type: double), _col1 (type: int)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 20
-            Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: 20
       Processor Tree:
-        ListSink
+        TableScan
+          alias: masking_test_subq_n1
+          properties:
+            insideView TRUE
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: int), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 20
+              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+              ListSink
 
-PREHOOK: query: select * from `v1`
+PREHOOK: query: select * from `v1_n4` limit 20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test_subq
-PREHOOK: Input: default@src
-PREHOOK: Input: default@v1
+PREHOOK: Input: default@masking_test_subq_n1
+PREHOOK: Input: default@v1_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from `v1`
+POSTHOOK: query: select * from `v1_n4` limit 20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@masking_test_subq
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@masking_test_subq_n1
+POSTHOOK: Input: default@v1_n4
 #### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-2 val_2
-4 val_4
-5 val_5
-5 val_5
-5 val_5
-8 val_8
-9 val_9
-10 val_10
-11 val_11
-12 val_12
-12 val_12
-15 val_15
-15 val_15
-17 val_17
-18 val_18
-18 val_18
-19 val_19
+238 val_238
+86 val_86
+311 val_311
+27 val_27
+165 val_165
+409 val_409
+255 val_255
+278 val_278
+98 val_98
+484 val_484
+265 val_265
+193 val_193
+401 val_401
+150 val_150
+273 val_273
+224 val_224
+369 val_369
+66 val_66
+128 val_128
+213 val_213
 PREHOOK: query: create view `masking_test_view` as select key from `v0`
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n5
 PREHOOK: Input: default@v0
 PREHOOK: Output: database:default
 PREHOOK: Output: default@masking_test_view
 POSTHOOK: query: create view `masking_test_view` as select key from `v0`
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input: default@masking_test_n5
 POSTHOOK: Input: default@v0
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@masking_test_view
-POSTHOOK: Lineage: masking_test_view.key SIMPLE [(masking_test)masking_test.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: masking_test_view.key SIMPLE [(masking_test_n5)masking_test_n5.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: explain
 select key from `masking_test_view`
 PREHOOK: type: QUERY
@@ -315,20 +671,20 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: masking_test
+            alias: masking_test_n5
             properties:
               insideView TRUE
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key % 2) = 0) and (key < 10) and (key > 6)) (type: boolean)
-              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              predicate: (key > 6) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger((UDFToDouble(key) / 2.0D)) (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 File Output
Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -343,17 +699,508 @@ STAGE PLANS: PREHOOK: query: select key from `masking_test_view` PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n5 PREHOOK: Input: default@masking_test_view PREHOOK: Input: default@v0 #### A masked pattern was here #### POSTHOOK: query: select key from `masking_test_view` POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n5 POSTHOOK: Input: default@masking_test_view POSTHOOK: Input: default@v0 #### A masked pattern was here #### +119 +43 +155 +13 +82 +204 +127 +139 +49 +242 +132 +96 +200 +75 +136 +112 +184 +33 +64 +106 +73 +203 +214 +187 +76 +234 +72 +247 +18 +163 +140 +138 +104 +7 +41 +201 +83 +208 +215 +126 +146 +109 +143 +76 +96 +169 +223 +229 +197 +118 +241 +87 +206 +247 +103 +99 +233 +104 +87 +199 +198 +123 +208 +244 +81 +188 +198 +154 +182 +133 +219 +171 +183 +162 +83 +97 +237 +8 +56 +77 +101 +169 +227 +64 +155 +158 +28 +151 +102 +74 +219 +172 +64 +85 +10 +244 +78 +189 +110 +46 +55 +23 +36 +140 +17 +213 +138 +104 +178 +199 +84 +191 +249 +62 +193 +218 +234 +96 +143 +93 +88 +27 +229 +25 +69 +51 +119 +106 +108 +215 +139 +88 +144 +110 +32 +159 +166 +155 +137 +68 +120 +41 +166 +90 +142 +6 +115 +90 +33 +130 +202 +192 +244 +176 +186 +136 +69 +108 +42 +174 +233 +29 4 +205 +115 +104 +174 +12 +231 +215 +89 +86 +21 +64 +79 +59 +248 +161 +98 +234 +196 +227 +50 +149 +99 +95 +209 +48 +13 +82 +163 +115 +102 +60 +65 +25 +202 +21 +218 +78 +234 +234 +154 +47 +98 +144 +240 +228 +49 +141 +98 +93 +159 +159 +204 +235 +68 +184 +158 +84 +206 +42 +38 +245 +43 +182 +89 +59 +67 +197 +141 +69 +119 +209 +7 +59 +36 +45 +153 +9 +217 +5 +138 +136 +153 +112 +154 +194 +163 +121 +184 +196 +136 +165 +200 +121 +226 +88 +113 +248 +201 +198 +158 +197 +29 +17 +168 +47 +5 +84 +17 +114 +116 +71 +236 +161 +249 +80 +97 +21 +160 +215 +59 +244 +229 +39 +38 +20 +111 +246 +74 +224 +109 +114 +69 +226 +15 +104 +32 +234 +38 +37 +171 +34 +115 +16 +184 +51 +148 +56 +108 +183 +172 +83 +137 +109 +119 +242 +58 +111 +128 +131 +35 +243 +240 +200 +144 +95 +122 +219 +64 +233 +216 +101 +158 +114 +234 +231 +140 +17 +141 +165 +117 +40 +22 +96 +160 +167 +52 +233 +183 +87 +201 +241 +26 +52 +128 +203 +204 +95 +203 +200 +57 +129 +45 +101 +131 +174 +212 +6 +198 +100 +108 +82 +215 +227 +239 +149 +62 +215 +82 +212 +93 +191 +35 +198 +240 +145 +12 +175 +127 +52 +35 +81 +219 +59 +207 +100 +245 +118 +219 +180 +124 +239 +152 +208 +99 +222 +60 +214 +84 +221 +161 +162 +138 +115 +239 +89 +234 +155 +158 +166 +246 +230 +103 +124 +132 +240 +41 +68 +176 +86 +107 +231 +116 +203 +66 +87 +94 +227 +187 +200 +210 +203 +192 +128 +13 +67 +33 +192 +189 +9 +231 +246 +50 +149 +4 +170 +249 +73 +229 +181 +93 +142 +174 +83 +9 +136 +91 +140 +172 +48 +234 +157 +42 +14 +18 +224 +76 +174 +153 +97 +207 +238 +111 +63 +45 +84 +201 +200 +100 +48 PREHOOK: query: explain select `v0`.value from `v0` join `masking_test_view` on `v0`.key = `masking_test_view`.key PREHOOK: type: QUERY @@ -369,40 +1216,40 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n5 properties: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - 
predicate: (((key % 2) = 0) and (key < 10)) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: int), reverse(value) (type: string)
+                expressions: key (type: int), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
-            alias: masking_test
+            alias: masking_test_n5
             properties:
               insideView TRUE
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToInteger((UDFToDouble(key) / 2.0D)) % 2) = 0) and ((key % 2) = 0) and (UDFToInteger((UDFToDouble(key) / 2.0D)) < 10) and (key < 10) and (key > 6)) (type: boolean)
-              Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+              predicate: (key > 6) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger((UDFToDouble(key) / 2.0D)) (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -411,14 +1258,14 @@ STAGE PLANS:
             0 _col0 (type: int)
             1 _col0 (type: int)
           outputColumnNames: _col1
-          Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -432,14 +1279,497 @@ PREHOOK: query: select `v0`.value from `v0` join `masking_test_view` on `v0`.key = `masking_test_view`.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n5
 PREHOOK: Input: default@masking_test_view
 PREHOOK: Input: default@v0
 #### A masked pattern was here ####
 POSTHOOK: query: select `v0`.value from `v0` join `masking_test_view` on `v0`.key = `masking_test_view`.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input:
default@masking_test_view POSTHOOK: Input: default@v0 #### A masked pattern was here #### -4_lav +val_4 +val_4 +val_5 +val_5 +val_5 +val_5 +val_5 +val_5 +val_8 +val_9 +val_9 +val_9 +val_10 +val_12 +val_12 +val_12 +val_12 +val_15 +val_15 +val_17 +val_17 +val_17 +val_17 +val_18 +val_18 +val_18 +val_18 +val_20 +val_26 +val_26 +val_27 +val_28 +val_33 +val_33 +val_33 +val_34 +val_35 +val_35 +val_35 +val_35 +val_35 +val_35 +val_35 +val_35 +val_35 +val_37 +val_37 +val_41 +val_41 +val_41 +val_42 +val_42 +val_42 +val_42 +val_42 +val_42 +val_43 +val_43 +val_47 +val_47 +val_51 +val_51 +val_51 +val_51 +val_57 +val_58 +val_58 +val_64 +val_64 +val_64 +val_64 +val_64 +val_65 +val_66 +val_67 +val_67 +val_67 +val_67 +val_69 +val_69 +val_69 +val_69 +val_72 +val_72 +val_74 +val_74 +val_76 +val_76 +val_76 +val_76 +val_76 +val_76 +val_77 +val_78 +val_78 +val_80 +val_82 +val_82 +val_82 +val_82 +val_83 +val_83 +val_83 +val_83 +val_83 +val_83 +val_83 +val_83 +val_84 +val_84 +val_84 +val_84 +val_84 +val_84 +val_84 +val_84 +val_84 +val_84 +val_85 +val_86 +val_86 +val_87 +val_87 +val_87 +val_87 +val_90 +val_90 +val_90 +val_90 +val_90 +val_90 +val_95 +val_95 +val_95 +val_95 +val_95 +val_95 +val_96 +val_96 +val_96 +val_96 +val_97 +val_97 +val_97 +val_97 +val_97 +val_97 +val_98 +val_98 +val_98 +val_98 +val_98 +val_98 +val_100 +val_100 +val_100 +val_100 +val_100 +val_100 +val_103 +val_103 +val_103 +val_103 +val_104 +val_104 +val_104 +val_104 +val_104 +val_104 +val_104 +val_104 +val_104 +val_104 +val_111 +val_111 +val_111 +val_113 +val_113 +val_114 +val_114 +val_114 +val_116 +val_116 +val_118 +val_118 +val_118 +val_118 +val_119 +val_119 +val_119 +val_119 +val_119 +val_119 +val_119 +val_119 +val_119 +val_119 +val_119 +val_119 +val_120 +val_120 +val_126 +val_128 +val_128 +val_128 +val_128 +val_128 +val_128 +val_128 +val_128 +val_128 +val_129 +val_129 +val_131 +val_131 +val_133 +val_136 +val_136 +val_136 +val_136 +val_136 +val_137 +val_137 +val_137 +val_137 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_138 +val_143 +val_143 +val_145 +val_146 +val_146 +val_149 +val_149 +val_149 +val_149 +val_149 +val_149 +val_152 +val_152 +val_153 +val_153 +val_153 +val_155 +val_155 +val_155 +val_155 +val_157 +val_158 +val_158 +val_158 +val_158 +val_158 +val_160 +val_160 +val_162 +val_162 +val_163 +val_163 +val_163 +val_165 +val_165 +val_165 +val_165 +val_166 +val_166 +val_166 +val_167 +val_167 +val_167 +val_168 +val_169 +val_169 +val_169 +val_169 +val_169 +val_169 +val_169 +val_169 +val_170 +val_172 +val_172 +val_172 +val_172 +val_172 +val_172 +val_174 +val_174 +val_174 +val_174 +val_174 +val_174 +val_174 +val_174 +val_174 +val_174 +val_175 +val_175 +val_176 +val_176 +val_176 +val_176 +val_178 +val_180 +val_181 +val_183 +val_183 +val_183 +val_186 +val_187 +val_187 +val_187 +val_187 +val_187 +val_187 +val_189 +val_189 +val_191 +val_191 +val_191 +val_191 +val_192 +val_192 +val_192 +val_193 +val_193 +val_193 +val_194 +val_196 +val_196 +val_197 +val_197 +val_197 +val_197 +val_197 +val_197 +val_199 +val_199 +val_199 +val_199 +val_199 +val_199 +val_200 +val_200 +val_200 +val_200 +val_200 +val_200 +val_200 +val_200 +val_200 +val_200 +val_200 +val_200 +val_201 +val_201 +val_201 +val_201 +val_202 +val_202 +val_203 +val_203 +val_203 +val_203 +val_203 +val_203 +val_203 +val_203 +val_203 +val_203 +val_205 +val_205 +val_207 +val_207 +val_207 +val_207 +val_208 +val_208 +val_208 +val_208 +val_208 +val_208 +val_208 +val_208 +val_208 +val_209 +val_209 +val_209 
+val_209 +val_213 +val_213 +val_214 +val_214 +val_216 +val_216 +val_217 +val_217 +val_218 +val_218 +val_219 +val_219 +val_219 +val_219 +val_219 +val_219 +val_219 +val_219 +val_219 +val_219 +val_221 +val_221 +val_222 +val_223 +val_223 +val_224 +val_224 +val_224 +val_224 +val_226 +val_226 +val_228 +val_229 +val_229 +val_229 +val_229 +val_229 +val_229 +val_229 +val_229 +val_230 +val_230 +val_230 +val_230 +val_230 +val_233 +val_233 +val_233 +val_233 +val_233 +val_233 +val_233 +val_233 +val_235 +val_237 +val_237 +val_238 +val_238 +val_239 +val_239 +val_239 +val_239 +val_239 +val_239 +val_241 +val_241 +val_242 +val_242 +val_242 +val_242 +val_244 +val_244 +val_244 +val_244 +val_247 +val_247 +val_248 +val_248 +val_249 +val_249 +val_249 diff --git a/ql/src/test/results/clientpositive/masking_1_newdb.q.out b/ql/src/test/results/clientpositive/masking_1_newdb.q.out index 4f73983dbf..fa322120f8 100644 --- a/ql/src/test/results/clientpositive/masking_1_newdb.q.out +++ b/ql/src/test/results/clientpositive/masking_1_newdb.q.out @@ -10,78 +10,556 @@ PREHOOK: Input: database:newdb POSTHOOK: query: use newdb POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:newdb -PREHOOK: query: create table masking_test as select cast(key as int) as key, value from default.src +PREHOOK: query: create table masking_test_n12 as select cast(key as int) as key, value from default.src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:newdb -PREHOOK: Output: newdb@masking_test -POSTHOOK: query: create table masking_test as select cast(key as int) as key, value from default.src +PREHOOK: Output: newdb@masking_test_n12 +POSTHOOK: query: create table masking_test_n12 as select cast(key as int) as key, value from default.src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:newdb -POSTHOOK: Output: newdb@masking_test -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: newdb@masking_test_n12 +POSTHOOK: Lineage: masking_test_n12.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n12.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: explain select * from newdb.masking_test +PREHOOK: query: explain select * from newdb.masking_test_n12 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from newdb.masking_test +POSTHOOK: query: explain select * from newdb.masking_test_n12 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - 
compressed: false - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_n12 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select * from newdb.masking_test +PREHOOK: query: select * from newdb.masking_test_n12 PREHOOK: type: QUERY -PREHOOK: Input: newdb@masking_test +PREHOOK: Input: newdb@masking_test_n12 #### A masked pattern was here #### -POSTHOOK: query: select * from newdb.masking_test +POSTHOOK: query: select * from newdb.masking_test_n12 POSTHOOK: type: QUERY -POSTHOOK: Input: newdb@masking_test +POSTHOOK: Input: newdb@masking_test_n12 #### A masked pattern was here #### -0 0_lav -4 4_lav -8 8_lav -0 0_lav -0 0_lav -2 2_lav -PREHOOK: query: explain select * from newdb.masking_test where key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 
+129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 
val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from newdb.masking_test_n12 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from newdb.masking_test where key > 0 +POSTHOOK: query: explain select * from newdb.masking_test_n12 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -92,18 +570,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n12 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), reverse(value) (type: string) + expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -116,14 +594,508 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from newdb.masking_test where key > 0 +PREHOOK: query: select * from newdb.masking_test_n12 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: newdb@masking_test +PREHOOK: Input: newdb@masking_test_n12 #### A masked pattern was here #### -POSTHOOK: query: select * from newdb.masking_test where key > 0 +POSTHOOK: query: select * from newdb.masking_test_n12 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: newdb@masking_test +POSTHOOK: Input: newdb@masking_test_n12 #### A masked pattern was here #### -4 4_lav -8 8_lav -2 2_lav +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 
val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 
+104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 diff --git a/ql/src/test/results/clientpositive/masking_2.q.out b/ql/src/test/results/clientpositive/masking_2.q.out index 907343e109..2d2e69643e 100644 --- a/ql/src/test/results/clientpositive/masking_2.q.out +++ b/ql/src/test/results/clientpositive/masking_2.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: create view masking_test as select cast(key as int) as key, value from src +PREHOOK: query: create view masking_test_n1 as select cast(key as int) as key, value from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create view masking_test as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_n1 +POSTHOOK: query: create view masking_test_n1 as select cast(key as int) as key, value from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain select * from masking_test +POSTHOOK: Output: default@masking_test_n1 +POSTHOOK: Lineage: masking_test_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select * from masking_test_n1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test +POSTHOOK: query: explain select * from masking_test_n1 POSTHOOK: 
type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -27,20 +27,17 @@ STAGE PLANS: properties: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Execution mode: vectorized Stage: Stage-0 @@ -49,25 +46,519 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test +PREHOOK: query: select * from masking_test_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n1 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test +POSTHOOK: query: select * from masking_test_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n1 POSTHOOK: Input: default@src #### A masked pattern was here #### -0 0_lav -4 4_lav -8 8_lav -0 0_lav -0 0_lav -2 2_lav -PREHOOK: query: explain select * from masking_test where key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 
val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 
val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from masking_test_n1 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test where key > 0 +POSTHOOK: query: explain select * from masking_test_n1 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -83,15 +574,15 @@ STAGE PLANS: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (UDFToInteger(key) > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string) + expressions: UDFToInteger(key) (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -104,22 +595,516 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test where key > 0 +PREHOOK: query: select * from masking_test_n1 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n1 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test where key > 0 +POSTHOOK: query: select * from masking_test_n1 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n1 POSTHOOK: Input: default@src #### A masked pattern was here #### -4 
4_lav -8 8_lav -2 2_lav -PREHOOK: query: explain select * from src a join masking_test b on a.key = b.value where b.key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 
+168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from src a join masking_test_n1 b on a.key = b.value where b.key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from src a join masking_test b on a.key = b.value where b.key > 0 +POSTHOOK: query: explain select * from src a join masking_test_n1 b on a.key = b.value where b.key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -151,17 +1136,17 @@ STAGE PLANS: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0) and reverse(value) is not null) (type: boolean) - 
Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: ((UDFToInteger(key) > 0) and value is not null) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string) + expressions: UDFToInteger(key) (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col1 (type: string) sort order: + Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int) Reduce Operator Tree: Join Operator @@ -186,9 +1171,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select a.*, b.key from masking_test a join masking_test b on a.key = b.value where b.key > 0 +PREHOOK: query: explain select a.*, b.key from masking_test_n1 a join masking_test_n1 b on a.key = b.value where b.key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select a.*, b.key from masking_test a join masking_test b on a.key = b.value where b.key > 0 +POSTHOOK: query: explain select a.*, b.key from masking_test_n1 a join masking_test_n1 b on a.key = b.value where b.key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -204,17 +1189,17 @@ STAGE PLANS: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + predicate: key is not null (type: boolean) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string) + expressions: UDFToInteger(key) (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: UDFToDouble(_col0) (type: double) sort order: + Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string) TableScan alias: src @@ -222,17 +1207,17 @@ STAGE PLANS: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0) and reverse(value) is not null) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: ((UDFToInteger(key) > 0) and value is not null) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string) + expressions: UDFToInteger(key) (type: int), value (type: string) outputColumnNames: _col0, _col1 - 
Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: UDFToDouble(_col1) (type: double) sort order: + Map-reduce partition columns: UDFToDouble(_col1) (type: double) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int) Reduce Operator Tree: Join Operator @@ -242,10 +1227,10 @@ STAGE PLANS: 0 UDFToDouble(_col0) (type: double) 1 UDFToDouble(_col1) (type: double) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -257,9 +1242,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select * from masking_test a union select b.* from masking_test b where b.key > 0 +PREHOOK: query: explain select * from masking_test_n1 a union select b.* from masking_test_n1 b where b.key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test a union select b.* from masking_test b where b.key > 0 +POSTHOOK: query: explain select * from masking_test_n1 a union select b.* from masking_test_n1 b where b.key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -274,58 +1259,55 @@ STAGE PLANS: properties: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: int), _col1 (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: 
int), _col1 (type: string) + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE TableScan alias: src properties: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (UDFToInteger(key) > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string) + expressions: UDFToInteger(key) (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Union - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 583 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 55 Data size: 583 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/masking_3.q.out b/ql/src/test/results/clientpositive/masking_3.q.out index bb74c667cb..8553b08335 100644 --- a/ql/src/test/results/clientpositive/masking_3.q.out +++ b/ql/src/test/results/clientpositive/masking_3.q.out @@ -1,796 +1,574 @@ -PREHOOK: query: create table masking_test_subq as select cast(key as int) as key, value from src +PREHOOK: query: create table masking_test_subq_n3 as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test_subq -POSTHOOK: query: create table masking_test_subq as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_subq_n3 +POSTHOOK: query: create table masking_test_subq_n3 as select cast(key as int) as key, value from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test_subq -POSTHOOK: Lineage: masking_test_subq.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] 
-POSTHOOK: Lineage: masking_test_subq.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain select * from masking_test_subq +POSTHOOK: Output: default@masking_test_subq_n3 +POSTHOOK: Lineage: masking_test_subq_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_subq_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select * from masking_test_subq_n3 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test_subq +POSTHOOK: query: explain select * from masking_test_subq_n3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(_col0) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num 
rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double), _col0 (type: int) - 1 UDFToDouble(_col0) (type: double), _col1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_subq_n3 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select * from masking_test_subq +PREHOOK: query: select * from masking_test_subq_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_subq -PREHOOK: Input: default@src +PREHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test_subq +POSTHOOK: query: select * from masking_test_subq_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_subq -POSTHOOK: Input: default@src +POSTHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -0 val_0 -0 val_0 -0 val_0 -2 val_2 -4 val_4 -5 val_5 -5 val_5 -5 val_5 -8 val_8 -9 val_9 -10 val_10 -11 val_11 -12 val_12 -12 val_12 -15 val_15 -15 val_15 -17 val_17 -18 val_18 -18 val_18 -19 val_19 -20 val_20 -24 val_24 -24 val_24 -26 val_26 -26 val_26 -27 val_27 -28 val_28 -30 val_30 -33 val_33 -34 val_34 -35 val_35 -35 val_35 -35 val_35 -37 val_37 -37 val_37 -41 val_41 -42 val_42 -42 val_42 -43 val_43 -44 val_44 -47 val_47 -51 val_51 -51 val_51 -53 val_53 -54 val_54 -57 
val_57 -58 val_58 -58 val_58 -64 val_64 -65 val_65 -66 val_66 -67 val_67 -67 val_67 -69 val_69 -70 val_70 -70 val_70 -70 val_70 -72 val_72 -72 val_72 -74 val_74 -76 val_76 -76 val_76 -77 val_77 -78 val_78 -80 val_80 -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 +238 val_238 86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 98 val_98 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 +213 val_213 146 val_146 -149 val_149 -149 val_149 -150 val_150 -152 val_152 +406 val_406 +429 val_429 +374 val_374 152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -170 val_170 -172 val_172 -172 val_172 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -200 val_200 -200 val_200 -201 val_201 -202 val_202 -203 val_203 +475 val_475 +17 val_17 +113 val_113 +155 val_155 203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 205 val_205 -205 val_205 -207 val_207 -207 val_207 -208 val_208 -208 val_208 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 208 val_208 -209 val_209 -209 val_209 -213 val_213 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 213 val_213 -214 val_214 216 val_216 -216 val_216 -217 val_217 -217 val_217 -218 
val_218 -219 val_219 -219 val_219 -221 val_221 -221 val_221 -222 val_222 -223 val_223 -223 val_223 -224 val_224 -224 val_224 -226 val_226 -228 val_228 -229 val_229 -229 val_229 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -233 val_233 -233 val_233 -235 val_235 -237 val_237 -237 val_237 -238 val_238 -238 val_238 -239 val_239 -239 val_239 -241 val_241 -242 val_242 -242 val_242 -244 val_244 -247 val_247 -248 val_248 -249 val_249 -252 val_252 -255 val_255 -255 val_255 -256 val_256 -256 val_256 -257 val_257 -258 val_258 -260 val_260 -262 val_262 -263 val_263 -265 val_265 -265 val_265 -266 val_266 -272 val_272 -272 val_272 -273 val_273 -273 val_273 -273 val_273 -274 val_274 -275 val_275 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -278 val_278 +430 val_430 278 val_278 -280 val_280 -280 val_280 -281 val_281 -281 val_281 -282 val_282 -282 val_282 -283 val_283 -284 val_284 -285 val_285 -286 val_286 -287 val_287 -288 val_288 -288 val_288 +176 val_176 289 val_289 -291 val_291 -292 val_292 -296 val_296 -298 val_298 -298 val_298 -298 val_298 -302 val_302 -305 val_305 -306 val_306 -307 val_307 -307 val_307 -308 val_308 -309 val_309 -309 val_309 -310 val_310 -311 val_311 -311 val_311 -311 val_311 -315 val_315 -316 val_316 -316 val_316 -316 val_316 -317 val_317 -317 val_317 -318 val_318 -318 val_318 +221 val_221 +65 val_65 318 val_318 -321 val_321 -321 val_321 -322 val_322 -322 val_322 -323 val_323 -325 val_325 -325 val_325 -327 val_327 -327 val_327 -327 val_327 -331 val_331 -331 val_331 332 val_332 -333 val_333 -333 val_333 -335 val_335 -336 val_336 -338 val_338 -339 val_339 -341 val_341 -342 val_342 -342 val_342 -344 val_344 -344 val_344 -345 val_345 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -351 val_351 -353 val_353 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 353 val_353 -356 val_356 -360 val_360 -362 val_362 -364 val_364 -365 val_365 -366 val_366 -367 val_367 -367 val_367 -368 val_368 -369 val_369 -369 val_369 -369 val_369 373 val_373 -374 val_374 -375 val_375 -377 val_377 -378 val_378 -379 val_379 -382 val_382 -382 val_382 -384 val_384 -384 val_384 -384 val_384 -386 val_386 -389 val_389 -392 val_392 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 393 val_393 -394 val_394 -395 val_395 -395 val_395 -396 val_396 -396 val_396 -396 val_396 -397 val_397 -397 val_397 -399 val_399 -399 val_399 -400 val_400 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -402 val_402 -403 val_403 -403 val_403 -403 val_403 -404 val_404 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 404 val_404 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -407 val_407 -409 val_409 -409 val_409 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 409 val_409 -411 val_411 -413 val_413 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 413 
val_413 -414 val_414 -414 val_414 -417 val_417 -417 val_417 -417 val_417 -418 val_418 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 419 val_419 -421 val_421 -424 val_424 -424 val_424 -427 val_427 -429 val_429 -429 val_429 -430 val_430 -430 val_430 -430 val_430 -431 val_431 -431 val_431 -431 val_431 -432 val_432 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 435 val_435 -436 val_436 -437 val_437 -438 val_438 -438 val_438 -438 val_438 -439 val_439 -439 val_439 -443 val_443 -444 val_444 -446 val_446 -448 val_448 -449 val_449 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 452 val_452 -453 val_453 -454 val_454 -454 val_454 -454 val_454 -455 val_455 -457 val_457 -458 val_458 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 458 val_458 -459 val_459 -459 val_459 -460 val_460 -462 val_462 -462 val_462 -463 val_463 -463 val_463 -466 val_466 -466 val_466 -466 val_466 -467 val_467 -468 val_468 -468 val_468 -468 val_468 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -470 val_470 -472 val_472 -475 val_475 -477 val_477 -478 val_478 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 478 val_478 -479 val_479 -480 val_480 -480 val_480 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 480 val_480 -481 val_481 -482 val_482 -483 val_483 -484 val_484 -485 val_485 -487 val_487 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -490 val_490 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 491 val_491 -492 val_492 -492 val_492 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 
+310 val_310 +317 val_317 +333 val_333 493 val_493 -494 val_494 -495 val_495 -496 val_496 -497 val_497 -498 val_498 -498 val_498 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 498 val_498 -PREHOOK: query: explain select * from masking_test_subq where key > 0 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from masking_test_subq_n3 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test_subq where key > 0 +POSTHOOK: query: explain select * from masking_test_subq_n3 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 + Stage-1 is a root stage Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-3 + Stage: Stage-1 Map Reduce Map Operator Tree: TableScan - alias: masking_test_subq + alias: masking_test_subq_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key > 0) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) + File Output Operator + compressed: false Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(_col0) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key > 0) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double), _col0 (type: int) - 1 UDFToDouble(_col0) (type: double), _col1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Stage: Stage-0 Fetch Operator @@ -798,609 +576,525 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test_subq where key > 0 +PREHOOK: query: select * from masking_test_subq_n3 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_subq -PREHOOK: Input: default@src +PREHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -POSTHOOK: query: 
select * from masking_test_subq where key > 0 +POSTHOOK: query: select * from masking_test_subq_n3 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_subq -POSTHOOK: Input: default@src +POSTHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -2 val_2 -4 val_4 -5 val_5 -5 val_5 -5 val_5 -8 val_8 -9 val_9 -10 val_10 -11 val_11 -12 val_12 -12 val_12 -15 val_15 -15 val_15 -17 val_17 -18 val_18 -18 val_18 -19 val_19 -20 val_20 -24 val_24 -24 val_24 -26 val_26 -26 val_26 -27 val_27 -28 val_28 -30 val_30 -33 val_33 -34 val_34 -35 val_35 -35 val_35 -35 val_35 -37 val_37 -37 val_37 -41 val_41 -42 val_42 -42 val_42 -43 val_43 -44 val_44 -47 val_47 -51 val_51 -51 val_51 -53 val_53 -54 val_54 -57 val_57 -58 val_58 -58 val_58 -64 val_64 -65 val_65 -66 val_66 -67 val_67 -67 val_67 -69 val_69 -70 val_70 -70 val_70 -70 val_70 -72 val_72 -72 val_72 -74 val_74 -76 val_76 -76 val_76 -77 val_77 -78 val_78 -80 val_80 -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 +238 val_238 86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 98 val_98 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 +213 val_213 146 val_146 -149 val_149 -149 val_149 -150 val_150 -152 val_152 +406 val_406 +429 val_429 +374 val_374 152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -170 val_170 -172 val_172 -172 val_172 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -200 val_200 -200 val_200 -201 val_201 -202 val_202 -203 val_203 +475 val_475 +17 val_17 +113 val_113 +155 val_155 203 val_203 +339 val_339 +455 val_455 +128 val_128 
+311 val_311 +316 val_316 +57 val_57 +302 val_302 205 val_205 -205 val_205 -207 val_207 -207 val_207 -208 val_208 -208 val_208 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 208 val_208 -209 val_209 -209 val_209 -213 val_213 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 213 val_213 -214 val_214 216 val_216 -216 val_216 -217 val_217 -217 val_217 -218 val_218 -219 val_219 -219 val_219 -221 val_221 +430 val_430 +278 val_278 +176 val_176 +289 val_289 221 val_221 -222 val_222 -223 val_223 -223 val_223 -224 val_224 -224 val_224 -226 val_226 -228 val_228 -229 val_229 -229 val_229 -230 val_230 -230 val_230 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 230 val_230 -233 val_233 -233 val_233 -235 val_235 -237 val_237 -237 val_237 -238 val_238 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 238 val_238 -239 val_239 -239 val_239 -241 val_241 -242 val_242 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 242 val_242 -244 val_244 -247 val_247 -248 val_248 -249 val_249 -252 val_252 -255 val_255 -255 val_255 -256 val_256 -256 val_256 -257 val_257 -258 val_258 -260 val_260 -262 val_262 -263 val_263 -265 val_265 -265 val_265 -266 val_266 -272 val_272 +369 val_369 +392 val_392 272 val_272 -273 val_273 -273 val_273 -273 val_273 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 
+368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 274 val_274 -275 val_275 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -278 val_278 -278 val_278 -280 val_280 -280 val_280 -281 val_281 -281 val_281 -282 val_282 -282 val_282 -283 val_283 -284 val_284 -285 val_285 -286 val_286 -287 val_287 -288 val_288 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 288 val_288 -289 val_289 -291 val_291 -292 val_292 -296 val_296 -298 val_298 -298 val_298 -298 val_298 -302 val_302 -305 val_305 -306 val_306 -307 val_307 -307 val_307 -308 val_308 -309 val_309 -309 val_309 -310 val_310 -311 val_311 -311 val_311 -311 val_311 -315 val_315 -316 val_316 -316 val_316 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 316 val_316 -317 val_317 -317 val_317 -318 val_318 -318 val_318 -318 val_318 -321 val_321 -321 val_321 -322 val_322 -322 val_322 -323 val_323 -325 val_325 -325 val_325 -327 val_327 -327 val_327 -327 val_327 -331 val_331 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 331 val_331 -332 val_332 -333 val_333 -333 val_333 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 335 val_335 -336 val_336 -338 val_338 -339 val_339 -341 val_341 -342 val_342 -342 val_342 -344 val_344 -344 val_344 -345 val_345 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -351 val_351 -353 val_353 -353 val_353 -356 val_356 -360 val_360 -362 val_362 -364 val_364 -365 val_365 +104 val_104 +466 val_466 366 val_366 -367 val_367 -367 val_367 -368 val_368 -369 val_369 -369 val_369 -369 val_369 -373 val_373 -374 val_374 -375 val_375 -377 val_377 -378 val_378 -379 val_379 -382 val_382 -382 val_382 -384 val_384 -384 val_384 -384 val_384 -386 val_386 -389 val_389 -392 val_392 -393 val_393 -394 val_394 -395 val_395 -395 val_395 -396 val_396 -396 val_396 -396 val_396 -397 val_397 -397 val_397 -399 val_399 -399 val_399 -400 val_400 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -402 val_402 -403 val_403 -403 val_403 +175 val_175 403 val_403 -404 val_404 -404 val_404 -406 val_406 -406 val_406 -406 val_406 +483 val_483 +53 val_53 +105 val_105 +257 val_257 406 val_406 -407 val_407 409 val_409 -409 val_409 -409 val_409 -411 val_411 -413 val_413 -413 val_413 -414 val_414 -414 val_414 -417 val_417 -417 val_417 -417 val_417 -418 val_418 -419 val_419 -421 val_421 -424 val_424 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 424 val_424 -427 val_427 -429 val_429 -429 val_429 -430 val_430 -430 val_430 -430 val_430 -431 val_431 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 431 val_431 -432 val_432 -435 val_435 -436 val_436 -437 val_437 -438 val_438 -438 val_438 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 439 val_439 -439 val_439 -443 val_443 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 444 val_444 -446 val_446 -448 val_448 -449 val_449 -452 val_452 -453 val_453 -454 val_454 -454 val_454 -454 val_454 -455 val_455 -457 val_457 -458 val_458 -458 val_458 -459 val_459 -459 val_459 +120 val_120 
+429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 462 val_462 -463 val_463 -463 val_463 -466 val_466 -466 val_466 -466 val_466 -467 val_467 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -470 val_470 -472 val_472 -475 val_475 -477 val_477 -478 val_478 -478 val_478 -479 val_479 -480 val_480 -480 val_480 -480 val_480 -481 val_481 -482 val_482 -483 val_483 -484 val_484 -485 val_485 -487 val_487 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -490 val_490 -491 val_491 492 val_492 -492 val_492 -493 val_493 -494 val_494 -495 val_495 -496 val_496 -497 val_497 -498 val_498 -498 val_498 +100 val_100 +298 val_298 +9 val_9 +341 val_341 498 val_498 -PREHOOK: query: explain select key from masking_test_subq where key > 0 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select key from masking_test_subq_n3 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select key from masking_test_subq where key > 0 +POSTHOOK: query: explain select key from masking_test_subq_n3 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 + Stage-1 is a root stage Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key > 0) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: 
key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(_col0) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-1 Map Reduce Map Operator Tree: TableScan - alias: masking_test_subq + alias: masking_test_subq_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key > 0) (type: boolean) @@ -1409,33 +1103,14 @@ STAGE PLANS: expressions: key (type: int) outputColumnNames: _col0 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int) + File Output Operator + compressed: false Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double), _col0 (type: int) - 1 UDFToDouble(_col0) (type: double), _col1 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized Stage: Stage-0 
Fetch Operator @@ -1443,649 +1118,541 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select key from masking_test_subq where key > 0 +PREHOOK: query: select key from masking_test_subq_n3 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_subq -PREHOOK: Input: default@src +PREHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -POSTHOOK: query: select key from masking_test_subq where key > 0 +POSTHOOK: query: select key from masking_test_subq_n3 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_subq -POSTHOOK: Input: default@src +POSTHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -2 -4 -5 -5 -5 -8 -9 -10 -11 -12 -12 -15 -15 -17 -18 -18 -19 -20 -24 -24 -26 -26 -27 -28 -30 -33 -34 -35 -35 -35 -37 -37 -41 -42 -42 -43 -44 -47 -51 -51 -53 -54 -57 -58 -58 -64 -65 -66 -67 -67 -69 -70 -70 -70 -72 -72 -74 -76 -76 -77 -78 -80 -82 -83 -83 -84 -84 -85 +238 86 -87 -90 -90 -90 -92 -95 -95 -96 -97 -97 -98 +311 +27 +165 +409 +255 +278 98 -100 -100 -103 -103 -104 -104 -105 -111 -113 -113 -114 -116 -118 -118 -119 -119 -119 -120 -120 -125 -125 -126 -128 -128 +484 +265 +193 +401 +150 +273 +224 +369 +66 128 -129 -129 -131 -133 -134 -134 -136 -137 -137 -138 -138 -138 -138 -143 -145 -146 +213 146 -149 -149 -150 -152 +406 +429 +374 152 -153 -155 -156 -157 -158 -160 -162 -163 -164 -164 -165 -165 +469 +145 +495 +37 +327 +281 +277 +209 +15 +82 +403 166 -167 -167 -167 -168 -169 -169 -169 -169 -170 -172 -172 -174 -174 -175 -175 -176 -176 -177 -178 -179 -179 -180 -181 -183 -186 -187 -187 -187 -189 -190 -191 -191 -192 -193 -193 +417 +430 +252 +292 +219 +287 +153 193 -194 -195 -195 -196 -197 -197 -199 -199 -199 -200 -200 -201 -202 -203 -203 -205 -205 -207 +338 +446 +459 +394 +237 +482 +174 +413 +494 207 +199 +466 208 -208 -208 -209 -209 -213 -213 -214 -216 -216 -217 -217 -218 -219 -219 -221 -221 -222 -223 -223 -224 -224 -226 -228 -229 -229 -230 -230 -230 -230 -230 -233 -233 -235 -237 -237 -238 -238 -239 -239 -241 -242 -242 -244 +174 +399 +396 247 -248 -249 -252 -255 -255 -256 -256 -257 -258 -260 -262 -263 -265 -265 +417 +489 +162 +377 +397 +309 +365 266 -272 -272 -273 -273 -273 -274 -275 -277 -277 -277 -277 -278 -278 -280 +439 +342 +367 +325 +167 +195 +475 +17 +113 +155 +203 +339 +455 +128 +311 +316 +57 +302 +205 +149 +438 +345 +129 +170 +20 +489 +157 +378 +221 +92 +111 +47 +72 +4 280 -281 -281 -282 -282 -283 -284 -285 +35 +427 +277 +208 +356 +399 +169 +382 +498 +125 +386 +437 +469 +192 286 -287 -288 -288 +187 +176 +54 +459 +51 +138 +103 +239 +213 +216 +430 +278 +176 289 -291 -292 -296 -298 -298 -298 -302 -305 -306 -307 -307 -308 -309 -309 -310 -311 -311 -311 -315 -316 -316 -316 -317 -317 -318 -318 +221 +65 318 -321 -321 -322 -322 -323 -325 -325 -327 -327 -327 -331 -331 332 +311 +275 +137 +241 +83 333 -333 -335 -336 -338 -339 -341 -342 -342 -344 -344 -345 -348 -348 -348 -348 -348 -351 -353 +180 +284 +12 +230 +181 +67 +260 +404 +384 +489 353 -356 -360 -362 -364 -365 -366 -367 -367 -368 -369 -369 -369 373 -374 -375 -377 -378 -379 -382 -382 -384 -384 -384 -386 -389 -392 -393 -394 -395 -395 -396 -396 -396 -397 -397 -399 -399 -400 -401 -401 -401 -401 -401 -402 -403 -403 -403 -404 +272 +138 +217 +84 +348 +466 +58 +8 +411 +230 +208 +348 +24 +463 +431 +179 +172 +42 +129 +158 +119 +496 +322 +197 +468 +393 +454 +100 +298 +199 +191 +418 +96 +26 +165 +327 +230 +205 +120 +131 +51 404 -406 -406 -406 -406 -407 -409 -409 +43 +436 +156 +469 +468 +308 +95 +196 +288 +481 +457 +98 +282 +197 +187 +318 +318 409 -411 -413 +470 +137 
+369 +316 +169 413 -414 -414 -417 -417 -417 -418 +85 +77 +490 +87 +364 +179 +118 +134 +395 +282 +138 +238 419 -421 -424 -424 -427 -429 -429 -430 -430 -430 -431 -431 -431 -432 +15 +118 +72 +90 +307 +19 435 -436 -437 -438 -438 -438 -439 -439 -443 -444 -446 -448 -449 +10 +277 +273 +306 +224 +309 +389 +327 +242 +369 +392 +272 +331 +401 +242 452 -453 -454 -454 -454 -455 -457 -458 +177 +226 +5 +497 +402 +396 +317 +395 +58 +35 +336 +95 +11 +168 +34 +229 +233 +143 +472 +322 +498 +160 +195 +42 +321 +430 +119 +489 458 -459 -459 -460 -462 -462 -463 -463 -466 -466 -466 -467 -468 -468 -468 +78 +76 +41 +223 +492 +149 +449 +218 +228 +138 +453 +30 +209 +64 468 -469 -469 -469 -469 -469 -470 -472 -475 -477 -478 -478 -479 -480 -480 -480 -481 -482 -483 -484 +76 +74 +342 +69 +230 +33 +368 +103 +296 +113 +216 +367 +344 +167 +274 +219 +239 485 +116 +223 +256 +263 +70 487 -489 -489 -489 -489 -490 +480 +401 +288 +191 +5 +244 +438 +128 +467 +432 +202 +316 +229 +469 +463 +280 +2 +35 +283 +331 +235 +80 +44 +193 +321 +335 +104 +466 +366 +175 +403 +483 +53 +105 +257 +406 +409 +190 +406 +401 +114 +258 +90 +203 +262 +348 +424 +12 +396 +201 +217 +164 +431 +454 +478 +298 +125 +431 +164 +424 +187 +382 +5 +70 +397 +480 +291 +24 +351 +255 +104 +70 +163 +438 +119 +414 +200 491 -492 -492 +237 +439 +360 +248 +479 +305 +417 +199 +444 +120 +429 +169 +443 +323 +325 +277 +230 +478 +178 +468 +310 +317 +333 493 -494 -495 -496 -497 -498 -498 +460 +207 +249 +265 +480 +83 +136 +353 +172 +214 +462 +233 +406 +133 +175 +189 +454 +375 +401 +421 +407 +384 +256 +26 +134 +67 +384 +379 +18 +462 +492 +100 +298 +9 +341 498 -PREHOOK: query: explain select value from masking_test_subq where key > 0 +146 +458 +362 +186 +285 +348 +167 +18 +273 +183 +281 +344 +97 +469 +315 +84 +28 +37 +448 +152 +348 +307 +194 +414 +477 +222 +126 +90 +169 +403 +400 +200 +97 +PREHOOK: query: explain select value from masking_test_subq_n3 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select value from masking_test_subq where key > 0 +POSTHOOK: query: explain select value from masking_test_subq_n3 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 + Stage-1 is a root stage Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key > 0) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - 
alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(_col0) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-1 Map Reduce Map Operator Tree: TableScan - alias: masking_test_subq + alias: masking_test_subq_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key > 0) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 + expressions: value (type: string) + outputColumnNames: _col0 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int) + File Output Operator + compressed: false Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double), _col0 (type: int) - 1 UDFToDouble(_col0) (type: double), _col1 (type: int) - outputColumnNames: _col1 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col1 (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 605 Data size: 6427 Basic stats: 
COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized Stage: Stage-0 Fetch Operator @@ -2093,610 +1660,525 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select value from masking_test_subq where key > 0 +PREHOOK: query: select value from masking_test_subq_n3 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_subq -PREHOOK: Input: default@src +PREHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -POSTHOOK: query: select value from masking_test_subq where key > 0 +POSTHOOK: query: select value from masking_test_subq_n3 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_subq -POSTHOOK: Input: default@src +POSTHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -val_2 -val_4 -val_5 -val_5 -val_5 -val_8 -val_9 -val_10 -val_11 -val_12 -val_12 -val_15 -val_15 -val_17 -val_18 -val_18 -val_19 -val_20 -val_24 -val_24 -val_26 -val_26 -val_27 -val_28 -val_30 -val_33 -val_34 -val_35 -val_35 -val_35 -val_37 -val_37 -val_41 -val_42 -val_42 -val_43 -val_44 -val_47 -val_51 -val_51 -val_53 -val_54 -val_57 -val_58 -val_58 -val_64 -val_65 -val_66 -val_67 -val_67 -val_69 -val_70 -val_70 -val_70 -val_72 -val_72 -val_74 -val_76 -val_76 -val_77 -val_78 -val_80 -val_82 -val_83 -val_83 -val_84 -val_84 -val_85 +val_238 val_86 -val_87 -val_90 -val_90 -val_90 -val_92 -val_95 -val_95 -val_96 -val_97 -val_97 -val_98 +val_311 +val_27 +val_165 +val_409 +val_255 +val_278 val_98 -val_100 -val_100 -val_103 -val_103 -val_104 -val_104 -val_105 -val_111 -val_113 -val_113 -val_114 -val_116 -val_118 -val_118 -val_119 -val_119 -val_119 -val_120 -val_120 -val_125 -val_125 -val_126 -val_128 -val_128 +val_484 +val_265 +val_193 +val_401 +val_150 +val_273 +val_224 +val_369 +val_66 val_128 -val_129 -val_129 -val_131 -val_133 -val_134 -val_134 -val_136 -val_137 -val_137 -val_138 -val_138 -val_138 -val_138 -val_143 -val_145 -val_146 +val_213 val_146 -val_149 -val_149 -val_150 -val_152 +val_406 +val_429 +val_374 val_152 -val_153 -val_155 -val_156 -val_157 -val_158 -val_160 -val_162 -val_163 -val_164 -val_164 -val_165 -val_165 +val_469 +val_145 +val_495 +val_37 +val_327 +val_281 +val_277 +val_209 +val_15 +val_82 +val_403 val_166 -val_167 -val_167 -val_167 -val_168 -val_169 -val_169 -val_169 -val_169 -val_170 -val_172 -val_172 +val_417 +val_430 +val_252 +val_292 +val_219 +val_287 +val_153 +val_193 +val_338 +val_446 +val_459 +val_394 +val_237 +val_482 val_174 +val_413 +val_494 +val_207 +val_199 +val_466 +val_208 val_174 -val_175 -val_175 -val_176 -val_176 -val_177 -val_178 -val_179 -val_179 -val_180 -val_181 -val_183 -val_186 -val_187 -val_187 -val_187 -val_189 -val_190 -val_191 -val_191 -val_192 -val_193 -val_193 -val_193 -val_194 -val_195 +val_399 +val_396 +val_247 +val_417 +val_489 +val_162 +val_377 +val_397 +val_309 +val_365 +val_266 +val_439 +val_342 +val_367 +val_325 +val_167 val_195 -val_196 -val_197 -val_197 -val_199 -val_199 -val_199 -val_200 -val_200 -val_201 -val_202 -val_203 +val_475 +val_17 +val_113 +val_155 val_203 +val_339 +val_455 +val_128 +val_311 +val_316 +val_57 +val_302 val_205 -val_205 -val_207 
-val_207 -val_208 -val_208 -val_208 -val_209 -val_209 -val_213 +val_149 +val_438 +val_345 +val_129 +val_170 +val_20 +val_489 +val_157 +val_378 +val_221 +val_92 +val_111 +val_47 +val_72 +val_4 +val_280 +val_35 +val_427 +val_277 +val_208 +val_356 +val_399 +val_169 +val_382 +val_498 +val_125 +val_386 +val_437 +val_469 +val_192 +val_286 +val_187 +val_176 +val_54 +val_459 +val_51 +val_138 +val_103 +val_239 val_213 -val_214 val_216 -val_216 -val_217 -val_217 -val_218 -val_219 -val_219 -val_221 +val_430 +val_278 +val_176 +val_289 val_221 -val_222 -val_223 -val_223 -val_224 -val_224 -val_226 -val_228 -val_229 -val_229 -val_230 -val_230 -val_230 -val_230 -val_230 -val_233 -val_233 -val_235 -val_237 -val_237 -val_238 -val_238 -val_239 -val_239 +val_65 +val_318 +val_332 +val_311 +val_275 +val_137 val_241 -val_242 -val_242 -val_244 -val_247 -val_248 -val_249 -val_252 -val_255 -val_255 -val_256 -val_256 -val_257 -val_258 +val_83 +val_333 +val_180 +val_284 +val_12 +val_230 +val_181 +val_67 val_260 -val_262 -val_263 -val_265 -val_265 -val_266 -val_272 +val_404 +val_384 +val_489 +val_353 +val_373 val_272 -val_273 -val_273 -val_273 -val_274 -val_275 -val_277 -val_277 -val_277 -val_277 -val_278 -val_278 -val_280 -val_280 -val_281 -val_281 -val_282 -val_282 -val_283 -val_284 -val_285 -val_286 -val_287 -val_288 -val_288 -val_289 -val_291 -val_292 -val_296 -val_298 -val_298 +val_138 +val_217 +val_84 +val_348 +val_466 +val_58 +val_8 +val_411 +val_230 +val_208 +val_348 +val_24 +val_463 +val_431 +val_179 +val_172 +val_42 +val_129 +val_158 +val_119 +val_496 +val_322 +val_197 +val_468 +val_393 +val_454 +val_100 val_298 -val_302 -val_305 -val_306 -val_307 -val_307 +val_199 +val_191 +val_418 +val_96 +val_26 +val_165 +val_327 +val_230 +val_205 +val_120 +val_131 +val_51 +val_404 +val_43 +val_436 +val_156 +val_469 +val_468 val_308 -val_309 -val_309 -val_310 -val_311 -val_311 -val_311 -val_315 -val_316 -val_316 -val_316 -val_317 -val_317 -val_318 +val_95 +val_196 +val_288 +val_481 +val_457 +val_98 +val_282 +val_197 +val_187 val_318 val_318 -val_321 -val_321 -val_322 -val_322 -val_323 -val_325 -val_325 -val_327 +val_409 +val_470 +val_137 +val_369 +val_316 +val_169 +val_413 +val_85 +val_77 +val_490 +val_87 +val_364 +val_179 +val_118 +val_134 +val_395 +val_282 +val_138 +val_238 +val_419 +val_15 +val_118 +val_72 +val_90 +val_307 +val_19 +val_435 +val_10 +val_277 +val_273 +val_306 +val_224 +val_309 +val_389 val_327 -val_327 -val_331 +val_242 +val_369 +val_392 +val_272 val_331 -val_332 -val_333 -val_333 -val_335 +val_401 +val_242 +val_452 +val_177 +val_226 +val_5 +val_497 +val_402 +val_396 +val_317 +val_395 +val_58 +val_35 val_336 -val_338 -val_339 -val_341 -val_342 +val_95 +val_11 +val_168 +val_34 +val_229 +val_233 +val_143 +val_472 +val_322 +val_498 +val_160 +val_195 +val_42 +val_321 +val_430 +val_119 +val_489 +val_458 +val_78 +val_76 +val_41 +val_223 +val_492 +val_149 +val_449 +val_218 +val_228 +val_138 +val_453 +val_30 +val_209 +val_64 +val_468 +val_76 +val_74 val_342 -val_344 -val_344 -val_345 -val_348 -val_348 -val_348 -val_348 -val_348 -val_351 -val_353 -val_353 -val_356 -val_360 -val_362 -val_364 -val_365 -val_366 -val_367 -val_367 +val_69 +val_230 +val_33 val_368 -val_369 -val_369 -val_369 -val_373 -val_374 -val_375 -val_377 -val_378 -val_379 -val_382 -val_382 -val_384 -val_384 -val_384 -val_386 -val_389 -val_392 -val_393 -val_394 -val_395 -val_395 -val_396 -val_396 -val_396 -val_397 -val_397 -val_399 -val_399 -val_400 -val_401 -val_401 -val_401 -val_401 +val_103 +val_296 +val_113 +val_216 +val_367 +val_344 +val_167 
+val_274 +val_219 +val_239 +val_485 +val_116 +val_223 +val_256 +val_263 +val_70 +val_487 +val_480 val_401 -val_402 -val_403 -val_403 +val_288 +val_191 +val_5 +val_244 +val_438 +val_128 +val_467 +val_432 +val_202 +val_316 +val_229 +val_469 +val_463 +val_280 +val_2 +val_35 +val_283 +val_331 +val_235 +val_80 +val_44 +val_193 +val_321 +val_335 +val_104 +val_466 +val_366 +val_175 val_403 -val_404 -val_404 -val_406 -val_406 -val_406 +val_483 +val_53 +val_105 +val_257 val_406 -val_407 val_409 -val_409 -val_409 -val_411 -val_413 -val_413 -val_414 -val_414 -val_417 -val_417 -val_417 -val_418 -val_419 -val_421 -val_424 +val_190 +val_406 +val_401 +val_114 +val_258 +val_90 +val_203 +val_262 +val_348 val_424 -val_427 -val_429 -val_429 -val_430 -val_430 -val_430 -val_431 +val_12 +val_396 +val_201 +val_217 +val_164 val_431 +val_454 +val_478 +val_298 +val_125 val_431 -val_432 -val_435 -val_436 -val_437 -val_438 -val_438 +val_164 +val_424 +val_187 +val_382 +val_5 +val_70 +val_397 +val_480 +val_291 +val_24 +val_351 +val_255 +val_104 +val_70 +val_163 val_438 +val_119 +val_414 +val_200 +val_491 +val_237 val_439 -val_439 -val_443 +val_360 +val_248 +val_479 +val_305 +val_417 +val_199 val_444 -val_446 -val_448 -val_449 -val_452 -val_453 -val_454 -val_454 -val_454 -val_455 -val_457 -val_458 -val_458 -val_459 -val_459 +val_120 +val_429 +val_169 +val_443 +val_323 +val_325 +val_277 +val_230 +val_478 +val_178 +val_468 +val_310 +val_317 +val_333 +val_493 val_460 +val_207 +val_249 +val_265 +val_480 +val_83 +val_136 +val_353 +val_172 +val_214 val_462 +val_233 +val_406 +val_133 +val_175 +val_189 +val_454 +val_375 +val_401 +val_421 +val_407 +val_384 +val_256 +val_26 +val_134 +val_67 +val_384 +val_379 +val_18 val_462 -val_463 -val_463 -val_466 -val_466 -val_466 -val_467 -val_468 -val_468 -val_468 -val_468 -val_469 -val_469 -val_469 -val_469 -val_469 -val_470 -val_472 -val_475 -val_477 -val_478 -val_478 -val_479 -val_480 -val_480 -val_480 -val_481 -val_482 -val_483 -val_484 -val_485 -val_487 -val_489 -val_489 -val_489 -val_489 -val_490 -val_491 val_492 -val_492 -val_493 -val_494 -val_495 -val_496 -val_497 -val_498 -val_498 +val_100 +val_298 +val_9 +val_341 val_498 -PREHOOK: query: explain select * from masking_test_subq join srcpart on (masking_test_subq.key = srcpart.key) +val_146 +val_458 +val_362 +val_186 +val_285 +val_348 +val_167 +val_18 +val_273 +val_183 +val_281 +val_344 +val_97 +val_469 +val_315 +val_84 +val_28 +val_37 +val_448 +val_152 +val_348 +val_307 +val_194 +val_414 +val_477 +val_222 +val_126 +val_90 +val_169 +val_403 +val_400 +val_200 +val_97 +PREHOOK: query: explain select * from masking_test_subq_n3 join srcpart on (masking_test_subq_n3.key = srcpart.key) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test_subq join srcpart on (masking_test_subq.key = srcpart.key) +POSTHOOK: query: explain select * from masking_test_subq_n3 join srcpart on (masking_test_subq_n3.key = srcpart.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-4 is a root stage - Stage-3 depends on stages: Stage-4 - Stage-1 depends on stages: Stage-3 - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-4 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - 
Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(_col0) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-1 Map Reduce Map Operator Tree: TableScan - alias: masking_test_subq + alias: masking_test_subq_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -2706,43 +2188,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int) + key expressions: UDFToDouble(_col0) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(_col0) (type: double) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - 
Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double), _col0 (type: int) - 1 UDFToDouble(_col0) (type: double), _col1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string) + value expressions: _col0 (type: int), _col1 (type: string) TableScan alias: srcpart Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE @@ -2782,20 +2232,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test_subq join srcpart on (masking_test_subq.key = srcpart.key) +PREHOOK: query: select * from masking_test_subq_n3 join srcpart on (masking_test_subq_n3.key = srcpart.key) PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_subq -PREHOOK: Input: default@src +PREHOOK: Input: default@masking_test_subq_n3 PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test_subq join srcpart on (masking_test_subq.key = srcpart.key) +POSTHOOK: query: select * from masking_test_subq_n3 join srcpart on (masking_test_subq_n3.key = srcpart.key) POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_subq -POSTHOOK: Input: default@src +POSTHOOK: Input: default@masking_test_subq_n3 POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 @@ -6914,102 +6362,20 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 498 val_498 498 val_498 2008-04-09 12 498 val_498 498 val_498 2008-04-09 12 498 val_498 498 val_498 2008-04-08 12 -PREHOOK: query: explain select * from default.masking_test_subq where key > 0 +PREHOOK: query: explain select * from default.masking_test_subq_n3 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from default.masking_test_subq where key > 0 +POSTHOOK: query: explain select * from default.masking_test_subq_n3 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 + Stage-1 is a root stage Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key > 
0) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(_col0) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-1 Map Reduce Map Operator Tree: TableScan - alias: masking_test_subq + alias: masking_test_subq_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key > 0) (type: boolean) @@ -7018,34 +6384,14 @@ STAGE PLANS: expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int) + File Output Operator + compressed: false Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - 
value expressions: _col1 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double), _col0 (type: int) - 1 UDFToDouble(_col0) (type: double), _col1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized Stage: Stage-0 Fetch Operator @@ -7053,609 +6399,525 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from default.masking_test_subq where key > 0 +PREHOOK: query: select * from default.masking_test_subq_n3 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_subq -PREHOOK: Input: default@src +PREHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from default.masking_test_subq where key > 0 +POSTHOOK: query: select * from default.masking_test_subq_n3 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_subq -POSTHOOK: Input: default@src +POSTHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -2 val_2 -4 val_4 -5 val_5 -5 val_5 -5 val_5 -8 val_8 -9 val_9 -10 val_10 -11 val_11 -12 val_12 -12 val_12 -15 val_15 -15 val_15 -17 val_17 -18 val_18 -18 val_18 -19 val_19 -20 val_20 -24 val_24 -24 val_24 -26 val_26 -26 val_26 -27 val_27 -28 val_28 -30 val_30 -33 val_33 -34 val_34 -35 val_35 -35 val_35 -35 val_35 -37 val_37 -37 val_37 -41 val_41 -42 val_42 -42 val_42 -43 val_43 -44 val_44 -47 val_47 -51 val_51 -51 val_51 -53 val_53 -54 val_54 -57 val_57 -58 val_58 -58 val_58 -64 val_64 -65 val_65 -66 val_66 -67 val_67 -67 val_67 -69 val_69 -70 val_70 -70 val_70 -70 val_70 -72 val_72 -72 val_72 -74 val_74 -76 val_76 -76 val_76 -77 val_77 -78 val_78 -80 val_80 -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 +238 val_238 86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 98 val_98 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 
val_143 -145 val_145 -146 val_146 +213 val_213 146 val_146 -149 val_149 -149 val_149 -150 val_150 -152 val_152 +406 val_406 +429 val_429 +374 val_374 152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -170 val_170 -172 val_172 -172 val_172 -174 val_174 -174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 193 val_193 -194 val_194 -195 val_195 -195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -200 val_200 -200 val_200 -201 val_201 -202 val_202 -203 val_203 -203 val_203 -205 val_205 -205 val_205 -207 val_207 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 207 val_207 +199 val_199 +466 val_466 208 val_208 -208 val_208 -208 val_208 -209 val_209 -209 val_209 -213 val_213 -213 val_213 -214 val_214 -216 val_216 -216 val_216 -217 val_217 -217 val_217 -218 val_218 -219 val_219 -219 val_219 -221 val_221 -221 val_221 -222 val_222 -223 val_223 -223 val_223 -224 val_224 -224 val_224 -226 val_226 -228 val_228 -229 val_229 -229 val_229 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -233 val_233 -233 val_233 -235 val_235 -237 val_237 -237 val_237 -238 val_238 -238 val_238 -239 val_239 -239 val_239 -241 val_241 -242 val_242 -242 val_242 -244 val_244 +174 val_174 +399 val_399 +396 val_396 247 val_247 -248 val_248 -249 val_249 -252 val_252 -255 val_255 -255 val_255 -256 val_256 -256 val_256 -257 val_257 -258 val_258 -260 val_260 -262 val_262 -263 val_263 -265 val_265 -265 val_265 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 266 val_266 -272 val_272 -272 val_272 -273 val_273 -273 val_273 -273 val_273 -274 val_274 -275 val_275 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -278 val_278 -278 val_278 -280 val_280 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 280 val_280 -281 val_281 -281 val_281 -282 val_282 -282 val_282 -283 val_283 -284 val_284 -285 val_285 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 286 val_286 -287 val_287 -288 val_288 -288 val_288 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 289 val_289 -291 val_291 -292 val_292 -296 val_296 -298 val_298 -298 val_298 -298 val_298 -302 val_302 -305 val_305 -306 
val_306 -307 val_307 -307 val_307 -308 val_308 -309 val_309 -309 val_309 -310 val_310 -311 val_311 -311 val_311 -311 val_311 -315 val_315 -316 val_316 -316 val_316 -316 val_316 -317 val_317 -317 val_317 -318 val_318 -318 val_318 +221 val_221 +65 val_65 318 val_318 -321 val_321 -321 val_321 -322 val_322 -322 val_322 -323 val_323 -325 val_325 -325 val_325 -327 val_327 -327 val_327 -327 val_327 -331 val_331 -331 val_331 332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 333 val_333 -333 val_333 -335 val_335 -336 val_336 -338 val_338 -339 val_339 -341 val_341 -342 val_342 -342 val_342 -344 val_344 -344 val_344 -345 val_345 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -351 val_351 -353 val_353 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 353 val_353 -356 val_356 -360 val_360 -362 val_362 -364 val_364 -365 val_365 -366 val_366 -367 val_367 -367 val_367 -368 val_368 -369 val_369 -369 val_369 -369 val_369 373 val_373 -374 val_374 -375 val_375 -377 val_377 -378 val_378 -379 val_379 -382 val_382 -382 val_382 -384 val_384 -384 val_384 -384 val_384 -386 val_386 -389 val_389 -392 val_392 -393 val_393 -394 val_394 -395 val_395 -395 val_395 -396 val_396 -396 val_396 -396 val_396 -397 val_397 -397 val_397 -399 val_399 -399 val_399 -400 val_400 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -401 val_401 -402 val_402 -403 val_403 -403 val_403 -403 val_403 -404 val_404 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 404 val_404 -406 val_406 -406 val_406 -406 val_406 -406 val_406 -407 val_407 -409 val_409 -409 val_409 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 409 val_409 -411 val_411 -413 val_413 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 413 val_413 -414 val_414 -414 val_414 -417 val_417 -417 val_417 -417 val_417 -418 val_418 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 419 val_419 -421 val_421 -424 val_424 -424 val_424 -427 val_427 -429 val_429 -429 val_429 -430 val_430 -430 val_430 -430 val_430 -431 val_431 -431 val_431 -431 val_431 -432 val_432 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 435 val_435 -436 val_436 -437 val_437 -438 val_438 -438 val_438 -438 val_438 -439 val_439 -439 val_439 -443 val_443 -444 val_444 -446 val_446 -448 val_448 -449 val_449 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 452 val_452 -453 val_453 -454 val_454 -454 val_454 -454 val_454 -455 val_455 -457 val_457 -458 val_458 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 
+168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 458 val_458 -459 val_459 -459 val_459 -460 val_460 -462 val_462 -462 val_462 -463 val_463 -463 val_463 -466 val_466 -466 val_466 -466 val_466 -467 val_467 -468 val_468 -468 val_468 -468 val_468 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 468 val_468 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -470 val_470 -472 val_472 -475 val_475 -477 val_477 -478 val_478 -478 val_478 -479 val_479 -480 val_480 -480 val_480 -480 val_480 -481 val_481 -482 val_482 -483 val_483 -484 val_484 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 487 val_487 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -490 val_490 -491 val_491 -492 val_492 -492 val_492 -493 val_493 -494 val_494 -495 val_495 -496 val_496 -497 val_497 -498 val_498 -498 val_498 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 498 val_498 -PREHOOK: query: explain select * from masking_test_subq where masking_test_subq.key > 0 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 
+97 val_97 +PREHOOK: query: explain select * from masking_test_subq_n3 where masking_test_subq_n3.key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test_subq where masking_test_subq.key > 0 +POSTHOOK: query: explain select * from masking_test_subq_n3 where masking_test_subq_n3.key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 + Stage-1 is a root stage Stage-0 depends on stages: Stage-1 STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key > 0) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(_col0) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-1 Map Reduce Map Operator 
Tree: TableScan - alias: masking_test_subq + alias: masking_test_subq_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key > 0) (type: boolean) @@ -7664,34 +6926,14 @@ STAGE PLANS: expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int) + File Output Operator + compressed: false Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double), _col0 (type: int) - 1 UDFToDouble(_col0) (type: double), _col1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized Stage: Stage-0 Fetch Operator @@ -7699,513 +6941,511 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test_subq where masking_test_subq.key > 0 +PREHOOK: query: select * from masking_test_subq_n3 where masking_test_subq_n3.key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test_subq -PREHOOK: Input: default@src +PREHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test_subq where masking_test_subq.key > 0 +POSTHOOK: query: select * from masking_test_subq_n3 where masking_test_subq_n3.key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test_subq -POSTHOOK: Input: default@src +POSTHOOK: Input: default@masking_test_subq_n3 #### A masked pattern was here #### -2 val_2 -4 val_4 -5 val_5 -5 val_5 -5 val_5 -8 val_8 -9 val_9 -10 val_10 -11 val_11 -12 val_12 -12 val_12 -15 val_15 -15 val_15 -17 val_17 -18 val_18 -18 val_18 -19 val_19 -20 val_20 -24 val_24 -24 val_24 -26 val_26 -26 val_26 -27 val_27 -28 val_28 -30 val_30 -33 val_33 -34 val_34 -35 val_35 -35 val_35 -35 val_35 -37 val_37 -37 val_37 -41 val_41 -42 val_42 -42 val_42 -43 val_43 -44 val_44 -47 val_47 -51 val_51 -51 val_51 -53 val_53 -54 val_54 -57 val_57 -58 val_58 -58 val_58 -64 val_64 -65 val_65 -66 val_66 -67 val_67 -67 val_67 -69 val_69 -70 val_70 -70 val_70 -70 val_70 -72 val_72 -72 val_72 -74 val_74 -76 val_76 -76 val_76 -77 val_77 -78 val_78 -80 val_80 -82 val_82 -83 val_83 -83 val_83 -84 val_84 -84 val_84 -85 val_85 +238 val_238 86 val_86 -87 val_87 -90 val_90 -90 val_90 -90 
val_90 -92 val_92 -95 val_95 -95 val_95 -96 val_96 -97 val_97 -97 val_97 -98 val_98 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 98 val_98 -100 val_100 -100 val_100 -103 val_103 -103 val_103 -104 val_104 -104 val_104 -105 val_105 -111 val_111 -113 val_113 -113 val_113 -114 val_114 -116 val_116 -118 val_118 -118 val_118 -119 val_119 -119 val_119 -119 val_119 -120 val_120 -120 val_120 -125 val_125 -125 val_125 -126 val_126 -128 val_128 -128 val_128 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 128 val_128 -129 val_129 -129 val_129 -131 val_131 -133 val_133 -134 val_134 -134 val_134 -136 val_136 -137 val_137 -137 val_137 -138 val_138 -138 val_138 -138 val_138 -138 val_138 -143 val_143 -145 val_145 -146 val_146 +213 val_213 146 val_146 -149 val_149 -149 val_149 -150 val_150 -152 val_152 +406 val_406 +429 val_429 +374 val_374 152 val_152 -153 val_153 -155 val_155 -156 val_156 -157 val_157 -158 val_158 -160 val_160 -162 val_162 -163 val_163 -164 val_164 -164 val_164 -165 val_165 -165 val_165 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 166 val_166 -167 val_167 -167 val_167 -167 val_167 -168 val_168 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -170 val_170 -172 val_172 -172 val_172 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 174 val_174 -175 val_175 -175 val_175 -176 val_176 -176 val_176 -177 val_177 -178 val_178 -179 val_179 -179 val_179 -180 val_180 -181 val_181 -183 val_183 -186 val_186 -187 val_187 -187 val_187 -187 val_187 -189 val_189 -190 val_190 -191 val_191 -191 val_191 -192 val_192 -193 val_193 -193 val_193 -193 val_193 -194 val_194 -195 val_195 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 195 val_195 -196 val_196 -197 val_197 -197 val_197 -199 val_199 -199 val_199 -199 val_199 -200 val_200 -200 val_200 -201 val_201 -202 val_202 -203 val_203 +475 val_475 +17 val_17 +113 val_113 +155 val_155 203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 205 val_205 -205 val_205 -207 val_207 -207 val_207 -208 val_208 -208 val_208 -208 val_208 -209 val_209 -209 val_209 -213 val_213 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 213 val_213 -214 val_214 -216 val_216 216 val_216 -217 val_217 -217 val_217 -218 val_218 -219 val_219 -219 val_219 -221 val_221 +430 val_430 +278 val_278 +176 val_176 +289 val_289 221 val_221 -222 val_222 -223 val_223 -223 val_223 -224 val_224 -224 val_224 -226 val_226 -228 val_228 -229 val_229 -229 val_229 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -230 val_230 -233 val_233 -233 val_233 -235 val_235 -237 
val_237 -237 val_237 -238 val_238 -238 val_238 -239 val_239 -239 val_239 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 241 val_241 -242 val_242 -242 val_242 -244 val_244 -247 val_247 -248 val_248 -249 val_249 -252 val_252 -255 val_255 -255 val_255 -256 val_256 -256 val_256 -257 val_257 -258 val_258 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 260 val_260 -262 val_262 -263 val_263 -265 val_265 -265 val_265 -266 val_266 -272 val_272 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 272 val_272 -273 val_273 -273 val_273 -273 val_273 -274 val_274 -275 val_275 -277 val_277 -277 val_277 -277 val_277 -277 val_277 -278 val_278 -278 val_278 -280 val_280 -280 val_280 -281 val_281 -281 val_281 -282 val_282 -282 val_282 -283 val_283 -284 val_284 -285 val_285 -286 val_286 -287 val_287 -288 val_288 -288 val_288 -289 val_289 -291 val_291 -292 val_292 -296 val_296 -298 val_298 -298 val_298 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 298 val_298 -302 val_302 -305 val_305 -306 val_306 -307 val_307 -307 val_307 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 308 val_308 -309 val_309 -309 val_309 -310 val_310 -311 val_311 -311 val_311 -311 val_311 -315 val_315 -316 val_316 -316 val_316 -316 val_316 -317 val_317 -317 val_317 -318 val_318 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 318 val_318 318 val_318 -321 val_321 -321 val_321 -322 val_322 -322 val_322 -323 val_323 -325 val_325 -325 val_325 -327 val_327 -327 val_327 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 331 val_331 -331 val_331 -332 val_332 -333 val_333 -333 val_333 -335 val_335 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 336 val_336 -338 val_338 -339 val_339 -341 val_341 -342 val_342 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 342 val_342 -344 val_344 -344 val_344 -345 val_345 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -348 val_348 -351 val_351 -353 val_353 -353 val_353 -356 val_356 -360 val_360 -362 val_362 -364 val_364 -365 val_365 -366 val_366 -367 val_367 -367 val_367 +69 
val_69 +230 val_230 +33 val_33 368 val_368 -369 val_369 -369 val_369 -369 val_369 -373 val_373 -374 val_374 -375 val_375 -377 val_377 -378 val_378 -379 val_379 -382 val_382 -382 val_382 -384 val_384 -384 val_384 -384 val_384 -386 val_386 -389 val_389 -392 val_392 -393 val_393 -394 val_394 -395 val_395 -395 val_395 -396 val_396 -396 val_396 -396 val_396 -397 val_397 -397 val_397 -399 val_399 -399 val_399 -400 val_400 -401 val_401 -401 val_401 -401 val_401 -401 val_401 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 401 val_401 -402 val_402 -403 val_403 -403 val_403 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 403 val_403 -404 val_404 -404 val_404 -406 val_406 -406 val_406 -406 val_406 +483 val_483 +53 val_53 +105 val_105 +257 val_257 406 val_406 -407 val_407 -409 val_409 409 val_409 -409 val_409 -411 val_411 -413 val_413 -413 val_413 -414 val_414 -414 val_414 -417 val_417 -417 val_417 -417 val_417 -418 val_418 -419 val_419 -421 val_421 -424 val_424 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 424 val_424 -427 val_427 -429 val_429 -429 val_429 -430 val_430 -430 val_430 -430 val_430 -431 val_431 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 431 val_431 -432 val_432 -435 val_435 -436 val_436 -437 val_437 -438 val_438 -438 val_438 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 439 val_439 -439 val_439 -443 val_443 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 444 val_444 -446 val_446 -448 val_448 -449 val_449 -452 val_452 -453 val_453 -454 val_454 -454 val_454 -454 val_454 -455 val_455 -457 val_457 -458 val_458 -458 val_458 -459 val_459 -459 val_459 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 462 val_462 -463 val_463 -463 val_463 -466 val_466 -466 val_466 -466 val_466 -467 val_467 -468 val_468 -468 val_468 -468 val_468 -468 val_468 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -469 val_469 -470 val_470 -472 val_472 -475 val_475 -477 val_477 -478 val_478 -478 val_478 -479 val_479 -480 val_480 -480 val_480 -480 val_480 -481 val_481 -482 val_482 -483 val_483 -484 val_484 -485 val_485 -487 val_487 -489 val_489 -489 val_489 -489 val_489 -489 val_489 -490 val_490 -491 val_491 -492 val_492 492 val_492 -493 val_493 
-494 val_494 -495 val_495 -496 val_496 -497 val_497 -498 val_498 -498 val_498 +100 val_100 +298 val_298 +9 val_9 +341 val_341 498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 PREHOOK: query: explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0 PREHOOK: type: QUERY POSTHOOK: query: explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0 diff --git a/ql/src/test/results/clientpositive/masking_4.q.out b/ql/src/test/results/clientpositive/masking_4.q.out index dce4f6eaff..068c484d3b 100644 --- a/ql/src/test/results/clientpositive/masking_4.q.out +++ b/ql/src/test/results/clientpositive/masking_4.q.out @@ -1,27 +1,27 @@ -PREHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: query: create table masking_test_n11 as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_n11 +POSTHOOK: query: create table masking_test_n11 as select cast(key as int) as key, value from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table masking_test_subq as select cast(key as int) as key, value from src +POSTHOOK: Output: default@masking_test_n11 +POSTHOOK: Lineage: masking_test_n11.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n11.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table masking_test_subq_n2 as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test_subq -POSTHOOK: query: create table masking_test_subq as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_subq_n2 +POSTHOOK: query: create table masking_test_subq_n2 as select cast(key as int) as key, value from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test_subq -POSTHOOK: Lineage: masking_test_subq.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test_subq.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@masking_test_subq_n2 +POSTHOOK: Lineage: masking_test_subq_n2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] 
+POSTHOOK: Lineage: masking_test_subq_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') @@ -66,11 +66,11 @@ STAGE PLANS: ListSink PREHOOK: query: explain -with q1 as ( select * from masking_test where key = '5') +with q1 as ( select * from masking_test_n11 where key = '5') select * from q1 PREHOOK: type: QUERY POSTHOOK: query: explain -with q1 as ( select * from masking_test where key = '5') +with q1 as ( select * from masking_test_n11 where key = '5') select * from q1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -82,18 +82,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n11 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key = 5)) (type: boolean) - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + predicate: (key = 5) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 5 (type: int), reverse(value) (type: string) + expressions: 5 (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -107,12 +107,12 @@ STAGE PLANS: ListSink PREHOOK: query: explain -with masking_test_subq as ( select * from masking_test where key = '5') -select * from masking_test_subq +with masking_test_subq_n2 as ( select * from masking_test_n11 where key = '5') +select * from masking_test_subq_n2 PREHOOK: type: QUERY POSTHOOK: query: explain -with masking_test_subq as ( select * from masking_test where key = '5') -select * from masking_test_subq +with masking_test_subq_n2 as ( select * from masking_test_n11 where key = '5') +select * from masking_test_subq_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -123,18 +123,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n11 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key = 5)) (type: boolean) - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + predicate: (key = 5) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 5 (type: int), reverse(value) (type: string) + expressions: 5 (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -148,145 +148,27 @@ STAGE PLANS: ListSink PREHOOK: query: explain -with q1 as ( select * from masking_test where key = '5') -select * from masking_test_subq +with q1 as ( select * from masking_test_n11 where key = '5') +select * from masking_test_subq_n2 PREHOOK: type: QUERY POSTHOOK: query: explain -with q1 as ( select * from masking_test where key = '5') -select * from masking_test_subq +with q1 as ( select * from masking_test_n11 where key = '5') +select * from masking_test_subq_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-3 is a root stage - Stage-2 depends on stages: Stage-3 - Stage-1 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: key (type: int) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) - Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(_col0) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: int) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE 
Column stats: NONE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col0 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col0 (type: int) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - TableScan - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), _col1 (type: int) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(_col0) (type: double), _col0 (type: int) - 1 UDFToDouble(_col0) (type: double), _col1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_subq_n2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink diff --git a/ql/src/test/results/clientpositive/masking_5.q.out b/ql/src/test/results/clientpositive/masking_5.q.out index 2c8f398da0..498fc117c7 100644 --- a/ql/src/test/results/clientpositive/masking_5.q.out +++ b/ql/src/test/results/clientpositive/masking_5.q.out @@ -1,66 +1,60 @@ -PREHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: query: create table masking_test_n6 as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_n6 +POSTHOOK: query: create table masking_test_n6 as select cast(key as int) as key, value from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain select * from masking_test tablesample (10 rows) +POSTHOOK: Output: default@masking_test_n6 +POSTHOOK: Lineage: masking_test_n6.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select * from masking_test_n6 tablesample (10 rows) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test tablesample (10 rows) +POSTHOOK: query: explain select * from masking_test_n6 tablesample (10 rows) POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test - Row Limit Per Split: 10 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_n6 + Row Limit Per Split: 10 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select * from masking_test tablesample (10 rows) +PREHOOK: query: select * from masking_test_n6 tablesample (10 rows) PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test tablesample (10 rows) +POSTHOOK: query: select * from masking_test_n6 tablesample (10 rows) POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n6 #### A masked pattern was here #### +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 PREHOOK: query: explain -select * from masking_test tablesample(1 percent) +select * from masking_test_n6 tablesample(1 percent) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from masking_test tablesample(1 percent) +select * from masking_test_n6 tablesample(1 percent) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -71,22 +65,19 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n6 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 
881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Execution mode: vectorized Stage: Stage-0 @@ -95,53 +86,547 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test tablesample(1 percent) +PREHOOK: query: select * from masking_test_n6 tablesample(1 percent) PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test tablesample(1 percent) +POSTHOOK: query: select * from masking_test_n6 tablesample(1 percent) POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n6 #### A masked pattern was here #### -0 0_lav -4 4_lav -8 8_lav -0 0_lav -0 0_lav -2 2_lav -PREHOOK: query: drop table masking_test +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 
+289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 
val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: drop table masking_test_n6 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@masking_test -PREHOOK: Output: default@masking_test -POSTHOOK: query: drop table masking_test +PREHOOK: Input: default@masking_test_n6 +PREHOOK: Output: default@masking_test_n6 +POSTHOOK: query: drop table masking_test_n6 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@masking_test -POSTHOOK: Output: default@masking_test -PREHOOK: query: CREATE TABLE masking_test(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: Input: default@masking_test_n6 +POSTHOOK: Output: default@masking_test_n6 +PREHOOK: query: CREATE TABLE masking_test_n6(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: CREATE TABLE masking_test(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: Output: default@masking_test_n6 +POSTHOOK: query: CREATE TABLE masking_test_n6(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -PREHOOK: query: insert overwrite table masking_test +POSTHOOK: Output: default@masking_test_n6 +PREHOOK: query: insert overwrite table masking_test_n6 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@masking_test -POSTHOOK: query: insert overwrite table masking_test +PREHOOK: Output: default@masking_test_n6 +POSTHOOK: query: insert overwrite table masking_test_n6 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@masking_test_n6 +POSTHOOK: Lineage: masking_test_n6.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain -select * from masking_test tablesample (bucket 1 out of 2) s +select * from masking_test_n6 tablesample (bucket 1 out of 2) s PREHOOK: type: QUERY POSTHOOK: query: explain -select * from masking_test tablesample (bucket 1 out of 2) s +select * from masking_test_n6 tablesample (bucket 1 
out of 2) s POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -152,18 +637,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: s Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((hash(key) & 2147483647) % 2) = 0) and ((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), reverse(value) (type: string) + expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -176,12 +661,130 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test tablesample (bucket 1 out of 2) s +PREHOOK: query: select * from masking_test_n6 tablesample (bucket 1 out of 2) s PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test tablesample (bucket 1 out of 2) s +POSTHOOK: query: select * from masking_test_n6 tablesample (bucket 1 out of 2) s POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n6 #### A masked pattern was here #### -2 2_lav +238 val_238 +90 val_90 +126 val_126 +414 val_414 +348 val_348 +84 val_84 +344 val_344 +348 val_348 +362 val_362 +458 val_458 +146 val_146 +498 val_498 +384 val_384 +134 val_134 +256 val_256 +384 val_384 +454 val_454 +214 val_214 +480 val_480 +310 val_310 +178 val_178 +478 val_478 +444 val_444 +248 val_248 +360 val_360 +414 val_414 +70 val_70 +480 val_480 +70 val_70 +164 val_164 +478 val_478 +454 val_454 +164 val_164 +396 val_396 +348 val_348 +90 val_90 +190 val_190 +466 val_466 +80 val_80 +2 val_2 +316 val_316 +202 val_202 +432 val_432 +244 val_244 +480 val_480 +70 val_70 +256 val_256 +116 val_116 +344 val_344 +216 val_216 +368 val_368 +342 val_342 +64 val_64 +30 val_30 +458 val_458 +430 val_430 +498 val_498 +472 val_472 +336 val_336 +58 val_58 +396 val_396 +402 val_402 +226 val_226 +452 val_452 +272 val_272 +392 val_392 +10 val_10 +90 val_90 +238 val_238 +134 val_134 +364 val_364 +490 val_490 +316 val_316 +98 val_98 +308 val_308 +156 val_156 +404 val_404 +96 val_96 +454 val_454 +158 val_158 +348 val_348 +208 val_208 +58 val_58 +466 val_466 +348 val_348 +84 val_84 +272 val_272 +384 val_384 +404 val_404 +260 val_260 +284 val_284 +332 val_332 +430 val_430 +216 val_216 +54 val_54 +286 val_286 +386 val_386 +498 val_498 +208 val_208 +378 val_378 +20 val_20 +316 val_316 +342 val_342 +266 val_266 +162 val_162 +396 val_396 +208 val_208 +466 val_466 +482 val_482 +394 val_394 +446 val_446 +338 val_338 +292 val_292 +252 val_252 +430 val_430 +146 val_146 +484 val_484 +98 val_98 +86 val_86 diff --git a/ql/src/test/results/clientpositive/masking_6.q.out 
b/ql/src/test/results/clientpositive/masking_6.q.out index b0dfe05ff0..bee347f507 100644 --- a/ql/src/test/results/clientpositive/masking_6.q.out +++ b/ql/src/test/results/clientpositive/masking_6.q.out @@ -1,23 +1,23 @@ -PREHOOK: query: drop view masking_test +PREHOOK: query: drop view masking_test_n0 PREHOOK: type: DROPVIEW -POSTHOOK: query: drop view masking_test +POSTHOOK: query: drop view masking_test_n0 POSTHOOK: type: DROPVIEW -PREHOOK: query: create view masking_test as select cast(key as int) as key, value, '12' from src +PREHOOK: query: create view masking_test_n0 as select cast(key as int) as key, value, '12' from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create view masking_test as select cast(key as int) as key, value, '12' from src +PREHOOK: Output: default@masking_test_n0 +POSTHOOK: query: create view masking_test_n0 as select cast(key as int) as key, value, '12' from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test._c2 SIMPLE [] -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain select * from masking_test +POSTHOOK: Output: default@masking_test_n0 +POSTHOOK: Lineage: masking_test_n0._c2 SIMPLE [] +POSTHOOK: Lineage: masking_test_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select * from masking_test_n0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test +POSTHOOK: query: explain select * from masking_test_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -32,20 +32,17 @@ STAGE PLANS: properties: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string), '12' (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string), '12' (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Execution mode: vectorized Stage: 
Stage-0 @@ -54,25 +51,519 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test +PREHOOK: query: select * from masking_test_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n0 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test +POSTHOOK: query: select * from masking_test_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n0 POSTHOOK: Input: default@src #### A masked pattern was here #### -0 0_lav 12 -4 4_lav 12 -8 8_lav 12 -0 0_lav 12 -0 0_lav 12 -2 2_lav 12 -PREHOOK: query: explain select * from masking_test where key > 0 +238 val_238 12 +86 val_86 12 +311 val_311 12 +27 val_27 12 +165 val_165 12 +409 val_409 12 +255 val_255 12 +278 val_278 12 +98 val_98 12 +484 val_484 12 +265 val_265 12 +193 val_193 12 +401 val_401 12 +150 val_150 12 +273 val_273 12 +224 val_224 12 +369 val_369 12 +66 val_66 12 +128 val_128 12 +213 val_213 12 +146 val_146 12 +406 val_406 12 +429 val_429 12 +374 val_374 12 +152 val_152 12 +469 val_469 12 +145 val_145 12 +495 val_495 12 +37 val_37 12 +327 val_327 12 +281 val_281 12 +277 val_277 12 +209 val_209 12 +15 val_15 12 +82 val_82 12 +403 val_403 12 +166 val_166 12 +417 val_417 12 +430 val_430 12 +252 val_252 12 +292 val_292 12 +219 val_219 12 +287 val_287 12 +153 val_153 12 +193 val_193 12 +338 val_338 12 +446 val_446 12 +459 val_459 12 +394 val_394 12 +237 val_237 12 +482 val_482 12 +174 val_174 12 +413 val_413 12 +494 val_494 12 +207 val_207 12 +199 val_199 12 +466 val_466 12 +208 val_208 12 +174 val_174 12 +399 val_399 12 +396 val_396 12 +247 val_247 12 +417 val_417 12 +489 val_489 12 +162 val_162 12 +377 val_377 12 +397 val_397 12 +309 val_309 12 +365 val_365 12 +266 val_266 12 +439 val_439 12 +342 val_342 12 +367 val_367 12 +325 val_325 12 +167 val_167 12 +195 val_195 12 +475 val_475 12 +17 val_17 12 +113 val_113 12 +155 val_155 12 +203 val_203 12 +339 val_339 12 +0 val_0 12 +455 val_455 12 +128 val_128 12 +311 val_311 12 +316 val_316 12 +57 val_57 12 +302 val_302 12 +205 val_205 12 +149 val_149 12 +438 val_438 12 +345 val_345 12 +129 val_129 12 +170 val_170 12 +20 val_20 12 +489 val_489 12 +157 val_157 12 +378 val_378 12 +221 val_221 12 +92 val_92 12 +111 val_111 12 +47 val_47 12 +72 val_72 12 +4 val_4 12 +280 val_280 12 +35 val_35 12 +427 val_427 12 +277 val_277 12 +208 val_208 12 +356 val_356 12 +399 val_399 12 +169 val_169 12 +382 val_382 12 +498 val_498 12 +125 val_125 12 +386 val_386 12 +437 val_437 12 +469 val_469 12 +192 val_192 12 +286 val_286 12 +187 val_187 12 +176 val_176 12 +54 val_54 12 +459 val_459 12 +51 val_51 12 +138 val_138 12 +103 val_103 12 +239 val_239 12 +213 val_213 12 +216 val_216 12 +430 val_430 12 +278 val_278 12 +176 val_176 12 +289 val_289 12 +221 val_221 12 +65 val_65 12 +318 val_318 12 +332 val_332 12 +311 val_311 12 +275 val_275 12 +137 val_137 12 +241 val_241 12 +83 val_83 12 +333 val_333 12 +180 val_180 12 +284 val_284 12 +12 val_12 12 +230 val_230 12 +181 val_181 12 +67 val_67 12 +260 val_260 12 +404 val_404 12 +384 val_384 12 +489 val_489 12 +353 val_353 12 +373 val_373 12 +272 val_272 12 +138 val_138 12 +217 val_217 12 +84 val_84 12 +348 val_348 12 +466 val_466 12 +58 val_58 12 +8 val_8 12 +411 val_411 12 +230 val_230 12 +208 val_208 12 +348 val_348 12 +24 val_24 12 +463 val_463 12 +431 val_431 12 +179 val_179 12 +172 val_172 12 +42 val_42 12 +129 val_129 12 +158 val_158 12 +119 val_119 12 +496 val_496 12 +0 val_0 12 +322 val_322 12 
+197 val_197 12 +468 val_468 12 +393 val_393 12 +454 val_454 12 +100 val_100 12 +298 val_298 12 +199 val_199 12 +191 val_191 12 +418 val_418 12 +96 val_96 12 +26 val_26 12 +165 val_165 12 +327 val_327 12 +230 val_230 12 +205 val_205 12 +120 val_120 12 +131 val_131 12 +51 val_51 12 +404 val_404 12 +43 val_43 12 +436 val_436 12 +156 val_156 12 +469 val_469 12 +468 val_468 12 +308 val_308 12 +95 val_95 12 +196 val_196 12 +288 val_288 12 +481 val_481 12 +457 val_457 12 +98 val_98 12 +282 val_282 12 +197 val_197 12 +187 val_187 12 +318 val_318 12 +318 val_318 12 +409 val_409 12 +470 val_470 12 +137 val_137 12 +369 val_369 12 +316 val_316 12 +169 val_169 12 +413 val_413 12 +85 val_85 12 +77 val_77 12 +0 val_0 12 +490 val_490 12 +87 val_87 12 +364 val_364 12 +179 val_179 12 +118 val_118 12 +134 val_134 12 +395 val_395 12 +282 val_282 12 +138 val_138 12 +238 val_238 12 +419 val_419 12 +15 val_15 12 +118 val_118 12 +72 val_72 12 +90 val_90 12 +307 val_307 12 +19 val_19 12 +435 val_435 12 +10 val_10 12 +277 val_277 12 +273 val_273 12 +306 val_306 12 +224 val_224 12 +309 val_309 12 +389 val_389 12 +327 val_327 12 +242 val_242 12 +369 val_369 12 +392 val_392 12 +272 val_272 12 +331 val_331 12 +401 val_401 12 +242 val_242 12 +452 val_452 12 +177 val_177 12 +226 val_226 12 +5 val_5 12 +497 val_497 12 +402 val_402 12 +396 val_396 12 +317 val_317 12 +395 val_395 12 +58 val_58 12 +35 val_35 12 +336 val_336 12 +95 val_95 12 +11 val_11 12 +168 val_168 12 +34 val_34 12 +229 val_229 12 +233 val_233 12 +143 val_143 12 +472 val_472 12 +322 val_322 12 +498 val_498 12 +160 val_160 12 +195 val_195 12 +42 val_42 12 +321 val_321 12 +430 val_430 12 +119 val_119 12 +489 val_489 12 +458 val_458 12 +78 val_78 12 +76 val_76 12 +41 val_41 12 +223 val_223 12 +492 val_492 12 +149 val_149 12 +449 val_449 12 +218 val_218 12 +228 val_228 12 +138 val_138 12 +453 val_453 12 +30 val_30 12 +209 val_209 12 +64 val_64 12 +468 val_468 12 +76 val_76 12 +74 val_74 12 +342 val_342 12 +69 val_69 12 +230 val_230 12 +33 val_33 12 +368 val_368 12 +103 val_103 12 +296 val_296 12 +113 val_113 12 +216 val_216 12 +367 val_367 12 +344 val_344 12 +167 val_167 12 +274 val_274 12 +219 val_219 12 +239 val_239 12 +485 val_485 12 +116 val_116 12 +223 val_223 12 +256 val_256 12 +263 val_263 12 +70 val_70 12 +487 val_487 12 +480 val_480 12 +401 val_401 12 +288 val_288 12 +191 val_191 12 +5 val_5 12 +244 val_244 12 +438 val_438 12 +128 val_128 12 +467 val_467 12 +432 val_432 12 +202 val_202 12 +316 val_316 12 +229 val_229 12 +469 val_469 12 +463 val_463 12 +280 val_280 12 +2 val_2 12 +35 val_35 12 +283 val_283 12 +331 val_331 12 +235 val_235 12 +80 val_80 12 +44 val_44 12 +193 val_193 12 +321 val_321 12 +335 val_335 12 +104 val_104 12 +466 val_466 12 +366 val_366 12 +175 val_175 12 +403 val_403 12 +483 val_483 12 +53 val_53 12 +105 val_105 12 +257 val_257 12 +406 val_406 12 +409 val_409 12 +190 val_190 12 +406 val_406 12 +401 val_401 12 +114 val_114 12 +258 val_258 12 +90 val_90 12 +203 val_203 12 +262 val_262 12 +348 val_348 12 +424 val_424 12 +12 val_12 12 +396 val_396 12 +201 val_201 12 +217 val_217 12 +164 val_164 12 +431 val_431 12 +454 val_454 12 +478 val_478 12 +298 val_298 12 +125 val_125 12 +431 val_431 12 +164 val_164 12 +424 val_424 12 +187 val_187 12 +382 val_382 12 +5 val_5 12 +70 val_70 12 +397 val_397 12 +480 val_480 12 +291 val_291 12 +24 val_24 12 +351 val_351 12 +255 val_255 12 +104 val_104 12 +70 val_70 12 +163 val_163 12 +438 val_438 12 +119 val_119 12 +414 val_414 12 +200 val_200 12 +491 val_491 12 +237 val_237 12 +439 val_439 12 +360 
val_360 12 +248 val_248 12 +479 val_479 12 +305 val_305 12 +417 val_417 12 +199 val_199 12 +444 val_444 12 +120 val_120 12 +429 val_429 12 +169 val_169 12 +443 val_443 12 +323 val_323 12 +325 val_325 12 +277 val_277 12 +230 val_230 12 +478 val_478 12 +178 val_178 12 +468 val_468 12 +310 val_310 12 +317 val_317 12 +333 val_333 12 +493 val_493 12 +460 val_460 12 +207 val_207 12 +249 val_249 12 +265 val_265 12 +480 val_480 12 +83 val_83 12 +136 val_136 12 +353 val_353 12 +172 val_172 12 +214 val_214 12 +462 val_462 12 +233 val_233 12 +406 val_406 12 +133 val_133 12 +175 val_175 12 +189 val_189 12 +454 val_454 12 +375 val_375 12 +401 val_401 12 +421 val_421 12 +407 val_407 12 +384 val_384 12 +256 val_256 12 +26 val_26 12 +134 val_134 12 +67 val_67 12 +384 val_384 12 +379 val_379 12 +18 val_18 12 +462 val_462 12 +492 val_492 12 +100 val_100 12 +298 val_298 12 +9 val_9 12 +341 val_341 12 +498 val_498 12 +146 val_146 12 +458 val_458 12 +362 val_362 12 +186 val_186 12 +285 val_285 12 +348 val_348 12 +167 val_167 12 +18 val_18 12 +273 val_273 12 +183 val_183 12 +281 val_281 12 +344 val_344 12 +97 val_97 12 +469 val_469 12 +315 val_315 12 +84 val_84 12 +28 val_28 12 +37 val_37 12 +448 val_448 12 +152 val_152 12 +348 val_348 12 +307 val_307 12 +194 val_194 12 +414 val_414 12 +477 val_477 12 +222 val_222 12 +126 val_126 12 +90 val_90 12 +169 val_169 12 +403 val_403 12 +400 val_400 12 +200 val_200 12 +97 val_97 12 +PREHOOK: query: explain select * from masking_test_n0 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test where key > 0 +POSTHOOK: query: explain select * from masking_test_n0 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -88,15 +579,15 @@ STAGE PLANS: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (UDFToInteger(key) > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string), '12' (type: string) + expressions: UDFToInteger(key) (type: int), value (type: string), '12' (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -109,56 +600,550 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test where key > 0 +PREHOOK: query: select * from masking_test_n0 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n0 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test where key > 0 +POSTHOOK: query: select * from masking_test_n0 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n0 POSTHOOK: Input: 
default@src #### A masked pattern was here #### -4 4_lav 12 -8 8_lav 12 -2 2_lav 12 -PREHOOK: query: drop view masking_test +238 val_238 12 +86 val_86 12 +311 val_311 12 +27 val_27 12 +165 val_165 12 +409 val_409 12 +255 val_255 12 +278 val_278 12 +98 val_98 12 +484 val_484 12 +265 val_265 12 +193 val_193 12 +401 val_401 12 +150 val_150 12 +273 val_273 12 +224 val_224 12 +369 val_369 12 +66 val_66 12 +128 val_128 12 +213 val_213 12 +146 val_146 12 +406 val_406 12 +429 val_429 12 +374 val_374 12 +152 val_152 12 +469 val_469 12 +145 val_145 12 +495 val_495 12 +37 val_37 12 +327 val_327 12 +281 val_281 12 +277 val_277 12 +209 val_209 12 +15 val_15 12 +82 val_82 12 +403 val_403 12 +166 val_166 12 +417 val_417 12 +430 val_430 12 +252 val_252 12 +292 val_292 12 +219 val_219 12 +287 val_287 12 +153 val_153 12 +193 val_193 12 +338 val_338 12 +446 val_446 12 +459 val_459 12 +394 val_394 12 +237 val_237 12 +482 val_482 12 +174 val_174 12 +413 val_413 12 +494 val_494 12 +207 val_207 12 +199 val_199 12 +466 val_466 12 +208 val_208 12 +174 val_174 12 +399 val_399 12 +396 val_396 12 +247 val_247 12 +417 val_417 12 +489 val_489 12 +162 val_162 12 +377 val_377 12 +397 val_397 12 +309 val_309 12 +365 val_365 12 +266 val_266 12 +439 val_439 12 +342 val_342 12 +367 val_367 12 +325 val_325 12 +167 val_167 12 +195 val_195 12 +475 val_475 12 +17 val_17 12 +113 val_113 12 +155 val_155 12 +203 val_203 12 +339 val_339 12 +455 val_455 12 +128 val_128 12 +311 val_311 12 +316 val_316 12 +57 val_57 12 +302 val_302 12 +205 val_205 12 +149 val_149 12 +438 val_438 12 +345 val_345 12 +129 val_129 12 +170 val_170 12 +20 val_20 12 +489 val_489 12 +157 val_157 12 +378 val_378 12 +221 val_221 12 +92 val_92 12 +111 val_111 12 +47 val_47 12 +72 val_72 12 +4 val_4 12 +280 val_280 12 +35 val_35 12 +427 val_427 12 +277 val_277 12 +208 val_208 12 +356 val_356 12 +399 val_399 12 +169 val_169 12 +382 val_382 12 +498 val_498 12 +125 val_125 12 +386 val_386 12 +437 val_437 12 +469 val_469 12 +192 val_192 12 +286 val_286 12 +187 val_187 12 +176 val_176 12 +54 val_54 12 +459 val_459 12 +51 val_51 12 +138 val_138 12 +103 val_103 12 +239 val_239 12 +213 val_213 12 +216 val_216 12 +430 val_430 12 +278 val_278 12 +176 val_176 12 +289 val_289 12 +221 val_221 12 +65 val_65 12 +318 val_318 12 +332 val_332 12 +311 val_311 12 +275 val_275 12 +137 val_137 12 +241 val_241 12 +83 val_83 12 +333 val_333 12 +180 val_180 12 +284 val_284 12 +12 val_12 12 +230 val_230 12 +181 val_181 12 +67 val_67 12 +260 val_260 12 +404 val_404 12 +384 val_384 12 +489 val_489 12 +353 val_353 12 +373 val_373 12 +272 val_272 12 +138 val_138 12 +217 val_217 12 +84 val_84 12 +348 val_348 12 +466 val_466 12 +58 val_58 12 +8 val_8 12 +411 val_411 12 +230 val_230 12 +208 val_208 12 +348 val_348 12 +24 val_24 12 +463 val_463 12 +431 val_431 12 +179 val_179 12 +172 val_172 12 +42 val_42 12 +129 val_129 12 +158 val_158 12 +119 val_119 12 +496 val_496 12 +322 val_322 12 +197 val_197 12 +468 val_468 12 +393 val_393 12 +454 val_454 12 +100 val_100 12 +298 val_298 12 +199 val_199 12 +191 val_191 12 +418 val_418 12 +96 val_96 12 +26 val_26 12 +165 val_165 12 +327 val_327 12 +230 val_230 12 +205 val_205 12 +120 val_120 12 +131 val_131 12 +51 val_51 12 +404 val_404 12 +43 val_43 12 +436 val_436 12 +156 val_156 12 +469 val_469 12 +468 val_468 12 +308 val_308 12 +95 val_95 12 +196 val_196 12 +288 val_288 12 +481 val_481 12 +457 val_457 12 +98 val_98 12 +282 val_282 12 +197 val_197 12 +187 val_187 12 +318 val_318 12 +318 val_318 12 +409 val_409 12 +470 val_470 12 +137 val_137 12 +369 
val_369 12 +316 val_316 12 +169 val_169 12 +413 val_413 12 +85 val_85 12 +77 val_77 12 +490 val_490 12 +87 val_87 12 +364 val_364 12 +179 val_179 12 +118 val_118 12 +134 val_134 12 +395 val_395 12 +282 val_282 12 +138 val_138 12 +238 val_238 12 +419 val_419 12 +15 val_15 12 +118 val_118 12 +72 val_72 12 +90 val_90 12 +307 val_307 12 +19 val_19 12 +435 val_435 12 +10 val_10 12 +277 val_277 12 +273 val_273 12 +306 val_306 12 +224 val_224 12 +309 val_309 12 +389 val_389 12 +327 val_327 12 +242 val_242 12 +369 val_369 12 +392 val_392 12 +272 val_272 12 +331 val_331 12 +401 val_401 12 +242 val_242 12 +452 val_452 12 +177 val_177 12 +226 val_226 12 +5 val_5 12 +497 val_497 12 +402 val_402 12 +396 val_396 12 +317 val_317 12 +395 val_395 12 +58 val_58 12 +35 val_35 12 +336 val_336 12 +95 val_95 12 +11 val_11 12 +168 val_168 12 +34 val_34 12 +229 val_229 12 +233 val_233 12 +143 val_143 12 +472 val_472 12 +322 val_322 12 +498 val_498 12 +160 val_160 12 +195 val_195 12 +42 val_42 12 +321 val_321 12 +430 val_430 12 +119 val_119 12 +489 val_489 12 +458 val_458 12 +78 val_78 12 +76 val_76 12 +41 val_41 12 +223 val_223 12 +492 val_492 12 +149 val_149 12 +449 val_449 12 +218 val_218 12 +228 val_228 12 +138 val_138 12 +453 val_453 12 +30 val_30 12 +209 val_209 12 +64 val_64 12 +468 val_468 12 +76 val_76 12 +74 val_74 12 +342 val_342 12 +69 val_69 12 +230 val_230 12 +33 val_33 12 +368 val_368 12 +103 val_103 12 +296 val_296 12 +113 val_113 12 +216 val_216 12 +367 val_367 12 +344 val_344 12 +167 val_167 12 +274 val_274 12 +219 val_219 12 +239 val_239 12 +485 val_485 12 +116 val_116 12 +223 val_223 12 +256 val_256 12 +263 val_263 12 +70 val_70 12 +487 val_487 12 +480 val_480 12 +401 val_401 12 +288 val_288 12 +191 val_191 12 +5 val_5 12 +244 val_244 12 +438 val_438 12 +128 val_128 12 +467 val_467 12 +432 val_432 12 +202 val_202 12 +316 val_316 12 +229 val_229 12 +469 val_469 12 +463 val_463 12 +280 val_280 12 +2 val_2 12 +35 val_35 12 +283 val_283 12 +331 val_331 12 +235 val_235 12 +80 val_80 12 +44 val_44 12 +193 val_193 12 +321 val_321 12 +335 val_335 12 +104 val_104 12 +466 val_466 12 +366 val_366 12 +175 val_175 12 +403 val_403 12 +483 val_483 12 +53 val_53 12 +105 val_105 12 +257 val_257 12 +406 val_406 12 +409 val_409 12 +190 val_190 12 +406 val_406 12 +401 val_401 12 +114 val_114 12 +258 val_258 12 +90 val_90 12 +203 val_203 12 +262 val_262 12 +348 val_348 12 +424 val_424 12 +12 val_12 12 +396 val_396 12 +201 val_201 12 +217 val_217 12 +164 val_164 12 +431 val_431 12 +454 val_454 12 +478 val_478 12 +298 val_298 12 +125 val_125 12 +431 val_431 12 +164 val_164 12 +424 val_424 12 +187 val_187 12 +382 val_382 12 +5 val_5 12 +70 val_70 12 +397 val_397 12 +480 val_480 12 +291 val_291 12 +24 val_24 12 +351 val_351 12 +255 val_255 12 +104 val_104 12 +70 val_70 12 +163 val_163 12 +438 val_438 12 +119 val_119 12 +414 val_414 12 +200 val_200 12 +491 val_491 12 +237 val_237 12 +439 val_439 12 +360 val_360 12 +248 val_248 12 +479 val_479 12 +305 val_305 12 +417 val_417 12 +199 val_199 12 +444 val_444 12 +120 val_120 12 +429 val_429 12 +169 val_169 12 +443 val_443 12 +323 val_323 12 +325 val_325 12 +277 val_277 12 +230 val_230 12 +478 val_478 12 +178 val_178 12 +468 val_468 12 +310 val_310 12 +317 val_317 12 +333 val_333 12 +493 val_493 12 +460 val_460 12 +207 val_207 12 +249 val_249 12 +265 val_265 12 +480 val_480 12 +83 val_83 12 +136 val_136 12 +353 val_353 12 +172 val_172 12 +214 val_214 12 +462 val_462 12 +233 val_233 12 +406 val_406 12 +133 val_133 12 +175 val_175 12 +189 val_189 12 +454 val_454 12 +375 
val_375 12
+401 val_401 12
+421 val_421 12
+407 val_407 12
+384 val_384 12
+256 val_256 12
+26 val_26 12
+134 val_134 12
+67 val_67 12
+384 val_384 12
+379 val_379 12
+18 val_18 12
+462 val_462 12
+492 val_492 12
+100 val_100 12
+298 val_298 12
+9 val_9 12
+341 val_341 12
+498 val_498 12
+146 val_146 12
+458 val_458 12
+362 val_362 12
+186 val_186 12
+285 val_285 12
+348 val_348 12
+167 val_167 12
+18 val_18 12
+273 val_273 12
+183 val_183 12
+281 val_281 12
+344 val_344 12
+97 val_97 12
+469 val_469 12
+315 val_315 12
+84 val_84 12
+28 val_28 12
+37 val_37 12
+448 val_448 12
+152 val_152 12
+348 val_348 12
+307 val_307 12
+194 val_194 12
+414 val_414 12
+477 val_477 12
+222 val_222 12
+126 val_126 12
+90 val_90 12
+169 val_169 12
+403 val_403 12
+400 val_400 12
+200 val_200 12
+97 val_97 12
+PREHOOK: query: drop view masking_test_n0
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@masking_test
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: drop view masking_test
+PREHOOK: Input: default@masking_test_n0
+PREHOOK: Output: default@masking_test_n0
+POSTHOOK: query: drop view masking_test_n0
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@masking_test
-POSTHOOK: Output: default@masking_test
-PREHOOK: query: create view masking_test as select cast(key as int) as key, '12',
+POSTHOOK: Input: default@masking_test_n0
+POSTHOOK: Output: default@masking_test_n0
+PREHOOK: query: create view masking_test_n0 as select cast(key as int) as key, '12',
 '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: create view masking_test as select cast(key as int) as key, '12',
+PREHOOK: Output: default@masking_test_n0
+POSTHOOK: query: create view masking_test_n0 as select cast(key as int) as key, '12',
 '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@masking_test
-POSTHOOK: Lineage: masking_test._c1 SIMPLE []
-POSTHOOK: Lineage: masking_test._c10 SIMPLE []
-POSTHOOK: Lineage: masking_test._c11 SIMPLE []
-POSTHOOK: Lineage: masking_test._c2 SIMPLE []
-POSTHOOK: Lineage: masking_test._c3 SIMPLE []
-POSTHOOK: Lineage: masking_test._c4 SIMPLE []
-POSTHOOK: Lineage: masking_test._c5 SIMPLE []
-POSTHOOK: Lineage: masking_test._c6 SIMPLE []
-POSTHOOK: Lineage: masking_test._c7 SIMPLE []
-POSTHOOK: Lineage: masking_test._c8 SIMPLE []
-POSTHOOK: Lineage: masking_test._c9 SIMPLE []
-POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: explain select * from masking_test
+POSTHOOK: Output: default@masking_test_n0
+POSTHOOK: Lineage: masking_test_n0._c1 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c10 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c11 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c2 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c3 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c4 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c5 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c6 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c7 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c8 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0._c9 SIMPLE []
+POSTHOOK: Lineage: masking_test_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: explain select * from masking_test_n0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from masking_test
+POSTHOOK: query: explain select * from masking_test_n0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -173,20 +1158,17 @@ STAGE PLANS:
             properties:
               insideView TRUE
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: UDFToInteger(key) (type: int), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Select Operator
+              expressions: UDFToInteger(key) (type: int), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
       Execution mode: vectorized

   Stage: Stage-0
@@ -195,25 +1177,519 @@ STAGE PLANS:
     Processor Tree:
       ListSink

-PREHOOK: query: select * from masking_test
+PREHOOK: query: select * from masking_test_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n0
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: query: select * from masking_test
+POSTHOOK: query: select * from masking_test_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input: default@masking_test_n0
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
+238 12 12 12 12 12 12 12 12 12 12 12
+86 12 12 12 12 12 12 12 12 12 12 12
+311 12 12 12 12 12 12 12 12 12 12 12
+27 12 12 12 12 12 12 12 12 12 12 12
+165 12 12 12 12 12 12 12 12 12 12 12
+409 12 12 12 12 12 12 12 12 12 12 12
+255 12 12 12 12 12 12 12 12 12 12 12
+278 12 12 12 12 12 12 12 12 12 12 12
+98 12 12 12 12 12 12 12 12 12 12 12
+484 12 12 12 12 12 12 12 12 12 12 12
+265 12 12 12 12 12 12 12 12 12 12 12
+193 12 12 12 12 12 12 12 12 12 12 12
+401 12 12 12 12 12 12 12 12 12 12 12
+150 12 12 12 12 12 12 12 12 12 12 12
+273 12 12 12 12 12 12 12 12 12 12 12
+224 12 12 12 12 12 12 12 12 12 12 12
+369 12 12 12 12 12 12 12 12 12 12 12
+66 12 12 12 12 12 12 12 12 12 12 12
+128 12 12 12 12 12 12 12 12 12 12 12
+213 12 12 12 12 12 12 12 12 12 12 12
+146 12 12 12 12 12 12 12
12 12 12 12 +406 12 12 12 12 12 12 12 12 12 12 12 +429 12 12 12 12 12 12 12 12 12 12 12 +374 12 12 12 12 12 12 12 12 12 12 12 +152 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +145 12 12 12 12 12 12 12 12 12 12 12 +495 12 12 12 12 12 12 12 12 12 12 12 +37 12 12 12 12 12 12 12 12 12 12 12 +327 12 12 12 12 12 12 12 12 12 12 12 +281 12 12 12 12 12 12 12 12 12 12 12 +277 12 12 12 12 12 12 12 12 12 12 12 +209 12 12 12 12 12 12 12 12 12 12 12 +15 12 12 12 12 12 12 12 12 12 12 12 +82 12 12 12 12 12 12 12 12 12 12 12 +403 12 12 12 12 12 12 12 12 12 12 12 +166 12 12 12 12 12 12 12 12 12 12 12 +417 12 12 12 12 12 12 12 12 12 12 12 +430 12 12 12 12 12 12 12 12 12 12 12 +252 12 12 12 12 12 12 12 12 12 12 12 +292 12 12 12 12 12 12 12 12 12 12 12 +219 12 12 12 12 12 12 12 12 12 12 12 +287 12 12 12 12 12 12 12 12 12 12 12 +153 12 12 12 12 12 12 12 12 12 12 12 +193 12 12 12 12 12 12 12 12 12 12 12 +338 12 12 12 12 12 12 12 12 12 12 12 +446 12 12 12 12 12 12 12 12 12 12 12 +459 12 12 12 12 12 12 12 12 12 12 12 +394 12 12 12 12 12 12 12 12 12 12 12 +237 12 12 12 12 12 12 12 12 12 12 12 +482 12 12 12 12 12 12 12 12 12 12 12 +174 12 12 12 12 12 12 12 12 12 12 12 +413 12 12 12 12 12 12 12 12 12 12 12 +494 12 12 12 12 12 12 12 12 12 12 12 +207 12 12 12 12 12 12 12 12 12 12 12 +199 12 12 12 12 12 12 12 12 12 12 12 +466 12 12 12 12 12 12 12 12 12 12 12 +208 12 12 12 12 12 12 12 12 12 12 12 +174 12 12 12 12 12 12 12 12 12 12 12 +399 12 12 12 12 12 12 12 12 12 12 12 +396 12 12 12 12 12 12 12 12 12 12 12 +247 12 12 12 12 12 12 12 12 12 12 12 +417 12 12 12 12 12 12 12 12 12 12 12 +489 12 12 12 12 12 12 12 12 12 12 12 +162 12 12 12 12 12 12 12 12 12 12 12 +377 12 12 12 12 12 12 12 12 12 12 12 +397 12 12 12 12 12 12 12 12 12 12 12 +309 12 12 12 12 12 12 12 12 12 12 12 +365 12 12 12 12 12 12 12 12 12 12 12 +266 12 12 12 12 12 12 12 12 12 12 12 +439 12 12 12 12 12 12 12 12 12 12 12 +342 12 12 12 12 12 12 12 12 12 12 12 +367 12 12 12 12 12 12 12 12 12 12 12 +325 12 12 12 12 12 12 12 12 12 12 12 +167 12 12 12 12 12 12 12 12 12 12 12 +195 12 12 12 12 12 12 12 12 12 12 12 +475 12 12 12 12 12 12 12 12 12 12 12 +17 12 12 12 12 12 12 12 12 12 12 12 +113 12 12 12 12 12 12 12 12 12 12 12 +155 12 12 12 12 12 12 12 12 12 12 12 +203 12 12 12 12 12 12 12 12 12 12 12 +339 12 12 12 12 12 12 12 12 12 12 12 0 12 12 12 12 12 12 12 12 12 12 12 +455 12 12 12 12 12 12 12 12 12 12 12 +128 12 12 12 12 12 12 12 12 12 12 12 +311 12 12 12 12 12 12 12 12 12 12 12 +316 12 12 12 12 12 12 12 12 12 12 12 +57 12 12 12 12 12 12 12 12 12 12 12 +302 12 12 12 12 12 12 12 12 12 12 12 +205 12 12 12 12 12 12 12 12 12 12 12 +149 12 12 12 12 12 12 12 12 12 12 12 +438 12 12 12 12 12 12 12 12 12 12 12 +345 12 12 12 12 12 12 12 12 12 12 12 +129 12 12 12 12 12 12 12 12 12 12 12 +170 12 12 12 12 12 12 12 12 12 12 12 +20 12 12 12 12 12 12 12 12 12 12 12 +489 12 12 12 12 12 12 12 12 12 12 12 +157 12 12 12 12 12 12 12 12 12 12 12 +378 12 12 12 12 12 12 12 12 12 12 12 +221 12 12 12 12 12 12 12 12 12 12 12 +92 12 12 12 12 12 12 12 12 12 12 12 +111 12 12 12 12 12 12 12 12 12 12 12 +47 12 12 12 12 12 12 12 12 12 12 12 +72 12 12 12 12 12 12 12 12 12 12 12 4 12 12 12 12 12 12 12 12 12 12 12 +280 12 12 12 12 12 12 12 12 12 12 12 +35 12 12 12 12 12 12 12 12 12 12 12 +427 12 12 12 12 12 12 12 12 12 12 12 +277 12 12 12 12 12 12 12 12 12 12 12 +208 12 12 12 12 12 12 12 12 12 12 12 +356 12 12 12 12 12 12 12 12 12 12 12 +399 12 12 12 12 12 12 12 12 12 12 12 +169 12 12 12 12 12 12 12 12 12 12 12 +382 12 12 12 12 12 12 12 12 12 12 12 +498 12 12 12 12 12 12 
12 12 12 12 12 +125 12 12 12 12 12 12 12 12 12 12 12 +386 12 12 12 12 12 12 12 12 12 12 12 +437 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +192 12 12 12 12 12 12 12 12 12 12 12 +286 12 12 12 12 12 12 12 12 12 12 12 +187 12 12 12 12 12 12 12 12 12 12 12 +176 12 12 12 12 12 12 12 12 12 12 12 +54 12 12 12 12 12 12 12 12 12 12 12 +459 12 12 12 12 12 12 12 12 12 12 12 +51 12 12 12 12 12 12 12 12 12 12 12 +138 12 12 12 12 12 12 12 12 12 12 12 +103 12 12 12 12 12 12 12 12 12 12 12 +239 12 12 12 12 12 12 12 12 12 12 12 +213 12 12 12 12 12 12 12 12 12 12 12 +216 12 12 12 12 12 12 12 12 12 12 12 +430 12 12 12 12 12 12 12 12 12 12 12 +278 12 12 12 12 12 12 12 12 12 12 12 +176 12 12 12 12 12 12 12 12 12 12 12 +289 12 12 12 12 12 12 12 12 12 12 12 +221 12 12 12 12 12 12 12 12 12 12 12 +65 12 12 12 12 12 12 12 12 12 12 12 +318 12 12 12 12 12 12 12 12 12 12 12 +332 12 12 12 12 12 12 12 12 12 12 12 +311 12 12 12 12 12 12 12 12 12 12 12 +275 12 12 12 12 12 12 12 12 12 12 12 +137 12 12 12 12 12 12 12 12 12 12 12 +241 12 12 12 12 12 12 12 12 12 12 12 +83 12 12 12 12 12 12 12 12 12 12 12 +333 12 12 12 12 12 12 12 12 12 12 12 +180 12 12 12 12 12 12 12 12 12 12 12 +284 12 12 12 12 12 12 12 12 12 12 12 +12 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +181 12 12 12 12 12 12 12 12 12 12 12 +67 12 12 12 12 12 12 12 12 12 12 12 +260 12 12 12 12 12 12 12 12 12 12 12 +404 12 12 12 12 12 12 12 12 12 12 12 +384 12 12 12 12 12 12 12 12 12 12 12 +489 12 12 12 12 12 12 12 12 12 12 12 +353 12 12 12 12 12 12 12 12 12 12 12 +373 12 12 12 12 12 12 12 12 12 12 12 +272 12 12 12 12 12 12 12 12 12 12 12 +138 12 12 12 12 12 12 12 12 12 12 12 +217 12 12 12 12 12 12 12 12 12 12 12 +84 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +466 12 12 12 12 12 12 12 12 12 12 12 +58 12 12 12 12 12 12 12 12 12 12 12 8 12 12 12 12 12 12 12 12 12 12 12 +411 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +208 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +24 12 12 12 12 12 12 12 12 12 12 12 +463 12 12 12 12 12 12 12 12 12 12 12 +431 12 12 12 12 12 12 12 12 12 12 12 +179 12 12 12 12 12 12 12 12 12 12 12 +172 12 12 12 12 12 12 12 12 12 12 12 +42 12 12 12 12 12 12 12 12 12 12 12 +129 12 12 12 12 12 12 12 12 12 12 12 +158 12 12 12 12 12 12 12 12 12 12 12 +119 12 12 12 12 12 12 12 12 12 12 12 +496 12 12 12 12 12 12 12 12 12 12 12 0 12 12 12 12 12 12 12 12 12 12 12 +322 12 12 12 12 12 12 12 12 12 12 12 +197 12 12 12 12 12 12 12 12 12 12 12 +468 12 12 12 12 12 12 12 12 12 12 12 +393 12 12 12 12 12 12 12 12 12 12 12 +454 12 12 12 12 12 12 12 12 12 12 12 +100 12 12 12 12 12 12 12 12 12 12 12 +298 12 12 12 12 12 12 12 12 12 12 12 +199 12 12 12 12 12 12 12 12 12 12 12 +191 12 12 12 12 12 12 12 12 12 12 12 +418 12 12 12 12 12 12 12 12 12 12 12 +96 12 12 12 12 12 12 12 12 12 12 12 +26 12 12 12 12 12 12 12 12 12 12 12 +165 12 12 12 12 12 12 12 12 12 12 12 +327 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +205 12 12 12 12 12 12 12 12 12 12 12 +120 12 12 12 12 12 12 12 12 12 12 12 +131 12 12 12 12 12 12 12 12 12 12 12 +51 12 12 12 12 12 12 12 12 12 12 12 +404 12 12 12 12 12 12 12 12 12 12 12 +43 12 12 12 12 12 12 12 12 12 12 12 +436 12 12 12 12 12 12 12 12 12 12 12 +156 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +468 12 12 12 12 12 12 12 12 12 12 12 +308 12 12 12 12 12 12 12 12 12 12 12 +95 12 12 12 12 12 12 12 12 12 12 12 +196 12 12 12 12 12 12 12 12 12 12 12 +288 12 12 12 12 12 12 12 
12 12 12 12 +481 12 12 12 12 12 12 12 12 12 12 12 +457 12 12 12 12 12 12 12 12 12 12 12 +98 12 12 12 12 12 12 12 12 12 12 12 +282 12 12 12 12 12 12 12 12 12 12 12 +197 12 12 12 12 12 12 12 12 12 12 12 +187 12 12 12 12 12 12 12 12 12 12 12 +318 12 12 12 12 12 12 12 12 12 12 12 +318 12 12 12 12 12 12 12 12 12 12 12 +409 12 12 12 12 12 12 12 12 12 12 12 +470 12 12 12 12 12 12 12 12 12 12 12 +137 12 12 12 12 12 12 12 12 12 12 12 +369 12 12 12 12 12 12 12 12 12 12 12 +316 12 12 12 12 12 12 12 12 12 12 12 +169 12 12 12 12 12 12 12 12 12 12 12 +413 12 12 12 12 12 12 12 12 12 12 12 +85 12 12 12 12 12 12 12 12 12 12 12 +77 12 12 12 12 12 12 12 12 12 12 12 0 12 12 12 12 12 12 12 12 12 12 12 +490 12 12 12 12 12 12 12 12 12 12 12 +87 12 12 12 12 12 12 12 12 12 12 12 +364 12 12 12 12 12 12 12 12 12 12 12 +179 12 12 12 12 12 12 12 12 12 12 12 +118 12 12 12 12 12 12 12 12 12 12 12 +134 12 12 12 12 12 12 12 12 12 12 12 +395 12 12 12 12 12 12 12 12 12 12 12 +282 12 12 12 12 12 12 12 12 12 12 12 +138 12 12 12 12 12 12 12 12 12 12 12 +238 12 12 12 12 12 12 12 12 12 12 12 +419 12 12 12 12 12 12 12 12 12 12 12 +15 12 12 12 12 12 12 12 12 12 12 12 +118 12 12 12 12 12 12 12 12 12 12 12 +72 12 12 12 12 12 12 12 12 12 12 12 +90 12 12 12 12 12 12 12 12 12 12 12 +307 12 12 12 12 12 12 12 12 12 12 12 +19 12 12 12 12 12 12 12 12 12 12 12 +435 12 12 12 12 12 12 12 12 12 12 12 +10 12 12 12 12 12 12 12 12 12 12 12 +277 12 12 12 12 12 12 12 12 12 12 12 +273 12 12 12 12 12 12 12 12 12 12 12 +306 12 12 12 12 12 12 12 12 12 12 12 +224 12 12 12 12 12 12 12 12 12 12 12 +309 12 12 12 12 12 12 12 12 12 12 12 +389 12 12 12 12 12 12 12 12 12 12 12 +327 12 12 12 12 12 12 12 12 12 12 12 +242 12 12 12 12 12 12 12 12 12 12 12 +369 12 12 12 12 12 12 12 12 12 12 12 +392 12 12 12 12 12 12 12 12 12 12 12 +272 12 12 12 12 12 12 12 12 12 12 12 +331 12 12 12 12 12 12 12 12 12 12 12 +401 12 12 12 12 12 12 12 12 12 12 12 +242 12 12 12 12 12 12 12 12 12 12 12 +452 12 12 12 12 12 12 12 12 12 12 12 +177 12 12 12 12 12 12 12 12 12 12 12 +226 12 12 12 12 12 12 12 12 12 12 12 +5 12 12 12 12 12 12 12 12 12 12 12 +497 12 12 12 12 12 12 12 12 12 12 12 +402 12 12 12 12 12 12 12 12 12 12 12 +396 12 12 12 12 12 12 12 12 12 12 12 +317 12 12 12 12 12 12 12 12 12 12 12 +395 12 12 12 12 12 12 12 12 12 12 12 +58 12 12 12 12 12 12 12 12 12 12 12 +35 12 12 12 12 12 12 12 12 12 12 12 +336 12 12 12 12 12 12 12 12 12 12 12 +95 12 12 12 12 12 12 12 12 12 12 12 +11 12 12 12 12 12 12 12 12 12 12 12 +168 12 12 12 12 12 12 12 12 12 12 12 +34 12 12 12 12 12 12 12 12 12 12 12 +229 12 12 12 12 12 12 12 12 12 12 12 +233 12 12 12 12 12 12 12 12 12 12 12 +143 12 12 12 12 12 12 12 12 12 12 12 +472 12 12 12 12 12 12 12 12 12 12 12 +322 12 12 12 12 12 12 12 12 12 12 12 +498 12 12 12 12 12 12 12 12 12 12 12 +160 12 12 12 12 12 12 12 12 12 12 12 +195 12 12 12 12 12 12 12 12 12 12 12 +42 12 12 12 12 12 12 12 12 12 12 12 +321 12 12 12 12 12 12 12 12 12 12 12 +430 12 12 12 12 12 12 12 12 12 12 12 +119 12 12 12 12 12 12 12 12 12 12 12 +489 12 12 12 12 12 12 12 12 12 12 12 +458 12 12 12 12 12 12 12 12 12 12 12 +78 12 12 12 12 12 12 12 12 12 12 12 +76 12 12 12 12 12 12 12 12 12 12 12 +41 12 12 12 12 12 12 12 12 12 12 12 +223 12 12 12 12 12 12 12 12 12 12 12 +492 12 12 12 12 12 12 12 12 12 12 12 +149 12 12 12 12 12 12 12 12 12 12 12 +449 12 12 12 12 12 12 12 12 12 12 12 +218 12 12 12 12 12 12 12 12 12 12 12 +228 12 12 12 12 12 12 12 12 12 12 12 +138 12 12 12 12 12 12 12 12 12 12 12 +453 12 12 12 12 12 12 12 12 12 12 12 +30 12 12 12 12 12 12 12 12 12 12 12 +209 12 12 12 12 12 12 12 12 12 
12 12 +64 12 12 12 12 12 12 12 12 12 12 12 +468 12 12 12 12 12 12 12 12 12 12 12 +76 12 12 12 12 12 12 12 12 12 12 12 +74 12 12 12 12 12 12 12 12 12 12 12 +342 12 12 12 12 12 12 12 12 12 12 12 +69 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +33 12 12 12 12 12 12 12 12 12 12 12 +368 12 12 12 12 12 12 12 12 12 12 12 +103 12 12 12 12 12 12 12 12 12 12 12 +296 12 12 12 12 12 12 12 12 12 12 12 +113 12 12 12 12 12 12 12 12 12 12 12 +216 12 12 12 12 12 12 12 12 12 12 12 +367 12 12 12 12 12 12 12 12 12 12 12 +344 12 12 12 12 12 12 12 12 12 12 12 +167 12 12 12 12 12 12 12 12 12 12 12 +274 12 12 12 12 12 12 12 12 12 12 12 +219 12 12 12 12 12 12 12 12 12 12 12 +239 12 12 12 12 12 12 12 12 12 12 12 +485 12 12 12 12 12 12 12 12 12 12 12 +116 12 12 12 12 12 12 12 12 12 12 12 +223 12 12 12 12 12 12 12 12 12 12 12 +256 12 12 12 12 12 12 12 12 12 12 12 +263 12 12 12 12 12 12 12 12 12 12 12 +70 12 12 12 12 12 12 12 12 12 12 12 +487 12 12 12 12 12 12 12 12 12 12 12 +480 12 12 12 12 12 12 12 12 12 12 12 +401 12 12 12 12 12 12 12 12 12 12 12 +288 12 12 12 12 12 12 12 12 12 12 12 +191 12 12 12 12 12 12 12 12 12 12 12 +5 12 12 12 12 12 12 12 12 12 12 12 +244 12 12 12 12 12 12 12 12 12 12 12 +438 12 12 12 12 12 12 12 12 12 12 12 +128 12 12 12 12 12 12 12 12 12 12 12 +467 12 12 12 12 12 12 12 12 12 12 12 +432 12 12 12 12 12 12 12 12 12 12 12 +202 12 12 12 12 12 12 12 12 12 12 12 +316 12 12 12 12 12 12 12 12 12 12 12 +229 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +463 12 12 12 12 12 12 12 12 12 12 12 +280 12 12 12 12 12 12 12 12 12 12 12 2 12 12 12 12 12 12 12 12 12 12 12 -PREHOOK: query: explain select * from masking_test where key > 0 +35 12 12 12 12 12 12 12 12 12 12 12 +283 12 12 12 12 12 12 12 12 12 12 12 +331 12 12 12 12 12 12 12 12 12 12 12 +235 12 12 12 12 12 12 12 12 12 12 12 +80 12 12 12 12 12 12 12 12 12 12 12 +44 12 12 12 12 12 12 12 12 12 12 12 +193 12 12 12 12 12 12 12 12 12 12 12 +321 12 12 12 12 12 12 12 12 12 12 12 +335 12 12 12 12 12 12 12 12 12 12 12 +104 12 12 12 12 12 12 12 12 12 12 12 +466 12 12 12 12 12 12 12 12 12 12 12 +366 12 12 12 12 12 12 12 12 12 12 12 +175 12 12 12 12 12 12 12 12 12 12 12 +403 12 12 12 12 12 12 12 12 12 12 12 +483 12 12 12 12 12 12 12 12 12 12 12 +53 12 12 12 12 12 12 12 12 12 12 12 +105 12 12 12 12 12 12 12 12 12 12 12 +257 12 12 12 12 12 12 12 12 12 12 12 +406 12 12 12 12 12 12 12 12 12 12 12 +409 12 12 12 12 12 12 12 12 12 12 12 +190 12 12 12 12 12 12 12 12 12 12 12 +406 12 12 12 12 12 12 12 12 12 12 12 +401 12 12 12 12 12 12 12 12 12 12 12 +114 12 12 12 12 12 12 12 12 12 12 12 +258 12 12 12 12 12 12 12 12 12 12 12 +90 12 12 12 12 12 12 12 12 12 12 12 +203 12 12 12 12 12 12 12 12 12 12 12 +262 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +424 12 12 12 12 12 12 12 12 12 12 12 +12 12 12 12 12 12 12 12 12 12 12 12 +396 12 12 12 12 12 12 12 12 12 12 12 +201 12 12 12 12 12 12 12 12 12 12 12 +217 12 12 12 12 12 12 12 12 12 12 12 +164 12 12 12 12 12 12 12 12 12 12 12 +431 12 12 12 12 12 12 12 12 12 12 12 +454 12 12 12 12 12 12 12 12 12 12 12 +478 12 12 12 12 12 12 12 12 12 12 12 +298 12 12 12 12 12 12 12 12 12 12 12 +125 12 12 12 12 12 12 12 12 12 12 12 +431 12 12 12 12 12 12 12 12 12 12 12 +164 12 12 12 12 12 12 12 12 12 12 12 +424 12 12 12 12 12 12 12 12 12 12 12 +187 12 12 12 12 12 12 12 12 12 12 12 +382 12 12 12 12 12 12 12 12 12 12 12 +5 12 12 12 12 12 12 12 12 12 12 12 +70 12 12 12 12 12 12 12 12 12 12 12 +397 12 12 12 12 12 12 12 12 12 12 12 +480 12 12 12 12 12 12 12 12 12 12 12 +291 
12 12 12 12 12 12 12 12 12 12 12 +24 12 12 12 12 12 12 12 12 12 12 12 +351 12 12 12 12 12 12 12 12 12 12 12 +255 12 12 12 12 12 12 12 12 12 12 12 +104 12 12 12 12 12 12 12 12 12 12 12 +70 12 12 12 12 12 12 12 12 12 12 12 +163 12 12 12 12 12 12 12 12 12 12 12 +438 12 12 12 12 12 12 12 12 12 12 12 +119 12 12 12 12 12 12 12 12 12 12 12 +414 12 12 12 12 12 12 12 12 12 12 12 +200 12 12 12 12 12 12 12 12 12 12 12 +491 12 12 12 12 12 12 12 12 12 12 12 +237 12 12 12 12 12 12 12 12 12 12 12 +439 12 12 12 12 12 12 12 12 12 12 12 +360 12 12 12 12 12 12 12 12 12 12 12 +248 12 12 12 12 12 12 12 12 12 12 12 +479 12 12 12 12 12 12 12 12 12 12 12 +305 12 12 12 12 12 12 12 12 12 12 12 +417 12 12 12 12 12 12 12 12 12 12 12 +199 12 12 12 12 12 12 12 12 12 12 12 +444 12 12 12 12 12 12 12 12 12 12 12 +120 12 12 12 12 12 12 12 12 12 12 12 +429 12 12 12 12 12 12 12 12 12 12 12 +169 12 12 12 12 12 12 12 12 12 12 12 +443 12 12 12 12 12 12 12 12 12 12 12 +323 12 12 12 12 12 12 12 12 12 12 12 +325 12 12 12 12 12 12 12 12 12 12 12 +277 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +478 12 12 12 12 12 12 12 12 12 12 12 +178 12 12 12 12 12 12 12 12 12 12 12 +468 12 12 12 12 12 12 12 12 12 12 12 +310 12 12 12 12 12 12 12 12 12 12 12 +317 12 12 12 12 12 12 12 12 12 12 12 +333 12 12 12 12 12 12 12 12 12 12 12 +493 12 12 12 12 12 12 12 12 12 12 12 +460 12 12 12 12 12 12 12 12 12 12 12 +207 12 12 12 12 12 12 12 12 12 12 12 +249 12 12 12 12 12 12 12 12 12 12 12 +265 12 12 12 12 12 12 12 12 12 12 12 +480 12 12 12 12 12 12 12 12 12 12 12 +83 12 12 12 12 12 12 12 12 12 12 12 +136 12 12 12 12 12 12 12 12 12 12 12 +353 12 12 12 12 12 12 12 12 12 12 12 +172 12 12 12 12 12 12 12 12 12 12 12 +214 12 12 12 12 12 12 12 12 12 12 12 +462 12 12 12 12 12 12 12 12 12 12 12 +233 12 12 12 12 12 12 12 12 12 12 12 +406 12 12 12 12 12 12 12 12 12 12 12 +133 12 12 12 12 12 12 12 12 12 12 12 +175 12 12 12 12 12 12 12 12 12 12 12 +189 12 12 12 12 12 12 12 12 12 12 12 +454 12 12 12 12 12 12 12 12 12 12 12 +375 12 12 12 12 12 12 12 12 12 12 12 +401 12 12 12 12 12 12 12 12 12 12 12 +421 12 12 12 12 12 12 12 12 12 12 12 +407 12 12 12 12 12 12 12 12 12 12 12 +384 12 12 12 12 12 12 12 12 12 12 12 +256 12 12 12 12 12 12 12 12 12 12 12 +26 12 12 12 12 12 12 12 12 12 12 12 +134 12 12 12 12 12 12 12 12 12 12 12 +67 12 12 12 12 12 12 12 12 12 12 12 +384 12 12 12 12 12 12 12 12 12 12 12 +379 12 12 12 12 12 12 12 12 12 12 12 +18 12 12 12 12 12 12 12 12 12 12 12 +462 12 12 12 12 12 12 12 12 12 12 12 +492 12 12 12 12 12 12 12 12 12 12 12 +100 12 12 12 12 12 12 12 12 12 12 12 +298 12 12 12 12 12 12 12 12 12 12 12 +9 12 12 12 12 12 12 12 12 12 12 12 +341 12 12 12 12 12 12 12 12 12 12 12 +498 12 12 12 12 12 12 12 12 12 12 12 +146 12 12 12 12 12 12 12 12 12 12 12 +458 12 12 12 12 12 12 12 12 12 12 12 +362 12 12 12 12 12 12 12 12 12 12 12 +186 12 12 12 12 12 12 12 12 12 12 12 +285 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +167 12 12 12 12 12 12 12 12 12 12 12 +18 12 12 12 12 12 12 12 12 12 12 12 +273 12 12 12 12 12 12 12 12 12 12 12 +183 12 12 12 12 12 12 12 12 12 12 12 +281 12 12 12 12 12 12 12 12 12 12 12 +344 12 12 12 12 12 12 12 12 12 12 12 +97 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +315 12 12 12 12 12 12 12 12 12 12 12 +84 12 12 12 12 12 12 12 12 12 12 12 +28 12 12 12 12 12 12 12 12 12 12 12 +37 12 12 12 12 12 12 12 12 12 12 12 +448 12 12 12 12 12 12 12 12 12 12 12 +152 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +307 12 12 12 12 12 12 12 12 12 12 12 
+194 12 12 12 12 12 12 12 12 12 12 12
+414 12 12 12 12 12 12 12 12 12 12 12
+477 12 12 12 12 12 12 12 12 12 12 12
+222 12 12 12 12 12 12 12 12 12 12 12
+126 12 12 12 12 12 12 12 12 12 12 12
+90 12 12 12 12 12 12 12 12 12 12 12
+169 12 12 12 12 12 12 12 12 12 12 12
+403 12 12 12 12 12 12 12 12 12 12 12
+400 12 12 12 12 12 12 12 12 12 12 12
+200 12 12 12 12 12 12 12 12 12 12 12
+97 12 12 12 12 12 12 12 12 12 12 12
+PREHOOK: query: explain select * from masking_test_n0 where key > 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from masking_test where key > 0
+POSTHOOK: query: explain select * from masking_test_n0 where key > 0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -229,15 +1705,15 @@ STAGE PLANS:
               insideView TRUE
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean)
-              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToInteger(key) > 0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -250,16 +1726,510 @@ STAGE PLANS:
     Processor Tree:
      ListSink

-PREHOOK: query: select * from masking_test where key > 0
+PREHOOK: query: select * from masking_test_n0 where key > 0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n0
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: query: select * from masking_test where key > 0
+POSTHOOK: query: select * from masking_test_n0 where key > 0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input: default@masking_test_n0
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
+238 12 12 12 12 12 12 12 12 12 12 12
+86 12 12 12 12 12 12 12 12 12 12 12
+311 12 12 12 12 12 12 12 12 12 12 12
+27 12 12 12 12 12 12 12 12 12 12 12
+165 12 12 12 12 12 12 12 12 12 12 12
+409 12 12 12 12 12 12 12 12 12 12 12
+255 12 12 12 12 12 12 12 12 12 12 12
+278 12 12 12 12 12 12 12 12 12 12 12
+98 12 12 12 12 12 12 12 12 12 12 12
+484 12 12 12 12 12 12 12 12 12 12 12
+265 12 12 12 12 12 12 12 12 12 12 12
+193 12 12 12 12 12 12 12 12 12 12 12
+401 12 12 12 12 12 12 12 12 12 12 12
+150 12 12 12 12 12 12 12 12 12 12 12
+273 12 12 12 12 12 12 12 12 12 12 12
+224 12 12 12 12 12 12 12 12 12 12 12
+369 12 12 12 12 12 12 12 12 12 12 12
+66 12 12 12 12 12 12 12 12 12 12 12
+128 12 12 12 12 12 12 12 12 12 12 12
+213 12 12 12 12 12 12 12 12 12 12 12
+146 12 12 12 12 12 12 12 12 12 12 12
+406 12 12 12
12 12 12 12 12 12 12 12 +429 12 12 12 12 12 12 12 12 12 12 12 +374 12 12 12 12 12 12 12 12 12 12 12 +152 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +145 12 12 12 12 12 12 12 12 12 12 12 +495 12 12 12 12 12 12 12 12 12 12 12 +37 12 12 12 12 12 12 12 12 12 12 12 +327 12 12 12 12 12 12 12 12 12 12 12 +281 12 12 12 12 12 12 12 12 12 12 12 +277 12 12 12 12 12 12 12 12 12 12 12 +209 12 12 12 12 12 12 12 12 12 12 12 +15 12 12 12 12 12 12 12 12 12 12 12 +82 12 12 12 12 12 12 12 12 12 12 12 +403 12 12 12 12 12 12 12 12 12 12 12 +166 12 12 12 12 12 12 12 12 12 12 12 +417 12 12 12 12 12 12 12 12 12 12 12 +430 12 12 12 12 12 12 12 12 12 12 12 +252 12 12 12 12 12 12 12 12 12 12 12 +292 12 12 12 12 12 12 12 12 12 12 12 +219 12 12 12 12 12 12 12 12 12 12 12 +287 12 12 12 12 12 12 12 12 12 12 12 +153 12 12 12 12 12 12 12 12 12 12 12 +193 12 12 12 12 12 12 12 12 12 12 12 +338 12 12 12 12 12 12 12 12 12 12 12 +446 12 12 12 12 12 12 12 12 12 12 12 +459 12 12 12 12 12 12 12 12 12 12 12 +394 12 12 12 12 12 12 12 12 12 12 12 +237 12 12 12 12 12 12 12 12 12 12 12 +482 12 12 12 12 12 12 12 12 12 12 12 +174 12 12 12 12 12 12 12 12 12 12 12 +413 12 12 12 12 12 12 12 12 12 12 12 +494 12 12 12 12 12 12 12 12 12 12 12 +207 12 12 12 12 12 12 12 12 12 12 12 +199 12 12 12 12 12 12 12 12 12 12 12 +466 12 12 12 12 12 12 12 12 12 12 12 +208 12 12 12 12 12 12 12 12 12 12 12 +174 12 12 12 12 12 12 12 12 12 12 12 +399 12 12 12 12 12 12 12 12 12 12 12 +396 12 12 12 12 12 12 12 12 12 12 12 +247 12 12 12 12 12 12 12 12 12 12 12 +417 12 12 12 12 12 12 12 12 12 12 12 +489 12 12 12 12 12 12 12 12 12 12 12 +162 12 12 12 12 12 12 12 12 12 12 12 +377 12 12 12 12 12 12 12 12 12 12 12 +397 12 12 12 12 12 12 12 12 12 12 12 +309 12 12 12 12 12 12 12 12 12 12 12 +365 12 12 12 12 12 12 12 12 12 12 12 +266 12 12 12 12 12 12 12 12 12 12 12 +439 12 12 12 12 12 12 12 12 12 12 12 +342 12 12 12 12 12 12 12 12 12 12 12 +367 12 12 12 12 12 12 12 12 12 12 12 +325 12 12 12 12 12 12 12 12 12 12 12 +167 12 12 12 12 12 12 12 12 12 12 12 +195 12 12 12 12 12 12 12 12 12 12 12 +475 12 12 12 12 12 12 12 12 12 12 12 +17 12 12 12 12 12 12 12 12 12 12 12 +113 12 12 12 12 12 12 12 12 12 12 12 +155 12 12 12 12 12 12 12 12 12 12 12 +203 12 12 12 12 12 12 12 12 12 12 12 +339 12 12 12 12 12 12 12 12 12 12 12 +455 12 12 12 12 12 12 12 12 12 12 12 +128 12 12 12 12 12 12 12 12 12 12 12 +311 12 12 12 12 12 12 12 12 12 12 12 +316 12 12 12 12 12 12 12 12 12 12 12 +57 12 12 12 12 12 12 12 12 12 12 12 +302 12 12 12 12 12 12 12 12 12 12 12 +205 12 12 12 12 12 12 12 12 12 12 12 +149 12 12 12 12 12 12 12 12 12 12 12 +438 12 12 12 12 12 12 12 12 12 12 12 +345 12 12 12 12 12 12 12 12 12 12 12 +129 12 12 12 12 12 12 12 12 12 12 12 +170 12 12 12 12 12 12 12 12 12 12 12 +20 12 12 12 12 12 12 12 12 12 12 12 +489 12 12 12 12 12 12 12 12 12 12 12 +157 12 12 12 12 12 12 12 12 12 12 12 +378 12 12 12 12 12 12 12 12 12 12 12 +221 12 12 12 12 12 12 12 12 12 12 12 +92 12 12 12 12 12 12 12 12 12 12 12 +111 12 12 12 12 12 12 12 12 12 12 12 +47 12 12 12 12 12 12 12 12 12 12 12 +72 12 12 12 12 12 12 12 12 12 12 12 4 12 12 12 12 12 12 12 12 12 12 12 +280 12 12 12 12 12 12 12 12 12 12 12 +35 12 12 12 12 12 12 12 12 12 12 12 +427 12 12 12 12 12 12 12 12 12 12 12 +277 12 12 12 12 12 12 12 12 12 12 12 +208 12 12 12 12 12 12 12 12 12 12 12 +356 12 12 12 12 12 12 12 12 12 12 12 +399 12 12 12 12 12 12 12 12 12 12 12 +169 12 12 12 12 12 12 12 12 12 12 12 +382 12 12 12 12 12 12 12 12 12 12 12 +498 12 12 12 12 12 12 12 12 12 12 12 +125 12 12 12 12 12 12 12 12 12 12 12 +386 12 
12 12 12 12 12 12 12 12 12 12 +437 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +192 12 12 12 12 12 12 12 12 12 12 12 +286 12 12 12 12 12 12 12 12 12 12 12 +187 12 12 12 12 12 12 12 12 12 12 12 +176 12 12 12 12 12 12 12 12 12 12 12 +54 12 12 12 12 12 12 12 12 12 12 12 +459 12 12 12 12 12 12 12 12 12 12 12 +51 12 12 12 12 12 12 12 12 12 12 12 +138 12 12 12 12 12 12 12 12 12 12 12 +103 12 12 12 12 12 12 12 12 12 12 12 +239 12 12 12 12 12 12 12 12 12 12 12 +213 12 12 12 12 12 12 12 12 12 12 12 +216 12 12 12 12 12 12 12 12 12 12 12 +430 12 12 12 12 12 12 12 12 12 12 12 +278 12 12 12 12 12 12 12 12 12 12 12 +176 12 12 12 12 12 12 12 12 12 12 12 +289 12 12 12 12 12 12 12 12 12 12 12 +221 12 12 12 12 12 12 12 12 12 12 12 +65 12 12 12 12 12 12 12 12 12 12 12 +318 12 12 12 12 12 12 12 12 12 12 12 +332 12 12 12 12 12 12 12 12 12 12 12 +311 12 12 12 12 12 12 12 12 12 12 12 +275 12 12 12 12 12 12 12 12 12 12 12 +137 12 12 12 12 12 12 12 12 12 12 12 +241 12 12 12 12 12 12 12 12 12 12 12 +83 12 12 12 12 12 12 12 12 12 12 12 +333 12 12 12 12 12 12 12 12 12 12 12 +180 12 12 12 12 12 12 12 12 12 12 12 +284 12 12 12 12 12 12 12 12 12 12 12 +12 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +181 12 12 12 12 12 12 12 12 12 12 12 +67 12 12 12 12 12 12 12 12 12 12 12 +260 12 12 12 12 12 12 12 12 12 12 12 +404 12 12 12 12 12 12 12 12 12 12 12 +384 12 12 12 12 12 12 12 12 12 12 12 +489 12 12 12 12 12 12 12 12 12 12 12 +353 12 12 12 12 12 12 12 12 12 12 12 +373 12 12 12 12 12 12 12 12 12 12 12 +272 12 12 12 12 12 12 12 12 12 12 12 +138 12 12 12 12 12 12 12 12 12 12 12 +217 12 12 12 12 12 12 12 12 12 12 12 +84 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +466 12 12 12 12 12 12 12 12 12 12 12 +58 12 12 12 12 12 12 12 12 12 12 12 8 12 12 12 12 12 12 12 12 12 12 12 +411 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +208 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +24 12 12 12 12 12 12 12 12 12 12 12 +463 12 12 12 12 12 12 12 12 12 12 12 +431 12 12 12 12 12 12 12 12 12 12 12 +179 12 12 12 12 12 12 12 12 12 12 12 +172 12 12 12 12 12 12 12 12 12 12 12 +42 12 12 12 12 12 12 12 12 12 12 12 +129 12 12 12 12 12 12 12 12 12 12 12 +158 12 12 12 12 12 12 12 12 12 12 12 +119 12 12 12 12 12 12 12 12 12 12 12 +496 12 12 12 12 12 12 12 12 12 12 12 +322 12 12 12 12 12 12 12 12 12 12 12 +197 12 12 12 12 12 12 12 12 12 12 12 +468 12 12 12 12 12 12 12 12 12 12 12 +393 12 12 12 12 12 12 12 12 12 12 12 +454 12 12 12 12 12 12 12 12 12 12 12 +100 12 12 12 12 12 12 12 12 12 12 12 +298 12 12 12 12 12 12 12 12 12 12 12 +199 12 12 12 12 12 12 12 12 12 12 12 +191 12 12 12 12 12 12 12 12 12 12 12 +418 12 12 12 12 12 12 12 12 12 12 12 +96 12 12 12 12 12 12 12 12 12 12 12 +26 12 12 12 12 12 12 12 12 12 12 12 +165 12 12 12 12 12 12 12 12 12 12 12 +327 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +205 12 12 12 12 12 12 12 12 12 12 12 +120 12 12 12 12 12 12 12 12 12 12 12 +131 12 12 12 12 12 12 12 12 12 12 12 +51 12 12 12 12 12 12 12 12 12 12 12 +404 12 12 12 12 12 12 12 12 12 12 12 +43 12 12 12 12 12 12 12 12 12 12 12 +436 12 12 12 12 12 12 12 12 12 12 12 +156 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +468 12 12 12 12 12 12 12 12 12 12 12 +308 12 12 12 12 12 12 12 12 12 12 12 +95 12 12 12 12 12 12 12 12 12 12 12 +196 12 12 12 12 12 12 12 12 12 12 12 +288 12 12 12 12 12 12 12 12 12 12 12 +481 12 12 12 12 12 12 12 12 12 12 12 +457 12 12 12 12 12 12 12 12 12 12 12 +98 12 
12 12 12 12 12 12 12 12 12 12 +282 12 12 12 12 12 12 12 12 12 12 12 +197 12 12 12 12 12 12 12 12 12 12 12 +187 12 12 12 12 12 12 12 12 12 12 12 +318 12 12 12 12 12 12 12 12 12 12 12 +318 12 12 12 12 12 12 12 12 12 12 12 +409 12 12 12 12 12 12 12 12 12 12 12 +470 12 12 12 12 12 12 12 12 12 12 12 +137 12 12 12 12 12 12 12 12 12 12 12 +369 12 12 12 12 12 12 12 12 12 12 12 +316 12 12 12 12 12 12 12 12 12 12 12 +169 12 12 12 12 12 12 12 12 12 12 12 +413 12 12 12 12 12 12 12 12 12 12 12 +85 12 12 12 12 12 12 12 12 12 12 12 +77 12 12 12 12 12 12 12 12 12 12 12 +490 12 12 12 12 12 12 12 12 12 12 12 +87 12 12 12 12 12 12 12 12 12 12 12 +364 12 12 12 12 12 12 12 12 12 12 12 +179 12 12 12 12 12 12 12 12 12 12 12 +118 12 12 12 12 12 12 12 12 12 12 12 +134 12 12 12 12 12 12 12 12 12 12 12 +395 12 12 12 12 12 12 12 12 12 12 12 +282 12 12 12 12 12 12 12 12 12 12 12 +138 12 12 12 12 12 12 12 12 12 12 12 +238 12 12 12 12 12 12 12 12 12 12 12 +419 12 12 12 12 12 12 12 12 12 12 12 +15 12 12 12 12 12 12 12 12 12 12 12 +118 12 12 12 12 12 12 12 12 12 12 12 +72 12 12 12 12 12 12 12 12 12 12 12 +90 12 12 12 12 12 12 12 12 12 12 12 +307 12 12 12 12 12 12 12 12 12 12 12 +19 12 12 12 12 12 12 12 12 12 12 12 +435 12 12 12 12 12 12 12 12 12 12 12 +10 12 12 12 12 12 12 12 12 12 12 12 +277 12 12 12 12 12 12 12 12 12 12 12 +273 12 12 12 12 12 12 12 12 12 12 12 +306 12 12 12 12 12 12 12 12 12 12 12 +224 12 12 12 12 12 12 12 12 12 12 12 +309 12 12 12 12 12 12 12 12 12 12 12 +389 12 12 12 12 12 12 12 12 12 12 12 +327 12 12 12 12 12 12 12 12 12 12 12 +242 12 12 12 12 12 12 12 12 12 12 12 +369 12 12 12 12 12 12 12 12 12 12 12 +392 12 12 12 12 12 12 12 12 12 12 12 +272 12 12 12 12 12 12 12 12 12 12 12 +331 12 12 12 12 12 12 12 12 12 12 12 +401 12 12 12 12 12 12 12 12 12 12 12 +242 12 12 12 12 12 12 12 12 12 12 12 +452 12 12 12 12 12 12 12 12 12 12 12 +177 12 12 12 12 12 12 12 12 12 12 12 +226 12 12 12 12 12 12 12 12 12 12 12 +5 12 12 12 12 12 12 12 12 12 12 12 +497 12 12 12 12 12 12 12 12 12 12 12 +402 12 12 12 12 12 12 12 12 12 12 12 +396 12 12 12 12 12 12 12 12 12 12 12 +317 12 12 12 12 12 12 12 12 12 12 12 +395 12 12 12 12 12 12 12 12 12 12 12 +58 12 12 12 12 12 12 12 12 12 12 12 +35 12 12 12 12 12 12 12 12 12 12 12 +336 12 12 12 12 12 12 12 12 12 12 12 +95 12 12 12 12 12 12 12 12 12 12 12 +11 12 12 12 12 12 12 12 12 12 12 12 +168 12 12 12 12 12 12 12 12 12 12 12 +34 12 12 12 12 12 12 12 12 12 12 12 +229 12 12 12 12 12 12 12 12 12 12 12 +233 12 12 12 12 12 12 12 12 12 12 12 +143 12 12 12 12 12 12 12 12 12 12 12 +472 12 12 12 12 12 12 12 12 12 12 12 +322 12 12 12 12 12 12 12 12 12 12 12 +498 12 12 12 12 12 12 12 12 12 12 12 +160 12 12 12 12 12 12 12 12 12 12 12 +195 12 12 12 12 12 12 12 12 12 12 12 +42 12 12 12 12 12 12 12 12 12 12 12 +321 12 12 12 12 12 12 12 12 12 12 12 +430 12 12 12 12 12 12 12 12 12 12 12 +119 12 12 12 12 12 12 12 12 12 12 12 +489 12 12 12 12 12 12 12 12 12 12 12 +458 12 12 12 12 12 12 12 12 12 12 12 +78 12 12 12 12 12 12 12 12 12 12 12 +76 12 12 12 12 12 12 12 12 12 12 12 +41 12 12 12 12 12 12 12 12 12 12 12 +223 12 12 12 12 12 12 12 12 12 12 12 +492 12 12 12 12 12 12 12 12 12 12 12 +149 12 12 12 12 12 12 12 12 12 12 12 +449 12 12 12 12 12 12 12 12 12 12 12 +218 12 12 12 12 12 12 12 12 12 12 12 +228 12 12 12 12 12 12 12 12 12 12 12 +138 12 12 12 12 12 12 12 12 12 12 12 +453 12 12 12 12 12 12 12 12 12 12 12 +30 12 12 12 12 12 12 12 12 12 12 12 +209 12 12 12 12 12 12 12 12 12 12 12 +64 12 12 12 12 12 12 12 12 12 12 12 +468 12 12 12 12 12 12 12 12 12 12 12 +76 12 12 12 12 12 12 12 12 12 12 12 +74 12 12 12 
12 12 12 12 12 12 12 12 +342 12 12 12 12 12 12 12 12 12 12 12 +69 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +33 12 12 12 12 12 12 12 12 12 12 12 +368 12 12 12 12 12 12 12 12 12 12 12 +103 12 12 12 12 12 12 12 12 12 12 12 +296 12 12 12 12 12 12 12 12 12 12 12 +113 12 12 12 12 12 12 12 12 12 12 12 +216 12 12 12 12 12 12 12 12 12 12 12 +367 12 12 12 12 12 12 12 12 12 12 12 +344 12 12 12 12 12 12 12 12 12 12 12 +167 12 12 12 12 12 12 12 12 12 12 12 +274 12 12 12 12 12 12 12 12 12 12 12 +219 12 12 12 12 12 12 12 12 12 12 12 +239 12 12 12 12 12 12 12 12 12 12 12 +485 12 12 12 12 12 12 12 12 12 12 12 +116 12 12 12 12 12 12 12 12 12 12 12 +223 12 12 12 12 12 12 12 12 12 12 12 +256 12 12 12 12 12 12 12 12 12 12 12 +263 12 12 12 12 12 12 12 12 12 12 12 +70 12 12 12 12 12 12 12 12 12 12 12 +487 12 12 12 12 12 12 12 12 12 12 12 +480 12 12 12 12 12 12 12 12 12 12 12 +401 12 12 12 12 12 12 12 12 12 12 12 +288 12 12 12 12 12 12 12 12 12 12 12 +191 12 12 12 12 12 12 12 12 12 12 12 +5 12 12 12 12 12 12 12 12 12 12 12 +244 12 12 12 12 12 12 12 12 12 12 12 +438 12 12 12 12 12 12 12 12 12 12 12 +128 12 12 12 12 12 12 12 12 12 12 12 +467 12 12 12 12 12 12 12 12 12 12 12 +432 12 12 12 12 12 12 12 12 12 12 12 +202 12 12 12 12 12 12 12 12 12 12 12 +316 12 12 12 12 12 12 12 12 12 12 12 +229 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +463 12 12 12 12 12 12 12 12 12 12 12 +280 12 12 12 12 12 12 12 12 12 12 12 2 12 12 12 12 12 12 12 12 12 12 12 +35 12 12 12 12 12 12 12 12 12 12 12 +283 12 12 12 12 12 12 12 12 12 12 12 +331 12 12 12 12 12 12 12 12 12 12 12 +235 12 12 12 12 12 12 12 12 12 12 12 +80 12 12 12 12 12 12 12 12 12 12 12 +44 12 12 12 12 12 12 12 12 12 12 12 +193 12 12 12 12 12 12 12 12 12 12 12 +321 12 12 12 12 12 12 12 12 12 12 12 +335 12 12 12 12 12 12 12 12 12 12 12 +104 12 12 12 12 12 12 12 12 12 12 12 +466 12 12 12 12 12 12 12 12 12 12 12 +366 12 12 12 12 12 12 12 12 12 12 12 +175 12 12 12 12 12 12 12 12 12 12 12 +403 12 12 12 12 12 12 12 12 12 12 12 +483 12 12 12 12 12 12 12 12 12 12 12 +53 12 12 12 12 12 12 12 12 12 12 12 +105 12 12 12 12 12 12 12 12 12 12 12 +257 12 12 12 12 12 12 12 12 12 12 12 +406 12 12 12 12 12 12 12 12 12 12 12 +409 12 12 12 12 12 12 12 12 12 12 12 +190 12 12 12 12 12 12 12 12 12 12 12 +406 12 12 12 12 12 12 12 12 12 12 12 +401 12 12 12 12 12 12 12 12 12 12 12 +114 12 12 12 12 12 12 12 12 12 12 12 +258 12 12 12 12 12 12 12 12 12 12 12 +90 12 12 12 12 12 12 12 12 12 12 12 +203 12 12 12 12 12 12 12 12 12 12 12 +262 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +424 12 12 12 12 12 12 12 12 12 12 12 +12 12 12 12 12 12 12 12 12 12 12 12 +396 12 12 12 12 12 12 12 12 12 12 12 +201 12 12 12 12 12 12 12 12 12 12 12 +217 12 12 12 12 12 12 12 12 12 12 12 +164 12 12 12 12 12 12 12 12 12 12 12 +431 12 12 12 12 12 12 12 12 12 12 12 +454 12 12 12 12 12 12 12 12 12 12 12 +478 12 12 12 12 12 12 12 12 12 12 12 +298 12 12 12 12 12 12 12 12 12 12 12 +125 12 12 12 12 12 12 12 12 12 12 12 +431 12 12 12 12 12 12 12 12 12 12 12 +164 12 12 12 12 12 12 12 12 12 12 12 +424 12 12 12 12 12 12 12 12 12 12 12 +187 12 12 12 12 12 12 12 12 12 12 12 +382 12 12 12 12 12 12 12 12 12 12 12 +5 12 12 12 12 12 12 12 12 12 12 12 +70 12 12 12 12 12 12 12 12 12 12 12 +397 12 12 12 12 12 12 12 12 12 12 12 +480 12 12 12 12 12 12 12 12 12 12 12 +291 12 12 12 12 12 12 12 12 12 12 12 +24 12 12 12 12 12 12 12 12 12 12 12 +351 12 12 12 12 12 12 12 12 12 12 12 +255 12 12 12 12 12 12 12 12 12 12 12 +104 12 12 12 12 12 12 12 12 12 12 12 +70 12 12 12 
12 12 12 12 12 12 12 12 +163 12 12 12 12 12 12 12 12 12 12 12 +438 12 12 12 12 12 12 12 12 12 12 12 +119 12 12 12 12 12 12 12 12 12 12 12 +414 12 12 12 12 12 12 12 12 12 12 12 +200 12 12 12 12 12 12 12 12 12 12 12 +491 12 12 12 12 12 12 12 12 12 12 12 +237 12 12 12 12 12 12 12 12 12 12 12 +439 12 12 12 12 12 12 12 12 12 12 12 +360 12 12 12 12 12 12 12 12 12 12 12 +248 12 12 12 12 12 12 12 12 12 12 12 +479 12 12 12 12 12 12 12 12 12 12 12 +305 12 12 12 12 12 12 12 12 12 12 12 +417 12 12 12 12 12 12 12 12 12 12 12 +199 12 12 12 12 12 12 12 12 12 12 12 +444 12 12 12 12 12 12 12 12 12 12 12 +120 12 12 12 12 12 12 12 12 12 12 12 +429 12 12 12 12 12 12 12 12 12 12 12 +169 12 12 12 12 12 12 12 12 12 12 12 +443 12 12 12 12 12 12 12 12 12 12 12 +323 12 12 12 12 12 12 12 12 12 12 12 +325 12 12 12 12 12 12 12 12 12 12 12 +277 12 12 12 12 12 12 12 12 12 12 12 +230 12 12 12 12 12 12 12 12 12 12 12 +478 12 12 12 12 12 12 12 12 12 12 12 +178 12 12 12 12 12 12 12 12 12 12 12 +468 12 12 12 12 12 12 12 12 12 12 12 +310 12 12 12 12 12 12 12 12 12 12 12 +317 12 12 12 12 12 12 12 12 12 12 12 +333 12 12 12 12 12 12 12 12 12 12 12 +493 12 12 12 12 12 12 12 12 12 12 12 +460 12 12 12 12 12 12 12 12 12 12 12 +207 12 12 12 12 12 12 12 12 12 12 12 +249 12 12 12 12 12 12 12 12 12 12 12 +265 12 12 12 12 12 12 12 12 12 12 12 +480 12 12 12 12 12 12 12 12 12 12 12 +83 12 12 12 12 12 12 12 12 12 12 12 +136 12 12 12 12 12 12 12 12 12 12 12 +353 12 12 12 12 12 12 12 12 12 12 12 +172 12 12 12 12 12 12 12 12 12 12 12 +214 12 12 12 12 12 12 12 12 12 12 12 +462 12 12 12 12 12 12 12 12 12 12 12 +233 12 12 12 12 12 12 12 12 12 12 12 +406 12 12 12 12 12 12 12 12 12 12 12 +133 12 12 12 12 12 12 12 12 12 12 12 +175 12 12 12 12 12 12 12 12 12 12 12 +189 12 12 12 12 12 12 12 12 12 12 12 +454 12 12 12 12 12 12 12 12 12 12 12 +375 12 12 12 12 12 12 12 12 12 12 12 +401 12 12 12 12 12 12 12 12 12 12 12 +421 12 12 12 12 12 12 12 12 12 12 12 +407 12 12 12 12 12 12 12 12 12 12 12 +384 12 12 12 12 12 12 12 12 12 12 12 +256 12 12 12 12 12 12 12 12 12 12 12 +26 12 12 12 12 12 12 12 12 12 12 12 +134 12 12 12 12 12 12 12 12 12 12 12 +67 12 12 12 12 12 12 12 12 12 12 12 +384 12 12 12 12 12 12 12 12 12 12 12 +379 12 12 12 12 12 12 12 12 12 12 12 +18 12 12 12 12 12 12 12 12 12 12 12 +462 12 12 12 12 12 12 12 12 12 12 12 +492 12 12 12 12 12 12 12 12 12 12 12 +100 12 12 12 12 12 12 12 12 12 12 12 +298 12 12 12 12 12 12 12 12 12 12 12 +9 12 12 12 12 12 12 12 12 12 12 12 +341 12 12 12 12 12 12 12 12 12 12 12 +498 12 12 12 12 12 12 12 12 12 12 12 +146 12 12 12 12 12 12 12 12 12 12 12 +458 12 12 12 12 12 12 12 12 12 12 12 +362 12 12 12 12 12 12 12 12 12 12 12 +186 12 12 12 12 12 12 12 12 12 12 12 +285 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +167 12 12 12 12 12 12 12 12 12 12 12 +18 12 12 12 12 12 12 12 12 12 12 12 +273 12 12 12 12 12 12 12 12 12 12 12 +183 12 12 12 12 12 12 12 12 12 12 12 +281 12 12 12 12 12 12 12 12 12 12 12 +344 12 12 12 12 12 12 12 12 12 12 12 +97 12 12 12 12 12 12 12 12 12 12 12 +469 12 12 12 12 12 12 12 12 12 12 12 +315 12 12 12 12 12 12 12 12 12 12 12 +84 12 12 12 12 12 12 12 12 12 12 12 +28 12 12 12 12 12 12 12 12 12 12 12 +37 12 12 12 12 12 12 12 12 12 12 12 +448 12 12 12 12 12 12 12 12 12 12 12 +152 12 12 12 12 12 12 12 12 12 12 12 +348 12 12 12 12 12 12 12 12 12 12 12 +307 12 12 12 12 12 12 12 12 12 12 12 +194 12 12 12 12 12 12 12 12 12 12 12 +414 12 12 12 12 12 12 12 12 12 12 12 +477 12 12 12 12 12 12 12 12 12 12 12 +222 12 12 12 12 12 12 12 12 12 12 12 +126 12 12 12 12 12 12 12 12 12 12 12 +90 12 
12 12 12 12 12 12 12 12 12 12
+169 12 12 12 12 12 12 12 12 12 12 12
+403 12 12 12 12 12 12 12 12 12 12 12
+400 12 12 12 12 12 12 12 12 12 12 12
+200 12 12 12 12 12 12 12 12 12 12 12
+97 12 12 12 12 12 12 12 12 12 12 12
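A note for readers of these regenerated golden files: the pattern above repeats in masking_7.q.out below. The test masking hook evidently matches on the literal view name, so once the tests renamed masking_test to suffixed variants (masking_test_n0, masking_test_n14), the plans lose the row filter (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) and the reverse(value) column mask, and the full unmasked contents of src appear in the outputs. As orientation only, here is a hypothetical HiveQL restatement of what the old masked plans effectively computed; the actual rules live in the test authorizer, not in the .q files, and UDFToInteger(key) in the plans corresponds to CAST(key AS INT) here:

-- Hypothetical restatement of the masking visible in the old plans
-- (illustrative; not part of the test suite):
SELECT CAST(key AS INT) AS key,     -- UDFToInteger(key) in the plans
       reverse(value)   AS value,   -- column mask dropped after the rename
       '12'
FROM src
WHERE (CAST(key AS INT) % 2) = 0    -- row filter dropped after the rename:
  AND CAST(key AS INT) < 10;        -- only even keys below 10 survive

Against the 500-row src table this admits only keys 0, 2, 4 and 8, which matches the handful of rows in the old golden outputs (for example "4 4_lav 12": key 4 with "val_4" reversed).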
diff --git a/ql/src/test/results/clientpositive/masking_7.q.out b/ql/src/test/results/clientpositive/masking_7.q.out
index 2e0c82c5ef..9fa62f9be0 100644
--- a/ql/src/test/results/clientpositive/masking_7.q.out
+++ b/ql/src/test/results/clientpositive/masking_7.q.out
@@ -1,24 +1,24 @@
-PREHOOK: query: drop view masking_test
+PREHOOK: query: drop view masking_test_n14
 PREHOOK: type: DROPVIEW
-POSTHOOK: query: drop view masking_test
+POSTHOOK: query: drop view masking_test_n14
 POSTHOOK: type: DROPVIEW
-PREHOOK: query: create view masking_test as select cast(key as int) as key, value, '12', ROW__ID from src
+PREHOOK: query: create view masking_test_n14 as select cast(key as int) as key, value, '12', ROW__ID from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: create view masking_test as select cast(key as int) as key, value, '12', ROW__ID from src
+PREHOOK: Output: default@masking_test_n14
+POSTHOOK: query: create view masking_test_n14 as select cast(key as int) as key, value, '12', ROW__ID from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@masking_test
-POSTHOOK: Lineage: masking_test._c2 SIMPLE []
-POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: masking_test.row__id SIMPLE [(src)src.FieldSchema(name:ROW__ID, type:struct<transactionid:bigint,bucketid:int,rowid:bigint>, comment:), ]
-POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain select * from masking_test
+POSTHOOK: Output: default@masking_test_n14
+POSTHOOK: Lineage: masking_test_n14._c2 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: masking_test_n14.row__id SIMPLE [(src)src.FieldSchema(name:ROW__ID, type:struct<transactionid:bigint,bucketid:int,rowid:bigint>, comment:), ]
+POSTHOOK: Lineage: masking_test_n14.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select * from masking_test_n14
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from masking_test
+POSTHOOK: query: explain select * from masking_test_n14
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -33,20 +33,17 @@ STAGE PLANS:
             properties:
               insideView TRUE
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: UDFToInteger(key) (type: int), reverse(value) (type: string), '12' (type: string), ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Select Operator
+              expressions: UDFToInteger(key) (type: int), value (type: string), '12' (type: string), ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
       Execution mode: vectorized

   Stage: Stage-0
@@ -55,25 +52,519 @@ STAGE PLANS:
     Processor Tree:
       ListSink

-PREHOOK: query: select * from masking_test
+PREHOOK: query: select * from masking_test_n14
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n14
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: query: select * from masking_test
+POSTHOOK: query: select * from masking_test_n14
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input: default@masking_test_n14
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-0 0_lav 12 NULL
-4 4_lav 12 NULL
-8 8_lav 12 NULL
-0 0_lav 12 NULL
-0 0_lav 12 NULL
-2 2_lav 12 NULL
-PREHOOK: query: explain select * from masking_test where key > 0
+238 val_238 12 NULL
+86 val_86 12 NULL
+311 val_311 12 NULL
+27 val_27 12 NULL
+165 val_165 12 NULL
+409 val_409 12 NULL
+255 val_255 12 NULL
+278 val_278 12 NULL
+98 val_98 12 NULL
+484 val_484 12 NULL
+265 val_265 12 NULL
+193 val_193 12 NULL
+401 val_401 12 NULL
+150 val_150 12 NULL
+273 val_273 12 NULL
+224 val_224 12 NULL
+369 val_369 12 NULL
+66 val_66 12 NULL
+128 val_128 12 NULL
+213 val_213 12 NULL
+146 val_146 12 NULL
+406 val_406 12 NULL
+429 val_429 12 NULL
+374 val_374 12 NULL
+152 val_152 12 NULL
+469 val_469 12 NULL
+145 val_145 12 NULL
+495 val_495 12 NULL
+37 val_37 12 NULL
+327 val_327 12 NULL
+281 val_281 12 NULL
+277 val_277 12 NULL
+209 val_209 12 NULL
+15 val_15 12 NULL
+82 val_82 12 NULL
+403 val_403 12 NULL
+166 val_166 12 NULL
+417 val_417 12 NULL
+430 val_430 12 NULL
+252 val_252 12 NULL
+292 val_292 12 NULL
+219 val_219 12 NULL
+287 val_287 12 NULL
+153 val_153 12 NULL
+193 val_193 12 NULL
+338 val_338 12 NULL
+446 val_446 12 NULL
+459 val_459 12 NULL
+394 val_394 12 NULL
+237 val_237 12 NULL
+482 val_482 12 NULL
+174 val_174 12 NULL
+413 val_413 12 NULL
+494 val_494 12 NULL
+207 val_207 12 NULL
+199 val_199 12 NULL
+466 val_466 12 NULL
+208 val_208 12 NULL
+174 val_174 12 NULL
+399 val_399 12 NULL
+396 val_396 12 NULL
+247 val_247 12 NULL
+417 val_417 12 NULL
+489 val_489 12 NULL
+162 val_162 12 NULL
+377 val_377 12 NULL
+397 val_397 12 NULL
+309 val_309 12 NULL
+365 val_365 12 NULL
+266 val_266 12 NULL
+439 val_439 12 NULL
+342 val_342 12 NULL
+367 val_367 12 NULL
+325 val_325 12 NULL
+167 val_167 12 NULL
+195 val_195 12 NULL
+475 val_475 12 NULL
+17 val_17 12 NULL
+113 val_113 12 NULL
+155 val_155 12 NULL
+203 val_203 12 NULL
+339 val_339 12 NULL
+0 val_0 12 NULL
+455 val_455 12 NULL
+128 val_128 12 NULL
+311 val_311 12 NULL
+316 val_316 12 NULL
+57 val_57 12 NULL
+302 val_302 12 NULL
+205 val_205 12 NULL
+149 val_149 12 NULL
+438 val_438 12 NULL
+345 val_345 12 NULL
+129 val_129 12 NULL
+170 val_170 12 NULL
+20 val_20 12 NULL
+489 val_489 12 NULL
+157 val_157 12 NULL
+378 val_378 12 NULL
+221 val_221 12 NULL
+92 val_92 12 NULL
+111 val_111 12 NULL
+47 val_47 12 NULL
+72 val_72 12 NULL
+4 val_4 12 NULL
+280 val_280
12 NULL +35 val_35 12 NULL +427 val_427 12 NULL +277 val_277 12 NULL +208 val_208 12 NULL +356 val_356 12 NULL +399 val_399 12 NULL +169 val_169 12 NULL +382 val_382 12 NULL +498 val_498 12 NULL +125 val_125 12 NULL +386 val_386 12 NULL +437 val_437 12 NULL +469 val_469 12 NULL +192 val_192 12 NULL +286 val_286 12 NULL +187 val_187 12 NULL +176 val_176 12 NULL +54 val_54 12 NULL +459 val_459 12 NULL +51 val_51 12 NULL +138 val_138 12 NULL +103 val_103 12 NULL +239 val_239 12 NULL +213 val_213 12 NULL +216 val_216 12 NULL +430 val_430 12 NULL +278 val_278 12 NULL +176 val_176 12 NULL +289 val_289 12 NULL +221 val_221 12 NULL +65 val_65 12 NULL +318 val_318 12 NULL +332 val_332 12 NULL +311 val_311 12 NULL +275 val_275 12 NULL +137 val_137 12 NULL +241 val_241 12 NULL +83 val_83 12 NULL +333 val_333 12 NULL +180 val_180 12 NULL +284 val_284 12 NULL +12 val_12 12 NULL +230 val_230 12 NULL +181 val_181 12 NULL +67 val_67 12 NULL +260 val_260 12 NULL +404 val_404 12 NULL +384 val_384 12 NULL +489 val_489 12 NULL +353 val_353 12 NULL +373 val_373 12 NULL +272 val_272 12 NULL +138 val_138 12 NULL +217 val_217 12 NULL +84 val_84 12 NULL +348 val_348 12 NULL +466 val_466 12 NULL +58 val_58 12 NULL +8 val_8 12 NULL +411 val_411 12 NULL +230 val_230 12 NULL +208 val_208 12 NULL +348 val_348 12 NULL +24 val_24 12 NULL +463 val_463 12 NULL +431 val_431 12 NULL +179 val_179 12 NULL +172 val_172 12 NULL +42 val_42 12 NULL +129 val_129 12 NULL +158 val_158 12 NULL +119 val_119 12 NULL +496 val_496 12 NULL +0 val_0 12 NULL +322 val_322 12 NULL +197 val_197 12 NULL +468 val_468 12 NULL +393 val_393 12 NULL +454 val_454 12 NULL +100 val_100 12 NULL +298 val_298 12 NULL +199 val_199 12 NULL +191 val_191 12 NULL +418 val_418 12 NULL +96 val_96 12 NULL +26 val_26 12 NULL +165 val_165 12 NULL +327 val_327 12 NULL +230 val_230 12 NULL +205 val_205 12 NULL +120 val_120 12 NULL +131 val_131 12 NULL +51 val_51 12 NULL +404 val_404 12 NULL +43 val_43 12 NULL +436 val_436 12 NULL +156 val_156 12 NULL +469 val_469 12 NULL +468 val_468 12 NULL +308 val_308 12 NULL +95 val_95 12 NULL +196 val_196 12 NULL +288 val_288 12 NULL +481 val_481 12 NULL +457 val_457 12 NULL +98 val_98 12 NULL +282 val_282 12 NULL +197 val_197 12 NULL +187 val_187 12 NULL +318 val_318 12 NULL +318 val_318 12 NULL +409 val_409 12 NULL +470 val_470 12 NULL +137 val_137 12 NULL +369 val_369 12 NULL +316 val_316 12 NULL +169 val_169 12 NULL +413 val_413 12 NULL +85 val_85 12 NULL +77 val_77 12 NULL +0 val_0 12 NULL +490 val_490 12 NULL +87 val_87 12 NULL +364 val_364 12 NULL +179 val_179 12 NULL +118 val_118 12 NULL +134 val_134 12 NULL +395 val_395 12 NULL +282 val_282 12 NULL +138 val_138 12 NULL +238 val_238 12 NULL +419 val_419 12 NULL +15 val_15 12 NULL +118 val_118 12 NULL +72 val_72 12 NULL +90 val_90 12 NULL +307 val_307 12 NULL +19 val_19 12 NULL +435 val_435 12 NULL +10 val_10 12 NULL +277 val_277 12 NULL +273 val_273 12 NULL +306 val_306 12 NULL +224 val_224 12 NULL +309 val_309 12 NULL +389 val_389 12 NULL +327 val_327 12 NULL +242 val_242 12 NULL +369 val_369 12 NULL +392 val_392 12 NULL +272 val_272 12 NULL +331 val_331 12 NULL +401 val_401 12 NULL +242 val_242 12 NULL +452 val_452 12 NULL +177 val_177 12 NULL +226 val_226 12 NULL +5 val_5 12 NULL +497 val_497 12 NULL +402 val_402 12 NULL +396 val_396 12 NULL +317 val_317 12 NULL +395 val_395 12 NULL +58 val_58 12 NULL +35 val_35 12 NULL +336 val_336 12 NULL +95 val_95 12 NULL +11 val_11 12 NULL +168 val_168 12 NULL +34 val_34 12 NULL +229 val_229 12 NULL +233 val_233 12 NULL +143 
val_143 12 NULL +472 val_472 12 NULL +322 val_322 12 NULL +498 val_498 12 NULL +160 val_160 12 NULL +195 val_195 12 NULL +42 val_42 12 NULL +321 val_321 12 NULL +430 val_430 12 NULL +119 val_119 12 NULL +489 val_489 12 NULL +458 val_458 12 NULL +78 val_78 12 NULL +76 val_76 12 NULL +41 val_41 12 NULL +223 val_223 12 NULL +492 val_492 12 NULL +149 val_149 12 NULL +449 val_449 12 NULL +218 val_218 12 NULL +228 val_228 12 NULL +138 val_138 12 NULL +453 val_453 12 NULL +30 val_30 12 NULL +209 val_209 12 NULL +64 val_64 12 NULL +468 val_468 12 NULL +76 val_76 12 NULL +74 val_74 12 NULL +342 val_342 12 NULL +69 val_69 12 NULL +230 val_230 12 NULL +33 val_33 12 NULL +368 val_368 12 NULL +103 val_103 12 NULL +296 val_296 12 NULL +113 val_113 12 NULL +216 val_216 12 NULL +367 val_367 12 NULL +344 val_344 12 NULL +167 val_167 12 NULL +274 val_274 12 NULL +219 val_219 12 NULL +239 val_239 12 NULL +485 val_485 12 NULL +116 val_116 12 NULL +223 val_223 12 NULL +256 val_256 12 NULL +263 val_263 12 NULL +70 val_70 12 NULL +487 val_487 12 NULL +480 val_480 12 NULL +401 val_401 12 NULL +288 val_288 12 NULL +191 val_191 12 NULL +5 val_5 12 NULL +244 val_244 12 NULL +438 val_438 12 NULL +128 val_128 12 NULL +467 val_467 12 NULL +432 val_432 12 NULL +202 val_202 12 NULL +316 val_316 12 NULL +229 val_229 12 NULL +469 val_469 12 NULL +463 val_463 12 NULL +280 val_280 12 NULL +2 val_2 12 NULL +35 val_35 12 NULL +283 val_283 12 NULL +331 val_331 12 NULL +235 val_235 12 NULL +80 val_80 12 NULL +44 val_44 12 NULL +193 val_193 12 NULL +321 val_321 12 NULL +335 val_335 12 NULL +104 val_104 12 NULL +466 val_466 12 NULL +366 val_366 12 NULL +175 val_175 12 NULL +403 val_403 12 NULL +483 val_483 12 NULL +53 val_53 12 NULL +105 val_105 12 NULL +257 val_257 12 NULL +406 val_406 12 NULL +409 val_409 12 NULL +190 val_190 12 NULL +406 val_406 12 NULL +401 val_401 12 NULL +114 val_114 12 NULL +258 val_258 12 NULL +90 val_90 12 NULL +203 val_203 12 NULL +262 val_262 12 NULL +348 val_348 12 NULL +424 val_424 12 NULL +12 val_12 12 NULL +396 val_396 12 NULL +201 val_201 12 NULL +217 val_217 12 NULL +164 val_164 12 NULL +431 val_431 12 NULL +454 val_454 12 NULL +478 val_478 12 NULL +298 val_298 12 NULL +125 val_125 12 NULL +431 val_431 12 NULL +164 val_164 12 NULL +424 val_424 12 NULL +187 val_187 12 NULL +382 val_382 12 NULL +5 val_5 12 NULL +70 val_70 12 NULL +397 val_397 12 NULL +480 val_480 12 NULL +291 val_291 12 NULL +24 val_24 12 NULL +351 val_351 12 NULL +255 val_255 12 NULL +104 val_104 12 NULL +70 val_70 12 NULL +163 val_163 12 NULL +438 val_438 12 NULL +119 val_119 12 NULL +414 val_414 12 NULL +200 val_200 12 NULL +491 val_491 12 NULL +237 val_237 12 NULL +439 val_439 12 NULL +360 val_360 12 NULL +248 val_248 12 NULL +479 val_479 12 NULL +305 val_305 12 NULL +417 val_417 12 NULL +199 val_199 12 NULL +444 val_444 12 NULL +120 val_120 12 NULL +429 val_429 12 NULL +169 val_169 12 NULL +443 val_443 12 NULL +323 val_323 12 NULL +325 val_325 12 NULL +277 val_277 12 NULL +230 val_230 12 NULL +478 val_478 12 NULL +178 val_178 12 NULL +468 val_468 12 NULL +310 val_310 12 NULL +317 val_317 12 NULL +333 val_333 12 NULL +493 val_493 12 NULL +460 val_460 12 NULL +207 val_207 12 NULL +249 val_249 12 NULL +265 val_265 12 NULL +480 val_480 12 NULL +83 val_83 12 NULL +136 val_136 12 NULL +353 val_353 12 NULL +172 val_172 12 NULL +214 val_214 12 NULL +462 val_462 12 NULL +233 val_233 12 NULL +406 val_406 12 NULL +133 val_133 12 NULL +175 val_175 12 NULL +189 val_189 12 NULL +454 val_454 12 NULL +375 val_375 12 NULL +401 val_401 12 NULL 
+421 val_421 12 NULL +407 val_407 12 NULL +384 val_384 12 NULL +256 val_256 12 NULL +26 val_26 12 NULL +134 val_134 12 NULL +67 val_67 12 NULL +384 val_384 12 NULL +379 val_379 12 NULL +18 val_18 12 NULL +462 val_462 12 NULL +492 val_492 12 NULL +100 val_100 12 NULL +298 val_298 12 NULL +9 val_9 12 NULL +341 val_341 12 NULL +498 val_498 12 NULL +146 val_146 12 NULL +458 val_458 12 NULL +362 val_362 12 NULL +186 val_186 12 NULL +285 val_285 12 NULL +348 val_348 12 NULL +167 val_167 12 NULL +18 val_18 12 NULL +273 val_273 12 NULL +183 val_183 12 NULL +281 val_281 12 NULL +344 val_344 12 NULL +97 val_97 12 NULL +469 val_469 12 NULL +315 val_315 12 NULL +84 val_84 12 NULL +28 val_28 12 NULL +37 val_37 12 NULL +448 val_448 12 NULL +152 val_152 12 NULL +348 val_348 12 NULL +307 val_307 12 NULL +194 val_194 12 NULL +414 val_414 12 NULL +477 val_477 12 NULL +222 val_222 12 NULL +126 val_126 12 NULL +90 val_90 12 NULL +169 val_169 12 NULL +403 val_403 12 NULL +400 val_400 12 NULL +200 val_200 12 NULL +97 val_97 12 NULL +PREHOOK: query: explain select * from masking_test_n14 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test where key > 0 +POSTHOOK: query: explain select * from masking_test_n14 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -89,15 +580,15 @@ STAGE PLANS: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (UDFToInteger(key) > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(key) (type: int), reverse(value) (type: string), '12' (type: string), ROW__ID (type: struct) + expressions: UDFToInteger(key) (type: int), value (type: string), '12' (type: string), ROW__ID (type: struct) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -110,57 +601,551 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test where key > 0 +PREHOOK: query: select * from masking_test_n14 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n14 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test where key > 0 +POSTHOOK: query: select * from masking_test_n14 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n14 POSTHOOK: Input: default@src #### A masked pattern was here #### -4 4_lav 12 NULL -8 8_lav 12 NULL -2 2_lav 12 NULL -PREHOOK: query: drop view masking_test +238 val_238 12 NULL +86 val_86 12 NULL +311 val_311 12 NULL +27 val_27 12 NULL +165 val_165 12 NULL +409 val_409 12 NULL +255 val_255 12 NULL +278 val_278 12 NULL +98 val_98 12 NULL +484 val_484 12 NULL 
+265 val_265 12 NULL +193 val_193 12 NULL +401 val_401 12 NULL +150 val_150 12 NULL +273 val_273 12 NULL +224 val_224 12 NULL +369 val_369 12 NULL +66 val_66 12 NULL +128 val_128 12 NULL +213 val_213 12 NULL +146 val_146 12 NULL +406 val_406 12 NULL +429 val_429 12 NULL +374 val_374 12 NULL +152 val_152 12 NULL +469 val_469 12 NULL +145 val_145 12 NULL +495 val_495 12 NULL +37 val_37 12 NULL +327 val_327 12 NULL +281 val_281 12 NULL +277 val_277 12 NULL +209 val_209 12 NULL +15 val_15 12 NULL +82 val_82 12 NULL +403 val_403 12 NULL +166 val_166 12 NULL +417 val_417 12 NULL +430 val_430 12 NULL +252 val_252 12 NULL +292 val_292 12 NULL +219 val_219 12 NULL +287 val_287 12 NULL +153 val_153 12 NULL +193 val_193 12 NULL +338 val_338 12 NULL +446 val_446 12 NULL +459 val_459 12 NULL +394 val_394 12 NULL +237 val_237 12 NULL +482 val_482 12 NULL +174 val_174 12 NULL +413 val_413 12 NULL +494 val_494 12 NULL +207 val_207 12 NULL +199 val_199 12 NULL +466 val_466 12 NULL +208 val_208 12 NULL +174 val_174 12 NULL +399 val_399 12 NULL +396 val_396 12 NULL +247 val_247 12 NULL +417 val_417 12 NULL +489 val_489 12 NULL +162 val_162 12 NULL +377 val_377 12 NULL +397 val_397 12 NULL +309 val_309 12 NULL +365 val_365 12 NULL +266 val_266 12 NULL +439 val_439 12 NULL +342 val_342 12 NULL +367 val_367 12 NULL +325 val_325 12 NULL +167 val_167 12 NULL +195 val_195 12 NULL +475 val_475 12 NULL +17 val_17 12 NULL +113 val_113 12 NULL +155 val_155 12 NULL +203 val_203 12 NULL +339 val_339 12 NULL +455 val_455 12 NULL +128 val_128 12 NULL +311 val_311 12 NULL +316 val_316 12 NULL +57 val_57 12 NULL +302 val_302 12 NULL +205 val_205 12 NULL +149 val_149 12 NULL +438 val_438 12 NULL +345 val_345 12 NULL +129 val_129 12 NULL +170 val_170 12 NULL +20 val_20 12 NULL +489 val_489 12 NULL +157 val_157 12 NULL +378 val_378 12 NULL +221 val_221 12 NULL +92 val_92 12 NULL +111 val_111 12 NULL +47 val_47 12 NULL +72 val_72 12 NULL +4 val_4 12 NULL +280 val_280 12 NULL +35 val_35 12 NULL +427 val_427 12 NULL +277 val_277 12 NULL +208 val_208 12 NULL +356 val_356 12 NULL +399 val_399 12 NULL +169 val_169 12 NULL +382 val_382 12 NULL +498 val_498 12 NULL +125 val_125 12 NULL +386 val_386 12 NULL +437 val_437 12 NULL +469 val_469 12 NULL +192 val_192 12 NULL +286 val_286 12 NULL +187 val_187 12 NULL +176 val_176 12 NULL +54 val_54 12 NULL +459 val_459 12 NULL +51 val_51 12 NULL +138 val_138 12 NULL +103 val_103 12 NULL +239 val_239 12 NULL +213 val_213 12 NULL +216 val_216 12 NULL +430 val_430 12 NULL +278 val_278 12 NULL +176 val_176 12 NULL +289 val_289 12 NULL +221 val_221 12 NULL +65 val_65 12 NULL +318 val_318 12 NULL +332 val_332 12 NULL +311 val_311 12 NULL +275 val_275 12 NULL +137 val_137 12 NULL +241 val_241 12 NULL +83 val_83 12 NULL +333 val_333 12 NULL +180 val_180 12 NULL +284 val_284 12 NULL +12 val_12 12 NULL +230 val_230 12 NULL +181 val_181 12 NULL +67 val_67 12 NULL +260 val_260 12 NULL +404 val_404 12 NULL +384 val_384 12 NULL +489 val_489 12 NULL +353 val_353 12 NULL +373 val_373 12 NULL +272 val_272 12 NULL +138 val_138 12 NULL +217 val_217 12 NULL +84 val_84 12 NULL +348 val_348 12 NULL +466 val_466 12 NULL +58 val_58 12 NULL +8 val_8 12 NULL +411 val_411 12 NULL +230 val_230 12 NULL +208 val_208 12 NULL +348 val_348 12 NULL +24 val_24 12 NULL +463 val_463 12 NULL +431 val_431 12 NULL +179 val_179 12 NULL +172 val_172 12 NULL +42 val_42 12 NULL +129 val_129 12 NULL +158 val_158 12 NULL +119 val_119 12 NULL +496 val_496 12 NULL +322 val_322 12 NULL +197 val_197 12 NULL +468 val_468 12 NULL +393 val_393 
12 NULL +454 val_454 12 NULL +100 val_100 12 NULL +298 val_298 12 NULL +199 val_199 12 NULL +191 val_191 12 NULL +418 val_418 12 NULL +96 val_96 12 NULL +26 val_26 12 NULL +165 val_165 12 NULL +327 val_327 12 NULL +230 val_230 12 NULL +205 val_205 12 NULL +120 val_120 12 NULL +131 val_131 12 NULL +51 val_51 12 NULL +404 val_404 12 NULL +43 val_43 12 NULL +436 val_436 12 NULL +156 val_156 12 NULL +469 val_469 12 NULL +468 val_468 12 NULL +308 val_308 12 NULL +95 val_95 12 NULL +196 val_196 12 NULL +288 val_288 12 NULL +481 val_481 12 NULL +457 val_457 12 NULL +98 val_98 12 NULL +282 val_282 12 NULL +197 val_197 12 NULL +187 val_187 12 NULL +318 val_318 12 NULL +318 val_318 12 NULL +409 val_409 12 NULL +470 val_470 12 NULL +137 val_137 12 NULL +369 val_369 12 NULL +316 val_316 12 NULL +169 val_169 12 NULL +413 val_413 12 NULL +85 val_85 12 NULL +77 val_77 12 NULL +490 val_490 12 NULL +87 val_87 12 NULL +364 val_364 12 NULL +179 val_179 12 NULL +118 val_118 12 NULL +134 val_134 12 NULL +395 val_395 12 NULL +282 val_282 12 NULL +138 val_138 12 NULL +238 val_238 12 NULL +419 val_419 12 NULL +15 val_15 12 NULL +118 val_118 12 NULL +72 val_72 12 NULL +90 val_90 12 NULL +307 val_307 12 NULL +19 val_19 12 NULL +435 val_435 12 NULL +10 val_10 12 NULL +277 val_277 12 NULL +273 val_273 12 NULL +306 val_306 12 NULL +224 val_224 12 NULL +309 val_309 12 NULL +389 val_389 12 NULL +327 val_327 12 NULL +242 val_242 12 NULL +369 val_369 12 NULL +392 val_392 12 NULL +272 val_272 12 NULL +331 val_331 12 NULL +401 val_401 12 NULL +242 val_242 12 NULL +452 val_452 12 NULL +177 val_177 12 NULL +226 val_226 12 NULL +5 val_5 12 NULL +497 val_497 12 NULL +402 val_402 12 NULL +396 val_396 12 NULL +317 val_317 12 NULL +395 val_395 12 NULL +58 val_58 12 NULL +35 val_35 12 NULL +336 val_336 12 NULL +95 val_95 12 NULL +11 val_11 12 NULL +168 val_168 12 NULL +34 val_34 12 NULL +229 val_229 12 NULL +233 val_233 12 NULL +143 val_143 12 NULL +472 val_472 12 NULL +322 val_322 12 NULL +498 val_498 12 NULL +160 val_160 12 NULL +195 val_195 12 NULL +42 val_42 12 NULL +321 val_321 12 NULL +430 val_430 12 NULL +119 val_119 12 NULL +489 val_489 12 NULL +458 val_458 12 NULL +78 val_78 12 NULL +76 val_76 12 NULL +41 val_41 12 NULL +223 val_223 12 NULL +492 val_492 12 NULL +149 val_149 12 NULL +449 val_449 12 NULL +218 val_218 12 NULL +228 val_228 12 NULL +138 val_138 12 NULL +453 val_453 12 NULL +30 val_30 12 NULL +209 val_209 12 NULL +64 val_64 12 NULL +468 val_468 12 NULL +76 val_76 12 NULL +74 val_74 12 NULL +342 val_342 12 NULL +69 val_69 12 NULL +230 val_230 12 NULL +33 val_33 12 NULL +368 val_368 12 NULL +103 val_103 12 NULL +296 val_296 12 NULL +113 val_113 12 NULL +216 val_216 12 NULL +367 val_367 12 NULL +344 val_344 12 NULL +167 val_167 12 NULL +274 val_274 12 NULL +219 val_219 12 NULL +239 val_239 12 NULL +485 val_485 12 NULL +116 val_116 12 NULL +223 val_223 12 NULL +256 val_256 12 NULL +263 val_263 12 NULL +70 val_70 12 NULL +487 val_487 12 NULL +480 val_480 12 NULL +401 val_401 12 NULL +288 val_288 12 NULL +191 val_191 12 NULL +5 val_5 12 NULL +244 val_244 12 NULL +438 val_438 12 NULL +128 val_128 12 NULL +467 val_467 12 NULL +432 val_432 12 NULL +202 val_202 12 NULL +316 val_316 12 NULL +229 val_229 12 NULL +469 val_469 12 NULL +463 val_463 12 NULL +280 val_280 12 NULL +2 val_2 12 NULL +35 val_35 12 NULL +283 val_283 12 NULL +331 val_331 12 NULL +235 val_235 12 NULL +80 val_80 12 NULL +44 val_44 12 NULL +193 val_193 12 NULL +321 val_321 12 NULL +335 val_335 12 NULL +104 val_104 12 NULL +466 val_466 12 NULL +366 val_366 
12 NULL +175 val_175 12 NULL +403 val_403 12 NULL +483 val_483 12 NULL +53 val_53 12 NULL +105 val_105 12 NULL +257 val_257 12 NULL +406 val_406 12 NULL +409 val_409 12 NULL +190 val_190 12 NULL +406 val_406 12 NULL +401 val_401 12 NULL +114 val_114 12 NULL +258 val_258 12 NULL +90 val_90 12 NULL +203 val_203 12 NULL +262 val_262 12 NULL +348 val_348 12 NULL +424 val_424 12 NULL +12 val_12 12 NULL +396 val_396 12 NULL +201 val_201 12 NULL +217 val_217 12 NULL +164 val_164 12 NULL +431 val_431 12 NULL +454 val_454 12 NULL +478 val_478 12 NULL +298 val_298 12 NULL +125 val_125 12 NULL +431 val_431 12 NULL +164 val_164 12 NULL +424 val_424 12 NULL +187 val_187 12 NULL +382 val_382 12 NULL +5 val_5 12 NULL +70 val_70 12 NULL +397 val_397 12 NULL +480 val_480 12 NULL +291 val_291 12 NULL +24 val_24 12 NULL +351 val_351 12 NULL +255 val_255 12 NULL +104 val_104 12 NULL +70 val_70 12 NULL +163 val_163 12 NULL +438 val_438 12 NULL +119 val_119 12 NULL +414 val_414 12 NULL +200 val_200 12 NULL +491 val_491 12 NULL +237 val_237 12 NULL +439 val_439 12 NULL +360 val_360 12 NULL +248 val_248 12 NULL +479 val_479 12 NULL +305 val_305 12 NULL +417 val_417 12 NULL +199 val_199 12 NULL +444 val_444 12 NULL +120 val_120 12 NULL +429 val_429 12 NULL +169 val_169 12 NULL +443 val_443 12 NULL +323 val_323 12 NULL +325 val_325 12 NULL +277 val_277 12 NULL +230 val_230 12 NULL +478 val_478 12 NULL +178 val_178 12 NULL +468 val_468 12 NULL +310 val_310 12 NULL +317 val_317 12 NULL +333 val_333 12 NULL +493 val_493 12 NULL +460 val_460 12 NULL +207 val_207 12 NULL +249 val_249 12 NULL +265 val_265 12 NULL +480 val_480 12 NULL +83 val_83 12 NULL +136 val_136 12 NULL +353 val_353 12 NULL +172 val_172 12 NULL +214 val_214 12 NULL +462 val_462 12 NULL +233 val_233 12 NULL +406 val_406 12 NULL +133 val_133 12 NULL +175 val_175 12 NULL +189 val_189 12 NULL +454 val_454 12 NULL +375 val_375 12 NULL +401 val_401 12 NULL +421 val_421 12 NULL +407 val_407 12 NULL +384 val_384 12 NULL +256 val_256 12 NULL +26 val_26 12 NULL +134 val_134 12 NULL +67 val_67 12 NULL +384 val_384 12 NULL +379 val_379 12 NULL +18 val_18 12 NULL +462 val_462 12 NULL +492 val_492 12 NULL +100 val_100 12 NULL +298 val_298 12 NULL +9 val_9 12 NULL +341 val_341 12 NULL +498 val_498 12 NULL +146 val_146 12 NULL +458 val_458 12 NULL +362 val_362 12 NULL +186 val_186 12 NULL +285 val_285 12 NULL +348 val_348 12 NULL +167 val_167 12 NULL +18 val_18 12 NULL +273 val_273 12 NULL +183 val_183 12 NULL +281 val_281 12 NULL +344 val_344 12 NULL +97 val_97 12 NULL +469 val_469 12 NULL +315 val_315 12 NULL +84 val_84 12 NULL +28 val_28 12 NULL +37 val_37 12 NULL +448 val_448 12 NULL +152 val_152 12 NULL +348 val_348 12 NULL +307 val_307 12 NULL +194 val_194 12 NULL +414 val_414 12 NULL +477 val_477 12 NULL +222 val_222 12 NULL +126 val_126 12 NULL +90 val_90 12 NULL +169 val_169 12 NULL +403 val_403 12 NULL +400 val_400 12 NULL +200 val_200 12 NULL +97 val_97 12 NULL +PREHOOK: query: drop view masking_test_n14 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@masking_test -PREHOOK: Output: default@masking_test -POSTHOOK: query: drop view masking_test +PREHOOK: Input: default@masking_test_n14 +PREHOOK: Output: default@masking_test_n14 +POSTHOOK: query: drop view masking_test_n14 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@masking_test -POSTHOOK: Output: default@masking_test -PREHOOK: query: create view masking_test as select cast(key as int) as key, '12', ROW__ID, +POSTHOOK: Input: default@masking_test_n14 +POSTHOOK: Output: default@masking_test_n14 
+PREHOOK: query: create view masking_test_n14 as select cast(key as int) as key, '12', ROW__ID,
 '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: create view masking_test as select cast(key as int) as key, '12', ROW__ID,
+PREHOOK: Output: default@masking_test_n14
+POSTHOOK: query: create view masking_test_n14 as select cast(key as int) as key, '12', ROW__ID,
 '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@masking_test
-POSTHOOK: Lineage: masking_test._c1 SIMPLE []
-POSTHOOK: Lineage: masking_test._c10 SIMPLE []
-POSTHOOK: Lineage: masking_test._c11 SIMPLE []
-POSTHOOK: Lineage: masking_test._c12 SIMPLE []
-POSTHOOK: Lineage: masking_test._c3 SIMPLE []
-POSTHOOK: Lineage: masking_test._c4 SIMPLE []
-POSTHOOK: Lineage: masking_test._c5 SIMPLE []
-POSTHOOK: Lineage: masking_test._c6 SIMPLE []
-POSTHOOK: Lineage: masking_test._c7 SIMPLE []
-POSTHOOK: Lineage: masking_test._c8 SIMPLE []
-POSTHOOK: Lineage: masking_test._c9 SIMPLE []
-POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: masking_test.row__id SIMPLE [(src)src.FieldSchema(name:ROW__ID, type:struct, comment:), ]
-PREHOOK: query: explain select * from masking_test
+POSTHOOK: Output: default@masking_test_n14
+POSTHOOK: Lineage: masking_test_n14._c1 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c10 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c11 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c12 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c3 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c4 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c5 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c6 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c7 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c8 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14._c9 SIMPLE []
+POSTHOOK: Lineage: masking_test_n14.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: masking_test_n14.row__id SIMPLE [(src)src.FieldSchema(name:ROW__ID, type:struct, comment:), ]
+PREHOOK: query: explain select * from masking_test_n14
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from masking_test
+POSTHOOK: query: explain select * from masking_test_n14
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -175,20 +1160,17 @@ STAGE PLANS:
             properties:
               insideView TRUE
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: UDFToInteger(key) (type: int), '12' (type: string), ROW__ID (type: struct), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                Statistics: Num rows: 83 Data size: 881 Basic
stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Select Operator + expressions: UDFToInteger(key) (type: int), '12' (type: string), ROW__ID (type: struct), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Execution mode: vectorized Stage: Stage-0 @@ -197,25 +1179,519 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test +PREHOOK: query: select * from masking_test_n14 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n14 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test +POSTHOOK: query: select * from masking_test_n14 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n14 POSTHOOK: Input: default@src #### A masked pattern was here #### +238 12 NULL 12 12 12 12 12 12 12 12 12 12 +86 12 NULL 12 12 12 12 12 12 12 12 12 12 +311 12 NULL 12 12 12 12 12 12 12 12 12 12 +27 12 NULL 12 12 12 12 12 12 12 12 12 12 +165 12 NULL 12 12 12 12 12 12 12 12 12 12 +409 12 NULL 12 12 12 12 12 12 12 12 12 12 +255 12 NULL 12 12 12 12 12 12 12 12 12 12 +278 12 NULL 12 12 12 12 12 12 12 12 12 12 +98 12 NULL 12 12 12 12 12 12 12 12 12 12 +484 12 NULL 12 12 12 12 12 12 12 12 12 12 +265 12 NULL 12 12 12 12 12 12 12 12 12 12 +193 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +150 12 NULL 12 12 12 12 12 12 12 12 12 12 +273 12 NULL 12 12 12 12 12 12 12 12 12 12 +224 12 NULL 12 12 12 12 12 12 12 12 12 12 +369 12 NULL 12 12 12 12 12 12 12 12 12 12 +66 12 NULL 12 12 12 12 12 12 12 12 12 12 +128 12 NULL 12 12 12 12 12 12 12 12 12 12 +213 12 NULL 12 12 12 12 12 12 12 12 12 12 +146 12 NULL 12 12 12 12 12 12 12 12 12 12 +406 12 NULL 12 12 12 12 12 12 12 12 12 12 +429 12 NULL 12 12 12 12 12 12 12 12 12 12 +374 12 NULL 12 12 12 12 12 12 12 12 12 12 +152 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +145 12 NULL 12 12 12 12 12 12 12 12 12 12 +495 12 NULL 12 12 12 12 12 12 12 12 12 12 +37 12 NULL 12 12 12 12 12 12 12 12 12 12 +327 12 NULL 12 12 12 12 12 12 12 12 12 12 +281 12 NULL 12 12 12 12 12 12 12 12 12 12 +277 12 NULL 12 12 12 12 12 12 12 12 12 12 +209 12 NULL 12 12 12 12 12 12 12 12 12 12 +15 12 NULL 12 12 12 12 12 12 12 12 12 12 +82 12 NULL 12 12 12 12 12 12 12 12 12 12 +403 12 NULL 12 12 12 12 12 12 12 12 12 12 +166 12 NULL 12 12 12 12 12 12 12 12 12 12 +417 12 NULL 12 12 12 12 12 12 12 12 12 12 +430 12 NULL 12 12 12 12 12 12 12 12 12 12 +252 12 NULL 12 12 12 12 12 12 12 12 12 12 +292 12 NULL 12 12 12 12 12 12 12 12 12 12 +219 12 NULL 12 12 12 12 12 12 12 12 12 12 +287 12 NULL 12 12 12 12 12 12 12 
12 12 12 +153 12 NULL 12 12 12 12 12 12 12 12 12 12 +193 12 NULL 12 12 12 12 12 12 12 12 12 12 +338 12 NULL 12 12 12 12 12 12 12 12 12 12 +446 12 NULL 12 12 12 12 12 12 12 12 12 12 +459 12 NULL 12 12 12 12 12 12 12 12 12 12 +394 12 NULL 12 12 12 12 12 12 12 12 12 12 +237 12 NULL 12 12 12 12 12 12 12 12 12 12 +482 12 NULL 12 12 12 12 12 12 12 12 12 12 +174 12 NULL 12 12 12 12 12 12 12 12 12 12 +413 12 NULL 12 12 12 12 12 12 12 12 12 12 +494 12 NULL 12 12 12 12 12 12 12 12 12 12 +207 12 NULL 12 12 12 12 12 12 12 12 12 12 +199 12 NULL 12 12 12 12 12 12 12 12 12 12 +466 12 NULL 12 12 12 12 12 12 12 12 12 12 +208 12 NULL 12 12 12 12 12 12 12 12 12 12 +174 12 NULL 12 12 12 12 12 12 12 12 12 12 +399 12 NULL 12 12 12 12 12 12 12 12 12 12 +396 12 NULL 12 12 12 12 12 12 12 12 12 12 +247 12 NULL 12 12 12 12 12 12 12 12 12 12 +417 12 NULL 12 12 12 12 12 12 12 12 12 12 +489 12 NULL 12 12 12 12 12 12 12 12 12 12 +162 12 NULL 12 12 12 12 12 12 12 12 12 12 +377 12 NULL 12 12 12 12 12 12 12 12 12 12 +397 12 NULL 12 12 12 12 12 12 12 12 12 12 +309 12 NULL 12 12 12 12 12 12 12 12 12 12 +365 12 NULL 12 12 12 12 12 12 12 12 12 12 +266 12 NULL 12 12 12 12 12 12 12 12 12 12 +439 12 NULL 12 12 12 12 12 12 12 12 12 12 +342 12 NULL 12 12 12 12 12 12 12 12 12 12 +367 12 NULL 12 12 12 12 12 12 12 12 12 12 +325 12 NULL 12 12 12 12 12 12 12 12 12 12 +167 12 NULL 12 12 12 12 12 12 12 12 12 12 +195 12 NULL 12 12 12 12 12 12 12 12 12 12 +475 12 NULL 12 12 12 12 12 12 12 12 12 12 +17 12 NULL 12 12 12 12 12 12 12 12 12 12 +113 12 NULL 12 12 12 12 12 12 12 12 12 12 +155 12 NULL 12 12 12 12 12 12 12 12 12 12 +203 12 NULL 12 12 12 12 12 12 12 12 12 12 +339 12 NULL 12 12 12 12 12 12 12 12 12 12 0 12 NULL 12 12 12 12 12 12 12 12 12 12 +455 12 NULL 12 12 12 12 12 12 12 12 12 12 +128 12 NULL 12 12 12 12 12 12 12 12 12 12 +311 12 NULL 12 12 12 12 12 12 12 12 12 12 +316 12 NULL 12 12 12 12 12 12 12 12 12 12 +57 12 NULL 12 12 12 12 12 12 12 12 12 12 +302 12 NULL 12 12 12 12 12 12 12 12 12 12 +205 12 NULL 12 12 12 12 12 12 12 12 12 12 +149 12 NULL 12 12 12 12 12 12 12 12 12 12 +438 12 NULL 12 12 12 12 12 12 12 12 12 12 +345 12 NULL 12 12 12 12 12 12 12 12 12 12 +129 12 NULL 12 12 12 12 12 12 12 12 12 12 +170 12 NULL 12 12 12 12 12 12 12 12 12 12 +20 12 NULL 12 12 12 12 12 12 12 12 12 12 +489 12 NULL 12 12 12 12 12 12 12 12 12 12 +157 12 NULL 12 12 12 12 12 12 12 12 12 12 +378 12 NULL 12 12 12 12 12 12 12 12 12 12 +221 12 NULL 12 12 12 12 12 12 12 12 12 12 +92 12 NULL 12 12 12 12 12 12 12 12 12 12 +111 12 NULL 12 12 12 12 12 12 12 12 12 12 +47 12 NULL 12 12 12 12 12 12 12 12 12 12 +72 12 NULL 12 12 12 12 12 12 12 12 12 12 4 12 NULL 12 12 12 12 12 12 12 12 12 12 +280 12 NULL 12 12 12 12 12 12 12 12 12 12 +35 12 NULL 12 12 12 12 12 12 12 12 12 12 +427 12 NULL 12 12 12 12 12 12 12 12 12 12 +277 12 NULL 12 12 12 12 12 12 12 12 12 12 +208 12 NULL 12 12 12 12 12 12 12 12 12 12 +356 12 NULL 12 12 12 12 12 12 12 12 12 12 +399 12 NULL 12 12 12 12 12 12 12 12 12 12 +169 12 NULL 12 12 12 12 12 12 12 12 12 12 +382 12 NULL 12 12 12 12 12 12 12 12 12 12 +498 12 NULL 12 12 12 12 12 12 12 12 12 12 +125 12 NULL 12 12 12 12 12 12 12 12 12 12 +386 12 NULL 12 12 12 12 12 12 12 12 12 12 +437 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +192 12 NULL 12 12 12 12 12 12 12 12 12 12 +286 12 NULL 12 12 12 12 12 12 12 12 12 12 +187 12 NULL 12 12 12 12 12 12 12 12 12 12 +176 12 NULL 12 12 12 12 12 12 12 12 12 12 +54 12 NULL 12 12 12 12 12 12 12 12 12 12 +459 12 NULL 12 12 12 12 12 12 12 12 12 12 +51 12 NULL 12 12 12 12 12 12 12 
12 12 12 +138 12 NULL 12 12 12 12 12 12 12 12 12 12 +103 12 NULL 12 12 12 12 12 12 12 12 12 12 +239 12 NULL 12 12 12 12 12 12 12 12 12 12 +213 12 NULL 12 12 12 12 12 12 12 12 12 12 +216 12 NULL 12 12 12 12 12 12 12 12 12 12 +430 12 NULL 12 12 12 12 12 12 12 12 12 12 +278 12 NULL 12 12 12 12 12 12 12 12 12 12 +176 12 NULL 12 12 12 12 12 12 12 12 12 12 +289 12 NULL 12 12 12 12 12 12 12 12 12 12 +221 12 NULL 12 12 12 12 12 12 12 12 12 12 +65 12 NULL 12 12 12 12 12 12 12 12 12 12 +318 12 NULL 12 12 12 12 12 12 12 12 12 12 +332 12 NULL 12 12 12 12 12 12 12 12 12 12 +311 12 NULL 12 12 12 12 12 12 12 12 12 12 +275 12 NULL 12 12 12 12 12 12 12 12 12 12 +137 12 NULL 12 12 12 12 12 12 12 12 12 12 +241 12 NULL 12 12 12 12 12 12 12 12 12 12 +83 12 NULL 12 12 12 12 12 12 12 12 12 12 +333 12 NULL 12 12 12 12 12 12 12 12 12 12 +180 12 NULL 12 12 12 12 12 12 12 12 12 12 +284 12 NULL 12 12 12 12 12 12 12 12 12 12 +12 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +181 12 NULL 12 12 12 12 12 12 12 12 12 12 +67 12 NULL 12 12 12 12 12 12 12 12 12 12 +260 12 NULL 12 12 12 12 12 12 12 12 12 12 +404 12 NULL 12 12 12 12 12 12 12 12 12 12 +384 12 NULL 12 12 12 12 12 12 12 12 12 12 +489 12 NULL 12 12 12 12 12 12 12 12 12 12 +353 12 NULL 12 12 12 12 12 12 12 12 12 12 +373 12 NULL 12 12 12 12 12 12 12 12 12 12 +272 12 NULL 12 12 12 12 12 12 12 12 12 12 +138 12 NULL 12 12 12 12 12 12 12 12 12 12 +217 12 NULL 12 12 12 12 12 12 12 12 12 12 +84 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +466 12 NULL 12 12 12 12 12 12 12 12 12 12 +58 12 NULL 12 12 12 12 12 12 12 12 12 12 8 12 NULL 12 12 12 12 12 12 12 12 12 12 +411 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +208 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +24 12 NULL 12 12 12 12 12 12 12 12 12 12 +463 12 NULL 12 12 12 12 12 12 12 12 12 12 +431 12 NULL 12 12 12 12 12 12 12 12 12 12 +179 12 NULL 12 12 12 12 12 12 12 12 12 12 +172 12 NULL 12 12 12 12 12 12 12 12 12 12 +42 12 NULL 12 12 12 12 12 12 12 12 12 12 +129 12 NULL 12 12 12 12 12 12 12 12 12 12 +158 12 NULL 12 12 12 12 12 12 12 12 12 12 +119 12 NULL 12 12 12 12 12 12 12 12 12 12 +496 12 NULL 12 12 12 12 12 12 12 12 12 12 0 12 NULL 12 12 12 12 12 12 12 12 12 12 +322 12 NULL 12 12 12 12 12 12 12 12 12 12 +197 12 NULL 12 12 12 12 12 12 12 12 12 12 +468 12 NULL 12 12 12 12 12 12 12 12 12 12 +393 12 NULL 12 12 12 12 12 12 12 12 12 12 +454 12 NULL 12 12 12 12 12 12 12 12 12 12 +100 12 NULL 12 12 12 12 12 12 12 12 12 12 +298 12 NULL 12 12 12 12 12 12 12 12 12 12 +199 12 NULL 12 12 12 12 12 12 12 12 12 12 +191 12 NULL 12 12 12 12 12 12 12 12 12 12 +418 12 NULL 12 12 12 12 12 12 12 12 12 12 +96 12 NULL 12 12 12 12 12 12 12 12 12 12 +26 12 NULL 12 12 12 12 12 12 12 12 12 12 +165 12 NULL 12 12 12 12 12 12 12 12 12 12 +327 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +205 12 NULL 12 12 12 12 12 12 12 12 12 12 +120 12 NULL 12 12 12 12 12 12 12 12 12 12 +131 12 NULL 12 12 12 12 12 12 12 12 12 12 +51 12 NULL 12 12 12 12 12 12 12 12 12 12 +404 12 NULL 12 12 12 12 12 12 12 12 12 12 +43 12 NULL 12 12 12 12 12 12 12 12 12 12 +436 12 NULL 12 12 12 12 12 12 12 12 12 12 +156 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +468 12 NULL 12 12 12 12 12 12 12 12 12 12 +308 12 NULL 12 12 12 12 12 12 12 12 12 12 +95 12 NULL 12 12 12 12 12 12 12 12 12 12 +196 12 NULL 12 12 12 12 12 12 12 12 12 12 +288 12 NULL 12 12 12 12 12 12 12 12 
12 12 +481 12 NULL 12 12 12 12 12 12 12 12 12 12 +457 12 NULL 12 12 12 12 12 12 12 12 12 12 +98 12 NULL 12 12 12 12 12 12 12 12 12 12 +282 12 NULL 12 12 12 12 12 12 12 12 12 12 +197 12 NULL 12 12 12 12 12 12 12 12 12 12 +187 12 NULL 12 12 12 12 12 12 12 12 12 12 +318 12 NULL 12 12 12 12 12 12 12 12 12 12 +318 12 NULL 12 12 12 12 12 12 12 12 12 12 +409 12 NULL 12 12 12 12 12 12 12 12 12 12 +470 12 NULL 12 12 12 12 12 12 12 12 12 12 +137 12 NULL 12 12 12 12 12 12 12 12 12 12 +369 12 NULL 12 12 12 12 12 12 12 12 12 12 +316 12 NULL 12 12 12 12 12 12 12 12 12 12 +169 12 NULL 12 12 12 12 12 12 12 12 12 12 +413 12 NULL 12 12 12 12 12 12 12 12 12 12 +85 12 NULL 12 12 12 12 12 12 12 12 12 12 +77 12 NULL 12 12 12 12 12 12 12 12 12 12 0 12 NULL 12 12 12 12 12 12 12 12 12 12 +490 12 NULL 12 12 12 12 12 12 12 12 12 12 +87 12 NULL 12 12 12 12 12 12 12 12 12 12 +364 12 NULL 12 12 12 12 12 12 12 12 12 12 +179 12 NULL 12 12 12 12 12 12 12 12 12 12 +118 12 NULL 12 12 12 12 12 12 12 12 12 12 +134 12 NULL 12 12 12 12 12 12 12 12 12 12 +395 12 NULL 12 12 12 12 12 12 12 12 12 12 +282 12 NULL 12 12 12 12 12 12 12 12 12 12 +138 12 NULL 12 12 12 12 12 12 12 12 12 12 +238 12 NULL 12 12 12 12 12 12 12 12 12 12 +419 12 NULL 12 12 12 12 12 12 12 12 12 12 +15 12 NULL 12 12 12 12 12 12 12 12 12 12 +118 12 NULL 12 12 12 12 12 12 12 12 12 12 +72 12 NULL 12 12 12 12 12 12 12 12 12 12 +90 12 NULL 12 12 12 12 12 12 12 12 12 12 +307 12 NULL 12 12 12 12 12 12 12 12 12 12 +19 12 NULL 12 12 12 12 12 12 12 12 12 12 +435 12 NULL 12 12 12 12 12 12 12 12 12 12 +10 12 NULL 12 12 12 12 12 12 12 12 12 12 +277 12 NULL 12 12 12 12 12 12 12 12 12 12 +273 12 NULL 12 12 12 12 12 12 12 12 12 12 +306 12 NULL 12 12 12 12 12 12 12 12 12 12 +224 12 NULL 12 12 12 12 12 12 12 12 12 12 +309 12 NULL 12 12 12 12 12 12 12 12 12 12 +389 12 NULL 12 12 12 12 12 12 12 12 12 12 +327 12 NULL 12 12 12 12 12 12 12 12 12 12 +242 12 NULL 12 12 12 12 12 12 12 12 12 12 +369 12 NULL 12 12 12 12 12 12 12 12 12 12 +392 12 NULL 12 12 12 12 12 12 12 12 12 12 +272 12 NULL 12 12 12 12 12 12 12 12 12 12 +331 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +242 12 NULL 12 12 12 12 12 12 12 12 12 12 +452 12 NULL 12 12 12 12 12 12 12 12 12 12 +177 12 NULL 12 12 12 12 12 12 12 12 12 12 +226 12 NULL 12 12 12 12 12 12 12 12 12 12 +5 12 NULL 12 12 12 12 12 12 12 12 12 12 +497 12 NULL 12 12 12 12 12 12 12 12 12 12 +402 12 NULL 12 12 12 12 12 12 12 12 12 12 +396 12 NULL 12 12 12 12 12 12 12 12 12 12 +317 12 NULL 12 12 12 12 12 12 12 12 12 12 +395 12 NULL 12 12 12 12 12 12 12 12 12 12 +58 12 NULL 12 12 12 12 12 12 12 12 12 12 +35 12 NULL 12 12 12 12 12 12 12 12 12 12 +336 12 NULL 12 12 12 12 12 12 12 12 12 12 +95 12 NULL 12 12 12 12 12 12 12 12 12 12 +11 12 NULL 12 12 12 12 12 12 12 12 12 12 +168 12 NULL 12 12 12 12 12 12 12 12 12 12 +34 12 NULL 12 12 12 12 12 12 12 12 12 12 +229 12 NULL 12 12 12 12 12 12 12 12 12 12 +233 12 NULL 12 12 12 12 12 12 12 12 12 12 +143 12 NULL 12 12 12 12 12 12 12 12 12 12 +472 12 NULL 12 12 12 12 12 12 12 12 12 12 +322 12 NULL 12 12 12 12 12 12 12 12 12 12 +498 12 NULL 12 12 12 12 12 12 12 12 12 12 +160 12 NULL 12 12 12 12 12 12 12 12 12 12 +195 12 NULL 12 12 12 12 12 12 12 12 12 12 +42 12 NULL 12 12 12 12 12 12 12 12 12 12 +321 12 NULL 12 12 12 12 12 12 12 12 12 12 +430 12 NULL 12 12 12 12 12 12 12 12 12 12 +119 12 NULL 12 12 12 12 12 12 12 12 12 12 +489 12 NULL 12 12 12 12 12 12 12 12 12 12 +458 12 NULL 12 12 12 12 12 12 12 12 12 12 +78 12 NULL 12 12 12 12 12 12 12 12 12 12 +76 12 NULL 12 12 12 12 12 12 12 12 12 12 
+41 12 NULL 12 12 12 12 12 12 12 12 12 12 +223 12 NULL 12 12 12 12 12 12 12 12 12 12 +492 12 NULL 12 12 12 12 12 12 12 12 12 12 +149 12 NULL 12 12 12 12 12 12 12 12 12 12 +449 12 NULL 12 12 12 12 12 12 12 12 12 12 +218 12 NULL 12 12 12 12 12 12 12 12 12 12 +228 12 NULL 12 12 12 12 12 12 12 12 12 12 +138 12 NULL 12 12 12 12 12 12 12 12 12 12 +453 12 NULL 12 12 12 12 12 12 12 12 12 12 +30 12 NULL 12 12 12 12 12 12 12 12 12 12 +209 12 NULL 12 12 12 12 12 12 12 12 12 12 +64 12 NULL 12 12 12 12 12 12 12 12 12 12 +468 12 NULL 12 12 12 12 12 12 12 12 12 12 +76 12 NULL 12 12 12 12 12 12 12 12 12 12 +74 12 NULL 12 12 12 12 12 12 12 12 12 12 +342 12 NULL 12 12 12 12 12 12 12 12 12 12 +69 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +33 12 NULL 12 12 12 12 12 12 12 12 12 12 +368 12 NULL 12 12 12 12 12 12 12 12 12 12 +103 12 NULL 12 12 12 12 12 12 12 12 12 12 +296 12 NULL 12 12 12 12 12 12 12 12 12 12 +113 12 NULL 12 12 12 12 12 12 12 12 12 12 +216 12 NULL 12 12 12 12 12 12 12 12 12 12 +367 12 NULL 12 12 12 12 12 12 12 12 12 12 +344 12 NULL 12 12 12 12 12 12 12 12 12 12 +167 12 NULL 12 12 12 12 12 12 12 12 12 12 +274 12 NULL 12 12 12 12 12 12 12 12 12 12 +219 12 NULL 12 12 12 12 12 12 12 12 12 12 +239 12 NULL 12 12 12 12 12 12 12 12 12 12 +485 12 NULL 12 12 12 12 12 12 12 12 12 12 +116 12 NULL 12 12 12 12 12 12 12 12 12 12 +223 12 NULL 12 12 12 12 12 12 12 12 12 12 +256 12 NULL 12 12 12 12 12 12 12 12 12 12 +263 12 NULL 12 12 12 12 12 12 12 12 12 12 +70 12 NULL 12 12 12 12 12 12 12 12 12 12 +487 12 NULL 12 12 12 12 12 12 12 12 12 12 +480 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +288 12 NULL 12 12 12 12 12 12 12 12 12 12 +191 12 NULL 12 12 12 12 12 12 12 12 12 12 +5 12 NULL 12 12 12 12 12 12 12 12 12 12 +244 12 NULL 12 12 12 12 12 12 12 12 12 12 +438 12 NULL 12 12 12 12 12 12 12 12 12 12 +128 12 NULL 12 12 12 12 12 12 12 12 12 12 +467 12 NULL 12 12 12 12 12 12 12 12 12 12 +432 12 NULL 12 12 12 12 12 12 12 12 12 12 +202 12 NULL 12 12 12 12 12 12 12 12 12 12 +316 12 NULL 12 12 12 12 12 12 12 12 12 12 +229 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +463 12 NULL 12 12 12 12 12 12 12 12 12 12 +280 12 NULL 12 12 12 12 12 12 12 12 12 12 2 12 NULL 12 12 12 12 12 12 12 12 12 12 -PREHOOK: query: explain select * from masking_test where key > 0 +35 12 NULL 12 12 12 12 12 12 12 12 12 12 +283 12 NULL 12 12 12 12 12 12 12 12 12 12 +331 12 NULL 12 12 12 12 12 12 12 12 12 12 +235 12 NULL 12 12 12 12 12 12 12 12 12 12 +80 12 NULL 12 12 12 12 12 12 12 12 12 12 +44 12 NULL 12 12 12 12 12 12 12 12 12 12 +193 12 NULL 12 12 12 12 12 12 12 12 12 12 +321 12 NULL 12 12 12 12 12 12 12 12 12 12 +335 12 NULL 12 12 12 12 12 12 12 12 12 12 +104 12 NULL 12 12 12 12 12 12 12 12 12 12 +466 12 NULL 12 12 12 12 12 12 12 12 12 12 +366 12 NULL 12 12 12 12 12 12 12 12 12 12 +175 12 NULL 12 12 12 12 12 12 12 12 12 12 +403 12 NULL 12 12 12 12 12 12 12 12 12 12 +483 12 NULL 12 12 12 12 12 12 12 12 12 12 +53 12 NULL 12 12 12 12 12 12 12 12 12 12 +105 12 NULL 12 12 12 12 12 12 12 12 12 12 +257 12 NULL 12 12 12 12 12 12 12 12 12 12 +406 12 NULL 12 12 12 12 12 12 12 12 12 12 +409 12 NULL 12 12 12 12 12 12 12 12 12 12 +190 12 NULL 12 12 12 12 12 12 12 12 12 12 +406 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +114 12 NULL 12 12 12 12 12 12 12 12 12 12 +258 12 NULL 12 12 12 12 12 12 12 12 12 12 +90 12 NULL 12 12 12 12 12 12 12 12 12 12 +203 12 NULL 12 12 12 12 12 12 12 12 12 12 +262 12 NULL 12 12 12 
12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +424 12 NULL 12 12 12 12 12 12 12 12 12 12 +12 12 NULL 12 12 12 12 12 12 12 12 12 12 +396 12 NULL 12 12 12 12 12 12 12 12 12 12 +201 12 NULL 12 12 12 12 12 12 12 12 12 12 +217 12 NULL 12 12 12 12 12 12 12 12 12 12 +164 12 NULL 12 12 12 12 12 12 12 12 12 12 +431 12 NULL 12 12 12 12 12 12 12 12 12 12 +454 12 NULL 12 12 12 12 12 12 12 12 12 12 +478 12 NULL 12 12 12 12 12 12 12 12 12 12 +298 12 NULL 12 12 12 12 12 12 12 12 12 12 +125 12 NULL 12 12 12 12 12 12 12 12 12 12 +431 12 NULL 12 12 12 12 12 12 12 12 12 12 +164 12 NULL 12 12 12 12 12 12 12 12 12 12 +424 12 NULL 12 12 12 12 12 12 12 12 12 12 +187 12 NULL 12 12 12 12 12 12 12 12 12 12 +382 12 NULL 12 12 12 12 12 12 12 12 12 12 +5 12 NULL 12 12 12 12 12 12 12 12 12 12 +70 12 NULL 12 12 12 12 12 12 12 12 12 12 +397 12 NULL 12 12 12 12 12 12 12 12 12 12 +480 12 NULL 12 12 12 12 12 12 12 12 12 12 +291 12 NULL 12 12 12 12 12 12 12 12 12 12 +24 12 NULL 12 12 12 12 12 12 12 12 12 12 +351 12 NULL 12 12 12 12 12 12 12 12 12 12 +255 12 NULL 12 12 12 12 12 12 12 12 12 12 +104 12 NULL 12 12 12 12 12 12 12 12 12 12 +70 12 NULL 12 12 12 12 12 12 12 12 12 12 +163 12 NULL 12 12 12 12 12 12 12 12 12 12 +438 12 NULL 12 12 12 12 12 12 12 12 12 12 +119 12 NULL 12 12 12 12 12 12 12 12 12 12 +414 12 NULL 12 12 12 12 12 12 12 12 12 12 +200 12 NULL 12 12 12 12 12 12 12 12 12 12 +491 12 NULL 12 12 12 12 12 12 12 12 12 12 +237 12 NULL 12 12 12 12 12 12 12 12 12 12 +439 12 NULL 12 12 12 12 12 12 12 12 12 12 +360 12 NULL 12 12 12 12 12 12 12 12 12 12 +248 12 NULL 12 12 12 12 12 12 12 12 12 12 +479 12 NULL 12 12 12 12 12 12 12 12 12 12 +305 12 NULL 12 12 12 12 12 12 12 12 12 12 +417 12 NULL 12 12 12 12 12 12 12 12 12 12 +199 12 NULL 12 12 12 12 12 12 12 12 12 12 +444 12 NULL 12 12 12 12 12 12 12 12 12 12 +120 12 NULL 12 12 12 12 12 12 12 12 12 12 +429 12 NULL 12 12 12 12 12 12 12 12 12 12 +169 12 NULL 12 12 12 12 12 12 12 12 12 12 +443 12 NULL 12 12 12 12 12 12 12 12 12 12 +323 12 NULL 12 12 12 12 12 12 12 12 12 12 +325 12 NULL 12 12 12 12 12 12 12 12 12 12 +277 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +478 12 NULL 12 12 12 12 12 12 12 12 12 12 +178 12 NULL 12 12 12 12 12 12 12 12 12 12 +468 12 NULL 12 12 12 12 12 12 12 12 12 12 +310 12 NULL 12 12 12 12 12 12 12 12 12 12 +317 12 NULL 12 12 12 12 12 12 12 12 12 12 +333 12 NULL 12 12 12 12 12 12 12 12 12 12 +493 12 NULL 12 12 12 12 12 12 12 12 12 12 +460 12 NULL 12 12 12 12 12 12 12 12 12 12 +207 12 NULL 12 12 12 12 12 12 12 12 12 12 +249 12 NULL 12 12 12 12 12 12 12 12 12 12 +265 12 NULL 12 12 12 12 12 12 12 12 12 12 +480 12 NULL 12 12 12 12 12 12 12 12 12 12 +83 12 NULL 12 12 12 12 12 12 12 12 12 12 +136 12 NULL 12 12 12 12 12 12 12 12 12 12 +353 12 NULL 12 12 12 12 12 12 12 12 12 12 +172 12 NULL 12 12 12 12 12 12 12 12 12 12 +214 12 NULL 12 12 12 12 12 12 12 12 12 12 +462 12 NULL 12 12 12 12 12 12 12 12 12 12 +233 12 NULL 12 12 12 12 12 12 12 12 12 12 +406 12 NULL 12 12 12 12 12 12 12 12 12 12 +133 12 NULL 12 12 12 12 12 12 12 12 12 12 +175 12 NULL 12 12 12 12 12 12 12 12 12 12 +189 12 NULL 12 12 12 12 12 12 12 12 12 12 +454 12 NULL 12 12 12 12 12 12 12 12 12 12 +375 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +421 12 NULL 12 12 12 12 12 12 12 12 12 12 +407 12 NULL 12 12 12 12 12 12 12 12 12 12 +384 12 NULL 12 12 12 12 12 12 12 12 12 12 +256 12 NULL 12 12 12 12 12 12 12 12 12 12 +26 12 NULL 12 12 12 12 12 12 12 12 12 12 +134 12 NULL 12 12 12 12 12 12 12 12 12 12 +67 12 NULL 12 
12 12 12 12 12 12 12 12 12 +384 12 NULL 12 12 12 12 12 12 12 12 12 12 +379 12 NULL 12 12 12 12 12 12 12 12 12 12 +18 12 NULL 12 12 12 12 12 12 12 12 12 12 +462 12 NULL 12 12 12 12 12 12 12 12 12 12 +492 12 NULL 12 12 12 12 12 12 12 12 12 12 +100 12 NULL 12 12 12 12 12 12 12 12 12 12 +298 12 NULL 12 12 12 12 12 12 12 12 12 12 +9 12 NULL 12 12 12 12 12 12 12 12 12 12 +341 12 NULL 12 12 12 12 12 12 12 12 12 12 +498 12 NULL 12 12 12 12 12 12 12 12 12 12 +146 12 NULL 12 12 12 12 12 12 12 12 12 12 +458 12 NULL 12 12 12 12 12 12 12 12 12 12 +362 12 NULL 12 12 12 12 12 12 12 12 12 12 +186 12 NULL 12 12 12 12 12 12 12 12 12 12 +285 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +167 12 NULL 12 12 12 12 12 12 12 12 12 12 +18 12 NULL 12 12 12 12 12 12 12 12 12 12 +273 12 NULL 12 12 12 12 12 12 12 12 12 12 +183 12 NULL 12 12 12 12 12 12 12 12 12 12 +281 12 NULL 12 12 12 12 12 12 12 12 12 12 +344 12 NULL 12 12 12 12 12 12 12 12 12 12 +97 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +315 12 NULL 12 12 12 12 12 12 12 12 12 12 +84 12 NULL 12 12 12 12 12 12 12 12 12 12 +28 12 NULL 12 12 12 12 12 12 12 12 12 12 +37 12 NULL 12 12 12 12 12 12 12 12 12 12 +448 12 NULL 12 12 12 12 12 12 12 12 12 12 +152 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +307 12 NULL 12 12 12 12 12 12 12 12 12 12 +194 12 NULL 12 12 12 12 12 12 12 12 12 12 +414 12 NULL 12 12 12 12 12 12 12 12 12 12 +477 12 NULL 12 12 12 12 12 12 12 12 12 12 +222 12 NULL 12 12 12 12 12 12 12 12 12 12 +126 12 NULL 12 12 12 12 12 12 12 12 12 12 +90 12 NULL 12 12 12 12 12 12 12 12 12 12 +169 12 NULL 12 12 12 12 12 12 12 12 12 12 +403 12 NULL 12 12 12 12 12 12 12 12 12 12 +400 12 NULL 12 12 12 12 12 12 12 12 12 12 +200 12 NULL 12 12 12 12 12 12 12 12 12 12 +97 12 NULL 12 12 12 12 12 12 12 12 12 12 +PREHOOK: query: explain select * from masking_test_n14 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test where key > 0 +POSTHOOK: query: explain select * from masking_test_n14 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -231,15 +1707,15 @@ STAGE PLANS: insideView TRUE Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (UDFToInteger(key) > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(key) (type: int), '12' (type: string), ROW__ID (type: struct), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string), '12' (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -252,16 +1728,510 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test where key > 0 +PREHOOK: query: select * from masking_test_n14 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n14 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test where key > 0 +POSTHOOK: query: select * from masking_test_n14 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n14 POSTHOOK: Input: default@src #### A masked pattern was here #### +238 12 NULL 12 12 12 12 12 12 12 12 12 12 +86 12 NULL 12 12 12 12 12 12 12 12 12 12 +311 12 NULL 12 12 12 12 12 12 12 12 12 12 +27 12 NULL 12 12 12 12 12 12 12 12 12 12 +165 12 NULL 12 12 12 12 12 12 12 12 12 12 +409 12 NULL 12 12 12 12 12 12 12 12 12 12 +255 12 NULL 12 12 12 12 12 12 12 12 12 12 +278 12 NULL 12 12 12 12 12 12 12 12 12 12 +98 12 NULL 12 12 12 12 12 12 12 12 12 12 +484 12 NULL 12 12 12 12 12 12 12 12 12 12 +265 12 NULL 12 12 12 12 12 12 12 12 12 12 +193 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +150 12 NULL 12 12 12 12 12 12 12 12 12 12 +273 12 NULL 12 12 12 12 12 12 12 12 12 12 +224 12 NULL 12 12 12 12 12 12 12 12 12 12 +369 12 NULL 12 12 12 12 12 12 12 12 12 12 +66 12 NULL 12 12 12 12 12 12 12 12 12 12 +128 12 NULL 12 12 12 12 12 12 12 12 12 12 +213 12 NULL 12 12 12 12 12 12 12 12 12 12 +146 12 NULL 12 12 12 12 12 12 12 12 12 12 +406 12 NULL 12 12 12 12 12 12 12 12 12 12 +429 12 NULL 12 12 12 12 12 12 12 12 12 12 +374 12 NULL 12 12 12 12 12 12 12 12 12 12 +152 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +145 12 NULL 12 12 12 12 12 12 12 12 12 12 +495 12 NULL 12 12 12 12 12 12 12 12 12 12 +37 12 NULL 12 12 12 12 12 12 12 12 12 12 +327 12 NULL 12 12 12 12 12 12 12 12 12 12 +281 12 NULL 12 12 12 12 12 12 12 12 12 12 +277 12 NULL 12 12 12 12 12 12 12 12 12 12 +209 12 NULL 12 12 12 12 12 12 12 12 12 12 +15 12 NULL 12 12 12 12 12 12 12 12 12 12 +82 12 NULL 12 12 12 12 12 12 12 12 12 12 +403 12 NULL 12 12 12 12 12 12 12 12 12 12 +166 12 NULL 12 12 12 12 12 12 12 12 12 12 +417 12 NULL 12 12 12 12 12 12 12 12 12 12 +430 12 NULL 12 12 12 12 12 12 12 12 12 12 +252 12 NULL 12 12 12 12 12 12 12 12 12 12 +292 12 NULL 12 12 12 12 12 12 12 12 12 12 +219 12 NULL 12 12 12 12 12 12 12 12 12 12 +287 12 NULL 12 12 12 12 12 12 12 12 12 12 +153 12 NULL 12 12 12 12 12 12 12 12 12 12 +193 12 NULL 12 12 12 12 12 12 12 12 12 12 +338 12 NULL 12 12 12 12 12 12 12 12 12 12 +446 12 NULL 12 12 12 12 12 12 12 12 12 12 +459 12 NULL 12 12 12 12 12 12 12 12 12 12 +394 12 NULL 12 12 12 12 12 12 12 12 12 12 +237 12 NULL 12 12 12 12 12 12 12 12 12 12 +482 12 NULL 12 12 12 12 12 12 12 12 12 12 +174 12 NULL 12 12 12 12 12 12 12 12 12 12 +413 12 NULL 12 12 12 12 12 12 12 12 12 12 +494 12 NULL 12 12 12 12 12 12 12 12 12 12 +207 12 NULL 12 12 12 12 12 12 12 12 12 12 +199 12 NULL 12 12 12 12 12 12 12 12 12 12 +466 12 NULL 12 12 12 12 12 12 12 12 12 12 +208 12 NULL 12 12 12 12 12 12 12 12 12 12 +174 12 NULL 12 12 12 12 12 12 12 12 12 12 +399 12 NULL 12 12 12 12 12 12 12 12 12 12 +396 12 NULL 12 12 12 12 12 12 12 12 12 12 +247 12 NULL 12 12 12 12 12 12 12 12 12 12 +417 12 NULL 12 12 12 12 12 12 12 12 12 12 +489 12 NULL 12 12 12 12 12 12 12 12 12 12 +162 12 NULL 12 12 12 12 12 12 12 12 12 12 +377 12 NULL 12 12 12 12 12 12 12 12 12 12 +397 12 NULL 12 12 12 12 12 
12 12 12 12 12 +309 12 NULL 12 12 12 12 12 12 12 12 12 12 +365 12 NULL 12 12 12 12 12 12 12 12 12 12 +266 12 NULL 12 12 12 12 12 12 12 12 12 12 +439 12 NULL 12 12 12 12 12 12 12 12 12 12 +342 12 NULL 12 12 12 12 12 12 12 12 12 12 +367 12 NULL 12 12 12 12 12 12 12 12 12 12 +325 12 NULL 12 12 12 12 12 12 12 12 12 12 +167 12 NULL 12 12 12 12 12 12 12 12 12 12 +195 12 NULL 12 12 12 12 12 12 12 12 12 12 +475 12 NULL 12 12 12 12 12 12 12 12 12 12 +17 12 NULL 12 12 12 12 12 12 12 12 12 12 +113 12 NULL 12 12 12 12 12 12 12 12 12 12 +155 12 NULL 12 12 12 12 12 12 12 12 12 12 +203 12 NULL 12 12 12 12 12 12 12 12 12 12 +339 12 NULL 12 12 12 12 12 12 12 12 12 12 +455 12 NULL 12 12 12 12 12 12 12 12 12 12 +128 12 NULL 12 12 12 12 12 12 12 12 12 12 +311 12 NULL 12 12 12 12 12 12 12 12 12 12 +316 12 NULL 12 12 12 12 12 12 12 12 12 12 +57 12 NULL 12 12 12 12 12 12 12 12 12 12 +302 12 NULL 12 12 12 12 12 12 12 12 12 12 +205 12 NULL 12 12 12 12 12 12 12 12 12 12 +149 12 NULL 12 12 12 12 12 12 12 12 12 12 +438 12 NULL 12 12 12 12 12 12 12 12 12 12 +345 12 NULL 12 12 12 12 12 12 12 12 12 12 +129 12 NULL 12 12 12 12 12 12 12 12 12 12 +170 12 NULL 12 12 12 12 12 12 12 12 12 12 +20 12 NULL 12 12 12 12 12 12 12 12 12 12 +489 12 NULL 12 12 12 12 12 12 12 12 12 12 +157 12 NULL 12 12 12 12 12 12 12 12 12 12 +378 12 NULL 12 12 12 12 12 12 12 12 12 12 +221 12 NULL 12 12 12 12 12 12 12 12 12 12 +92 12 NULL 12 12 12 12 12 12 12 12 12 12 +111 12 NULL 12 12 12 12 12 12 12 12 12 12 +47 12 NULL 12 12 12 12 12 12 12 12 12 12 +72 12 NULL 12 12 12 12 12 12 12 12 12 12 4 12 NULL 12 12 12 12 12 12 12 12 12 12 +280 12 NULL 12 12 12 12 12 12 12 12 12 12 +35 12 NULL 12 12 12 12 12 12 12 12 12 12 +427 12 NULL 12 12 12 12 12 12 12 12 12 12 +277 12 NULL 12 12 12 12 12 12 12 12 12 12 +208 12 NULL 12 12 12 12 12 12 12 12 12 12 +356 12 NULL 12 12 12 12 12 12 12 12 12 12 +399 12 NULL 12 12 12 12 12 12 12 12 12 12 +169 12 NULL 12 12 12 12 12 12 12 12 12 12 +382 12 NULL 12 12 12 12 12 12 12 12 12 12 +498 12 NULL 12 12 12 12 12 12 12 12 12 12 +125 12 NULL 12 12 12 12 12 12 12 12 12 12 +386 12 NULL 12 12 12 12 12 12 12 12 12 12 +437 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +192 12 NULL 12 12 12 12 12 12 12 12 12 12 +286 12 NULL 12 12 12 12 12 12 12 12 12 12 +187 12 NULL 12 12 12 12 12 12 12 12 12 12 +176 12 NULL 12 12 12 12 12 12 12 12 12 12 +54 12 NULL 12 12 12 12 12 12 12 12 12 12 +459 12 NULL 12 12 12 12 12 12 12 12 12 12 +51 12 NULL 12 12 12 12 12 12 12 12 12 12 +138 12 NULL 12 12 12 12 12 12 12 12 12 12 +103 12 NULL 12 12 12 12 12 12 12 12 12 12 +239 12 NULL 12 12 12 12 12 12 12 12 12 12 +213 12 NULL 12 12 12 12 12 12 12 12 12 12 +216 12 NULL 12 12 12 12 12 12 12 12 12 12 +430 12 NULL 12 12 12 12 12 12 12 12 12 12 +278 12 NULL 12 12 12 12 12 12 12 12 12 12 +176 12 NULL 12 12 12 12 12 12 12 12 12 12 +289 12 NULL 12 12 12 12 12 12 12 12 12 12 +221 12 NULL 12 12 12 12 12 12 12 12 12 12 +65 12 NULL 12 12 12 12 12 12 12 12 12 12 +318 12 NULL 12 12 12 12 12 12 12 12 12 12 +332 12 NULL 12 12 12 12 12 12 12 12 12 12 +311 12 NULL 12 12 12 12 12 12 12 12 12 12 +275 12 NULL 12 12 12 12 12 12 12 12 12 12 +137 12 NULL 12 12 12 12 12 12 12 12 12 12 +241 12 NULL 12 12 12 12 12 12 12 12 12 12 +83 12 NULL 12 12 12 12 12 12 12 12 12 12 +333 12 NULL 12 12 12 12 12 12 12 12 12 12 +180 12 NULL 12 12 12 12 12 12 12 12 12 12 +284 12 NULL 12 12 12 12 12 12 12 12 12 12 +12 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +181 12 NULL 12 12 12 12 12 12 12 12 12 12 +67 12 NULL 12 12 12 12 12 
12 12 12 12 12 +260 12 NULL 12 12 12 12 12 12 12 12 12 12 +404 12 NULL 12 12 12 12 12 12 12 12 12 12 +384 12 NULL 12 12 12 12 12 12 12 12 12 12 +489 12 NULL 12 12 12 12 12 12 12 12 12 12 +353 12 NULL 12 12 12 12 12 12 12 12 12 12 +373 12 NULL 12 12 12 12 12 12 12 12 12 12 +272 12 NULL 12 12 12 12 12 12 12 12 12 12 +138 12 NULL 12 12 12 12 12 12 12 12 12 12 +217 12 NULL 12 12 12 12 12 12 12 12 12 12 +84 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +466 12 NULL 12 12 12 12 12 12 12 12 12 12 +58 12 NULL 12 12 12 12 12 12 12 12 12 12 8 12 NULL 12 12 12 12 12 12 12 12 12 12 +411 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +208 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +24 12 NULL 12 12 12 12 12 12 12 12 12 12 +463 12 NULL 12 12 12 12 12 12 12 12 12 12 +431 12 NULL 12 12 12 12 12 12 12 12 12 12 +179 12 NULL 12 12 12 12 12 12 12 12 12 12 +172 12 NULL 12 12 12 12 12 12 12 12 12 12 +42 12 NULL 12 12 12 12 12 12 12 12 12 12 +129 12 NULL 12 12 12 12 12 12 12 12 12 12 +158 12 NULL 12 12 12 12 12 12 12 12 12 12 +119 12 NULL 12 12 12 12 12 12 12 12 12 12 +496 12 NULL 12 12 12 12 12 12 12 12 12 12 +322 12 NULL 12 12 12 12 12 12 12 12 12 12 +197 12 NULL 12 12 12 12 12 12 12 12 12 12 +468 12 NULL 12 12 12 12 12 12 12 12 12 12 +393 12 NULL 12 12 12 12 12 12 12 12 12 12 +454 12 NULL 12 12 12 12 12 12 12 12 12 12 +100 12 NULL 12 12 12 12 12 12 12 12 12 12 +298 12 NULL 12 12 12 12 12 12 12 12 12 12 +199 12 NULL 12 12 12 12 12 12 12 12 12 12 +191 12 NULL 12 12 12 12 12 12 12 12 12 12 +418 12 NULL 12 12 12 12 12 12 12 12 12 12 +96 12 NULL 12 12 12 12 12 12 12 12 12 12 +26 12 NULL 12 12 12 12 12 12 12 12 12 12 +165 12 NULL 12 12 12 12 12 12 12 12 12 12 +327 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +205 12 NULL 12 12 12 12 12 12 12 12 12 12 +120 12 NULL 12 12 12 12 12 12 12 12 12 12 +131 12 NULL 12 12 12 12 12 12 12 12 12 12 +51 12 NULL 12 12 12 12 12 12 12 12 12 12 +404 12 NULL 12 12 12 12 12 12 12 12 12 12 +43 12 NULL 12 12 12 12 12 12 12 12 12 12 +436 12 NULL 12 12 12 12 12 12 12 12 12 12 +156 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +468 12 NULL 12 12 12 12 12 12 12 12 12 12 +308 12 NULL 12 12 12 12 12 12 12 12 12 12 +95 12 NULL 12 12 12 12 12 12 12 12 12 12 +196 12 NULL 12 12 12 12 12 12 12 12 12 12 +288 12 NULL 12 12 12 12 12 12 12 12 12 12 +481 12 NULL 12 12 12 12 12 12 12 12 12 12 +457 12 NULL 12 12 12 12 12 12 12 12 12 12 +98 12 NULL 12 12 12 12 12 12 12 12 12 12 +282 12 NULL 12 12 12 12 12 12 12 12 12 12 +197 12 NULL 12 12 12 12 12 12 12 12 12 12 +187 12 NULL 12 12 12 12 12 12 12 12 12 12 +318 12 NULL 12 12 12 12 12 12 12 12 12 12 +318 12 NULL 12 12 12 12 12 12 12 12 12 12 +409 12 NULL 12 12 12 12 12 12 12 12 12 12 +470 12 NULL 12 12 12 12 12 12 12 12 12 12 +137 12 NULL 12 12 12 12 12 12 12 12 12 12 +369 12 NULL 12 12 12 12 12 12 12 12 12 12 +316 12 NULL 12 12 12 12 12 12 12 12 12 12 +169 12 NULL 12 12 12 12 12 12 12 12 12 12 +413 12 NULL 12 12 12 12 12 12 12 12 12 12 +85 12 NULL 12 12 12 12 12 12 12 12 12 12 +77 12 NULL 12 12 12 12 12 12 12 12 12 12 +490 12 NULL 12 12 12 12 12 12 12 12 12 12 +87 12 NULL 12 12 12 12 12 12 12 12 12 12 +364 12 NULL 12 12 12 12 12 12 12 12 12 12 +179 12 NULL 12 12 12 12 12 12 12 12 12 12 +118 12 NULL 12 12 12 12 12 12 12 12 12 12 +134 12 NULL 12 12 12 12 12 12 12 12 12 12 +395 12 NULL 12 12 12 12 12 12 12 12 12 12 +282 12 NULL 12 12 12 12 12 12 12 12 12 12 +138 12 NULL 12 12 12 12 12 
12 12 12 12 12 +238 12 NULL 12 12 12 12 12 12 12 12 12 12 +419 12 NULL 12 12 12 12 12 12 12 12 12 12 +15 12 NULL 12 12 12 12 12 12 12 12 12 12 +118 12 NULL 12 12 12 12 12 12 12 12 12 12 +72 12 NULL 12 12 12 12 12 12 12 12 12 12 +90 12 NULL 12 12 12 12 12 12 12 12 12 12 +307 12 NULL 12 12 12 12 12 12 12 12 12 12 +19 12 NULL 12 12 12 12 12 12 12 12 12 12 +435 12 NULL 12 12 12 12 12 12 12 12 12 12 +10 12 NULL 12 12 12 12 12 12 12 12 12 12 +277 12 NULL 12 12 12 12 12 12 12 12 12 12 +273 12 NULL 12 12 12 12 12 12 12 12 12 12 +306 12 NULL 12 12 12 12 12 12 12 12 12 12 +224 12 NULL 12 12 12 12 12 12 12 12 12 12 +309 12 NULL 12 12 12 12 12 12 12 12 12 12 +389 12 NULL 12 12 12 12 12 12 12 12 12 12 +327 12 NULL 12 12 12 12 12 12 12 12 12 12 +242 12 NULL 12 12 12 12 12 12 12 12 12 12 +369 12 NULL 12 12 12 12 12 12 12 12 12 12 +392 12 NULL 12 12 12 12 12 12 12 12 12 12 +272 12 NULL 12 12 12 12 12 12 12 12 12 12 +331 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +242 12 NULL 12 12 12 12 12 12 12 12 12 12 +452 12 NULL 12 12 12 12 12 12 12 12 12 12 +177 12 NULL 12 12 12 12 12 12 12 12 12 12 +226 12 NULL 12 12 12 12 12 12 12 12 12 12 +5 12 NULL 12 12 12 12 12 12 12 12 12 12 +497 12 NULL 12 12 12 12 12 12 12 12 12 12 +402 12 NULL 12 12 12 12 12 12 12 12 12 12 +396 12 NULL 12 12 12 12 12 12 12 12 12 12 +317 12 NULL 12 12 12 12 12 12 12 12 12 12 +395 12 NULL 12 12 12 12 12 12 12 12 12 12 +58 12 NULL 12 12 12 12 12 12 12 12 12 12 +35 12 NULL 12 12 12 12 12 12 12 12 12 12 +336 12 NULL 12 12 12 12 12 12 12 12 12 12 +95 12 NULL 12 12 12 12 12 12 12 12 12 12 +11 12 NULL 12 12 12 12 12 12 12 12 12 12 +168 12 NULL 12 12 12 12 12 12 12 12 12 12 +34 12 NULL 12 12 12 12 12 12 12 12 12 12 +229 12 NULL 12 12 12 12 12 12 12 12 12 12 +233 12 NULL 12 12 12 12 12 12 12 12 12 12 +143 12 NULL 12 12 12 12 12 12 12 12 12 12 +472 12 NULL 12 12 12 12 12 12 12 12 12 12 +322 12 NULL 12 12 12 12 12 12 12 12 12 12 +498 12 NULL 12 12 12 12 12 12 12 12 12 12 +160 12 NULL 12 12 12 12 12 12 12 12 12 12 +195 12 NULL 12 12 12 12 12 12 12 12 12 12 +42 12 NULL 12 12 12 12 12 12 12 12 12 12 +321 12 NULL 12 12 12 12 12 12 12 12 12 12 +430 12 NULL 12 12 12 12 12 12 12 12 12 12 +119 12 NULL 12 12 12 12 12 12 12 12 12 12 +489 12 NULL 12 12 12 12 12 12 12 12 12 12 +458 12 NULL 12 12 12 12 12 12 12 12 12 12 +78 12 NULL 12 12 12 12 12 12 12 12 12 12 +76 12 NULL 12 12 12 12 12 12 12 12 12 12 +41 12 NULL 12 12 12 12 12 12 12 12 12 12 +223 12 NULL 12 12 12 12 12 12 12 12 12 12 +492 12 NULL 12 12 12 12 12 12 12 12 12 12 +149 12 NULL 12 12 12 12 12 12 12 12 12 12 +449 12 NULL 12 12 12 12 12 12 12 12 12 12 +218 12 NULL 12 12 12 12 12 12 12 12 12 12 +228 12 NULL 12 12 12 12 12 12 12 12 12 12 +138 12 NULL 12 12 12 12 12 12 12 12 12 12 +453 12 NULL 12 12 12 12 12 12 12 12 12 12 +30 12 NULL 12 12 12 12 12 12 12 12 12 12 +209 12 NULL 12 12 12 12 12 12 12 12 12 12 +64 12 NULL 12 12 12 12 12 12 12 12 12 12 +468 12 NULL 12 12 12 12 12 12 12 12 12 12 +76 12 NULL 12 12 12 12 12 12 12 12 12 12 +74 12 NULL 12 12 12 12 12 12 12 12 12 12 +342 12 NULL 12 12 12 12 12 12 12 12 12 12 +69 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +33 12 NULL 12 12 12 12 12 12 12 12 12 12 +368 12 NULL 12 12 12 12 12 12 12 12 12 12 +103 12 NULL 12 12 12 12 12 12 12 12 12 12 +296 12 NULL 12 12 12 12 12 12 12 12 12 12 +113 12 NULL 12 12 12 12 12 12 12 12 12 12 +216 12 NULL 12 12 12 12 12 12 12 12 12 12 +367 12 NULL 12 12 12 12 12 12 12 12 12 12 +344 12 NULL 12 12 12 12 12 12 12 12 12 12 +167 12 NULL 12 12 12 12 12 12 12 
12 12 12 +274 12 NULL 12 12 12 12 12 12 12 12 12 12 +219 12 NULL 12 12 12 12 12 12 12 12 12 12 +239 12 NULL 12 12 12 12 12 12 12 12 12 12 +485 12 NULL 12 12 12 12 12 12 12 12 12 12 +116 12 NULL 12 12 12 12 12 12 12 12 12 12 +223 12 NULL 12 12 12 12 12 12 12 12 12 12 +256 12 NULL 12 12 12 12 12 12 12 12 12 12 +263 12 NULL 12 12 12 12 12 12 12 12 12 12 +70 12 NULL 12 12 12 12 12 12 12 12 12 12 +487 12 NULL 12 12 12 12 12 12 12 12 12 12 +480 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +288 12 NULL 12 12 12 12 12 12 12 12 12 12 +191 12 NULL 12 12 12 12 12 12 12 12 12 12 +5 12 NULL 12 12 12 12 12 12 12 12 12 12 +244 12 NULL 12 12 12 12 12 12 12 12 12 12 +438 12 NULL 12 12 12 12 12 12 12 12 12 12 +128 12 NULL 12 12 12 12 12 12 12 12 12 12 +467 12 NULL 12 12 12 12 12 12 12 12 12 12 +432 12 NULL 12 12 12 12 12 12 12 12 12 12 +202 12 NULL 12 12 12 12 12 12 12 12 12 12 +316 12 NULL 12 12 12 12 12 12 12 12 12 12 +229 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +463 12 NULL 12 12 12 12 12 12 12 12 12 12 +280 12 NULL 12 12 12 12 12 12 12 12 12 12 2 12 NULL 12 12 12 12 12 12 12 12 12 12 +35 12 NULL 12 12 12 12 12 12 12 12 12 12 +283 12 NULL 12 12 12 12 12 12 12 12 12 12 +331 12 NULL 12 12 12 12 12 12 12 12 12 12 +235 12 NULL 12 12 12 12 12 12 12 12 12 12 +80 12 NULL 12 12 12 12 12 12 12 12 12 12 +44 12 NULL 12 12 12 12 12 12 12 12 12 12 +193 12 NULL 12 12 12 12 12 12 12 12 12 12 +321 12 NULL 12 12 12 12 12 12 12 12 12 12 +335 12 NULL 12 12 12 12 12 12 12 12 12 12 +104 12 NULL 12 12 12 12 12 12 12 12 12 12 +466 12 NULL 12 12 12 12 12 12 12 12 12 12 +366 12 NULL 12 12 12 12 12 12 12 12 12 12 +175 12 NULL 12 12 12 12 12 12 12 12 12 12 +403 12 NULL 12 12 12 12 12 12 12 12 12 12 +483 12 NULL 12 12 12 12 12 12 12 12 12 12 +53 12 NULL 12 12 12 12 12 12 12 12 12 12 +105 12 NULL 12 12 12 12 12 12 12 12 12 12 +257 12 NULL 12 12 12 12 12 12 12 12 12 12 +406 12 NULL 12 12 12 12 12 12 12 12 12 12 +409 12 NULL 12 12 12 12 12 12 12 12 12 12 +190 12 NULL 12 12 12 12 12 12 12 12 12 12 +406 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +114 12 NULL 12 12 12 12 12 12 12 12 12 12 +258 12 NULL 12 12 12 12 12 12 12 12 12 12 +90 12 NULL 12 12 12 12 12 12 12 12 12 12 +203 12 NULL 12 12 12 12 12 12 12 12 12 12 +262 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +424 12 NULL 12 12 12 12 12 12 12 12 12 12 +12 12 NULL 12 12 12 12 12 12 12 12 12 12 +396 12 NULL 12 12 12 12 12 12 12 12 12 12 +201 12 NULL 12 12 12 12 12 12 12 12 12 12 +217 12 NULL 12 12 12 12 12 12 12 12 12 12 +164 12 NULL 12 12 12 12 12 12 12 12 12 12 +431 12 NULL 12 12 12 12 12 12 12 12 12 12 +454 12 NULL 12 12 12 12 12 12 12 12 12 12 +478 12 NULL 12 12 12 12 12 12 12 12 12 12 +298 12 NULL 12 12 12 12 12 12 12 12 12 12 +125 12 NULL 12 12 12 12 12 12 12 12 12 12 +431 12 NULL 12 12 12 12 12 12 12 12 12 12 +164 12 NULL 12 12 12 12 12 12 12 12 12 12 +424 12 NULL 12 12 12 12 12 12 12 12 12 12 +187 12 NULL 12 12 12 12 12 12 12 12 12 12 +382 12 NULL 12 12 12 12 12 12 12 12 12 12 +5 12 NULL 12 12 12 12 12 12 12 12 12 12 +70 12 NULL 12 12 12 12 12 12 12 12 12 12 +397 12 NULL 12 12 12 12 12 12 12 12 12 12 +480 12 NULL 12 12 12 12 12 12 12 12 12 12 +291 12 NULL 12 12 12 12 12 12 12 12 12 12 +24 12 NULL 12 12 12 12 12 12 12 12 12 12 +351 12 NULL 12 12 12 12 12 12 12 12 12 12 +255 12 NULL 12 12 12 12 12 12 12 12 12 12 +104 12 NULL 12 12 12 12 12 12 12 12 12 12 +70 12 NULL 12 12 12 12 12 12 12 12 12 12 +163 12 NULL 12 12 12 12 12 12 12 12 
12 12 +438 12 NULL 12 12 12 12 12 12 12 12 12 12 +119 12 NULL 12 12 12 12 12 12 12 12 12 12 +414 12 NULL 12 12 12 12 12 12 12 12 12 12 +200 12 NULL 12 12 12 12 12 12 12 12 12 12 +491 12 NULL 12 12 12 12 12 12 12 12 12 12 +237 12 NULL 12 12 12 12 12 12 12 12 12 12 +439 12 NULL 12 12 12 12 12 12 12 12 12 12 +360 12 NULL 12 12 12 12 12 12 12 12 12 12 +248 12 NULL 12 12 12 12 12 12 12 12 12 12 +479 12 NULL 12 12 12 12 12 12 12 12 12 12 +305 12 NULL 12 12 12 12 12 12 12 12 12 12 +417 12 NULL 12 12 12 12 12 12 12 12 12 12 +199 12 NULL 12 12 12 12 12 12 12 12 12 12 +444 12 NULL 12 12 12 12 12 12 12 12 12 12 +120 12 NULL 12 12 12 12 12 12 12 12 12 12 +429 12 NULL 12 12 12 12 12 12 12 12 12 12 +169 12 NULL 12 12 12 12 12 12 12 12 12 12 +443 12 NULL 12 12 12 12 12 12 12 12 12 12 +323 12 NULL 12 12 12 12 12 12 12 12 12 12 +325 12 NULL 12 12 12 12 12 12 12 12 12 12 +277 12 NULL 12 12 12 12 12 12 12 12 12 12 +230 12 NULL 12 12 12 12 12 12 12 12 12 12 +478 12 NULL 12 12 12 12 12 12 12 12 12 12 +178 12 NULL 12 12 12 12 12 12 12 12 12 12 +468 12 NULL 12 12 12 12 12 12 12 12 12 12 +310 12 NULL 12 12 12 12 12 12 12 12 12 12 +317 12 NULL 12 12 12 12 12 12 12 12 12 12 +333 12 NULL 12 12 12 12 12 12 12 12 12 12 +493 12 NULL 12 12 12 12 12 12 12 12 12 12 +460 12 NULL 12 12 12 12 12 12 12 12 12 12 +207 12 NULL 12 12 12 12 12 12 12 12 12 12 +249 12 NULL 12 12 12 12 12 12 12 12 12 12 +265 12 NULL 12 12 12 12 12 12 12 12 12 12 +480 12 NULL 12 12 12 12 12 12 12 12 12 12 +83 12 NULL 12 12 12 12 12 12 12 12 12 12 +136 12 NULL 12 12 12 12 12 12 12 12 12 12 +353 12 NULL 12 12 12 12 12 12 12 12 12 12 +172 12 NULL 12 12 12 12 12 12 12 12 12 12 +214 12 NULL 12 12 12 12 12 12 12 12 12 12 +462 12 NULL 12 12 12 12 12 12 12 12 12 12 +233 12 NULL 12 12 12 12 12 12 12 12 12 12 +406 12 NULL 12 12 12 12 12 12 12 12 12 12 +133 12 NULL 12 12 12 12 12 12 12 12 12 12 +175 12 NULL 12 12 12 12 12 12 12 12 12 12 +189 12 NULL 12 12 12 12 12 12 12 12 12 12 +454 12 NULL 12 12 12 12 12 12 12 12 12 12 +375 12 NULL 12 12 12 12 12 12 12 12 12 12 +401 12 NULL 12 12 12 12 12 12 12 12 12 12 +421 12 NULL 12 12 12 12 12 12 12 12 12 12 +407 12 NULL 12 12 12 12 12 12 12 12 12 12 +384 12 NULL 12 12 12 12 12 12 12 12 12 12 +256 12 NULL 12 12 12 12 12 12 12 12 12 12 +26 12 NULL 12 12 12 12 12 12 12 12 12 12 +134 12 NULL 12 12 12 12 12 12 12 12 12 12 +67 12 NULL 12 12 12 12 12 12 12 12 12 12 +384 12 NULL 12 12 12 12 12 12 12 12 12 12 +379 12 NULL 12 12 12 12 12 12 12 12 12 12 +18 12 NULL 12 12 12 12 12 12 12 12 12 12 +462 12 NULL 12 12 12 12 12 12 12 12 12 12 +492 12 NULL 12 12 12 12 12 12 12 12 12 12 +100 12 NULL 12 12 12 12 12 12 12 12 12 12 +298 12 NULL 12 12 12 12 12 12 12 12 12 12 +9 12 NULL 12 12 12 12 12 12 12 12 12 12 +341 12 NULL 12 12 12 12 12 12 12 12 12 12 +498 12 NULL 12 12 12 12 12 12 12 12 12 12 +146 12 NULL 12 12 12 12 12 12 12 12 12 12 +458 12 NULL 12 12 12 12 12 12 12 12 12 12 +362 12 NULL 12 12 12 12 12 12 12 12 12 12 +186 12 NULL 12 12 12 12 12 12 12 12 12 12 +285 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +167 12 NULL 12 12 12 12 12 12 12 12 12 12 +18 12 NULL 12 12 12 12 12 12 12 12 12 12 +273 12 NULL 12 12 12 12 12 12 12 12 12 12 +183 12 NULL 12 12 12 12 12 12 12 12 12 12 +281 12 NULL 12 12 12 12 12 12 12 12 12 12 +344 12 NULL 12 12 12 12 12 12 12 12 12 12 +97 12 NULL 12 12 12 12 12 12 12 12 12 12 +469 12 NULL 12 12 12 12 12 12 12 12 12 12 +315 12 NULL 12 12 12 12 12 12 12 12 12 12 +84 12 NULL 12 12 12 12 12 12 12 12 12 12 +28 12 NULL 12 12 12 12 12 12 12 12 12 12 +37 12 NULL 12 12 12 12 12 12 12 
12 12 12 +448 12 NULL 12 12 12 12 12 12 12 12 12 12 +152 12 NULL 12 12 12 12 12 12 12 12 12 12 +348 12 NULL 12 12 12 12 12 12 12 12 12 12 +307 12 NULL 12 12 12 12 12 12 12 12 12 12 +194 12 NULL 12 12 12 12 12 12 12 12 12 12 +414 12 NULL 12 12 12 12 12 12 12 12 12 12 +477 12 NULL 12 12 12 12 12 12 12 12 12 12 +222 12 NULL 12 12 12 12 12 12 12 12 12 12 +126 12 NULL 12 12 12 12 12 12 12 12 12 12 +90 12 NULL 12 12 12 12 12 12 12 12 12 12 +169 12 NULL 12 12 12 12 12 12 12 12 12 12 +403 12 NULL 12 12 12 12 12 12 12 12 12 12 +400 12 NULL 12 12 12 12 12 12 12 12 12 12 +200 12 NULL 12 12 12 12 12 12 12 12 12 12 +97 12 NULL 12 12 12 12 12 12 12 12 12 12 diff --git a/ql/src/test/results/clientpositive/masking_8.q.out b/ql/src/test/results/clientpositive/masking_8.q.out index b0baafa006..cc4d155b3d 100644 --- a/ql/src/test/results/clientpositive/masking_8.q.out +++ b/ql/src/test/results/clientpositive/masking_8.q.out @@ -1,289 +1,1708 @@ -PREHOOK: query: drop table masking_test +PREHOOK: query: drop table masking_test_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table masking_test +POSTHOOK: query: drop table masking_test_n2 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table masking_test as select cast(key as int) as key, value, '12' from src +PREHOOK: query: create table masking_test_n2 as select cast(key as int) as key, value, '12' from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create table masking_test as select cast(key as int) as key, value, '12' from src +PREHOOK: Output: default@masking_test_n2 +POSTHOOK: query: create table masking_test_n2 as select cast(key as int) as key, value, '12' from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test._c2 SIMPLE [] -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain select *, ROW__ID from masking_test +POSTHOOK: Output: default@masking_test_n2 +POSTHOOK: Lineage: masking_test_n2._c2 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select *, ROW__ID from masking_test_n2 PREHOOK: type: QUERY -POSTHOOK: query: explain select *, ROW__ID from masking_test +POSTHOOK: query: explain select *, ROW__ID from masking_test_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test - Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string), _c2 (type: string), ROW__ID (type: struct) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num 
rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_n2 + Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string), _c2 (type: string), ROW__ID (type: struct) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select *, ROW__ID from masking_test +PREHOOK: query: select *, ROW__ID from masking_test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select *, ROW__ID from masking_test +POSTHOOK: query: select *, ROW__ID from masking_test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n2 #### A masked pattern was here #### -0 0_lav 12 NULL -4 4_lav 12 NULL -8 8_lav 12 NULL -0 0_lav 12 NULL -0 0_lav 12 NULL -2 2_lav 12 NULL -PREHOOK: query: explain select * from masking_test +238 val_238 12 NULL +86 val_86 12 NULL +311 val_311 12 NULL +27 val_27 12 NULL +165 val_165 12 NULL +409 val_409 12 NULL +255 val_255 12 NULL +278 val_278 12 NULL +98 val_98 12 NULL +484 val_484 12 NULL +265 val_265 12 NULL +193 val_193 12 NULL +401 val_401 12 NULL +150 val_150 12 NULL +273 val_273 12 NULL +224 val_224 12 NULL +369 val_369 12 NULL +66 val_66 12 NULL +128 val_128 12 NULL +213 val_213 12 NULL +146 val_146 12 NULL +406 val_406 12 NULL +429 val_429 12 NULL +374 val_374 12 NULL +152 val_152 12 NULL +469 val_469 12 NULL +145 val_145 12 NULL +495 val_495 12 NULL +37 val_37 12 NULL +327 val_327 12 NULL +281 val_281 12 NULL +277 val_277 12 NULL +209 val_209 12 NULL +15 val_15 12 NULL +82 val_82 12 NULL +403 val_403 12 NULL +166 val_166 12 NULL +417 val_417 12 NULL +430 val_430 12 NULL +252 val_252 12 NULL +292 val_292 12 NULL +219 val_219 12 NULL +287 val_287 12 NULL +153 val_153 12 NULL +193 val_193 12 NULL +338 val_338 12 NULL +446 val_446 12 NULL +459 val_459 12 NULL +394 val_394 12 NULL +237 val_237 12 NULL +482 val_482 12 NULL +174 val_174 12 NULL +413 val_413 12 NULL +494 val_494 12 NULL +207 val_207 12 NULL +199 val_199 12 NULL +466 val_466 12 NULL +208 val_208 12 NULL +174 val_174 12 NULL +399 val_399 12 NULL +396 val_396 12 NULL +247 val_247 12 NULL +417 val_417 12 NULL +489 val_489 12 NULL +162 val_162 12 NULL +377 val_377 12 NULL +397 val_397 12 NULL +309 val_309 12 NULL +365 val_365 12 NULL +266 val_266 12 NULL +439 val_439 12 NULL +342 val_342 12 NULL +367 val_367 12 NULL +325 val_325 12 NULL +167 val_167 12 NULL +195 val_195 12 NULL +475 val_475 12 NULL +17 val_17 12 NULL +113 val_113 12 NULL +155 val_155 12 NULL +203 val_203 12 NULL +339 val_339 12 NULL +0 val_0 12 NULL +455 val_455 12 NULL +128 val_128 12 NULL +311 val_311 12 NULL +316 val_316 12 NULL +57 val_57 12 NULL +302 val_302 12 NULL +205 val_205 12 NULL +149 val_149 12 NULL +438 val_438 12 NULL +345 val_345 12 NULL +129 val_129 12 NULL +170 val_170 12 NULL +20 val_20 12 NULL +489 val_489 12 NULL +157 val_157 12 NULL +378 val_378 12 NULL +221 val_221 12 NULL +92 val_92 12 NULL +111 val_111 12 NULL +47 val_47 12 NULL +72 val_72 12 
NULL +4 val_4 12 NULL +280 val_280 12 NULL +35 val_35 12 NULL +427 val_427 12 NULL +277 val_277 12 NULL +208 val_208 12 NULL +356 val_356 12 NULL +399 val_399 12 NULL +169 val_169 12 NULL +382 val_382 12 NULL +498 val_498 12 NULL +125 val_125 12 NULL +386 val_386 12 NULL +437 val_437 12 NULL +469 val_469 12 NULL +192 val_192 12 NULL +286 val_286 12 NULL +187 val_187 12 NULL +176 val_176 12 NULL +54 val_54 12 NULL +459 val_459 12 NULL +51 val_51 12 NULL +138 val_138 12 NULL +103 val_103 12 NULL +239 val_239 12 NULL +213 val_213 12 NULL +216 val_216 12 NULL +430 val_430 12 NULL +278 val_278 12 NULL +176 val_176 12 NULL +289 val_289 12 NULL +221 val_221 12 NULL +65 val_65 12 NULL +318 val_318 12 NULL +332 val_332 12 NULL +311 val_311 12 NULL +275 val_275 12 NULL +137 val_137 12 NULL +241 val_241 12 NULL +83 val_83 12 NULL +333 val_333 12 NULL +180 val_180 12 NULL +284 val_284 12 NULL +12 val_12 12 NULL +230 val_230 12 NULL +181 val_181 12 NULL +67 val_67 12 NULL +260 val_260 12 NULL +404 val_404 12 NULL +384 val_384 12 NULL +489 val_489 12 NULL +353 val_353 12 NULL +373 val_373 12 NULL +272 val_272 12 NULL +138 val_138 12 NULL +217 val_217 12 NULL +84 val_84 12 NULL +348 val_348 12 NULL +466 val_466 12 NULL +58 val_58 12 NULL +8 val_8 12 NULL +411 val_411 12 NULL +230 val_230 12 NULL +208 val_208 12 NULL +348 val_348 12 NULL +24 val_24 12 NULL +463 val_463 12 NULL +431 val_431 12 NULL +179 val_179 12 NULL +172 val_172 12 NULL +42 val_42 12 NULL +129 val_129 12 NULL +158 val_158 12 NULL +119 val_119 12 NULL +496 val_496 12 NULL +0 val_0 12 NULL +322 val_322 12 NULL +197 val_197 12 NULL +468 val_468 12 NULL +393 val_393 12 NULL +454 val_454 12 NULL +100 val_100 12 NULL +298 val_298 12 NULL +199 val_199 12 NULL +191 val_191 12 NULL +418 val_418 12 NULL +96 val_96 12 NULL +26 val_26 12 NULL +165 val_165 12 NULL +327 val_327 12 NULL +230 val_230 12 NULL +205 val_205 12 NULL +120 val_120 12 NULL +131 val_131 12 NULL +51 val_51 12 NULL +404 val_404 12 NULL +43 val_43 12 NULL +436 val_436 12 NULL +156 val_156 12 NULL +469 val_469 12 NULL +468 val_468 12 NULL +308 val_308 12 NULL +95 val_95 12 NULL +196 val_196 12 NULL +288 val_288 12 NULL +481 val_481 12 NULL +457 val_457 12 NULL +98 val_98 12 NULL +282 val_282 12 NULL +197 val_197 12 NULL +187 val_187 12 NULL +318 val_318 12 NULL +318 val_318 12 NULL +409 val_409 12 NULL +470 val_470 12 NULL +137 val_137 12 NULL +369 val_369 12 NULL +316 val_316 12 NULL +169 val_169 12 NULL +413 val_413 12 NULL +85 val_85 12 NULL +77 val_77 12 NULL +0 val_0 12 NULL +490 val_490 12 NULL +87 val_87 12 NULL +364 val_364 12 NULL +179 val_179 12 NULL +118 val_118 12 NULL +134 val_134 12 NULL +395 val_395 12 NULL +282 val_282 12 NULL +138 val_138 12 NULL +238 val_238 12 NULL +419 val_419 12 NULL +15 val_15 12 NULL +118 val_118 12 NULL +72 val_72 12 NULL +90 val_90 12 NULL +307 val_307 12 NULL +19 val_19 12 NULL +435 val_435 12 NULL +10 val_10 12 NULL +277 val_277 12 NULL +273 val_273 12 NULL +306 val_306 12 NULL +224 val_224 12 NULL +309 val_309 12 NULL +389 val_389 12 NULL +327 val_327 12 NULL +242 val_242 12 NULL +369 val_369 12 NULL +392 val_392 12 NULL +272 val_272 12 NULL +331 val_331 12 NULL +401 val_401 12 NULL +242 val_242 12 NULL +452 val_452 12 NULL +177 val_177 12 NULL +226 val_226 12 NULL +5 val_5 12 NULL +497 val_497 12 NULL +402 val_402 12 NULL +396 val_396 12 NULL +317 val_317 12 NULL +395 val_395 12 NULL +58 val_58 12 NULL +35 val_35 12 NULL +336 val_336 12 NULL +95 val_95 12 NULL +11 val_11 12 NULL +168 val_168 12 NULL +34 val_34 12 NULL +229 val_229 12 
NULL +233 val_233 12 NULL +143 val_143 12 NULL +472 val_472 12 NULL +322 val_322 12 NULL +498 val_498 12 NULL +160 val_160 12 NULL +195 val_195 12 NULL +42 val_42 12 NULL +321 val_321 12 NULL +430 val_430 12 NULL +119 val_119 12 NULL +489 val_489 12 NULL +458 val_458 12 NULL +78 val_78 12 NULL +76 val_76 12 NULL +41 val_41 12 NULL +223 val_223 12 NULL +492 val_492 12 NULL +149 val_149 12 NULL +449 val_449 12 NULL +218 val_218 12 NULL +228 val_228 12 NULL +138 val_138 12 NULL +453 val_453 12 NULL +30 val_30 12 NULL +209 val_209 12 NULL +64 val_64 12 NULL +468 val_468 12 NULL +76 val_76 12 NULL +74 val_74 12 NULL +342 val_342 12 NULL +69 val_69 12 NULL +230 val_230 12 NULL +33 val_33 12 NULL +368 val_368 12 NULL +103 val_103 12 NULL +296 val_296 12 NULL +113 val_113 12 NULL +216 val_216 12 NULL +367 val_367 12 NULL +344 val_344 12 NULL +167 val_167 12 NULL +274 val_274 12 NULL +219 val_219 12 NULL +239 val_239 12 NULL +485 val_485 12 NULL +116 val_116 12 NULL +223 val_223 12 NULL +256 val_256 12 NULL +263 val_263 12 NULL +70 val_70 12 NULL +487 val_487 12 NULL +480 val_480 12 NULL +401 val_401 12 NULL +288 val_288 12 NULL +191 val_191 12 NULL +5 val_5 12 NULL +244 val_244 12 NULL +438 val_438 12 NULL +128 val_128 12 NULL +467 val_467 12 NULL +432 val_432 12 NULL +202 val_202 12 NULL +316 val_316 12 NULL +229 val_229 12 NULL +469 val_469 12 NULL +463 val_463 12 NULL +280 val_280 12 NULL +2 val_2 12 NULL +35 val_35 12 NULL +283 val_283 12 NULL +331 val_331 12 NULL +235 val_235 12 NULL +80 val_80 12 NULL +44 val_44 12 NULL +193 val_193 12 NULL +321 val_321 12 NULL +335 val_335 12 NULL +104 val_104 12 NULL +466 val_466 12 NULL +366 val_366 12 NULL +175 val_175 12 NULL +403 val_403 12 NULL +483 val_483 12 NULL +53 val_53 12 NULL +105 val_105 12 NULL +257 val_257 12 NULL +406 val_406 12 NULL +409 val_409 12 NULL +190 val_190 12 NULL +406 val_406 12 NULL +401 val_401 12 NULL +114 val_114 12 NULL +258 val_258 12 NULL +90 val_90 12 NULL +203 val_203 12 NULL +262 val_262 12 NULL +348 val_348 12 NULL +424 val_424 12 NULL +12 val_12 12 NULL +396 val_396 12 NULL +201 val_201 12 NULL +217 val_217 12 NULL +164 val_164 12 NULL +431 val_431 12 NULL +454 val_454 12 NULL +478 val_478 12 NULL +298 val_298 12 NULL +125 val_125 12 NULL +431 val_431 12 NULL +164 val_164 12 NULL +424 val_424 12 NULL +187 val_187 12 NULL +382 val_382 12 NULL +5 val_5 12 NULL +70 val_70 12 NULL +397 val_397 12 NULL +480 val_480 12 NULL +291 val_291 12 NULL +24 val_24 12 NULL +351 val_351 12 NULL +255 val_255 12 NULL +104 val_104 12 NULL +70 val_70 12 NULL +163 val_163 12 NULL +438 val_438 12 NULL +119 val_119 12 NULL +414 val_414 12 NULL +200 val_200 12 NULL +491 val_491 12 NULL +237 val_237 12 NULL +439 val_439 12 NULL +360 val_360 12 NULL +248 val_248 12 NULL +479 val_479 12 NULL +305 val_305 12 NULL +417 val_417 12 NULL +199 val_199 12 NULL +444 val_444 12 NULL +120 val_120 12 NULL +429 val_429 12 NULL +169 val_169 12 NULL +443 val_443 12 NULL +323 val_323 12 NULL +325 val_325 12 NULL +277 val_277 12 NULL +230 val_230 12 NULL +478 val_478 12 NULL +178 val_178 12 NULL +468 val_468 12 NULL +310 val_310 12 NULL +317 val_317 12 NULL +333 val_333 12 NULL +493 val_493 12 NULL +460 val_460 12 NULL +207 val_207 12 NULL +249 val_249 12 NULL +265 val_265 12 NULL +480 val_480 12 NULL +83 val_83 12 NULL +136 val_136 12 NULL +353 val_353 12 NULL +172 val_172 12 NULL +214 val_214 12 NULL +462 val_462 12 NULL +233 val_233 12 NULL +406 val_406 12 NULL +133 val_133 12 NULL +175 val_175 12 NULL +189 val_189 12 NULL +454 val_454 12 NULL +375 val_375 
12 NULL +401 val_401 12 NULL +421 val_421 12 NULL +407 val_407 12 NULL +384 val_384 12 NULL +256 val_256 12 NULL +26 val_26 12 NULL +134 val_134 12 NULL +67 val_67 12 NULL +384 val_384 12 NULL +379 val_379 12 NULL +18 val_18 12 NULL +462 val_462 12 NULL +492 val_492 12 NULL +100 val_100 12 NULL +298 val_298 12 NULL +9 val_9 12 NULL +341 val_341 12 NULL +498 val_498 12 NULL +146 val_146 12 NULL +458 val_458 12 NULL +362 val_362 12 NULL +186 val_186 12 NULL +285 val_285 12 NULL +348 val_348 12 NULL +167 val_167 12 NULL +18 val_18 12 NULL +273 val_273 12 NULL +183 val_183 12 NULL +281 val_281 12 NULL +344 val_344 12 NULL +97 val_97 12 NULL +469 val_469 12 NULL +315 val_315 12 NULL +84 val_84 12 NULL +28 val_28 12 NULL +37 val_37 12 NULL +448 val_448 12 NULL +152 val_152 12 NULL +348 val_348 12 NULL +307 val_307 12 NULL +194 val_194 12 NULL +414 val_414 12 NULL +477 val_477 12 NULL +222 val_222 12 NULL +126 val_126 12 NULL +90 val_90 12 NULL +169 val_169 12 NULL +403 val_403 12 NULL +400 val_400 12 NULL +200 val_200 12 NULL +97 val_97 12 NULL +PREHOOK: query: explain select * from masking_test_n2 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test +POSTHOOK: query: explain select * from masking_test_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test - Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string), _c2 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_n2 + Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string), _c2 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select * from masking_test +PREHOOK: query: select * from masking_test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test +POSTHOOK: query: select * from masking_test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n2 #### A masked pattern was here #### -0 0_lav 12 -4 4_lav 12 -8 8_lav 12 -0 0_lav 12 -0 0_lav 12 -2 2_lav 12 -PREHOOK: query: explain select INPUT__FILE__NAME, *, ROW__ID from masking_test +238 val_238 12 +86 val_86 12 +311 val_311 12 +27 val_27 12 +165 val_165 12 +409 val_409 12 +255 val_255 12 +278 val_278 12 +98 val_98 12 +484 val_484 12 +265 val_265 12 +193 val_193 12 +401 val_401 12 +150 val_150 
12 +273 val_273 12 +224 val_224 12 +369 val_369 12 +66 val_66 12 +128 val_128 12 +213 val_213 12 +146 val_146 12 +406 val_406 12 +429 val_429 12 +374 val_374 12 +152 val_152 12 +469 val_469 12 +145 val_145 12 +495 val_495 12 +37 val_37 12 +327 val_327 12 +281 val_281 12 +277 val_277 12 +209 val_209 12 +15 val_15 12 +82 val_82 12 +403 val_403 12 +166 val_166 12 +417 val_417 12 +430 val_430 12 +252 val_252 12 +292 val_292 12 +219 val_219 12 +287 val_287 12 +153 val_153 12 +193 val_193 12 +338 val_338 12 +446 val_446 12 +459 val_459 12 +394 val_394 12 +237 val_237 12 +482 val_482 12 +174 val_174 12 +413 val_413 12 +494 val_494 12 +207 val_207 12 +199 val_199 12 +466 val_466 12 +208 val_208 12 +174 val_174 12 +399 val_399 12 +396 val_396 12 +247 val_247 12 +417 val_417 12 +489 val_489 12 +162 val_162 12 +377 val_377 12 +397 val_397 12 +309 val_309 12 +365 val_365 12 +266 val_266 12 +439 val_439 12 +342 val_342 12 +367 val_367 12 +325 val_325 12 +167 val_167 12 +195 val_195 12 +475 val_475 12 +17 val_17 12 +113 val_113 12 +155 val_155 12 +203 val_203 12 +339 val_339 12 +0 val_0 12 +455 val_455 12 +128 val_128 12 +311 val_311 12 +316 val_316 12 +57 val_57 12 +302 val_302 12 +205 val_205 12 +149 val_149 12 +438 val_438 12 +345 val_345 12 +129 val_129 12 +170 val_170 12 +20 val_20 12 +489 val_489 12 +157 val_157 12 +378 val_378 12 +221 val_221 12 +92 val_92 12 +111 val_111 12 +47 val_47 12 +72 val_72 12 +4 val_4 12 +280 val_280 12 +35 val_35 12 +427 val_427 12 +277 val_277 12 +208 val_208 12 +356 val_356 12 +399 val_399 12 +169 val_169 12 +382 val_382 12 +498 val_498 12 +125 val_125 12 +386 val_386 12 +437 val_437 12 +469 val_469 12 +192 val_192 12 +286 val_286 12 +187 val_187 12 +176 val_176 12 +54 val_54 12 +459 val_459 12 +51 val_51 12 +138 val_138 12 +103 val_103 12 +239 val_239 12 +213 val_213 12 +216 val_216 12 +430 val_430 12 +278 val_278 12 +176 val_176 12 +289 val_289 12 +221 val_221 12 +65 val_65 12 +318 val_318 12 +332 val_332 12 +311 val_311 12 +275 val_275 12 +137 val_137 12 +241 val_241 12 +83 val_83 12 +333 val_333 12 +180 val_180 12 +284 val_284 12 +12 val_12 12 +230 val_230 12 +181 val_181 12 +67 val_67 12 +260 val_260 12 +404 val_404 12 +384 val_384 12 +489 val_489 12 +353 val_353 12 +373 val_373 12 +272 val_272 12 +138 val_138 12 +217 val_217 12 +84 val_84 12 +348 val_348 12 +466 val_466 12 +58 val_58 12 +8 val_8 12 +411 val_411 12 +230 val_230 12 +208 val_208 12 +348 val_348 12 +24 val_24 12 +463 val_463 12 +431 val_431 12 +179 val_179 12 +172 val_172 12 +42 val_42 12 +129 val_129 12 +158 val_158 12 +119 val_119 12 +496 val_496 12 +0 val_0 12 +322 val_322 12 +197 val_197 12 +468 val_468 12 +393 val_393 12 +454 val_454 12 +100 val_100 12 +298 val_298 12 +199 val_199 12 +191 val_191 12 +418 val_418 12 +96 val_96 12 +26 val_26 12 +165 val_165 12 +327 val_327 12 +230 val_230 12 +205 val_205 12 +120 val_120 12 +131 val_131 12 +51 val_51 12 +404 val_404 12 +43 val_43 12 +436 val_436 12 +156 val_156 12 +469 val_469 12 +468 val_468 12 +308 val_308 12 +95 val_95 12 +196 val_196 12 +288 val_288 12 +481 val_481 12 +457 val_457 12 +98 val_98 12 +282 val_282 12 +197 val_197 12 +187 val_187 12 +318 val_318 12 +318 val_318 12 +409 val_409 12 +470 val_470 12 +137 val_137 12 +369 val_369 12 +316 val_316 12 +169 val_169 12 +413 val_413 12 +85 val_85 12 +77 val_77 12 +0 val_0 12 +490 val_490 12 +87 val_87 12 +364 val_364 12 +179 val_179 12 +118 val_118 12 +134 val_134 12 +395 val_395 12 +282 val_282 12 +138 val_138 12 +238 val_238 12 +419 val_419 12 +15 val_15 12 +118 val_118 12 +72 val_72 12 +90 
val_90 12 +307 val_307 12 +19 val_19 12 +435 val_435 12 +10 val_10 12 +277 val_277 12 +273 val_273 12 +306 val_306 12 +224 val_224 12 +309 val_309 12 +389 val_389 12 +327 val_327 12 +242 val_242 12 +369 val_369 12 +392 val_392 12 +272 val_272 12 +331 val_331 12 +401 val_401 12 +242 val_242 12 +452 val_452 12 +177 val_177 12 +226 val_226 12 +5 val_5 12 +497 val_497 12 +402 val_402 12 +396 val_396 12 +317 val_317 12 +395 val_395 12 +58 val_58 12 +35 val_35 12 +336 val_336 12 +95 val_95 12 +11 val_11 12 +168 val_168 12 +34 val_34 12 +229 val_229 12 +233 val_233 12 +143 val_143 12 +472 val_472 12 +322 val_322 12 +498 val_498 12 +160 val_160 12 +195 val_195 12 +42 val_42 12 +321 val_321 12 +430 val_430 12 +119 val_119 12 +489 val_489 12 +458 val_458 12 +78 val_78 12 +76 val_76 12 +41 val_41 12 +223 val_223 12 +492 val_492 12 +149 val_149 12 +449 val_449 12 +218 val_218 12 +228 val_228 12 +138 val_138 12 +453 val_453 12 +30 val_30 12 +209 val_209 12 +64 val_64 12 +468 val_468 12 +76 val_76 12 +74 val_74 12 +342 val_342 12 +69 val_69 12 +230 val_230 12 +33 val_33 12 +368 val_368 12 +103 val_103 12 +296 val_296 12 +113 val_113 12 +216 val_216 12 +367 val_367 12 +344 val_344 12 +167 val_167 12 +274 val_274 12 +219 val_219 12 +239 val_239 12 +485 val_485 12 +116 val_116 12 +223 val_223 12 +256 val_256 12 +263 val_263 12 +70 val_70 12 +487 val_487 12 +480 val_480 12 +401 val_401 12 +288 val_288 12 +191 val_191 12 +5 val_5 12 +244 val_244 12 +438 val_438 12 +128 val_128 12 +467 val_467 12 +432 val_432 12 +202 val_202 12 +316 val_316 12 +229 val_229 12 +469 val_469 12 +463 val_463 12 +280 val_280 12 +2 val_2 12 +35 val_35 12 +283 val_283 12 +331 val_331 12 +235 val_235 12 +80 val_80 12 +44 val_44 12 +193 val_193 12 +321 val_321 12 +335 val_335 12 +104 val_104 12 +466 val_466 12 +366 val_366 12 +175 val_175 12 +403 val_403 12 +483 val_483 12 +53 val_53 12 +105 val_105 12 +257 val_257 12 +406 val_406 12 +409 val_409 12 +190 val_190 12 +406 val_406 12 +401 val_401 12 +114 val_114 12 +258 val_258 12 +90 val_90 12 +203 val_203 12 +262 val_262 12 +348 val_348 12 +424 val_424 12 +12 val_12 12 +396 val_396 12 +201 val_201 12 +217 val_217 12 +164 val_164 12 +431 val_431 12 +454 val_454 12 +478 val_478 12 +298 val_298 12 +125 val_125 12 +431 val_431 12 +164 val_164 12 +424 val_424 12 +187 val_187 12 +382 val_382 12 +5 val_5 12 +70 val_70 12 +397 val_397 12 +480 val_480 12 +291 val_291 12 +24 val_24 12 +351 val_351 12 +255 val_255 12 +104 val_104 12 +70 val_70 12 +163 val_163 12 +438 val_438 12 +119 val_119 12 +414 val_414 12 +200 val_200 12 +491 val_491 12 +237 val_237 12 +439 val_439 12 +360 val_360 12 +248 val_248 12 +479 val_479 12 +305 val_305 12 +417 val_417 12 +199 val_199 12 +444 val_444 12 +120 val_120 12 +429 val_429 12 +169 val_169 12 +443 val_443 12 +323 val_323 12 +325 val_325 12 +277 val_277 12 +230 val_230 12 +478 val_478 12 +178 val_178 12 +468 val_468 12 +310 val_310 12 +317 val_317 12 +333 val_333 12 +493 val_493 12 +460 val_460 12 +207 val_207 12 +249 val_249 12 +265 val_265 12 +480 val_480 12 +83 val_83 12 +136 val_136 12 +353 val_353 12 +172 val_172 12 +214 val_214 12 +462 val_462 12 +233 val_233 12 +406 val_406 12 +133 val_133 12 +175 val_175 12 +189 val_189 12 +454 val_454 12 +375 val_375 12 +401 val_401 12 +421 val_421 12 +407 val_407 12 +384 val_384 12 +256 val_256 12 +26 val_26 12 +134 val_134 12 +67 val_67 12 +384 val_384 12 +379 val_379 12 +18 val_18 12 +462 val_462 12 +492 val_492 12 +100 val_100 12 +298 val_298 12 +9 val_9 12 +341 val_341 12 +498 val_498 12 +146 val_146 12 +458 
val_458 12 +362 val_362 12 +186 val_186 12 +285 val_285 12 +348 val_348 12 +167 val_167 12 +18 val_18 12 +273 val_273 12 +183 val_183 12 +281 val_281 12 +344 val_344 12 +97 val_97 12 +469 val_469 12 +315 val_315 12 +84 val_84 12 +28 val_28 12 +37 val_37 12 +448 val_448 12 +152 val_152 12 +348 val_348 12 +307 val_307 12 +194 val_194 12 +414 val_414 12 +477 val_477 12 +222 val_222 12 +126 val_126 12 +90 val_90 12 +169 val_169 12 +403 val_403 12 +400 val_400 12 +200 val_200 12 +97 val_97 12 +PREHOOK: query: explain select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2 PREHOOK: type: QUERY -POSTHOOK: query: explain select INPUT__FILE__NAME, *, ROW__ID from masking_test +POSTHOOK: query: explain select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test - Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: INPUT__FILE__NAME (type: string), key (type: int), reverse(value) (type: string), _c2 (type: string), ROW__ID (type: struct) - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 83 Data size: 1130 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_n2 + Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: INPUT__FILE__NAME (type: string), key (type: int), value (type: string), _c2 (type: string), ROW__ID (type: struct) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 500 Data size: 6812 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select INPUT__FILE__NAME, *, ROW__ID from masking_test +PREHOOK: query: select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select INPUT__FILE__NAME, *, ROW__ID from masking_test +POSTHOOK: query: select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n2 #### A masked pattern was here #### -PREHOOK: query: drop table masking_test +PREHOOK: query: drop table masking_test_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@masking_test -PREHOOK: Output: default@masking_test -POSTHOOK: query: drop table masking_test +PREHOOK: Input: default@masking_test_n2 +PREHOOK: Output: default@masking_test_n2 +POSTHOOK: query: drop table masking_test_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@masking_test -POSTHOOK: Output: default@masking_test -PREHOOK: query: create table masking_test as select cast(key as int) as key, '12' +POSTHOOK: Input: default@masking_test_n2 +POSTHOOK: Output: 
default@masking_test_n2 +PREHOOK: query: create table masking_test_n2 as select cast(key as int) as key, '12' '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create table masking_test as select cast(key as int) as key, '12' +PREHOOK: Output: default@masking_test_n2 +POSTHOOK: query: create table masking_test_n2 as select cast(key as int) as key, '12' '12', '12', '12', '12', '12', '12', '12', '12', '12', '12' from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test._c1 SIMPLE [] -POSTHOOK: Lineage: masking_test._c10 SIMPLE [] -POSTHOOK: Lineage: masking_test._c2 SIMPLE [] -POSTHOOK: Lineage: masking_test._c3 SIMPLE [] -POSTHOOK: Lineage: masking_test._c4 SIMPLE [] -POSTHOOK: Lineage: masking_test._c5 SIMPLE [] -POSTHOOK: Lineage: masking_test._c6 SIMPLE [] -POSTHOOK: Lineage: masking_test._c7 SIMPLE [] -POSTHOOK: Lineage: masking_test._c8 SIMPLE [] -POSTHOOK: Lineage: masking_test._c9 SIMPLE [] -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: explain select ROW__ID, * from masking_test +POSTHOOK: Output: default@masking_test_n2 +POSTHOOK: Lineage: masking_test_n2._c1 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c10 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c2 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c3 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c4 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c5 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c6 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c7 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c8 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2._c9 SIMPLE [] +POSTHOOK: Lineage: masking_test_n2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: explain select ROW__ID, * from masking_test_n2 PREHOOK: type: QUERY -POSTHOOK: query: explain select ROW__ID, * from masking_test +POSTHOOK: query: explain select ROW__ID, * from masking_test_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test - Statistics: Num rows: 500 Data size: 17406 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 2889 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: ROW__ID (type: struct), key (type: int), _c1 (type: string), _c2 (type: string), _c3 (type: string), _c4 (type: string), _c5 (type: string), _c6 (type: string), _c7 (type: string), _c8 (type: string), _c9 (type: string), _c10 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 - Statistics: Num rows: 83 Data size: 2889 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 83 Data size: 2889 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: 
vectorized - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_n2 + Statistics: Num rows: 500 Data size: 17406 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ROW__ID (type: struct), key (type: int), _c1 (type: string), _c2 (type: string), _c3 (type: string), _c4 (type: string), _c5 (type: string), _c6 (type: string), _c7 (type: string), _c8 (type: string), _c9 (type: string), _c10 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 + Statistics: Num rows: 500 Data size: 17406 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select ROW__ID, * from masking_test +PREHOOK: query: select ROW__ID, * from masking_test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n2 #### A masked pattern was here #### -POSTHOOK: query: select ROW__ID, * from masking_test +POSTHOOK: query: select ROW__ID, * from masking_test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n2 #### A masked pattern was here #### +NULL 238 1212 12 12 12 12 12 12 12 12 12 +NULL 86 1212 12 12 12 12 12 12 12 12 12 +NULL 311 1212 12 12 12 12 12 12 12 12 12 +NULL 27 1212 12 12 12 12 12 12 12 12 12 +NULL 165 1212 12 12 12 12 12 12 12 12 12 +NULL 409 1212 12 12 12 12 12 12 12 12 12 +NULL 255 1212 12 12 12 12 12 12 12 12 12 +NULL 278 1212 12 12 12 12 12 12 12 12 12 +NULL 98 1212 12 12 12 12 12 12 12 12 12 +NULL 484 1212 12 12 12 12 12 12 12 12 12 +NULL 265 1212 12 12 12 12 12 12 12 12 12 +NULL 193 1212 12 12 12 12 12 12 12 12 12 +NULL 401 1212 12 12 12 12 12 12 12 12 12 +NULL 150 1212 12 12 12 12 12 12 12 12 12 +NULL 273 1212 12 12 12 12 12 12 12 12 12 +NULL 224 1212 12 12 12 12 12 12 12 12 12 +NULL 369 1212 12 12 12 12 12 12 12 12 12 +NULL 66 1212 12 12 12 12 12 12 12 12 12 +NULL 128 1212 12 12 12 12 12 12 12 12 12 +NULL 213 1212 12 12 12 12 12 12 12 12 12 +NULL 146 1212 12 12 12 12 12 12 12 12 12 +NULL 406 1212 12 12 12 12 12 12 12 12 12 +NULL 429 1212 12 12 12 12 12 12 12 12 12 +NULL 374 1212 12 12 12 12 12 12 12 12 12 +NULL 152 1212 12 12 12 12 12 12 12 12 12 +NULL 469 1212 12 12 12 12 12 12 12 12 12 +NULL 145 1212 12 12 12 12 12 12 12 12 12 +NULL 495 1212 12 12 12 12 12 12 12 12 12 +NULL 37 1212 12 12 12 12 12 12 12 12 12 +NULL 327 1212 12 12 12 12 12 12 12 12 12 +NULL 281 1212 12 12 12 12 12 12 12 12 12 +NULL 277 1212 12 12 12 12 12 12 12 12 12 +NULL 209 1212 12 12 12 12 12 12 12 12 12 +NULL 15 1212 12 12 12 12 12 12 12 12 12 +NULL 82 1212 12 12 12 12 12 12 12 12 12 +NULL 403 1212 12 12 12 12 12 12 12 12 12 +NULL 166 1212 12 12 12 12 12 12 12 12 12 +NULL 417 1212 12 12 12 12 12 12 12 12 12 +NULL 430 1212 12 12 12 12 12 12 12 12 12 +NULL 252 1212 12 12 12 12 12 12 12 12 12 +NULL 292 1212 12 12 12 12 12 12 12 12 12 +NULL 219 1212 12 12 12 12 12 12 12 12 12 +NULL 287 1212 12 12 12 12 12 12 12 12 12 +NULL 153 1212 12 12 12 12 12 12 12 12 12 +NULL 193 1212 12 12 12 12 12 12 12 12 12 +NULL 338 1212 12 12 12 12 12 12 12 12 12 +NULL 446 1212 12 12 12 12 12 12 12 12 12 +NULL 459 1212 12 12 12 12 12 12 12 12 12 +NULL 394 1212 12 12 12 12 12 12 12 12 12 +NULL 237 1212 12 12 12 12 12 12 12 12 12 +NULL 482 1212 12 12 12 12 12 12 12 12 12 +NULL 174 1212 12 12 12 12 12 12 12 12 12 +NULL 413 1212 12 12 12 12 12 12 12 12 12 +NULL 494 1212 12 12 12 12 12 12 12 12 12 +NULL 207 1212 12 12 12 12 12 12 12 12 12 +NULL 199 1212 12 12 12 12 12 12 12 12 12 +NULL 466 1212 12 12 12 12 12 12 
12 12 12 +NULL 208 1212 12 12 12 12 12 12 12 12 12 +NULL 174 1212 12 12 12 12 12 12 12 12 12 +NULL 399 1212 12 12 12 12 12 12 12 12 12 +NULL 396 1212 12 12 12 12 12 12 12 12 12 +NULL 247 1212 12 12 12 12 12 12 12 12 12 +NULL 417 1212 12 12 12 12 12 12 12 12 12 +NULL 489 1212 12 12 12 12 12 12 12 12 12 +NULL 162 1212 12 12 12 12 12 12 12 12 12 +NULL 377 1212 12 12 12 12 12 12 12 12 12 +NULL 397 1212 12 12 12 12 12 12 12 12 12 +NULL 309 1212 12 12 12 12 12 12 12 12 12 +NULL 365 1212 12 12 12 12 12 12 12 12 12 +NULL 266 1212 12 12 12 12 12 12 12 12 12 +NULL 439 1212 12 12 12 12 12 12 12 12 12 +NULL 342 1212 12 12 12 12 12 12 12 12 12 +NULL 367 1212 12 12 12 12 12 12 12 12 12 +NULL 325 1212 12 12 12 12 12 12 12 12 12 +NULL 167 1212 12 12 12 12 12 12 12 12 12 +NULL 195 1212 12 12 12 12 12 12 12 12 12 +NULL 475 1212 12 12 12 12 12 12 12 12 12 +NULL 17 1212 12 12 12 12 12 12 12 12 12 +NULL 113 1212 12 12 12 12 12 12 12 12 12 +NULL 155 1212 12 12 12 12 12 12 12 12 12 +NULL 203 1212 12 12 12 12 12 12 12 12 12 +NULL 339 1212 12 12 12 12 12 12 12 12 12 NULL 0 1212 12 12 12 12 12 12 12 12 12 +NULL 455 1212 12 12 12 12 12 12 12 12 12 +NULL 128 1212 12 12 12 12 12 12 12 12 12 +NULL 311 1212 12 12 12 12 12 12 12 12 12 +NULL 316 1212 12 12 12 12 12 12 12 12 12 +NULL 57 1212 12 12 12 12 12 12 12 12 12 +NULL 302 1212 12 12 12 12 12 12 12 12 12 +NULL 205 1212 12 12 12 12 12 12 12 12 12 +NULL 149 1212 12 12 12 12 12 12 12 12 12 +NULL 438 1212 12 12 12 12 12 12 12 12 12 +NULL 345 1212 12 12 12 12 12 12 12 12 12 +NULL 129 1212 12 12 12 12 12 12 12 12 12 +NULL 170 1212 12 12 12 12 12 12 12 12 12 +NULL 20 1212 12 12 12 12 12 12 12 12 12 +NULL 489 1212 12 12 12 12 12 12 12 12 12 +NULL 157 1212 12 12 12 12 12 12 12 12 12 +NULL 378 1212 12 12 12 12 12 12 12 12 12 +NULL 221 1212 12 12 12 12 12 12 12 12 12 +NULL 92 1212 12 12 12 12 12 12 12 12 12 +NULL 111 1212 12 12 12 12 12 12 12 12 12 +NULL 47 1212 12 12 12 12 12 12 12 12 12 +NULL 72 1212 12 12 12 12 12 12 12 12 12 NULL 4 1212 12 12 12 12 12 12 12 12 12 +NULL 280 1212 12 12 12 12 12 12 12 12 12 +NULL 35 1212 12 12 12 12 12 12 12 12 12 +NULL 427 1212 12 12 12 12 12 12 12 12 12 +NULL 277 1212 12 12 12 12 12 12 12 12 12 +NULL 208 1212 12 12 12 12 12 12 12 12 12 +NULL 356 1212 12 12 12 12 12 12 12 12 12 +NULL 399 1212 12 12 12 12 12 12 12 12 12 +NULL 169 1212 12 12 12 12 12 12 12 12 12 +NULL 382 1212 12 12 12 12 12 12 12 12 12 +NULL 498 1212 12 12 12 12 12 12 12 12 12 +NULL 125 1212 12 12 12 12 12 12 12 12 12 +NULL 386 1212 12 12 12 12 12 12 12 12 12 +NULL 437 1212 12 12 12 12 12 12 12 12 12 +NULL 469 1212 12 12 12 12 12 12 12 12 12 +NULL 192 1212 12 12 12 12 12 12 12 12 12 +NULL 286 1212 12 12 12 12 12 12 12 12 12 +NULL 187 1212 12 12 12 12 12 12 12 12 12 +NULL 176 1212 12 12 12 12 12 12 12 12 12 +NULL 54 1212 12 12 12 12 12 12 12 12 12 +NULL 459 1212 12 12 12 12 12 12 12 12 12 +NULL 51 1212 12 12 12 12 12 12 12 12 12 +NULL 138 1212 12 12 12 12 12 12 12 12 12 +NULL 103 1212 12 12 12 12 12 12 12 12 12 +NULL 239 1212 12 12 12 12 12 12 12 12 12 +NULL 213 1212 12 12 12 12 12 12 12 12 12 +NULL 216 1212 12 12 12 12 12 12 12 12 12 +NULL 430 1212 12 12 12 12 12 12 12 12 12 +NULL 278 1212 12 12 12 12 12 12 12 12 12 +NULL 176 1212 12 12 12 12 12 12 12 12 12 +NULL 289 1212 12 12 12 12 12 12 12 12 12 +NULL 221 1212 12 12 12 12 12 12 12 12 12 +NULL 65 1212 12 12 12 12 12 12 12 12 12 +NULL 318 1212 12 12 12 12 12 12 12 12 12 +NULL 332 1212 12 12 12 12 12 12 12 12 12 +NULL 311 1212 12 12 12 12 12 12 12 12 12 +NULL 275 1212 12 12 12 12 12 12 12 12 12 +NULL 137 1212 12 12 12 12 12 12 
12 12 12 +NULL 241 1212 12 12 12 12 12 12 12 12 12 +NULL 83 1212 12 12 12 12 12 12 12 12 12 +NULL 333 1212 12 12 12 12 12 12 12 12 12 +NULL 180 1212 12 12 12 12 12 12 12 12 12 +NULL 284 1212 12 12 12 12 12 12 12 12 12 +NULL 12 1212 12 12 12 12 12 12 12 12 12 +NULL 230 1212 12 12 12 12 12 12 12 12 12 +NULL 181 1212 12 12 12 12 12 12 12 12 12 +NULL 67 1212 12 12 12 12 12 12 12 12 12 +NULL 260 1212 12 12 12 12 12 12 12 12 12 +NULL 404 1212 12 12 12 12 12 12 12 12 12 +NULL 384 1212 12 12 12 12 12 12 12 12 12 +NULL 489 1212 12 12 12 12 12 12 12 12 12 +NULL 353 1212 12 12 12 12 12 12 12 12 12 +NULL 373 1212 12 12 12 12 12 12 12 12 12 +NULL 272 1212 12 12 12 12 12 12 12 12 12 +NULL 138 1212 12 12 12 12 12 12 12 12 12 +NULL 217 1212 12 12 12 12 12 12 12 12 12 +NULL 84 1212 12 12 12 12 12 12 12 12 12 +NULL 348 1212 12 12 12 12 12 12 12 12 12 +NULL 466 1212 12 12 12 12 12 12 12 12 12 +NULL 58 1212 12 12 12 12 12 12 12 12 12 NULL 8 1212 12 12 12 12 12 12 12 12 12 +NULL 411 1212 12 12 12 12 12 12 12 12 12 +NULL 230 1212 12 12 12 12 12 12 12 12 12 +NULL 208 1212 12 12 12 12 12 12 12 12 12 +NULL 348 1212 12 12 12 12 12 12 12 12 12 +NULL 24 1212 12 12 12 12 12 12 12 12 12 +NULL 463 1212 12 12 12 12 12 12 12 12 12 +NULL 431 1212 12 12 12 12 12 12 12 12 12 +NULL 179 1212 12 12 12 12 12 12 12 12 12 +NULL 172 1212 12 12 12 12 12 12 12 12 12 +NULL 42 1212 12 12 12 12 12 12 12 12 12 +NULL 129 1212 12 12 12 12 12 12 12 12 12 +NULL 158 1212 12 12 12 12 12 12 12 12 12 +NULL 119 1212 12 12 12 12 12 12 12 12 12 +NULL 496 1212 12 12 12 12 12 12 12 12 12 NULL 0 1212 12 12 12 12 12 12 12 12 12 +NULL 322 1212 12 12 12 12 12 12 12 12 12 +NULL 197 1212 12 12 12 12 12 12 12 12 12 +NULL 468 1212 12 12 12 12 12 12 12 12 12 +NULL 393 1212 12 12 12 12 12 12 12 12 12 +NULL 454 1212 12 12 12 12 12 12 12 12 12 +NULL 100 1212 12 12 12 12 12 12 12 12 12 +NULL 298 1212 12 12 12 12 12 12 12 12 12 +NULL 199 1212 12 12 12 12 12 12 12 12 12 +NULL 191 1212 12 12 12 12 12 12 12 12 12 +NULL 418 1212 12 12 12 12 12 12 12 12 12 +NULL 96 1212 12 12 12 12 12 12 12 12 12 +NULL 26 1212 12 12 12 12 12 12 12 12 12 +NULL 165 1212 12 12 12 12 12 12 12 12 12 +NULL 327 1212 12 12 12 12 12 12 12 12 12 +NULL 230 1212 12 12 12 12 12 12 12 12 12 +NULL 205 1212 12 12 12 12 12 12 12 12 12 +NULL 120 1212 12 12 12 12 12 12 12 12 12 +NULL 131 1212 12 12 12 12 12 12 12 12 12 +NULL 51 1212 12 12 12 12 12 12 12 12 12 +NULL 404 1212 12 12 12 12 12 12 12 12 12 +NULL 43 1212 12 12 12 12 12 12 12 12 12 +NULL 436 1212 12 12 12 12 12 12 12 12 12 +NULL 156 1212 12 12 12 12 12 12 12 12 12 +NULL 469 1212 12 12 12 12 12 12 12 12 12 +NULL 468 1212 12 12 12 12 12 12 12 12 12 +NULL 308 1212 12 12 12 12 12 12 12 12 12 +NULL 95 1212 12 12 12 12 12 12 12 12 12 +NULL 196 1212 12 12 12 12 12 12 12 12 12 +NULL 288 1212 12 12 12 12 12 12 12 12 12 +NULL 481 1212 12 12 12 12 12 12 12 12 12 +NULL 457 1212 12 12 12 12 12 12 12 12 12 +NULL 98 1212 12 12 12 12 12 12 12 12 12 +NULL 282 1212 12 12 12 12 12 12 12 12 12 +NULL 197 1212 12 12 12 12 12 12 12 12 12 +NULL 187 1212 12 12 12 12 12 12 12 12 12 +NULL 318 1212 12 12 12 12 12 12 12 12 12 +NULL 318 1212 12 12 12 12 12 12 12 12 12 +NULL 409 1212 12 12 12 12 12 12 12 12 12 +NULL 470 1212 12 12 12 12 12 12 12 12 12 +NULL 137 1212 12 12 12 12 12 12 12 12 12 +NULL 369 1212 12 12 12 12 12 12 12 12 12 +NULL 316 1212 12 12 12 12 12 12 12 12 12 +NULL 169 1212 12 12 12 12 12 12 12 12 12 +NULL 413 1212 12 12 12 12 12 12 12 12 12 +NULL 85 1212 12 12 12 12 12 12 12 12 12 +NULL 77 1212 12 12 12 12 12 12 12 12 12 NULL 0 1212 12 12 12 12 12 12 12 12 12 
+NULL 490 1212 12 12 12 12 12 12 12 12 12 +NULL 87 1212 12 12 12 12 12 12 12 12 12 +NULL 364 1212 12 12 12 12 12 12 12 12 12 +NULL 179 1212 12 12 12 12 12 12 12 12 12 +NULL 118 1212 12 12 12 12 12 12 12 12 12 +NULL 134 1212 12 12 12 12 12 12 12 12 12 +NULL 395 1212 12 12 12 12 12 12 12 12 12 +NULL 282 1212 12 12 12 12 12 12 12 12 12 +NULL 138 1212 12 12 12 12 12 12 12 12 12 +NULL 238 1212 12 12 12 12 12 12 12 12 12 +NULL 419 1212 12 12 12 12 12 12 12 12 12 +NULL 15 1212 12 12 12 12 12 12 12 12 12 +NULL 118 1212 12 12 12 12 12 12 12 12 12 +NULL 72 1212 12 12 12 12 12 12 12 12 12 +NULL 90 1212 12 12 12 12 12 12 12 12 12 +NULL 307 1212 12 12 12 12 12 12 12 12 12 +NULL 19 1212 12 12 12 12 12 12 12 12 12 +NULL 435 1212 12 12 12 12 12 12 12 12 12 +NULL 10 1212 12 12 12 12 12 12 12 12 12 +NULL 277 1212 12 12 12 12 12 12 12 12 12 +NULL 273 1212 12 12 12 12 12 12 12 12 12 +NULL 306 1212 12 12 12 12 12 12 12 12 12 +NULL 224 1212 12 12 12 12 12 12 12 12 12 +NULL 309 1212 12 12 12 12 12 12 12 12 12 +NULL 389 1212 12 12 12 12 12 12 12 12 12 +NULL 327 1212 12 12 12 12 12 12 12 12 12 +NULL 242 1212 12 12 12 12 12 12 12 12 12 +NULL 369 1212 12 12 12 12 12 12 12 12 12 +NULL 392 1212 12 12 12 12 12 12 12 12 12 +NULL 272 1212 12 12 12 12 12 12 12 12 12 +NULL 331 1212 12 12 12 12 12 12 12 12 12 +NULL 401 1212 12 12 12 12 12 12 12 12 12 +NULL 242 1212 12 12 12 12 12 12 12 12 12 +NULL 452 1212 12 12 12 12 12 12 12 12 12 +NULL 177 1212 12 12 12 12 12 12 12 12 12 +NULL 226 1212 12 12 12 12 12 12 12 12 12 +NULL 5 1212 12 12 12 12 12 12 12 12 12 +NULL 497 1212 12 12 12 12 12 12 12 12 12 +NULL 402 1212 12 12 12 12 12 12 12 12 12 +NULL 396 1212 12 12 12 12 12 12 12 12 12 +NULL 317 1212 12 12 12 12 12 12 12 12 12 +NULL 395 1212 12 12 12 12 12 12 12 12 12 +NULL 58 1212 12 12 12 12 12 12 12 12 12 +NULL 35 1212 12 12 12 12 12 12 12 12 12 +NULL 336 1212 12 12 12 12 12 12 12 12 12 +NULL 95 1212 12 12 12 12 12 12 12 12 12 +NULL 11 1212 12 12 12 12 12 12 12 12 12 +NULL 168 1212 12 12 12 12 12 12 12 12 12 +NULL 34 1212 12 12 12 12 12 12 12 12 12 +NULL 229 1212 12 12 12 12 12 12 12 12 12 +NULL 233 1212 12 12 12 12 12 12 12 12 12 +NULL 143 1212 12 12 12 12 12 12 12 12 12 +NULL 472 1212 12 12 12 12 12 12 12 12 12 +NULL 322 1212 12 12 12 12 12 12 12 12 12 +NULL 498 1212 12 12 12 12 12 12 12 12 12 +NULL 160 1212 12 12 12 12 12 12 12 12 12 +NULL 195 1212 12 12 12 12 12 12 12 12 12 +NULL 42 1212 12 12 12 12 12 12 12 12 12 +NULL 321 1212 12 12 12 12 12 12 12 12 12 +NULL 430 1212 12 12 12 12 12 12 12 12 12 +NULL 119 1212 12 12 12 12 12 12 12 12 12 +NULL 489 1212 12 12 12 12 12 12 12 12 12 +NULL 458 1212 12 12 12 12 12 12 12 12 12 +NULL 78 1212 12 12 12 12 12 12 12 12 12 +NULL 76 1212 12 12 12 12 12 12 12 12 12 +NULL 41 1212 12 12 12 12 12 12 12 12 12 +NULL 223 1212 12 12 12 12 12 12 12 12 12 +NULL 492 1212 12 12 12 12 12 12 12 12 12 +NULL 149 1212 12 12 12 12 12 12 12 12 12 +NULL 449 1212 12 12 12 12 12 12 12 12 12 +NULL 218 1212 12 12 12 12 12 12 12 12 12 +NULL 228 1212 12 12 12 12 12 12 12 12 12 +NULL 138 1212 12 12 12 12 12 12 12 12 12 +NULL 453 1212 12 12 12 12 12 12 12 12 12 +NULL 30 1212 12 12 12 12 12 12 12 12 12 +NULL 209 1212 12 12 12 12 12 12 12 12 12 +NULL 64 1212 12 12 12 12 12 12 12 12 12 +NULL 468 1212 12 12 12 12 12 12 12 12 12 +NULL 76 1212 12 12 12 12 12 12 12 12 12 +NULL 74 1212 12 12 12 12 12 12 12 12 12 +NULL 342 1212 12 12 12 12 12 12 12 12 12 +NULL 69 1212 12 12 12 12 12 12 12 12 12 +NULL 230 1212 12 12 12 12 12 12 12 12 12 +NULL 33 1212 12 12 12 12 12 12 12 12 12 +NULL 368 1212 12 12 12 12 12 12 12 12 12 +NULL 
103 1212 12 12 12 12 12 12 12 12 12 +NULL 296 1212 12 12 12 12 12 12 12 12 12 +NULL 113 1212 12 12 12 12 12 12 12 12 12 +NULL 216 1212 12 12 12 12 12 12 12 12 12 +NULL 367 1212 12 12 12 12 12 12 12 12 12 +NULL 344 1212 12 12 12 12 12 12 12 12 12 +NULL 167 1212 12 12 12 12 12 12 12 12 12 +NULL 274 1212 12 12 12 12 12 12 12 12 12 +NULL 219 1212 12 12 12 12 12 12 12 12 12 +NULL 239 1212 12 12 12 12 12 12 12 12 12 +NULL 485 1212 12 12 12 12 12 12 12 12 12 +NULL 116 1212 12 12 12 12 12 12 12 12 12 +NULL 223 1212 12 12 12 12 12 12 12 12 12 +NULL 256 1212 12 12 12 12 12 12 12 12 12 +NULL 263 1212 12 12 12 12 12 12 12 12 12 +NULL 70 1212 12 12 12 12 12 12 12 12 12 +NULL 487 1212 12 12 12 12 12 12 12 12 12 +NULL 480 1212 12 12 12 12 12 12 12 12 12 +NULL 401 1212 12 12 12 12 12 12 12 12 12 +NULL 288 1212 12 12 12 12 12 12 12 12 12 +NULL 191 1212 12 12 12 12 12 12 12 12 12 +NULL 5 1212 12 12 12 12 12 12 12 12 12 +NULL 244 1212 12 12 12 12 12 12 12 12 12 +NULL 438 1212 12 12 12 12 12 12 12 12 12 +NULL 128 1212 12 12 12 12 12 12 12 12 12 +NULL 467 1212 12 12 12 12 12 12 12 12 12 +NULL 432 1212 12 12 12 12 12 12 12 12 12 +NULL 202 1212 12 12 12 12 12 12 12 12 12 +NULL 316 1212 12 12 12 12 12 12 12 12 12 +NULL 229 1212 12 12 12 12 12 12 12 12 12 +NULL 469 1212 12 12 12 12 12 12 12 12 12 +NULL 463 1212 12 12 12 12 12 12 12 12 12 +NULL 280 1212 12 12 12 12 12 12 12 12 12 NULL 2 1212 12 12 12 12 12 12 12 12 12 -PREHOOK: query: drop table masking_test +NULL 35 1212 12 12 12 12 12 12 12 12 12 +NULL 283 1212 12 12 12 12 12 12 12 12 12 +NULL 331 1212 12 12 12 12 12 12 12 12 12 +NULL 235 1212 12 12 12 12 12 12 12 12 12 +NULL 80 1212 12 12 12 12 12 12 12 12 12 +NULL 44 1212 12 12 12 12 12 12 12 12 12 +NULL 193 1212 12 12 12 12 12 12 12 12 12 +NULL 321 1212 12 12 12 12 12 12 12 12 12 +NULL 335 1212 12 12 12 12 12 12 12 12 12 +NULL 104 1212 12 12 12 12 12 12 12 12 12 +NULL 466 1212 12 12 12 12 12 12 12 12 12 +NULL 366 1212 12 12 12 12 12 12 12 12 12 +NULL 175 1212 12 12 12 12 12 12 12 12 12 +NULL 403 1212 12 12 12 12 12 12 12 12 12 +NULL 483 1212 12 12 12 12 12 12 12 12 12 +NULL 53 1212 12 12 12 12 12 12 12 12 12 +NULL 105 1212 12 12 12 12 12 12 12 12 12 +NULL 257 1212 12 12 12 12 12 12 12 12 12 +NULL 406 1212 12 12 12 12 12 12 12 12 12 +NULL 409 1212 12 12 12 12 12 12 12 12 12 +NULL 190 1212 12 12 12 12 12 12 12 12 12 +NULL 406 1212 12 12 12 12 12 12 12 12 12 +NULL 401 1212 12 12 12 12 12 12 12 12 12 +NULL 114 1212 12 12 12 12 12 12 12 12 12 +NULL 258 1212 12 12 12 12 12 12 12 12 12 +NULL 90 1212 12 12 12 12 12 12 12 12 12 +NULL 203 1212 12 12 12 12 12 12 12 12 12 +NULL 262 1212 12 12 12 12 12 12 12 12 12 +NULL 348 1212 12 12 12 12 12 12 12 12 12 +NULL 424 1212 12 12 12 12 12 12 12 12 12 +NULL 12 1212 12 12 12 12 12 12 12 12 12 +NULL 396 1212 12 12 12 12 12 12 12 12 12 +NULL 201 1212 12 12 12 12 12 12 12 12 12 +NULL 217 1212 12 12 12 12 12 12 12 12 12 +NULL 164 1212 12 12 12 12 12 12 12 12 12 +NULL 431 1212 12 12 12 12 12 12 12 12 12 +NULL 454 1212 12 12 12 12 12 12 12 12 12 +NULL 478 1212 12 12 12 12 12 12 12 12 12 +NULL 298 1212 12 12 12 12 12 12 12 12 12 +NULL 125 1212 12 12 12 12 12 12 12 12 12 +NULL 431 1212 12 12 12 12 12 12 12 12 12 +NULL 164 1212 12 12 12 12 12 12 12 12 12 +NULL 424 1212 12 12 12 12 12 12 12 12 12 +NULL 187 1212 12 12 12 12 12 12 12 12 12 +NULL 382 1212 12 12 12 12 12 12 12 12 12 +NULL 5 1212 12 12 12 12 12 12 12 12 12 +NULL 70 1212 12 12 12 12 12 12 12 12 12 +NULL 397 1212 12 12 12 12 12 12 12 12 12 +NULL 480 1212 12 12 12 12 12 12 12 12 12 +NULL 291 1212 12 12 12 12 12 12 12 12 12 +NULL 
24 1212 12 12 12 12 12 12 12 12 12 +NULL 351 1212 12 12 12 12 12 12 12 12 12 +NULL 255 1212 12 12 12 12 12 12 12 12 12 +NULL 104 1212 12 12 12 12 12 12 12 12 12 +NULL 70 1212 12 12 12 12 12 12 12 12 12 +NULL 163 1212 12 12 12 12 12 12 12 12 12 +NULL 438 1212 12 12 12 12 12 12 12 12 12 +NULL 119 1212 12 12 12 12 12 12 12 12 12 +NULL 414 1212 12 12 12 12 12 12 12 12 12 +NULL 200 1212 12 12 12 12 12 12 12 12 12 +NULL 491 1212 12 12 12 12 12 12 12 12 12 +NULL 237 1212 12 12 12 12 12 12 12 12 12 +NULL 439 1212 12 12 12 12 12 12 12 12 12 +NULL 360 1212 12 12 12 12 12 12 12 12 12 +NULL 248 1212 12 12 12 12 12 12 12 12 12 +NULL 479 1212 12 12 12 12 12 12 12 12 12 +NULL 305 1212 12 12 12 12 12 12 12 12 12 +NULL 417 1212 12 12 12 12 12 12 12 12 12 +NULL 199 1212 12 12 12 12 12 12 12 12 12 +NULL 444 1212 12 12 12 12 12 12 12 12 12 +NULL 120 1212 12 12 12 12 12 12 12 12 12 +NULL 429 1212 12 12 12 12 12 12 12 12 12 +NULL 169 1212 12 12 12 12 12 12 12 12 12 +NULL 443 1212 12 12 12 12 12 12 12 12 12 +NULL 323 1212 12 12 12 12 12 12 12 12 12 +NULL 325 1212 12 12 12 12 12 12 12 12 12 +NULL 277 1212 12 12 12 12 12 12 12 12 12 +NULL 230 1212 12 12 12 12 12 12 12 12 12 +NULL 478 1212 12 12 12 12 12 12 12 12 12 +NULL 178 1212 12 12 12 12 12 12 12 12 12 +NULL 468 1212 12 12 12 12 12 12 12 12 12 +NULL 310 1212 12 12 12 12 12 12 12 12 12 +NULL 317 1212 12 12 12 12 12 12 12 12 12 +NULL 333 1212 12 12 12 12 12 12 12 12 12 +NULL 493 1212 12 12 12 12 12 12 12 12 12 +NULL 460 1212 12 12 12 12 12 12 12 12 12 +NULL 207 1212 12 12 12 12 12 12 12 12 12 +NULL 249 1212 12 12 12 12 12 12 12 12 12 +NULL 265 1212 12 12 12 12 12 12 12 12 12 +NULL 480 1212 12 12 12 12 12 12 12 12 12 +NULL 83 1212 12 12 12 12 12 12 12 12 12 +NULL 136 1212 12 12 12 12 12 12 12 12 12 +NULL 353 1212 12 12 12 12 12 12 12 12 12 +NULL 172 1212 12 12 12 12 12 12 12 12 12 +NULL 214 1212 12 12 12 12 12 12 12 12 12 +NULL 462 1212 12 12 12 12 12 12 12 12 12 +NULL 233 1212 12 12 12 12 12 12 12 12 12 +NULL 406 1212 12 12 12 12 12 12 12 12 12 +NULL 133 1212 12 12 12 12 12 12 12 12 12 +NULL 175 1212 12 12 12 12 12 12 12 12 12 +NULL 189 1212 12 12 12 12 12 12 12 12 12 +NULL 454 1212 12 12 12 12 12 12 12 12 12 +NULL 375 1212 12 12 12 12 12 12 12 12 12 +NULL 401 1212 12 12 12 12 12 12 12 12 12 +NULL 421 1212 12 12 12 12 12 12 12 12 12 +NULL 407 1212 12 12 12 12 12 12 12 12 12 +NULL 384 1212 12 12 12 12 12 12 12 12 12 +NULL 256 1212 12 12 12 12 12 12 12 12 12 +NULL 26 1212 12 12 12 12 12 12 12 12 12 +NULL 134 1212 12 12 12 12 12 12 12 12 12 +NULL 67 1212 12 12 12 12 12 12 12 12 12 +NULL 384 1212 12 12 12 12 12 12 12 12 12 +NULL 379 1212 12 12 12 12 12 12 12 12 12 +NULL 18 1212 12 12 12 12 12 12 12 12 12 +NULL 462 1212 12 12 12 12 12 12 12 12 12 +NULL 492 1212 12 12 12 12 12 12 12 12 12 +NULL 100 1212 12 12 12 12 12 12 12 12 12 +NULL 298 1212 12 12 12 12 12 12 12 12 12 +NULL 9 1212 12 12 12 12 12 12 12 12 12 +NULL 341 1212 12 12 12 12 12 12 12 12 12 +NULL 498 1212 12 12 12 12 12 12 12 12 12 +NULL 146 1212 12 12 12 12 12 12 12 12 12 +NULL 458 1212 12 12 12 12 12 12 12 12 12 +NULL 362 1212 12 12 12 12 12 12 12 12 12 +NULL 186 1212 12 12 12 12 12 12 12 12 12 +NULL 285 1212 12 12 12 12 12 12 12 12 12 +NULL 348 1212 12 12 12 12 12 12 12 12 12 +NULL 167 1212 12 12 12 12 12 12 12 12 12 +NULL 18 1212 12 12 12 12 12 12 12 12 12 +NULL 273 1212 12 12 12 12 12 12 12 12 12 +NULL 183 1212 12 12 12 12 12 12 12 12 12 +NULL 281 1212 12 12 12 12 12 12 12 12 12 +NULL 344 1212 12 12 12 12 12 12 12 12 12 +NULL 97 1212 12 12 12 12 12 12 12 12 12 +NULL 469 1212 12 12 12 12 12 12 12 12 12 
+NULL 315 1212 12 12 12 12 12 12 12 12 12
+NULL 84 1212 12 12 12 12 12 12 12 12 12
+NULL 28 1212 12 12 12 12 12 12 12 12 12
+NULL 37 1212 12 12 12 12 12 12 12 12 12
+NULL 448 1212 12 12 12 12 12 12 12 12 12
+NULL 152 1212 12 12 12 12 12 12 12 12 12
+NULL 348 1212 12 12 12 12 12 12 12 12 12
+NULL 307 1212 12 12 12 12 12 12 12 12 12
+NULL 194 1212 12 12 12 12 12 12 12 12 12
+NULL 414 1212 12 12 12 12 12 12 12 12 12
+NULL 477 1212 12 12 12 12 12 12 12 12 12
+NULL 222 1212 12 12 12 12 12 12 12 12 12
+NULL 126 1212 12 12 12 12 12 12 12 12 12
+NULL 90 1212 12 12 12 12 12 12 12 12 12
+NULL 169 1212 12 12 12 12 12 12 12 12 12
+NULL 403 1212 12 12 12 12 12 12 12 12 12
+NULL 400 1212 12 12 12 12 12 12 12 12 12
+NULL 200 1212 12 12 12 12 12 12 12 12 12
+NULL 97 1212 12 12 12 12 12 12 12 12 12
+PREHOOK: query: drop table masking_test_n2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@masking_test
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: drop table masking_test
+PREHOOK: Input: default@masking_test_n2
+PREHOOK: Output: default@masking_test_n2
+POSTHOOK: query: drop table masking_test_n2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@masking_test
-POSTHOOK: Output: default@masking_test
-PREHOOK: query: create table masking_test as select cast(key as int) as key, '12'
+POSTHOOK: Input: default@masking_test_n2
+POSTHOOK: Output: default@masking_test_n2
+PREHOOK: query: create table masking_test_n2 as select cast(key as int) as key, '12'
 '12', '12', '12', '12', '12', INPUT__FILE__NAME, '12', '12', '12', '12', '12' from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: create table masking_test as select cast(key as int) as key, '12'
+PREHOOK: Output: default@masking_test_n2
+POSTHOOK: query: create table masking_test_n2 as select cast(key as int) as key, '12'
 '12', '12', '12', '12', '12', INPUT__FILE__NAME, '12', '12', '12', '12', '12' from src
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@masking_test
-POSTHOOK: Lineage: masking_test._c1 SIMPLE []
-POSTHOOK: Lineage: masking_test._c10 SIMPLE []
-POSTHOOK: Lineage: masking_test._c11 SIMPLE []
-POSTHOOK: Lineage: masking_test._c2 SIMPLE []
-POSTHOOK: Lineage: masking_test._c3 SIMPLE []
-POSTHOOK: Lineage: masking_test._c4 SIMPLE []
-POSTHOOK: Lineage: masking_test._c5 SIMPLE []
-POSTHOOK: Lineage: masking_test._c7 SIMPLE []
-POSTHOOK: Lineage: masking_test._c8 SIMPLE []
-POSTHOOK: Lineage: masking_test._c9 SIMPLE []
-POSTHOOK: Lineage: masking_test.input__file__name SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: select INPUT__FILE__NAME, *, ROW__ID from masking_test
+POSTHOOK: Output: default@masking_test_n2
+POSTHOOK: Lineage: masking_test_n2._c1 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c10 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c11 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c2 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c3 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c4 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c5 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c7 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c8 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2._c9 SIMPLE []
+POSTHOOK: Lineage: masking_test_n2.input__file__name SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
+POSTHOOK: Lineage: masking_test_n2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@masking_test
+PREHOOK: Input: default@masking_test_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select INPUT__FILE__NAME, *, ROW__ID from masking_test
+POSTHOOK: query: select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@masking_test
+POSTHOOK: Input: default@masking_test_n2
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/masking_acid_no_masking.q.out b/ql/src/test/results/clientpositive/masking_acid_no_masking.q.out
index 8116fb4bc8..1826c20d4d 100644
--- a/ql/src/test/results/clientpositive/masking_acid_no_masking.q.out
+++ b/ql/src/test/results/clientpositive/masking_acid_no_masking.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: create table nonacid (key int, value string) stored as orc
+PREHOOK: query: create table nonacid_n0 (key int, value string) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@nonacid
-POSTHOOK: query: create table nonacid (key int, value string) stored as orc
+PREHOOK: Output: default@nonacid_n0
+POSTHOOK: query: create table nonacid_n0 (key int, value string) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nonacid
+POSTHOOK: Output: default@nonacid_n0
 PREHOOK: query: create table masking_acid_no_masking (key int, value string)
 clustered by (value) into 2 buckets stored as orc
 tblproperties ("transactional"="true")
@@ -34,28 +34,28 @@ POSTHOOK: query: delete from masking_acid_no_masking where value='ddd'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@masking_acid_no_masking
 POSTHOOK: Output: default@masking_acid_no_masking
-PREHOOK: query: MERGE INTO masking_acid_no_masking as t using nonacid as s ON t.key = s.key
+PREHOOK: query: MERGE INTO masking_acid_no_masking as t using nonacid_n0 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 5 THEN DELETE
 WHEN MATCHED AND s.key < 3 THEN UPDATE set key = 1
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.value)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@masking_acid_no_masking
-PREHOOK: Input: default@nonacid
+PREHOOK: Input: default@nonacid_n0
 PREHOOK: Output: default@masking_acid_no_masking
 PREHOOK: Output: default@masking_acid_no_masking
 PREHOOK: Output: default@masking_acid_no_masking
 PREHOOK: Output: default@merge_tmp_table
-POSTHOOK: query: MERGE INTO masking_acid_no_masking as t using nonacid as s ON t.key = s.key
+POSTHOOK: query: MERGE INTO masking_acid_no_masking as t using nonacid_n0 as s ON t.key = s.key
 WHEN MATCHED AND s.key < 5 THEN DELETE
 WHEN MATCHED AND s.key < 3 THEN UPDATE set key = 1
 WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.value)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@masking_acid_no_masking
-POSTHOOK: Input: default@nonacid
+POSTHOOK: Input: default@nonacid_n0
 POSTHOOK: Output: default@masking_acid_no_masking
 POSTHOOK: Output: default@masking_acid_no_masking
 POSTHOOK: Output: default@masking_acid_no_masking
 POSTHOOK: Output: default@merge_tmp_table
-POSTHOOK: Lineage: masking_acid_no_masking.key SIMPLE [(nonacid)s.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: masking_acid_no_masking.value SIMPLE [(nonacid)s.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: masking_acid_no_masking.key SIMPLE [(nonacid_n0)s.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: masking_acid_no_masking.value SIMPLE [(nonacid_n0)s.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(masking_acid_no_masking)t.FieldSchema(name:ROW__ID, type:struct, comment:), ]
diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out
index a255485dd2..0967fde1a0 100644
--- a/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out
+++ b/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out
@@ -1,69 +1,547 @@
-PREHOOK: query: create table masking_test as select cast(key as int) as key, value from src
+PREHOOK: query: create table masking_test_n10 as select cast(key as int) as key, value from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: create table masking_test as select cast(key as int) as key, value from src
+PREHOOK: Output: default@masking_test_n10
+POSTHOOK: query: create table masking_test_n10 as select cast(key as int) as key, value from src
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@masking_test
-POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain select * from masking_test
+POSTHOOK: Output: default@masking_test_n10
+POSTHOOK: Lineage: masking_test_n10.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: masking_test_n10.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select * from masking_test_n10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from masking_test
+POSTHOOK: query: explain select * from masking_test_n10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: masking_test
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (((key % 2) = 0) and (key < 10)) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: int), reverse(value) (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: masking_test_n10
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: int), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE
Column stats: NONE + ListSink -PREHOOK: query: select * from masking_test +PREHOOK: query: select * from masking_test_n10 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test +POSTHOOK: query: select * from masking_test_n10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -0 0_lav -4 4_lav -8 8_lav -0 0_lav -0 0_lav -2 2_lav -PREHOOK: query: explain select * from masking_test where key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 
val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from masking_test_n10 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test where key > 0 +POSTHOOK: query: 
explain select * from masking_test_n10 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -74,18 +552,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n10 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), reverse(value) (type: string) + expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -98,20 +576,514 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test where key > 0 +PREHOOK: query: select * from masking_test_n10 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test where key > 0 +POSTHOOK: query: select * from masking_test_n10 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -4 4_lav -8 8_lav -2 2_lav -PREHOOK: query: explain select key from masking_test where key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 
val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 
val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select key from masking_test_n10 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select key from masking_test where key > 0 +POSTHOOK: query: explain select key from masking_test_n10 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -122,18 +1094,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n10 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) outputColumnNames: _col0 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -146,20 +1118,514 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select key from masking_test where key > 0 +PREHOOK: query: select key from masking_test_n10 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -POSTHOOK: query: select key from masking_test where key > 0 +POSTHOOK: query: select key from masking_test_n10 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### +238 +86 +311 +27 +165 +409 +255 +278 +98 +484 +265 +193 +401 +150 +273 +224 +369 +66 +128 +213 +146 +406 +429 +374 +152 +469 +145 +495 +37 +327 +281 +277 +209 +15 +82 +403 +166 +417 +430 +252 +292 +219 +287 +153 +193 +338 +446 +459 +394 +237 +482 +174 +413 +494 +207 +199 +466 +208 +174 +399 +396 +247 +417 +489 +162 +377 +397 +309 +365 +266 +439 +342 +367 +325 +167 +195 +475 +17 +113 +155 +203 +339 
+455 +128 +311 +316 +57 +302 +205 +149 +438 +345 +129 +170 +20 +489 +157 +378 +221 +92 +111 +47 +72 4 +280 +35 +427 +277 +208 +356 +399 +169 +382 +498 +125 +386 +437 +469 +192 +286 +187 +176 +54 +459 +51 +138 +103 +239 +213 +216 +430 +278 +176 +289 +221 +65 +318 +332 +311 +275 +137 +241 +83 +333 +180 +284 +12 +230 +181 +67 +260 +404 +384 +489 +353 +373 +272 +138 +217 +84 +348 +466 +58 8 +411 +230 +208 +348 +24 +463 +431 +179 +172 +42 +129 +158 +119 +496 +322 +197 +468 +393 +454 +100 +298 +199 +191 +418 +96 +26 +165 +327 +230 +205 +120 +131 +51 +404 +43 +436 +156 +469 +468 +308 +95 +196 +288 +481 +457 +98 +282 +197 +187 +318 +318 +409 +470 +137 +369 +316 +169 +413 +85 +77 +490 +87 +364 +179 +118 +134 +395 +282 +138 +238 +419 +15 +118 +72 +90 +307 +19 +435 +10 +277 +273 +306 +224 +309 +389 +327 +242 +369 +392 +272 +331 +401 +242 +452 +177 +226 +5 +497 +402 +396 +317 +395 +58 +35 +336 +95 +11 +168 +34 +229 +233 +143 +472 +322 +498 +160 +195 +42 +321 +430 +119 +489 +458 +78 +76 +41 +223 +492 +149 +449 +218 +228 +138 +453 +30 +209 +64 +468 +76 +74 +342 +69 +230 +33 +368 +103 +296 +113 +216 +367 +344 +167 +274 +219 +239 +485 +116 +223 +256 +263 +70 +487 +480 +401 +288 +191 +5 +244 +438 +128 +467 +432 +202 +316 +229 +469 +463 +280 2 -PREHOOK: query: explain select value from masking_test where key > 0 +35 +283 +331 +235 +80 +44 +193 +321 +335 +104 +466 +366 +175 +403 +483 +53 +105 +257 +406 +409 +190 +406 +401 +114 +258 +90 +203 +262 +348 +424 +12 +396 +201 +217 +164 +431 +454 +478 +298 +125 +431 +164 +424 +187 +382 +5 +70 +397 +480 +291 +24 +351 +255 +104 +70 +163 +438 +119 +414 +200 +491 +237 +439 +360 +248 +479 +305 +417 +199 +444 +120 +429 +169 +443 +323 +325 +277 +230 +478 +178 +468 +310 +317 +333 +493 +460 +207 +249 +265 +480 +83 +136 +353 +172 +214 +462 +233 +406 +133 +175 +189 +454 +375 +401 +421 +407 +384 +256 +26 +134 +67 +384 +379 +18 +462 +492 +100 +298 +9 +341 +498 +146 +458 +362 +186 +285 +348 +167 +18 +273 +183 +281 +344 +97 +469 +315 +84 +28 +37 +448 +152 +348 +307 +194 +414 +477 +222 +126 +90 +169 +403 +400 +200 +97 +PREHOOK: query: explain select value from masking_test_n10 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select value from masking_test where key > 0 +POSTHOOK: query: explain select value from masking_test_n10 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -170,18 +1636,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n10 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: reverse(value) (type: string) + expressions: value (type: string) outputColumnNames: _col0 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -194,20 +1660,514 @@ STAGE PLANS: 
Processor Tree: ListSink -PREHOOK: query: select value from masking_test where key > 0 +PREHOOK: query: select value from masking_test_n10 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -POSTHOOK: query: select value from masking_test where key > 0 +POSTHOOK: query: select value from masking_test_n10 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -4_lav -8_lav -2_lav -PREHOOK: query: explain select * from masking_test join srcpart on (masking_test.key = srcpart.key) +val_238 +val_86 +val_311 +val_27 +val_165 +val_409 +val_255 +val_278 +val_98 +val_484 +val_265 +val_193 +val_401 +val_150 +val_273 +val_224 +val_369 +val_66 +val_128 +val_213 +val_146 +val_406 +val_429 +val_374 +val_152 +val_469 +val_145 +val_495 +val_37 +val_327 +val_281 +val_277 +val_209 +val_15 +val_82 +val_403 +val_166 +val_417 +val_430 +val_252 +val_292 +val_219 +val_287 +val_153 +val_193 +val_338 +val_446 +val_459 +val_394 +val_237 +val_482 +val_174 +val_413 +val_494 +val_207 +val_199 +val_466 +val_208 +val_174 +val_399 +val_396 +val_247 +val_417 +val_489 +val_162 +val_377 +val_397 +val_309 +val_365 +val_266 +val_439 +val_342 +val_367 +val_325 +val_167 +val_195 +val_475 +val_17 +val_113 +val_155 +val_203 +val_339 +val_455 +val_128 +val_311 +val_316 +val_57 +val_302 +val_205 +val_149 +val_438 +val_345 +val_129 +val_170 +val_20 +val_489 +val_157 +val_378 +val_221 +val_92 +val_111 +val_47 +val_72 +val_4 +val_280 +val_35 +val_427 +val_277 +val_208 +val_356 +val_399 +val_169 +val_382 +val_498 +val_125 +val_386 +val_437 +val_469 +val_192 +val_286 +val_187 +val_176 +val_54 +val_459 +val_51 +val_138 +val_103 +val_239 +val_213 +val_216 +val_430 +val_278 +val_176 +val_289 +val_221 +val_65 +val_318 +val_332 +val_311 +val_275 +val_137 +val_241 +val_83 +val_333 +val_180 +val_284 +val_12 +val_230 +val_181 +val_67 +val_260 +val_404 +val_384 +val_489 +val_353 +val_373 +val_272 +val_138 +val_217 +val_84 +val_348 +val_466 +val_58 +val_8 +val_411 +val_230 +val_208 +val_348 +val_24 +val_463 +val_431 +val_179 +val_172 +val_42 +val_129 +val_158 +val_119 +val_496 +val_322 +val_197 +val_468 +val_393 +val_454 +val_100 +val_298 +val_199 +val_191 +val_418 +val_96 +val_26 +val_165 +val_327 +val_230 +val_205 +val_120 +val_131 +val_51 +val_404 +val_43 +val_436 +val_156 +val_469 +val_468 +val_308 +val_95 +val_196 +val_288 +val_481 +val_457 +val_98 +val_282 +val_197 +val_187 +val_318 +val_318 +val_409 +val_470 +val_137 +val_369 +val_316 +val_169 +val_413 +val_85 +val_77 +val_490 +val_87 +val_364 +val_179 +val_118 +val_134 +val_395 +val_282 +val_138 +val_238 +val_419 +val_15 +val_118 +val_72 +val_90 +val_307 +val_19 +val_435 +val_10 +val_277 +val_273 +val_306 +val_224 +val_309 +val_389 +val_327 +val_242 +val_369 +val_392 +val_272 +val_331 +val_401 +val_242 +val_452 +val_177 +val_226 +val_5 +val_497 +val_402 +val_396 +val_317 +val_395 +val_58 +val_35 +val_336 +val_95 +val_11 +val_168 +val_34 +val_229 +val_233 +val_143 +val_472 +val_322 +val_498 +val_160 +val_195 +val_42 +val_321 +val_430 +val_119 +val_489 +val_458 +val_78 +val_76 +val_41 +val_223 +val_492 +val_149 +val_449 +val_218 +val_228 +val_138 +val_453 +val_30 +val_209 +val_64 +val_468 +val_76 +val_74 +val_342 +val_69 +val_230 +val_33 +val_368 +val_103 +val_296 +val_113 +val_216 +val_367 +val_344 +val_167 +val_274 +val_219 +val_239 +val_485 +val_116 +val_223 +val_256 +val_263 +val_70 
+val_487 +val_480 +val_401 +val_288 +val_191 +val_5 +val_244 +val_438 +val_128 +val_467 +val_432 +val_202 +val_316 +val_229 +val_469 +val_463 +val_280 +val_2 +val_35 +val_283 +val_331 +val_235 +val_80 +val_44 +val_193 +val_321 +val_335 +val_104 +val_466 +val_366 +val_175 +val_403 +val_483 +val_53 +val_105 +val_257 +val_406 +val_409 +val_190 +val_406 +val_401 +val_114 +val_258 +val_90 +val_203 +val_262 +val_348 +val_424 +val_12 +val_396 +val_201 +val_217 +val_164 +val_431 +val_454 +val_478 +val_298 +val_125 +val_431 +val_164 +val_424 +val_187 +val_382 +val_5 +val_70 +val_397 +val_480 +val_291 +val_24 +val_351 +val_255 +val_104 +val_70 +val_163 +val_438 +val_119 +val_414 +val_200 +val_491 +val_237 +val_439 +val_360 +val_248 +val_479 +val_305 +val_417 +val_199 +val_444 +val_120 +val_429 +val_169 +val_443 +val_323 +val_325 +val_277 +val_230 +val_478 +val_178 +val_468 +val_310 +val_317 +val_333 +val_493 +val_460 +val_207 +val_249 +val_265 +val_480 +val_83 +val_136 +val_353 +val_172 +val_214 +val_462 +val_233 +val_406 +val_133 +val_175 +val_189 +val_454 +val_375 +val_401 +val_421 +val_407 +val_384 +val_256 +val_26 +val_134 +val_67 +val_384 +val_379 +val_18 +val_462 +val_492 +val_100 +val_298 +val_9 +val_341 +val_498 +val_146 +val_458 +val_362 +val_186 +val_285 +val_348 +val_167 +val_18 +val_273 +val_183 +val_281 +val_344 +val_97 +val_469 +val_315 +val_84 +val_28 +val_37 +val_448 +val_152 +val_348 +val_307 +val_194 +val_414 +val_477 +val_222 +val_126 +val_90 +val_169 +val_403 +val_400 +val_200 +val_97 +PREHOOK: query: explain select * from masking_test_n10 join srcpart on (masking_test_n10.key = srcpart.key) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test join srcpart on (masking_test.key = srcpart.key) +POSTHOOK: query: explain select * from masking_test_n10 join srcpart on (masking_test_n10.key = srcpart.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -218,21 +2178,17 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n10 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and UDFToDouble(key) is not null) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string) + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: key (type: int), value (type: string) TableScan alias: srcpart Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE @@ -250,7 +2206,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 UDFToDouble(_col0) (type: double) + 0 UDFToDouble(key) (type: double) 1 UDFToDouble(key) 
(type: double) outputColumnNames: _col0, _col1, _col5, _col6, _col7, _col8 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE @@ -272,75 +2228,4139 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test join srcpart on (masking_test.key = srcpart.key) +PREHOOK: query: select * from masking_test_n10 join srcpart on (masking_test_n10.key = srcpart.key) PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n10 PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test join srcpart on (masking_test.key = srcpart.key) +POSTHOOK: query: select * from masking_test_n10 join srcpart on (masking_test_n10.key = srcpart.key) POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n10 POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-08 11 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-08 12 -0 0_lav 0 val_0 2008-04-09 11 -0 0_lav 0 val_0 2008-04-09 12 -2 2_lav 2 val_2 2008-04-09 11 -2 2_lav 2 val_2 2008-04-08 11 -2 2_lav 2 val_2 2008-04-09 12 -2 2_lav 2 val_2 2008-04-08 12 -4 4_lav 4 val_4 2008-04-08 12 -4 4_lav 4 val_4 2008-04-09 12 -4 4_lav 4 val_4 2008-04-08 11 -4 4_lav 4 val_4 2008-04-09 11 -8 8_lav 8 val_8 2008-04-08 11 -8 8_lav 8 val_8 2008-04-09 11 -8 8_lav 8 val_8 2008-04-08 12 -8 8_lav 8 val_8 2008-04-09 12 -PREHOOK: query: explain select * from default.masking_test where key > 0 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 
11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-08 11 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-08 12 +0 val_0 0 val_0 2008-04-09 11 +0 val_0 0 val_0 2008-04-09 12 +2 val_2 2 val_2 2008-04-09 11 +2 val_2 2 val_2 2008-04-08 11 +2 val_2 2 val_2 2008-04-09 12 +2 val_2 2 val_2 2008-04-08 12 +4 val_4 4 val_4 2008-04-08 12 +4 val_4 4 val_4 2008-04-09 12 +4 val_4 4 val_4 2008-04-08 11 +4 val_4 4 val_4 2008-04-09 11 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-08 11 +5 val_5 5 val_5 2008-04-09 12 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-09 11 +5 val_5 5 val_5 2008-04-08 12 +5 val_5 5 val_5 2008-04-09 12 +8 val_8 8 val_8 2008-04-08 11 +8 val_8 8 val_8 2008-04-09 11 +8 val_8 8 val_8 2008-04-08 12 +8 val_8 8 val_8 2008-04-09 12 +9 val_9 9 val_9 2008-04-08 12 +9 val_9 9 val_9 2008-04-09 12 +9 val_9 9 val_9 2008-04-09 11 +9 val_9 9 val_9 2008-04-08 11 +10 val_10 10 val_10 2008-04-09 11 +10 val_10 10 val_10 2008-04-09 12 +10 val_10 10 val_10 2008-04-08 12 +10 val_10 10 val_10 2008-04-08 11 +11 val_11 11 val_11 2008-04-09 12 +11 val_11 11 val_11 2008-04-09 11 +11 val_11 11 val_11 2008-04-08 12 +11 val_11 11 val_11 2008-04-08 11 +12 val_12 12 val_12 2008-04-09 12 +12 val_12 12 val_12 2008-04-08 11 +12 val_12 12 val_12 2008-04-09 11 +12 val_12 12 val_12 2008-04-08 12 +12 val_12 12 val_12 2008-04-08 11 +12 val_12 12 val_12 2008-04-08 12 +12 val_12 12 val_12 2008-04-09 11 +12 val_12 12 val_12 2008-04-09 12 +12 val_12 12 val_12 2008-04-09 12 +12 val_12 12 val_12 2008-04-08 11 +12 val_12 12 val_12 2008-04-09 11 +12 val_12 12 val_12 2008-04-08 12 +12 val_12 12 val_12 2008-04-08 11 +12 val_12 12 val_12 2008-04-08 12 +12 val_12 12 val_12 2008-04-09 11 +12 val_12 12 val_12 2008-04-09 12 +15 val_15 15 val_15 2008-04-08 12 +15 val_15 15 val_15 2008-04-08 11 +15 val_15 15 val_15 2008-04-09 12 +15 val_15 15 val_15 2008-04-09 12 +15 val_15 15 val_15 2008-04-09 11 +15 val_15 15 val_15 2008-04-08 11 +15 val_15 15 val_15 2008-04-09 11 +15 val_15 15 val_15 2008-04-08 12 +15 val_15 15 val_15 2008-04-08 12 +15 val_15 15 val_15 2008-04-08 11 +15 val_15 15 val_15 2008-04-09 12 +15 val_15 15 val_15 2008-04-09 12 +15 val_15 15 val_15 2008-04-09 11 +15 val_15 15 val_15 2008-04-08 11 +15 val_15 15 val_15 2008-04-09 11 +15 val_15 15 val_15 2008-04-08 12 +17 val_17 17 val_17 2008-04-09 11 +17 
val_17 17 val_17 2008-04-09 12 +17 val_17 17 val_17 2008-04-08 12 +17 val_17 17 val_17 2008-04-08 11 +18 val_18 18 val_18 2008-04-08 12 +18 val_18 18 val_18 2008-04-08 12 +18 val_18 18 val_18 2008-04-08 11 +18 val_18 18 val_18 2008-04-09 12 +18 val_18 18 val_18 2008-04-09 12 +18 val_18 18 val_18 2008-04-09 11 +18 val_18 18 val_18 2008-04-09 11 +18 val_18 18 val_18 2008-04-08 11 +18 val_18 18 val_18 2008-04-08 12 +18 val_18 18 val_18 2008-04-08 12 +18 val_18 18 val_18 2008-04-08 11 +18 val_18 18 val_18 2008-04-09 12 +18 val_18 18 val_18 2008-04-09 12 +18 val_18 18 val_18 2008-04-09 11 +18 val_18 18 val_18 2008-04-09 11 +18 val_18 18 val_18 2008-04-08 11 +19 val_19 19 val_19 2008-04-09 11 +19 val_19 19 val_19 2008-04-09 12 +19 val_19 19 val_19 2008-04-08 11 +19 val_19 19 val_19 2008-04-08 12 +20 val_20 20 val_20 2008-04-08 11 +20 val_20 20 val_20 2008-04-09 11 +20 val_20 20 val_20 2008-04-08 12 +20 val_20 20 val_20 2008-04-09 12 +24 val_24 24 val_24 2008-04-09 12 +24 val_24 24 val_24 2008-04-08 11 +24 val_24 24 val_24 2008-04-08 11 +24 val_24 24 val_24 2008-04-09 12 +24 val_24 24 val_24 2008-04-08 12 +24 val_24 24 val_24 2008-04-08 12 +24 val_24 24 val_24 2008-04-09 11 +24 val_24 24 val_24 2008-04-09 11 +24 val_24 24 val_24 2008-04-09 12 +24 val_24 24 val_24 2008-04-08 11 +24 val_24 24 val_24 2008-04-08 11 +24 val_24 24 val_24 2008-04-09 12 +24 val_24 24 val_24 2008-04-08 12 +24 val_24 24 val_24 2008-04-08 12 +24 val_24 24 val_24 2008-04-09 11 +24 val_24 24 val_24 2008-04-09 11 +26 val_26 26 val_26 2008-04-09 12 +26 val_26 26 val_26 2008-04-08 11 +26 val_26 26 val_26 2008-04-09 11 +26 val_26 26 val_26 2008-04-09 12 +26 val_26 26 val_26 2008-04-09 11 +26 val_26 26 val_26 2008-04-08 12 +26 val_26 26 val_26 2008-04-08 12 +26 val_26 26 val_26 2008-04-08 11 +26 val_26 26 val_26 2008-04-09 12 +26 val_26 26 val_26 2008-04-08 11 +26 val_26 26 val_26 2008-04-09 11 +26 val_26 26 val_26 2008-04-09 12 +26 val_26 26 val_26 2008-04-09 11 +26 val_26 26 val_26 2008-04-08 12 +26 val_26 26 val_26 2008-04-08 12 +26 val_26 26 val_26 2008-04-08 11 +27 val_27 27 val_27 2008-04-09 12 +27 val_27 27 val_27 2008-04-09 11 +27 val_27 27 val_27 2008-04-08 12 +27 val_27 27 val_27 2008-04-08 11 +28 val_28 28 val_28 2008-04-08 12 +28 val_28 28 val_28 2008-04-09 11 +28 val_28 28 val_28 2008-04-09 12 +28 val_28 28 val_28 2008-04-08 11 +30 val_30 30 val_30 2008-04-09 12 +30 val_30 30 val_30 2008-04-09 11 +30 val_30 30 val_30 2008-04-08 11 +30 val_30 30 val_30 2008-04-08 12 +33 val_33 33 val_33 2008-04-08 11 +33 val_33 33 val_33 2008-04-08 12 +33 val_33 33 val_33 2008-04-09 11 +33 val_33 33 val_33 2008-04-09 12 +34 val_34 34 val_34 2008-04-09 11 +34 val_34 34 val_34 2008-04-09 12 +34 val_34 34 val_34 2008-04-08 11 +34 val_34 34 val_34 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-08 12 +35 val_35 35 val_35 2008-04-09 11 +35 val_35 35 val_35 2008-04-09 12 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 2008-04-08 11 +35 val_35 35 val_35 
2008-04-09 12
+[masked golden output, one `+<key> val_<key> <key> val_<key> <ds> <hr>` row per line; this slice covers keys 35 through 283, with each key repeated once per matching source row across partitions ds in {2008-04-08, 2008-04-09} and hr in {11, 12}]
+283
val_283 283 val_283 2008-04-09 11 +284 val_284 284 val_284 2008-04-08 11 +284 val_284 284 val_284 2008-04-09 12 +284 val_284 284 val_284 2008-04-08 12 +284 val_284 284 val_284 2008-04-09 11 +285 val_285 285 val_285 2008-04-09 12 +285 val_285 285 val_285 2008-04-09 11 +285 val_285 285 val_285 2008-04-08 11 +285 val_285 285 val_285 2008-04-08 12 +286 val_286 286 val_286 2008-04-08 11 +286 val_286 286 val_286 2008-04-08 12 +286 val_286 286 val_286 2008-04-09 12 +286 val_286 286 val_286 2008-04-09 11 +287 val_287 287 val_287 2008-04-08 11 +287 val_287 287 val_287 2008-04-09 12 +287 val_287 287 val_287 2008-04-09 11 +287 val_287 287 val_287 2008-04-08 12 +288 val_288 288 val_288 2008-04-09 11 +288 val_288 288 val_288 2008-04-09 12 +288 val_288 288 val_288 2008-04-08 12 +288 val_288 288 val_288 2008-04-09 11 +288 val_288 288 val_288 2008-04-09 12 +288 val_288 288 val_288 2008-04-08 11 +288 val_288 288 val_288 2008-04-08 11 +288 val_288 288 val_288 2008-04-08 12 +288 val_288 288 val_288 2008-04-09 11 +288 val_288 288 val_288 2008-04-09 12 +288 val_288 288 val_288 2008-04-08 12 +288 val_288 288 val_288 2008-04-09 11 +288 val_288 288 val_288 2008-04-09 12 +288 val_288 288 val_288 2008-04-08 11 +288 val_288 288 val_288 2008-04-08 11 +288 val_288 288 val_288 2008-04-08 12 +289 val_289 289 val_289 2008-04-08 12 +289 val_289 289 val_289 2008-04-09 11 +289 val_289 289 val_289 2008-04-08 11 +289 val_289 289 val_289 2008-04-09 12 +291 val_291 291 val_291 2008-04-09 12 +291 val_291 291 val_291 2008-04-08 12 +291 val_291 291 val_291 2008-04-09 11 +291 val_291 291 val_291 2008-04-08 11 +292 val_292 292 val_292 2008-04-09 11 +292 val_292 292 val_292 2008-04-08 11 +292 val_292 292 val_292 2008-04-09 12 +292 val_292 292 val_292 2008-04-08 12 +296 val_296 296 val_296 2008-04-08 12 +296 val_296 296 val_296 2008-04-08 11 +296 val_296 296 val_296 2008-04-09 12 +296 val_296 296 val_296 2008-04-09 11 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-08 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-08 12 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-08 12 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-08 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-08 12 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-08 12 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-08 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-08 12 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-09 11 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-09 12 +298 val_298 298 val_298 2008-04-08 11 +298 val_298 298 val_298 2008-04-08 12 +302 val_302 302 val_302 2008-04-08 12 +302 val_302 302 val_302 2008-04-09 12 +302 val_302 302 val_302 2008-04-08 11 +302 val_302 302 val_302 2008-04-09 11 +305 val_305 305 val_305 2008-04-08 11 +305 val_305 305 val_305 2008-04-09 12 +305 
val_305 305 val_305 2008-04-09 11 +305 val_305 305 val_305 2008-04-08 12 +306 val_306 306 val_306 2008-04-09 11 +306 val_306 306 val_306 2008-04-08 11 +306 val_306 306 val_306 2008-04-09 12 +306 val_306 306 val_306 2008-04-08 12 +307 val_307 307 val_307 2008-04-08 11 +307 val_307 307 val_307 2008-04-08 12 +307 val_307 307 val_307 2008-04-09 12 +307 val_307 307 val_307 2008-04-09 12 +307 val_307 307 val_307 2008-04-08 11 +307 val_307 307 val_307 2008-04-08 12 +307 val_307 307 val_307 2008-04-09 11 +307 val_307 307 val_307 2008-04-09 11 +307 val_307 307 val_307 2008-04-08 11 +307 val_307 307 val_307 2008-04-08 12 +307 val_307 307 val_307 2008-04-09 12 +307 val_307 307 val_307 2008-04-09 12 +307 val_307 307 val_307 2008-04-08 11 +307 val_307 307 val_307 2008-04-08 12 +307 val_307 307 val_307 2008-04-09 11 +307 val_307 307 val_307 2008-04-09 11 +308 val_308 308 val_308 2008-04-08 11 +308 val_308 308 val_308 2008-04-09 12 +308 val_308 308 val_308 2008-04-08 12 +308 val_308 308 val_308 2008-04-09 11 +309 val_309 309 val_309 2008-04-09 12 +309 val_309 309 val_309 2008-04-09 11 +309 val_309 309 val_309 2008-04-08 11 +309 val_309 309 val_309 2008-04-09 12 +309 val_309 309 val_309 2008-04-08 12 +309 val_309 309 val_309 2008-04-08 11 +309 val_309 309 val_309 2008-04-09 11 +309 val_309 309 val_309 2008-04-08 12 +309 val_309 309 val_309 2008-04-09 12 +309 val_309 309 val_309 2008-04-09 11 +309 val_309 309 val_309 2008-04-08 11 +309 val_309 309 val_309 2008-04-09 12 +309 val_309 309 val_309 2008-04-08 12 +309 val_309 309 val_309 2008-04-08 11 +309 val_309 309 val_309 2008-04-09 11 +309 val_309 309 val_309 2008-04-08 12 +310 val_310 310 val_310 2008-04-09 11 +310 val_310 310 val_310 2008-04-09 12 +310 val_310 310 val_310 2008-04-08 11 +310 val_310 310 val_310 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-09 12 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-09 11 +311 val_311 311 val_311 2008-04-08 11 +311 val_311 311 val_311 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 12 +311 val_311 311 val_311 2008-04-08 12 +315 val_315 315 val_315 2008-04-08 11 +315 val_315 315 val_315 2008-04-09 12 +315 val_315 315 val_315 2008-04-09 11 +315 val_315 315 val_315 2008-04-08 12 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 12 +316 val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-08 11 +316 
val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 12 +316 val_316 316 val_316 2008-04-08 11 +316 val_316 316 val_316 2008-04-08 11 +316 val_316 316 val_316 2008-04-09 12 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 12 +316 val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-08 11 +316 val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 12 +316 val_316 316 val_316 2008-04-08 11 +316 val_316 316 val_316 2008-04-08 11 +316 val_316 316 val_316 2008-04-09 12 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 12 +316 val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-08 11 +316 val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-08 12 +316 val_316 316 val_316 2008-04-09 11 +316 val_316 316 val_316 2008-04-09 12 +316 val_316 316 val_316 2008-04-08 11 +316 val_316 316 val_316 2008-04-08 11 +316 val_316 316 val_316 2008-04-09 12 +317 val_317 317 val_317 2008-04-08 11 +317 val_317 317 val_317 2008-04-09 11 +317 val_317 317 val_317 2008-04-09 12 +317 val_317 317 val_317 2008-04-09 12 +317 val_317 317 val_317 2008-04-08 12 +317 val_317 317 val_317 2008-04-08 12 +317 val_317 317 val_317 2008-04-08 11 +317 val_317 317 val_317 2008-04-09 11 +317 val_317 317 val_317 2008-04-08 11 +317 val_317 317 val_317 2008-04-09 11 +317 val_317 317 val_317 2008-04-09 12 +317 val_317 317 val_317 2008-04-09 12 +317 val_317 317 val_317 2008-04-08 12 +317 val_317 317 val_317 2008-04-08 12 +317 val_317 317 val_317 2008-04-08 11 +317 val_317 317 val_317 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-08 11 +318 val_318 318 val_318 2008-04-08 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 11 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-08 11 +318 val_318 318 val_318 2008-04-08 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 11 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-09 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-08 11 +318 val_318 318 val_318 2008-04-08 11 +318 val_318 318 val_318 2008-04-09 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 12 +318 val_318 318 val_318 2008-04-08 11 +321 val_321 321 val_321 2008-04-09 11 +321 val_321 321 val_321 2008-04-08 11 +321 val_321 321 val_321 2008-04-08 11 +321 val_321 321 val_321 2008-04-08 12 +321 val_321 321 val_321 2008-04-08 12 +321 val_321 321 val_321 2008-04-09 11 +321 val_321 321 val_321 2008-04-09 12 +321 val_321 321 val_321 2008-04-09 12 +321 
val_321 321 val_321 2008-04-09 11 +321 val_321 321 val_321 2008-04-08 11 +321 val_321 321 val_321 2008-04-08 11 +321 val_321 321 val_321 2008-04-08 12 +321 val_321 321 val_321 2008-04-08 12 +321 val_321 321 val_321 2008-04-09 11 +321 val_321 321 val_321 2008-04-09 12 +321 val_321 321 val_321 2008-04-09 12 +322 val_322 322 val_322 2008-04-08 12 +322 val_322 322 val_322 2008-04-08 11 +322 val_322 322 val_322 2008-04-09 11 +322 val_322 322 val_322 2008-04-09 12 +322 val_322 322 val_322 2008-04-09 12 +322 val_322 322 val_322 2008-04-09 11 +322 val_322 322 val_322 2008-04-08 12 +322 val_322 322 val_322 2008-04-08 11 +322 val_322 322 val_322 2008-04-08 12 +322 val_322 322 val_322 2008-04-08 11 +322 val_322 322 val_322 2008-04-09 11 +322 val_322 322 val_322 2008-04-09 12 +322 val_322 322 val_322 2008-04-09 12 +322 val_322 322 val_322 2008-04-09 11 +322 val_322 322 val_322 2008-04-08 12 +322 val_322 322 val_322 2008-04-08 11 +323 val_323 323 val_323 2008-04-08 11 +323 val_323 323 val_323 2008-04-08 12 +323 val_323 323 val_323 2008-04-09 11 +323 val_323 323 val_323 2008-04-09 12 +325 val_325 325 val_325 2008-04-09 11 +325 val_325 325 val_325 2008-04-08 12 +325 val_325 325 val_325 2008-04-09 12 +325 val_325 325 val_325 2008-04-09 11 +325 val_325 325 val_325 2008-04-09 12 +325 val_325 325 val_325 2008-04-08 12 +325 val_325 325 val_325 2008-04-08 11 +325 val_325 325 val_325 2008-04-08 11 +325 val_325 325 val_325 2008-04-09 11 +325 val_325 325 val_325 2008-04-08 12 +325 val_325 325 val_325 2008-04-09 12 +325 val_325 325 val_325 2008-04-09 11 +325 val_325 325 val_325 2008-04-09 12 +325 val_325 325 val_325 2008-04-08 12 +325 val_325 325 val_325 2008-04-08 11 +325 val_325 325 val_325 2008-04-08 11 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-09 12 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-09 12 +327 val_327 327 val_327 2008-04-09 12 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-09 12 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-09 12 +327 val_327 327 val_327 2008-04-09 12 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-09 11 +327 val_327 327 val_327 2008-04-08 12 +327 val_327 327 val_327 2008-04-09 12 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-08 11 +327 val_327 327 val_327 2008-04-09 12 +327 val_327 327 val_327 2008-04-09 12 +331 val_331 331 val_331 2008-04-09 12 +331 val_331 331 val_331 2008-04-08 11 +331 val_331 331 val_331 2008-04-08 11 +331 val_331 331 val_331 2008-04-08 12 +331 val_331 331 val_331 2008-04-09 11 +331 val_331 331 val_331 2008-04-09 11 +331 val_331 331 val_331 2008-04-08 12 +331 val_331 331 val_331 2008-04-09 12 +331 val_331 331 val_331 2008-04-09 12 +331 val_331 331 val_331 2008-04-08 11 +331 val_331 331 val_331 2008-04-08 11 +331 
val_331 331 val_331 2008-04-08 12 +331 val_331 331 val_331 2008-04-09 11 +331 val_331 331 val_331 2008-04-09 11 +331 val_331 331 val_331 2008-04-08 12 +331 val_331 331 val_331 2008-04-09 12 +332 val_332 332 val_332 2008-04-08 11 +332 val_332 332 val_332 2008-04-09 11 +332 val_332 332 val_332 2008-04-08 12 +332 val_332 332 val_332 2008-04-09 12 +333 val_333 333 val_333 2008-04-09 11 +333 val_333 333 val_333 2008-04-08 12 +333 val_333 333 val_333 2008-04-09 12 +333 val_333 333 val_333 2008-04-08 12 +333 val_333 333 val_333 2008-04-08 11 +333 val_333 333 val_333 2008-04-09 11 +333 val_333 333 val_333 2008-04-08 11 +333 val_333 333 val_333 2008-04-09 12 +333 val_333 333 val_333 2008-04-09 11 +333 val_333 333 val_333 2008-04-08 12 +333 val_333 333 val_333 2008-04-09 12 +333 val_333 333 val_333 2008-04-08 12 +333 val_333 333 val_333 2008-04-08 11 +333 val_333 333 val_333 2008-04-09 11 +333 val_333 333 val_333 2008-04-08 11 +333 val_333 333 val_333 2008-04-09 12 +335 val_335 335 val_335 2008-04-08 12 +335 val_335 335 val_335 2008-04-09 12 +335 val_335 335 val_335 2008-04-09 11 +335 val_335 335 val_335 2008-04-08 11 +336 val_336 336 val_336 2008-04-09 11 +336 val_336 336 val_336 2008-04-09 12 +336 val_336 336 val_336 2008-04-08 12 +336 val_336 336 val_336 2008-04-08 11 +338 val_338 338 val_338 2008-04-08 11 +338 val_338 338 val_338 2008-04-09 11 +338 val_338 338 val_338 2008-04-08 12 +338 val_338 338 val_338 2008-04-09 12 +339 val_339 339 val_339 2008-04-09 11 +339 val_339 339 val_339 2008-04-08 11 +339 val_339 339 val_339 2008-04-09 12 +339 val_339 339 val_339 2008-04-08 12 +341 val_341 341 val_341 2008-04-09 12 +341 val_341 341 val_341 2008-04-09 11 +341 val_341 341 val_341 2008-04-08 11 +341 val_341 341 val_341 2008-04-08 12 +342 val_342 342 val_342 2008-04-08 11 +342 val_342 342 val_342 2008-04-09 12 +342 val_342 342 val_342 2008-04-08 12 +342 val_342 342 val_342 2008-04-09 11 +342 val_342 342 val_342 2008-04-08 12 +342 val_342 342 val_342 2008-04-08 11 +342 val_342 342 val_342 2008-04-09 12 +342 val_342 342 val_342 2008-04-09 11 +342 val_342 342 val_342 2008-04-08 11 +342 val_342 342 val_342 2008-04-09 12 +342 val_342 342 val_342 2008-04-08 12 +342 val_342 342 val_342 2008-04-09 11 +342 val_342 342 val_342 2008-04-08 12 +342 val_342 342 val_342 2008-04-08 11 +342 val_342 342 val_342 2008-04-09 12 +342 val_342 342 val_342 2008-04-09 11 +344 val_344 344 val_344 2008-04-08 12 +344 val_344 344 val_344 2008-04-09 12 +344 val_344 344 val_344 2008-04-09 12 +344 val_344 344 val_344 2008-04-09 11 +344 val_344 344 val_344 2008-04-08 11 +344 val_344 344 val_344 2008-04-08 12 +344 val_344 344 val_344 2008-04-09 11 +344 val_344 344 val_344 2008-04-08 11 +344 val_344 344 val_344 2008-04-08 12 +344 val_344 344 val_344 2008-04-09 12 +344 val_344 344 val_344 2008-04-09 12 +344 val_344 344 val_344 2008-04-09 11 +344 val_344 344 val_344 2008-04-08 11 +344 val_344 344 val_344 2008-04-08 12 +344 val_344 344 val_344 2008-04-09 11 +344 val_344 344 val_344 2008-04-08 11 +345 val_345 345 val_345 2008-04-08 11 +345 val_345 345 val_345 2008-04-09 11 +345 val_345 345 val_345 2008-04-08 12 +345 val_345 345 val_345 2008-04-09 12 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 
val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 11 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 12 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +348 val_348 348 val_348 2008-04-09 12 +348 val_348 348 val_348 2008-04-08 11 +351 val_351 351 val_351 2008-04-09 11 +351 
val_351 351 val_351 2008-04-08 11 +351 val_351 351 val_351 2008-04-08 12 +351 val_351 351 val_351 2008-04-09 12 +353 val_353 353 val_353 2008-04-08 12 +353 val_353 353 val_353 2008-04-08 12 +353 val_353 353 val_353 2008-04-09 11 +353 val_353 353 val_353 2008-04-09 12 +353 val_353 353 val_353 2008-04-08 11 +353 val_353 353 val_353 2008-04-09 11 +353 val_353 353 val_353 2008-04-09 12 +353 val_353 353 val_353 2008-04-08 11 +353 val_353 353 val_353 2008-04-08 12 +353 val_353 353 val_353 2008-04-08 12 +353 val_353 353 val_353 2008-04-09 11 +353 val_353 353 val_353 2008-04-09 12 +353 val_353 353 val_353 2008-04-08 11 +353 val_353 353 val_353 2008-04-09 11 +353 val_353 353 val_353 2008-04-09 12 +353 val_353 353 val_353 2008-04-08 11 +356 val_356 356 val_356 2008-04-08 12 +356 val_356 356 val_356 2008-04-09 11 +356 val_356 356 val_356 2008-04-08 11 +356 val_356 356 val_356 2008-04-09 12 +360 val_360 360 val_360 2008-04-08 11 +360 val_360 360 val_360 2008-04-08 12 +360 val_360 360 val_360 2008-04-09 12 +360 val_360 360 val_360 2008-04-09 11 +362 val_362 362 val_362 2008-04-09 12 +362 val_362 362 val_362 2008-04-08 11 +362 val_362 362 val_362 2008-04-09 11 +362 val_362 362 val_362 2008-04-08 12 +364 val_364 364 val_364 2008-04-09 12 +364 val_364 364 val_364 2008-04-08 11 +364 val_364 364 val_364 2008-04-09 11 +364 val_364 364 val_364 2008-04-08 12 +365 val_365 365 val_365 2008-04-08 11 +365 val_365 365 val_365 2008-04-09 12 +365 val_365 365 val_365 2008-04-08 12 +365 val_365 365 val_365 2008-04-09 11 +366 val_366 366 val_366 2008-04-08 12 +366 val_366 366 val_366 2008-04-09 12 +366 val_366 366 val_366 2008-04-09 11 +366 val_366 366 val_366 2008-04-08 11 +367 val_367 367 val_367 2008-04-08 12 +367 val_367 367 val_367 2008-04-09 12 +367 val_367 367 val_367 2008-04-09 11 +367 val_367 367 val_367 2008-04-08 11 +367 val_367 367 val_367 2008-04-08 11 +367 val_367 367 val_367 2008-04-08 12 +367 val_367 367 val_367 2008-04-09 12 +367 val_367 367 val_367 2008-04-09 11 +367 val_367 367 val_367 2008-04-08 12 +367 val_367 367 val_367 2008-04-09 12 +367 val_367 367 val_367 2008-04-09 11 +367 val_367 367 val_367 2008-04-08 11 +367 val_367 367 val_367 2008-04-08 11 +367 val_367 367 val_367 2008-04-08 12 +367 val_367 367 val_367 2008-04-09 12 +367 val_367 367 val_367 2008-04-09 11 +368 val_368 368 val_368 2008-04-08 12 +368 val_368 368 val_368 2008-04-08 11 +368 val_368 368 val_368 2008-04-09 12 +368 val_368 368 val_368 2008-04-09 11 +369 val_369 369 val_369 2008-04-09 12 +369 val_369 369 val_369 2008-04-08 12 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-09 12 +369 val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-09 12 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-08 12 +369 val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-08 12 +369 val_369 369 val_369 2008-04-09 12 +369 val_369 369 val_369 2008-04-08 12 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-09 12 +369 val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-09 12 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-08 12 +369 val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-08 12 +369 val_369 369 val_369 2008-04-09 12 +369 val_369 369 val_369 2008-04-08 12 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-09 12 +369 
val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-09 12 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-08 12 +369 val_369 369 val_369 2008-04-09 11 +369 val_369 369 val_369 2008-04-08 11 +369 val_369 369 val_369 2008-04-08 12 +373 val_373 373 val_373 2008-04-08 11 +373 val_373 373 val_373 2008-04-09 11 +373 val_373 373 val_373 2008-04-09 12 +373 val_373 373 val_373 2008-04-08 12 +374 val_374 374 val_374 2008-04-09 12 +374 val_374 374 val_374 2008-04-08 12 +374 val_374 374 val_374 2008-04-08 11 +374 val_374 374 val_374 2008-04-09 11 +375 val_375 375 val_375 2008-04-09 12 +375 val_375 375 val_375 2008-04-09 11 +375 val_375 375 val_375 2008-04-08 12 +375 val_375 375 val_375 2008-04-08 11 +377 val_377 377 val_377 2008-04-09 11 +377 val_377 377 val_377 2008-04-09 12 +377 val_377 377 val_377 2008-04-08 11 +377 val_377 377 val_377 2008-04-08 12 +378 val_378 378 val_378 2008-04-09 11 +378 val_378 378 val_378 2008-04-09 12 +378 val_378 378 val_378 2008-04-08 12 +378 val_378 378 val_378 2008-04-08 11 +379 val_379 379 val_379 2008-04-08 12 +379 val_379 379 val_379 2008-04-08 11 +379 val_379 379 val_379 2008-04-09 11 +379 val_379 379 val_379 2008-04-09 12 +382 val_382 382 val_382 2008-04-08 12 +382 val_382 382 val_382 2008-04-09 11 +382 val_382 382 val_382 2008-04-09 11 +382 val_382 382 val_382 2008-04-08 12 +382 val_382 382 val_382 2008-04-08 11 +382 val_382 382 val_382 2008-04-09 12 +382 val_382 382 val_382 2008-04-09 12 +382 val_382 382 val_382 2008-04-08 11 +382 val_382 382 val_382 2008-04-08 12 +382 val_382 382 val_382 2008-04-09 11 +382 val_382 382 val_382 2008-04-09 11 +382 val_382 382 val_382 2008-04-08 12 +382 val_382 382 val_382 2008-04-08 11 +382 val_382 382 val_382 2008-04-09 12 +382 val_382 382 val_382 2008-04-09 12 +382 val_382 382 val_382 2008-04-08 11 +384 val_384 384 val_384 2008-04-09 11 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-09 11 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-09 11 +384 val_384 384 val_384 2008-04-09 11 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-09 11 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-09 11 +384 val_384 384 val_384 2008-04-09 11 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-09 12 +384 val_384 384 val_384 2008-04-08 12 +384 val_384 384 val_384 2008-04-09 11 +384 val_384 384 val_384 2008-04-08 11 +384 val_384 384 val_384 2008-04-09 11 +386 val_386 386 val_386 2008-04-08 12 +386 val_386 386 val_386 2008-04-09 12 +386 val_386 386 val_386 2008-04-09 11 +386 val_386 386 val_386 2008-04-08 11 +389 val_389 389 val_389 2008-04-08 12 +389 val_389 389 val_389 2008-04-09 12 +389 val_389 389 val_389 2008-04-08 11 +389 
val_389 389 val_389 2008-04-09 11 +392 val_392 392 val_392 2008-04-08 12 +392 val_392 392 val_392 2008-04-08 11 +392 val_392 392 val_392 2008-04-09 12 +392 val_392 392 val_392 2008-04-09 11 +393 val_393 393 val_393 2008-04-09 12 +393 val_393 393 val_393 2008-04-08 12 +393 val_393 393 val_393 2008-04-08 11 +393 val_393 393 val_393 2008-04-09 11 +394 val_394 394 val_394 2008-04-08 12 +394 val_394 394 val_394 2008-04-09 12 +394 val_394 394 val_394 2008-04-08 11 +394 val_394 394 val_394 2008-04-09 11 +395 val_395 395 val_395 2008-04-09 12 +395 val_395 395 val_395 2008-04-09 11 +395 val_395 395 val_395 2008-04-09 11 +395 val_395 395 val_395 2008-04-08 12 +395 val_395 395 val_395 2008-04-08 11 +395 val_395 395 val_395 2008-04-09 12 +395 val_395 395 val_395 2008-04-08 12 +395 val_395 395 val_395 2008-04-08 11 +395 val_395 395 val_395 2008-04-09 12 +395 val_395 395 val_395 2008-04-09 11 +395 val_395 395 val_395 2008-04-09 11 +395 val_395 395 val_395 2008-04-08 12 +395 val_395 395 val_395 2008-04-08 11 +395 val_395 395 val_395 2008-04-09 12 +395 val_395 395 val_395 2008-04-08 12 +395 val_395 395 val_395 2008-04-08 11 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-09 12 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 12 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 12 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-09 12 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 12 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 12 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-09 12 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 12 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-08 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 11 +396 val_396 396 val_396 2008-04-08 12 +396 val_396 396 val_396 2008-04-09 12 +397 val_397 397 val_397 2008-04-09 11 +397 val_397 397 val_397 2008-04-09 12 +397 val_397 397 val_397 2008-04-09 12 +397 val_397 397 val_397 2008-04-09 11 +397 val_397 397 val_397 2008-04-08 11 +397 val_397 397 val_397 2008-04-08 11 +397 val_397 397 val_397 2008-04-08 12 +397 val_397 397 val_397 2008-04-08 12 +397 val_397 397 val_397 2008-04-09 11 +397 val_397 397 val_397 2008-04-09 12 +397 val_397 397 val_397 2008-04-09 12 +397 val_397 397 val_397 2008-04-09 11 +397 val_397 397 val_397 2008-04-08 11 +397 val_397 397 val_397 2008-04-08 11 +397 val_397 397 val_397 2008-04-08 12 +397 val_397 397 val_397 2008-04-08 12 +399 val_399 399 val_399 2008-04-09 12 +399 val_399 399 val_399 2008-04-09 12 +399 val_399 399 val_399 2008-04-09 11 +399 val_399 399 val_399 2008-04-08 12 +399 val_399 399 val_399 2008-04-09 11 +399 val_399 399 val_399 2008-04-08 11 +399 val_399 399 val_399 2008-04-08 12 +399 val_399 399 val_399 2008-04-08 11 +399 val_399 399 val_399 2008-04-09 12 +399 val_399 399 val_399 2008-04-09 12 +399 
val_399 399 val_399 2008-04-09 11 +399 val_399 399 val_399 2008-04-08 12 +399 val_399 399 val_399 2008-04-09 11 +399 val_399 399 val_399 2008-04-08 11 +399 val_399 399 val_399 2008-04-08 12 +399 val_399 399 val_399 2008-04-08 11 +400 val_400 400 val_400 2008-04-08 12 +400 val_400 400 val_400 2008-04-09 11 +400 val_400 400 val_400 2008-04-09 12 +400 val_400 400 val_400 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 
val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-09 12 +401 val_401 401 val_401 2008-04-08 11 +401 val_401 401 val_401 2008-04-08 12 +401 val_401 401 val_401 2008-04-09 12 +402 val_402 402 val_402 2008-04-09 11 +402 val_402 402 val_402 2008-04-08 11 +402 val_402 402 val_402 2008-04-09 12 +402 val_402 402 val_402 2008-04-08 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 11 +403 val_403 403 val_403 2008-04-09 12 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-09 11 +403 val_403 403 val_403 2008-04-08 12 +403 val_403 403 val_403 2008-04-09 12 +404 val_404 404 val_404 2008-04-09 12 +404 val_404 404 val_404 2008-04-09 11 +404 val_404 404 val_404 2008-04-08 11 +404 val_404 404 val_404 2008-04-08 11 +404 val_404 404 val_404 2008-04-08 12 +404 val_404 404 val_404 2008-04-09 11 +404 val_404 404 val_404 2008-04-09 12 +404 val_404 404 val_404 2008-04-08 12 +404 val_404 404 val_404 2008-04-09 12 +404 val_404 404 val_404 2008-04-09 11 +404 val_404 404 val_404 2008-04-08 11 +404 val_404 404 val_404 2008-04-08 11 +404 val_404 404 val_404 2008-04-08 12 +404 val_404 404 val_404 2008-04-09 11 +404 val_404 404 val_404 2008-04-09 12 +404 val_404 404 val_404 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 
val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 11 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-09 12 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 11 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +406 val_406 406 val_406 2008-04-08 12 +407 val_407 407 val_407 2008-04-08 11 +407 val_407 407 val_407 2008-04-08 12 +407 val_407 407 val_407 2008-04-09 12 +407 val_407 407 val_407 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-08 11 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-09 12 +409 val_409 409 val_409 2008-04-08 12 +409 val_409 409 val_409 2008-04-09 11 +409 val_409 409 val_409 2008-04-09 11 +411 val_411 411 val_411 2008-04-08 11 +411 val_411 411 val_411 2008-04-08 12 +411 val_411 411 val_411 2008-04-09 12 +411 
val_411 411 val_411 2008-04-09 11 +413 val_413 413 val_413 2008-04-09 11 +413 val_413 413 val_413 2008-04-08 12 +413 val_413 413 val_413 2008-04-09 11 +413 val_413 413 val_413 2008-04-09 12 +413 val_413 413 val_413 2008-04-08 12 +413 val_413 413 val_413 2008-04-08 11 +413 val_413 413 val_413 2008-04-09 12 +413 val_413 413 val_413 2008-04-08 11 +413 val_413 413 val_413 2008-04-09 11 +413 val_413 413 val_413 2008-04-08 12 +413 val_413 413 val_413 2008-04-09 11 +413 val_413 413 val_413 2008-04-09 12 +413 val_413 413 val_413 2008-04-08 12 +413 val_413 413 val_413 2008-04-08 11 +413 val_413 413 val_413 2008-04-09 12 +413 val_413 413 val_413 2008-04-08 11 +414 val_414 414 val_414 2008-04-08 11 +414 val_414 414 val_414 2008-04-08 12 +414 val_414 414 val_414 2008-04-09 11 +414 val_414 414 val_414 2008-04-08 11 +414 val_414 414 val_414 2008-04-09 12 +414 val_414 414 val_414 2008-04-08 12 +414 val_414 414 val_414 2008-04-09 11 +414 val_414 414 val_414 2008-04-09 12 +414 val_414 414 val_414 2008-04-08 11 +414 val_414 414 val_414 2008-04-08 12 +414 val_414 414 val_414 2008-04-09 11 +414 val_414 414 val_414 2008-04-08 11 +414 val_414 414 val_414 2008-04-09 12 +414 val_414 414 val_414 2008-04-08 12 +414 val_414 414 val_414 2008-04-09 11 +414 val_414 414 val_414 2008-04-09 12 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 12 +417 val_417 417 val_417 2008-04-09 11 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-09 12 +417 val_417 417 val_417 2008-04-08 11 +417 val_417 417 val_417 2008-04-09 11 +418 val_418 418 val_418 2008-04-08 12 +418 val_418 418 val_418 2008-04-09 12 +418 val_418 418 val_418 2008-04-09 11 +418 val_418 418 val_418 2008-04-08 11 +419 val_419 419 val_419 2008-04-08 12 +419 val_419 419 val_419 2008-04-08 11 +419 val_419 419 val_419 2008-04-09 12 +419 val_419 419 val_419 2008-04-09 11 +421 val_421 421 val_421 2008-04-08 11 +421 val_421 421 val_421 2008-04-08 12 +421 val_421 421 val_421 2008-04-09 11 +421 val_421 421 val_421 2008-04-09 12 +424 val_424 424 val_424 2008-04-08 11 +424 val_424 424 val_424 2008-04-09 11 +424 val_424 424 val_424 2008-04-09 12 +424 val_424 424 val_424 2008-04-09 11 +424 val_424 424 val_424 2008-04-08 12 +424 val_424 424 val_424 2008-04-08 12 +424 val_424 424 val_424 2008-04-09 12 +424 val_424 424 val_424 2008-04-08 11 +424 val_424 424 val_424 2008-04-08 11 +424 val_424 424 val_424 2008-04-09 11 +424 
val_424 424 val_424 2008-04-09 12 +424 val_424 424 val_424 2008-04-09 11 +424 val_424 424 val_424 2008-04-08 12 +424 val_424 424 val_424 2008-04-08 12 +424 val_424 424 val_424 2008-04-09 12 +424 val_424 424 val_424 2008-04-08 11 +427 val_427 427 val_427 2008-04-09 12 +427 val_427 427 val_427 2008-04-08 12 +427 val_427 427 val_427 2008-04-09 11 +427 val_427 427 val_427 2008-04-08 11 +429 val_429 429 val_429 2008-04-09 11 +429 val_429 429 val_429 2008-04-08 11 +429 val_429 429 val_429 2008-04-08 12 +429 val_429 429 val_429 2008-04-09 12 +429 val_429 429 val_429 2008-04-08 11 +429 val_429 429 val_429 2008-04-09 12 +429 val_429 429 val_429 2008-04-08 12 +429 val_429 429 val_429 2008-04-09 11 +429 val_429 429 val_429 2008-04-09 11 +429 val_429 429 val_429 2008-04-08 11 +429 val_429 429 val_429 2008-04-08 12 +429 val_429 429 val_429 2008-04-09 12 +429 val_429 429 val_429 2008-04-08 11 +429 val_429 429 val_429 2008-04-09 12 +429 val_429 429 val_429 2008-04-08 12 +429 val_429 429 val_429 2008-04-09 11 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 11 +430 val_430 430 val_430 2008-04-09 12 +430 val_430 430 val_430 2008-04-08 11 +430 val_430 430 val_430 2008-04-08 12 +430 val_430 430 val_430 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-09 11 +431 val_431 431 val_431 2008-04-08 12 +431 
val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-08 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-09 12 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +431 val_431 431 val_431 2008-04-08 11 +432 val_432 432 val_432 2008-04-09 12 +432 val_432 432 val_432 2008-04-08 11 +432 val_432 432 val_432 2008-04-09 11 +432 val_432 432 val_432 2008-04-08 12 +435 val_435 435 val_435 2008-04-09 11 +435 val_435 435 val_435 2008-04-08 11 +435 val_435 435 val_435 2008-04-08 12 +435 val_435 435 val_435 2008-04-09 12 +436 val_436 436 val_436 2008-04-08 12 +436 val_436 436 val_436 2008-04-08 11 +436 val_436 436 val_436 2008-04-09 11 +436 val_436 436 val_436 2008-04-09 12 +437 val_437 437 val_437 2008-04-09 12 +437 val_437 437 val_437 2008-04-08 11 +437 val_437 437 val_437 2008-04-09 11 +437 val_437 437 val_437 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-08 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 12 +438 val_438 438 val_438 2008-04-09 12 +438 val_438 438 val_438 2008-04-09 11 +438 val_438 438 val_438 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 11 +439 val_439 439 val_439 2008-04-09 11 +439 val_439 439 val_439 2008-04-08 12 +439 val_439 439 val_439 2008-04-08 12 +439 val_439 439 val_439 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 12 +439 val_439 439 val_439 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 12 +439 val_439 439 val_439 2008-04-09 11 +439 val_439 439 val_439 2008-04-09 11 +439 val_439 439 val_439 2008-04-08 12 +439 val_439 439 val_439 2008-04-08 12 +439 val_439 439 val_439 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 12 +439 val_439 439 val_439 2008-04-08 11 +439 val_439 439 val_439 2008-04-09 12 +443 val_443 443 val_443 2008-04-09 11 +443 val_443 443 val_443 2008-04-09 12 +443 val_443 443 val_443 2008-04-08 11 +443 val_443 443 val_443 2008-04-08 12 +444 val_444 444 val_444 2008-04-09 11 +444 val_444 444 val_444 2008-04-08 11 +444 val_444 444 val_444 2008-04-08 12 +444 val_444 444 val_444 2008-04-09 12 +446 val_446 446 val_446 2008-04-08 12 +446 val_446 446 val_446 2008-04-09 12 +446 val_446 446 val_446 2008-04-08 11 +446 val_446 446 val_446 2008-04-09 11 +448 val_448 448 val_448 2008-04-09 11 +448 val_448 448 val_448 2008-04-09 12 +448 val_448 448 val_448 2008-04-08 11 +448 val_448 448 val_448 2008-04-08 12 +449 
val_449 449 val_449 2008-04-08 12 +449 val_449 449 val_449 2008-04-08 11 +449 val_449 449 val_449 2008-04-09 12 +449 val_449 449 val_449 2008-04-09 11 +452 val_452 452 val_452 2008-04-08 11 +452 val_452 452 val_452 2008-04-09 11 +452 val_452 452 val_452 2008-04-09 12 +452 val_452 452 val_452 2008-04-08 12 +453 val_453 453 val_453 2008-04-08 12 +453 val_453 453 val_453 2008-04-09 11 +453 val_453 453 val_453 2008-04-09 12 +453 val_453 453 val_453 2008-04-08 11 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 11 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-09 11 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-09 12 +454 val_454 454 val_454 2008-04-08 12 +454 val_454 454 val_454 2008-04-08 12 +455 val_455 455 val_455 2008-04-08 11 +455 val_455 455 val_455 2008-04-09 11 +455 val_455 455 val_455 2008-04-08 12 +455 val_455 455 val_455 2008-04-09 12 +457 val_457 457 val_457 2008-04-08 12 +457 val_457 457 val_457 2008-04-09 12 +457 val_457 457 val_457 2008-04-08 11 +457 val_457 457 val_457 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 12 +458 val_458 458 val_458 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 11 +458 val_458 458 val_458 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 11 +458 val_458 458 val_458 2008-04-09 12 +458 val_458 458 val_458 2008-04-09 12 +458 val_458 458 val_458 2008-04-08 12 +458 val_458 458 val_458 2008-04-08 12 +458 val_458 458 val_458 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 11 +458 val_458 458 val_458 2008-04-09 11 +458 val_458 458 val_458 2008-04-08 11 +458 val_458 458 val_458 2008-04-09 12 +458 val_458 458 val_458 2008-04-09 12 +458 val_458 458 val_458 2008-04-08 12 +459 val_459 459 val_459 2008-04-08 12 +459 val_459 459 val_459 2008-04-09 12 +459 val_459 459 val_459 2008-04-09 11 +459 val_459 459 val_459 2008-04-09 11 +459 val_459 459 val_459 2008-04-08 11 +459 val_459 459 val_459 2008-04-08 12 +459 val_459 459 val_459 2008-04-09 12 +459 val_459 459 val_459 2008-04-08 11 +459 val_459 459 val_459 2008-04-08 12 +459 val_459 459 val_459 2008-04-09 12 +459 val_459 459 val_459 2008-04-09 11 +459 val_459 459 val_459 2008-04-09 11 +459 val_459 459 val_459 2008-04-08 11 +459 val_459 459 val_459 2008-04-08 12 +459 val_459 459 val_459 2008-04-09 12 +459 val_459 459 val_459 2008-04-08 11 +460 val_460 460 val_460 2008-04-08 11 +460 val_460 460 val_460 2008-04-09 11 +460 val_460 460 val_460 2008-04-08 12 +460 
val_460 460 val_460 2008-04-09 12 +462 val_462 462 val_462 2008-04-09 11 +462 val_462 462 val_462 2008-04-09 12 +462 val_462 462 val_462 2008-04-08 11 +462 val_462 462 val_462 2008-04-08 11 +462 val_462 462 val_462 2008-04-09 11 +462 val_462 462 val_462 2008-04-09 12 +462 val_462 462 val_462 2008-04-08 12 +462 val_462 462 val_462 2008-04-08 12 +462 val_462 462 val_462 2008-04-09 11 +462 val_462 462 val_462 2008-04-09 12 +462 val_462 462 val_462 2008-04-08 11 +462 val_462 462 val_462 2008-04-08 11 +462 val_462 462 val_462 2008-04-09 11 +462 val_462 462 val_462 2008-04-09 12 +462 val_462 462 val_462 2008-04-08 12 +462 val_462 462 val_462 2008-04-08 12 +463 val_463 463 val_463 2008-04-09 11 +463 val_463 463 val_463 2008-04-09 12 +463 val_463 463 val_463 2008-04-09 12 +463 val_463 463 val_463 2008-04-08 12 +463 val_463 463 val_463 2008-04-08 11 +463 val_463 463 val_463 2008-04-08 12 +463 val_463 463 val_463 2008-04-09 11 +463 val_463 463 val_463 2008-04-08 11 +463 val_463 463 val_463 2008-04-09 11 +463 val_463 463 val_463 2008-04-09 12 +463 val_463 463 val_463 2008-04-09 12 +463 val_463 463 val_463 2008-04-08 12 +463 val_463 463 val_463 2008-04-08 11 +463 val_463 463 val_463 2008-04-08 12 +463 val_463 463 val_463 2008-04-09 11 +463 val_463 463 val_463 2008-04-08 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-08 11 +466 val_466 466 val_466 2008-04-09 11 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-09 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-08 12 +466 val_466 466 val_466 2008-04-09 12 +467 val_467 467 val_467 2008-04-09 12 +467 val_467 467 val_467 2008-04-09 11 +467 val_467 467 val_467 2008-04-08 12 +467 val_467 467 val_467 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 11 +468 
val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-08 12 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 12 +468 val_468 468 val_468 2008-04-08 11 +468 val_468 468 val_468 2008-04-09 11 +468 val_468 468 val_468 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 
val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-09 11 +469 val_469 469 val_469 2008-04-09 12 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 11 +469 val_469 469 val_469 2008-04-08 12 +469 val_469 469 val_469 2008-04-09 12 +470 val_470 470 val_470 2008-04-08 11 +470 val_470 470 val_470 2008-04-09 12 +470 val_470 470 val_470 2008-04-08 12 +470 val_470 470 val_470 2008-04-09 11 +472 val_472 472 val_472 2008-04-09 11 +472 val_472 472 val_472 2008-04-08 12 +472 val_472 472 val_472 2008-04-09 12 +472 val_472 472 val_472 2008-04-08 11 +475 val_475 475 val_475 2008-04-09 11 +475 val_475 475 val_475 2008-04-08 11 +475 val_475 475 val_475 2008-04-08 12 +475 val_475 475 val_475 2008-04-09 12 +477 val_477 477 val_477 2008-04-09 12 +477 val_477 477 val_477 2008-04-08 11 +477 val_477 477 val_477 2008-04-08 12 +477 val_477 477 val_477 2008-04-09 11 +478 val_478 478 val_478 2008-04-08 11 +478 val_478 478 val_478 2008-04-08 12 +478 val_478 478 val_478 2008-04-09 11 +478 val_478 478 val_478 2008-04-09 12 +478 val_478 478 val_478 2008-04-09 12 +478 val_478 478 val_478 2008-04-08 11 +478 val_478 478 val_478 2008-04-09 11 +478 val_478 478 val_478 2008-04-08 12 +478 val_478 478 val_478 2008-04-08 11 +478 val_478 478 val_478 2008-04-08 12 +478 val_478 478 val_478 2008-04-09 11 +478 val_478 478 val_478 2008-04-09 12 +478 val_478 478 val_478 2008-04-09 12 +478 val_478 478 val_478 2008-04-08 11 +478 val_478 478 val_478 2008-04-09 11 +478 val_478 478 val_478 2008-04-08 12 +479 val_479 479 val_479 2008-04-08 12 +479 val_479 479 val_479 2008-04-08 11 +479 val_479 479 val_479 2008-04-09 11 +479 val_479 479 val_479 2008-04-09 12 +480 
val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-08 11 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 12 +480 val_480 480 val_480 2008-04-08 12 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-09 11 +480 val_480 480 val_480 2008-04-08 12 +481 val_481 481 val_481 2008-04-08 12 +481 val_481 481 val_481 2008-04-08 11 +481 val_481 481 val_481 2008-04-09 11 +481 val_481 481 val_481 2008-04-09 12 +482 val_482 482 val_482 2008-04-09 11 +482 val_482 482 val_482 2008-04-09 12 +482 val_482 482 val_482 2008-04-08 11 +482 val_482 482 val_482 2008-04-08 12 +483 val_483 483 val_483 2008-04-09 11 +483 val_483 483 val_483 2008-04-08 11 +483 val_483 483 val_483 2008-04-09 12 +483 val_483 483 val_483 2008-04-08 12 +484 val_484 484 val_484 2008-04-08 12 +484 val_484 484 val_484 2008-04-08 11 +484 val_484 484 val_484 2008-04-09 11 +484 val_484 484 val_484 2008-04-09 12 +485 val_485 485 val_485 2008-04-08 11 +485 val_485 485 val_485 2008-04-09 11 +485 val_485 485 val_485 2008-04-09 12 +485 val_485 485 val_485 2008-04-08 12 +487 val_487 487 val_487 2008-04-09 12 +487 val_487 487 val_487 2008-04-08 12 +487 val_487 487 val_487 2008-04-09 11 +487 val_487 487 val_487 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 
val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-08 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-09 11 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 12 +489 val_489 489 val_489 2008-04-08 12 +489 val_489 489 val_489 2008-04-09 11 +490 val_490 490 val_490 2008-04-09 11 +490 val_490 490 val_490 2008-04-08 11 +490 val_490 490 val_490 2008-04-09 12 +490 val_490 490 val_490 2008-04-08 12 +491 val_491 491 val_491 2008-04-08 11 +491 val_491 491 val_491 2008-04-08 12 +491 val_491 491 val_491 2008-04-09 11 +491 val_491 491 val_491 2008-04-09 12 +492 val_492 492 val_492 2008-04-08 11 +492 val_492 492 val_492 2008-04-09 11 +492 val_492 492 val_492 2008-04-09 12 +492 val_492 492 val_492 2008-04-09 12 +492 val_492 492 val_492 2008-04-08 12 +492 val_492 492 val_492 2008-04-09 11 +492 val_492 492 val_492 2008-04-08 11 +492 val_492 492 val_492 2008-04-08 12 +492 val_492 492 val_492 2008-04-08 11 +492 val_492 492 val_492 2008-04-09 11 +492 val_492 492 val_492 2008-04-09 12 +492 val_492 492 val_492 2008-04-09 12 +492 val_492 492 val_492 2008-04-08 12 +492 val_492 492 val_492 2008-04-09 11 +492 val_492 492 val_492 2008-04-08 11 +492 val_492 492 val_492 2008-04-08 12 +493 val_493 493 val_493 2008-04-08 12 +493 val_493 493 val_493 2008-04-09 12 +493 val_493 493 val_493 2008-04-09 11 +493 val_493 493 val_493 2008-04-08 11 +494 val_494 494 val_494 2008-04-09 11 +494 val_494 494 val_494 2008-04-08 11 +494 val_494 494 val_494 2008-04-08 12 +494 val_494 494 val_494 2008-04-09 12 +495 val_495 495 val_495 2008-04-08 11 +495 val_495 495 val_495 2008-04-09 12 +495 val_495 495 val_495 2008-04-08 12 +495 val_495 495 val_495 2008-04-09 11 +496 val_496 496 val_496 2008-04-09 11 +496 val_496 496 val_496 2008-04-09 12 +496 val_496 496 val_496 2008-04-08 12 +496 val_496 496 val_496 2008-04-08 11 +497 val_497 497 val_497 2008-04-09 12 +497 val_497 497 val_497 2008-04-08 12 +497 val_497 497 val_497 2008-04-08 11 +497 val_497 497 val_497 2008-04-09 11 +498 val_498 498 val_498 2008-04-08 12 +498 val_498 498 val_498 2008-04-08 11 +498 val_498 498 val_498 2008-04-08 11 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-08 11 +498 val_498 498 val_498 2008-04-08 12 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-08 12 +498 val_498 498 val_498 2008-04-08 12 +498 val_498 498 val_498 2008-04-08 11 +498 
val_498 498 val_498 2008-04-08 11 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-08 11 +498 val_498 498 val_498 2008-04-08 12 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-08 12 +498 val_498 498 val_498 2008-04-08 12 +498 val_498 498 val_498 2008-04-08 11 +498 val_498 498 val_498 2008-04-08 11 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-09 11 +498 val_498 498 val_498 2008-04-08 11 +498 val_498 498 val_498 2008-04-08 12 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-09 12 +498 val_498 498 val_498 2008-04-08 12 +PREHOOK: query: explain select * from default.masking_test_n10 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from default.masking_test where key > 0 +POSTHOOK: query: explain select * from default.masking_test_n10 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -351,18 +6371,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n10 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), reverse(value) (type: string) + expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -375,20 +6395,514 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from default.masking_test where key > 0 +PREHOOK: query: select * from default.masking_test_n10 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from default.masking_test where key > 0 +POSTHOOK: query: select * from default.masking_test_n10 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -4 4_lav -8 8_lav -2 2_lav -PREHOOK: query: explain select * from masking_test where masking_test.key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 
val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 
val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from masking_test_n10 where masking_test_n10.key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test where masking_test.key > 0 +POSTHOOK: query: explain select * from masking_test_n10 where masking_test_n10.key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -399,18 +6913,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n10 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: int), reverse(value) (type: string) + expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column 
stats: NONE + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -423,17 +6937,511 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test where masking_test.key > 0 +PREHOOK: query: select * from masking_test_n10 where masking_test_n10.key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test where masking_test.key > 0 +POSTHOOK: query: select * from masking_test_n10 where masking_test_n10.key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n10 #### A masked pattern was here #### -4 4_lav -8 8_lav -2 2_lav +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 
val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 
+126 val_126
+90 val_90
+169 val_169
+403 val_403
+400 val_400
+200 val_200
+97 val_97
 PREHOOK: query: explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select key, value from (select key, value from (select key, upper(value) as value from src where key > 0) t where key < 10) t2 where key % 2 = 0
diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
index e9ba98412a..bb58b0864f 100644
--- a/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
+++ b/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
@@ -1,75 +1,549 @@
-PREHOOK: query: create view masking_test as select cast(key as int) as key, value from src
+PREHOOK: query: create view masking_test_n13 as select cast(key as int) as key, value from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@masking_test
-POSTHOOK: query: create view masking_test as select cast(key as int) as key, value from src
+PREHOOK: Output: default@masking_test_n13
+POSTHOOK: query: create view masking_test_n13 as select cast(key as int) as key, value from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@masking_test
-POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain select * from masking_test
+POSTHOOK: Output: default@masking_test_n13
+POSTHOOK: Lineage: masking_test_n13.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: masking_test_n13.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select * from masking_test_n13
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from masking_test
+POSTHOOK: query: explain select * from masking_test_n13
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage

 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(key) (type: int), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: (((_col0 % 2) = 0) and (_col0 < 10)) (type: boolean)
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), reverse(_col1) (type: string)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num
rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink -PREHOOK: query: select * from masking_test +PREHOOK: query: select * from masking_test_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n13 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test +POSTHOOK: query: select * from masking_test_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n13 POSTHOOK: Input: default@src #### A masked pattern was here #### -0 0_lav -4 4_lav -8 8_lav -0 0_lav -0 0_lav -2 2_lav -PREHOOK: query: explain select * from masking_test where key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 
+288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 
val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from masking_test_n13 where key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test where key > 0 +POSTHOOK: query: explain select * from masking_test_n13 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -87,19 +561,15 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), reverse(_col1) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + predicate: (_col0 > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Execution mode: vectorized Stage: Stage-0 @@ -108,22 +578,516 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test where key > 0 +PREHOOK: query: select * from masking_test_n13 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n13 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test where key > 0 +POSTHOOK: query: select * from masking_test_n13 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n13 POSTHOOK: Input: default@src #### A masked pattern was here #### -4 4_lav -8 8_lav -2 2_lav -PREHOOK: query: explain select * from src a join masking_test b on a.key = b.value where b.key > 0 +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 
val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 
+257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: explain select * from src a join masking_test_n13 b on a.key = b.value where b.key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from src a join masking_test b on a.key = b.value where b.key > 0 +POSTHOOK: query: explain select * from src a join masking_test_n13 b on a.key = b.value where b.key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -136,26 +1100,22 @@ STAGE PLANS: TableScan alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(key) (type: int), value (type: string) - outputColumnNames: _col0, _col1 + Filter Operator + predicate: value is not null (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), reverse(_col1) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: _col1 is not null (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + 
predicate: (_col0 > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Map-reduce partition columns: _col1 (type: string) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int) TableScan alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -195,9 +1155,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select a.*, b.key from masking_test a join masking_test b on a.key = b.value where b.key > 0 +PREHOOK: query: explain select a.*, b.key from masking_test_n13 a join masking_test_n13 b on a.key = b.value where b.key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select a.*, b.key from masking_test a join masking_test b on a.key = b.value where b.key > 0 +POSTHOOK: query: explain select a.*, b.key from masking_test_n13 a join masking_test_n13 b on a.key = b.value where b.key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -215,41 +1175,33 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col0 % 2) = 0) and (_col0 < 10) and UDFToDouble(_col0) is not null) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), reverse(_col1) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col0) (type: double) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string) + predicate: UDFToDouble(_col0) is not null (type: boolean) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(_col0) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(_col0) (type: double) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int), _col1 (type: string) TableScan alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(key) (type: int), value (type: string) - outputColumnNames: _col0, _col1 + Filter Operator + predicate: UDFToDouble(value) is not null (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), reverse(_col1) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: UDFToDouble(_col1) is not null (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col1) (type: double) - sort order: + - Map-reduce partition columns: UDFToDouble(_col1) (type: double) - Statistics: Num rows: 27 Data size: 286 
Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int) + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col0 > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(_col1) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(_col1) (type: double) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int) Reduce Operator Tree: Join Operator condition map: @@ -258,10 +1210,10 @@ STAGE PLANS: 0 UDFToDouble(_col0) (type: double) 1 UDFToDouble(_col1) (type: double) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -273,9 +1225,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select * from masking_test a union select b.* from masking_test b where b.key > 0 +PREHOOK: query: explain select * from masking_test_n13 a union select b.* from masking_test_n13 b where b.key > 0 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from masking_test a union select b.* from masking_test b where b.key > 0 +POSTHOOK: query: explain select * from masking_test_n13 a union select b.* from masking_test_n13 b where b.key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -292,25 +1244,18 @@ STAGE PLANS: expressions: UDFToInteger(key) (type: int), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((_col0 % 2) = 0) and (_col0 < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), reverse(_col1) (type: string) + Union + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int), _col1 (type: string) + mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: int), _col1 (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 
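The Union / Group By pair in this plan is how Hive implements UNION (distinct): the branches are concatenated as a UNION ALL and deduplicated by grouping on every output column, which is why each map branch feeds a hash-mode Group By and the reducer finishes it in mergepartial mode. A sketch of the equivalent rewrite:

    select key, value
    from (select key, value from masking_test_n13
          union all
          select key, value from masking_test_n13 where key > 0) u
    group by key, value;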
(type: int), _col1 (type: string) + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE TableScan alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -319,33 +1264,29 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), reverse(_col1) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: int), _col1 (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE + predicate: (_col0 > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: string) + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 55 Data size: 583 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 55 Data size: 583 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out index 1b3dcbbbc2..087d09307f 100644 --- a/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out +++ b/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out @@ -1,27 +1,27 @@ -PREHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: query: create table masking_test_n3 as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_n3 +POSTHOOK: query: create table masking_test_n3 as select cast(key as int) as key, value from src POSTHOOK: type: 
CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table masking_test_subq as select cast(key as int) as key, value from src +POSTHOOK: Output: default@masking_test_n3 +POSTHOOK: Lineage: masking_test_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table masking_test_subq_n0 as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test_subq -POSTHOOK: query: create table masking_test_subq as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_subq_n0 +POSTHOOK: query: create table masking_test_subq_n0 as select cast(key as int) as key, value from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test_subq -POSTHOOK: Lineage: masking_test_subq.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test_subq.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@masking_test_subq_n0 +POSTHOOK: Lineage: masking_test_subq_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_subq_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') @@ -66,11 +66,11 @@ STAGE PLANS: ListSink PREHOOK: query: explain -with q1 as ( select * from masking_test where key = '5') +with q1 as ( select * from masking_test_n3 where key = '5') select * from q1 PREHOOK: type: QUERY POSTHOOK: query: explain -with q1 as ( select * from masking_test where key = '5') +with q1 as ( select * from masking_test_n3 where key = '5') select * from q1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -82,18 +82,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key = 5)) (type: boolean) - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + predicate: (key = 5) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 5 (type: int), reverse(value) (type: string) + expressions: 5 (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output 
format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -107,12 +107,12 @@ STAGE PLANS: ListSink PREHOOK: query: explain -with masking_test_subq as ( select * from masking_test where key = '5') -select * from masking_test_subq +with masking_test_subq_n0 as ( select * from masking_test_n3 where key = '5') +select * from masking_test_subq_n0 PREHOOK: type: QUERY POSTHOOK: query: explain -with masking_test_subq as ( select * from masking_test where key = '5') -select * from masking_test_subq +with masking_test_subq_n0 as ( select * from masking_test_n3 where key = '5') +select * from masking_test_subq_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -123,18 +123,18 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key = 5)) (type: boolean) - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + predicate: (key = 5) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 5 (type: int), reverse(value) (type: string) + expressions: 5 (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -148,73 +148,27 @@ STAGE PLANS: ListSink PREHOOK: query: explain -with q1 as ( select * from masking_test where key = '5') -select * from masking_test_subq +with q1 as ( select * from masking_test_n3 where key = '5') +select * from masking_test_subq_n0 PREHOOK: type: QUERY POSTHOOK: query: explain -with q1 as ( select * from masking_test where key = '5') -select * from masking_test_subq +with q1 as ( select * from masking_test_n3 where key = '5') +select * from masking_test_subq_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: masking_test_subq - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: UDFToDouble(key) is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(key) (type: double), UDFToDouble(key) (type: double) - sort order: ++ - Map-reduce partition columns: UDFToDouble(key) (type: double), UDFToDouble(key) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: key (type: int), value (type: string) - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: UDFToDouble(key) is not null (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), key (type: string) - 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: UDFToDouble(_col0) (type: double), UDFToDouble(_col1) (type: double) - sort order: ++ - Map-reduce partition columns: UDFToDouble(_col0) (type: double), UDFToDouble(_col1) (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Operator Tree: - Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 UDFToDouble(key) (type: double), UDFToDouble(key) (type: double) - 1 UDFToDouble(_col0) (type: double), UDFToDouble(_col1) (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: masking_test_subq_n0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink diff --git a/ql/src/test/results/clientpositive/merge1.q.out b/ql/src/test/results/clientpositive/merge1.q.out index 903e839f25..f24454e2d6 100644 --- a/ql/src/test/results/clientpositive/merge1.q.out +++ b/ql/src/test/results/clientpositive/merge1.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: create table dest1(key int, val int) +PREHOOK: query: create table dest1_n121(key int, val int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: create table dest1(key int, val int) +PREHOOK: Output: default@dest1_n121 +POSTHOOK: query: create table dest1_n121(key int, val int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n121 PREHOOK: query: explain -insert overwrite table dest1 +insert overwrite table dest1_n121 select key, count(1) from src group by key PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table dest1 +insert overwrite table dest1_n121 select key, count(1) from src group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -67,7 +67,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: key, val @@ -101,7 +101,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-2 Stats Work @@ -109,7 +109,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val Column Types: 
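The three masking_disablecbo_4 plans above also pin down CTE name resolution. When the CTE is named masking_test_subq_n0, the TableScan alias is masking_test_n3, because a WITH clause shadows a physical table of the same name; when the CTE is named q1 but the query selects from masking_test_subq_n0, q1 is simply unused and the real table is scanned directly. The two statements, taken verbatim from the test:

    with masking_test_subq_n0 as (select * from masking_test_n3 where key = '5')
    select * from masking_test_subq_n0;  -- reads the CTE, so the plan scans masking_test_n3

    with q1 as (select * from masking_test_n3 where key = '5')
    select * from masking_test_subq_n0;  -- q1 is unused; the plan scans the real table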
int, int - Table: default.dest1 + Table: default.dest1_n121 Stage: Stage-3 Map Reduce @@ -121,7 +121,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-5 Map Reduce @@ -133,7 +133,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-6 Move Operator @@ -164,25 +164,25 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: insert overwrite table dest1 +PREHOOK: query: insert overwrite table dest1_n121 select key, count(1) from src group by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: insert overwrite table dest1 +PREHOOK: Output: default@dest1_n121 +POSTHOOK: query: insert overwrite table dest1_n121 select key, count(1) from src group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ] -PREHOOK: query: select * from dest1 +POSTHOOK: Output: default@dest1_n121 +POSTHOOK: Lineage: dest1_n121.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n121.val EXPRESSION [(src)src.null, ] +PREHOOK: query: select * from dest1_n121 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n121 #### A masked pattern was here #### -POSTHOOK: query: select * from dest1 +POSTHOOK: query: select * from dest1_n121 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n121 #### A masked pattern was here #### 0 3 10 1 @@ -493,55 +493,55 @@ POSTHOOK: Input: default@dest1 96 1 97 2 98 2 -PREHOOK: query: drop table dest1 +PREHOOK: query: drop table dest1_n121 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: drop table dest1 +PREHOOK: Input: default@dest1_n121 +PREHOOK: Output: default@dest1_n121 +POSTHOOK: query: drop table dest1_n121 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: create table test_src(key string, value string) partitioned by (ds string) +POSTHOOK: Input: default@dest1_n121 +POSTHOOK: Output: default@dest1_n121 +PREHOOK: query: create table test_src_n2(key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_src -POSTHOOK: query: create table test_src(key string, value string) partitioned by (ds string) +PREHOOK: Output: default@test_src_n2 +POSTHOOK: query: create table test_src_n2(key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_src -PREHOOK: query: create table dest1(key string) +POSTHOOK: Output: default@test_src_n2 +PREHOOK: query: create table dest1_n121(key string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: create table dest1(key 
string) +PREHOOK: Output: default@dest1_n121 +POSTHOOK: query: create table dest1_n121(key string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: insert overwrite table test_src partition(ds='101') select * from src +POSTHOOK: Output: default@dest1_n121 +PREHOOK: query: insert overwrite table test_src_n2 partition(ds='101') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_src@ds=101 -POSTHOOK: query: insert overwrite table test_src partition(ds='101') select * from src +PREHOOK: Output: default@test_src_n2@ds=101 +POSTHOOK: query: insert overwrite table test_src_n2 partition(ds='101') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_src@ds=101 -POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table test_src partition(ds='102') select * from src +POSTHOOK: Output: default@test_src_n2@ds=101 +POSTHOOK: Lineage: test_src_n2 PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src_n2 PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table test_src_n2 partition(ds='102') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_src@ds=102 -POSTHOOK: query: insert overwrite table test_src partition(ds='102') select * from src +PREHOOK: Output: default@test_src_n2@ds=102 +POSTHOOK: query: insert overwrite table test_src_n2 partition(ds='102') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_src@ds=102 -POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@test_src_n2@ds=102 +POSTHOOK: Lineage: test_src_n2 PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src_n2 PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain -insert overwrite table dest1 select key from test_src +insert overwrite table dest1_n121 select key from test_src_n2 PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table dest1 select key from test_src +insert overwrite table dest1_n121 select key from test_src_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -558,7 +558,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_src + alias: test_src_n2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -571,7 +571,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Select Operator expressions: _col0 (type: string) outputColumnNames: key @@ -616,7 +616,7 @@ STAGE PLANS: input format: 
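The extra Stage-3 and Stage-5 Map Reduce jobs in the merge1/merge2 plans, each writing back into the target table, are Hive's conditional small-file merge: they are alternative merge jobs that a conditional task picks between at run time, followed by the Stage-6 Move and the Stage-2 Stats Work. A sketch of the knobs these tests exercise (the values here are illustrative assumptions, not taken from this diff):

    set hive.merge.mapfiles=true;                -- merge output of map-only jobs
    set hive.merge.mapredfiles=true;             -- merge output of map-reduce jobs
    set hive.merge.smallfiles.avgsize=16000000;  -- average-size threshold that triggers a merge

    insert overwrite table dest1_n121
    select key, count(1) from src group by key;  -- plan now carries the merge stages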
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-2 Stats Work @@ -624,7 +624,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: string - Table: default.dest1 + Table: default.dest1_n121 Stage: Stage-3 Map Reduce @@ -636,7 +636,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-5 Map Reduce @@ -648,7 +648,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-6 Move Operator @@ -656,24 +656,24 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table dest1 select key from test_src +PREHOOK: query: insert overwrite table dest1_n121 select key from test_src_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test_src -PREHOOK: Input: default@test_src@ds=101 -PREHOOK: Input: default@test_src@ds=102 -PREHOOK: Output: default@dest1 -POSTHOOK: query: insert overwrite table dest1 select key from test_src +PREHOOK: Input: default@test_src_n2 +PREHOOK: Input: default@test_src_n2@ds=101 +PREHOOK: Input: default@test_src_n2@ds=102 +PREHOOK: Output: default@dest1_n121 +POSTHOOK: query: insert overwrite table dest1_n121 select key from test_src_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_src -POSTHOOK: Input: default@test_src@ds=101 -POSTHOOK: Input: default@test_src@ds=102 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Input: default@test_src_n2 +POSTHOOK: Input: default@test_src_n2@ds=101 +POSTHOOK: Input: default@test_src_n2@ds=102 +POSTHOOK: Output: default@dest1_n121 +POSTHOOK: Lineage: dest1_n121.key SIMPLE [(test_src_n2)test_src_n2.FieldSchema(name:key, type:string, comment:null), ] PREHOOK: query: explain -insert overwrite table dest1 select key from test_src +insert overwrite table dest1_n121 select key from test_src_n2 PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table dest1 select key from test_src +insert overwrite table dest1_n121 select key from test_src_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -690,7 +690,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_src + alias: test_src_n2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -703,7 +703,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Select Operator expressions: _col0 (type: string) outputColumnNames: key @@ -748,7 +748,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-2 Stats Work @@ 
-756,7 +756,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: string - Table: default.dest1 + Table: default.dest1_n121 Stage: Stage-3 Map Reduce @@ -768,7 +768,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-5 Map Reduce @@ -780,7 +780,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n121 Stage: Stage-6 Move Operator @@ -788,16 +788,16 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table dest1 select key from test_src +PREHOOK: query: insert overwrite table dest1_n121 select key from test_src_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test_src -PREHOOK: Input: default@test_src@ds=101 -PREHOOK: Input: default@test_src@ds=102 -PREHOOK: Output: default@dest1 -POSTHOOK: query: insert overwrite table dest1 select key from test_src +PREHOOK: Input: default@test_src_n2 +PREHOOK: Input: default@test_src_n2@ds=101 +PREHOOK: Input: default@test_src_n2@ds=102 +PREHOOK: Output: default@dest1_n121 +POSTHOOK: query: insert overwrite table dest1_n121 select key from test_src_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_src -POSTHOOK: Input: default@test_src@ds=101 -POSTHOOK: Input: default@test_src@ds=102 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Input: default@test_src_n2 +POSTHOOK: Input: default@test_src_n2@ds=101 +POSTHOOK: Input: default@test_src_n2@ds=102 +POSTHOOK: Output: default@dest1_n121 +POSTHOOK: Lineage: dest1_n121.key SIMPLE [(test_src_n2)test_src_n2.FieldSchema(name:key, type:string, comment:null), ] diff --git a/ql/src/test/results/clientpositive/merge2.q.out b/ql/src/test/results/clientpositive/merge2.q.out index 91679ee07a..19bb3a6cea 100644 --- a/ql/src/test/results/clientpositive/merge2.q.out +++ b/ql/src/test/results/clientpositive/merge2.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: create table test1(key int, val int) +PREHOOK: query: create table test1_n9(key int, val int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test1 -POSTHOOK: query: create table test1(key int, val int) +PREHOOK: Output: default@test1_n9 +POSTHOOK: query: create table test1_n9(key int, val int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test1 +POSTHOOK: Output: default@test1_n9 PREHOOK: query: explain -insert overwrite table test1 +insert overwrite table test1_n9 select key, count(1) from src group by key PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table test1 +insert overwrite table test1_n9 select key, count(1) from src group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -67,7 +67,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: key, val @@ -101,7 +101,7 @@ STAGE PLANS: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-2 Stats Work @@ -109,7 +109,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val Column Types: int, int - Table: default.test1 + Table: default.test1_n9 Stage: Stage-3 Map Reduce @@ -121,7 +121,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-5 Map Reduce @@ -133,7 +133,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-6 Move Operator @@ -164,25 +164,25 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: insert overwrite table test1 +PREHOOK: query: insert overwrite table test1_n9 select key, count(1) from src group by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test1 -POSTHOOK: query: insert overwrite table test1 +PREHOOK: Output: default@test1_n9 +POSTHOOK: query: insert overwrite table test1_n9 select key, count(1) from src group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test1 -POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ] -PREHOOK: query: select * from test1 +POSTHOOK: Output: default@test1_n9 +POSTHOOK: Lineage: test1_n9.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test1_n9.val EXPRESSION [(src)src.null, ] +PREHOOK: query: select * from test1_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 +PREHOOK: Input: default@test1_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from test1 +POSTHOOK: query: select * from test1_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 +POSTHOOK: Input: default@test1_n9 #### A masked pattern was here #### 0 3 10 1 @@ -493,55 +493,55 @@ POSTHOOK: Input: default@test1 96 1 97 2 98 2 -PREHOOK: query: drop table test1 +PREHOOK: query: drop table test1_n9 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@test1 -PREHOOK: Output: default@test1 -POSTHOOK: query: drop table test1 +PREHOOK: Input: default@test1_n9 +PREHOOK: Output: default@test1_n9 +POSTHOOK: query: drop table test1_n9 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@test1 -POSTHOOK: Output: default@test1 -PREHOOK: query: create table test_src(key string, value string) partitioned by (ds string) +POSTHOOK: Input: default@test1_n9 +POSTHOOK: Output: default@test1_n9 +PREHOOK: query: create table test_src_n0(key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_src -POSTHOOK: query: create table test_src(key string, value string) partitioned by (ds string) +PREHOOK: Output: default@test_src_n0 +POSTHOOK: query: create table test_src_n0(key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: 
default@test_src -PREHOOK: query: create table test1(key string) +POSTHOOK: Output: default@test_src_n0 +PREHOOK: query: create table test1_n9(key string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test1 -POSTHOOK: query: create table test1(key string) +PREHOOK: Output: default@test1_n9 +POSTHOOK: query: create table test1_n9(key string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test1 -PREHOOK: query: insert overwrite table test_src partition(ds='101') select * from src +POSTHOOK: Output: default@test1_n9 +PREHOOK: query: insert overwrite table test_src_n0 partition(ds='101') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_src@ds=101 -POSTHOOK: query: insert overwrite table test_src partition(ds='101') select * from src +PREHOOK: Output: default@test_src_n0@ds=101 +POSTHOOK: query: insert overwrite table test_src_n0 partition(ds='101') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_src@ds=101 -POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table test_src partition(ds='102') select * from src +POSTHOOK: Output: default@test_src_n0@ds=101 +POSTHOOK: Lineage: test_src_n0 PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src_n0 PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table test_src_n0 partition(ds='102') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_src@ds=102 -POSTHOOK: query: insert overwrite table test_src partition(ds='102') select * from src +PREHOOK: Output: default@test_src_n0@ds=102 +POSTHOOK: query: insert overwrite table test_src_n0 partition(ds='102') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_src@ds=102 -POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@test_src_n0@ds=102 +POSTHOOK: Lineage: test_src_n0 PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src_n0 PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain -insert overwrite table test1 select key from test_src +insert overwrite table test1_n9 select key from test_src_n0 PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table test1 select key from test_src +insert overwrite table test1_n9 select key from test_src_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -558,7 +558,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_src + alias: test_src_n0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -571,7 +571,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Select Operator expressions: _col0 (type: string) outputColumnNames: key @@ -616,7 +616,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-2 Stats Work @@ -624,7 +624,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: string - Table: default.test1 + Table: default.test1_n9 Stage: Stage-3 Map Reduce @@ -636,7 +636,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-5 Map Reduce @@ -648,7 +648,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-6 Move Operator @@ -656,24 +656,24 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table test1 select key from test_src +PREHOOK: query: insert overwrite table test1_n9 select key from test_src_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@test_src -PREHOOK: Input: default@test_src@ds=101 -PREHOOK: Input: default@test_src@ds=102 -PREHOOK: Output: default@test1 -POSTHOOK: query: insert overwrite table test1 select key from test_src +PREHOOK: Input: default@test_src_n0 +PREHOOK: Input: default@test_src_n0@ds=101 +PREHOOK: Input: default@test_src_n0@ds=102 +PREHOOK: Output: default@test1_n9 +POSTHOOK: query: insert overwrite table test1_n9 select key from test_src_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_src -POSTHOOK: Input: default@test_src@ds=101 -POSTHOOK: Input: default@test_src@ds=102 -POSTHOOK: Output: default@test1 -POSTHOOK: Lineage: test1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Input: default@test_src_n0 +POSTHOOK: Input: default@test_src_n0@ds=101 +POSTHOOK: Input: default@test_src_n0@ds=102 +POSTHOOK: Output: default@test1_n9 +POSTHOOK: Lineage: test1_n9.key SIMPLE [(test_src_n0)test_src_n0.FieldSchema(name:key, type:string, comment:null), ] PREHOOK: query: explain -insert overwrite table test1 select key from test_src +insert overwrite table test1_n9 select key from test_src_n0 PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table test1 select key from test_src +insert overwrite table test1_n9 select key from test_src_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -690,7 +690,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_src + alias: test_src_n0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -703,7 +703,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Select Operator expressions: _col0 (type: string) outputColumnNames: key @@ -748,7 +748,7 @@ STAGE PLANS: 
input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-2 Stats Work @@ -756,7 +756,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: string - Table: default.test1 + Table: default.test1_n9 Stage: Stage-3 Map Reduce @@ -768,7 +768,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-5 Map Reduce @@ -780,7 +780,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 + name: default.test1_n9 Stage: Stage-6 Move Operator @@ -788,16 +788,16 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table test1 select key from test_src +PREHOOK: query: insert overwrite table test1_n9 select key from test_src_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@test_src -PREHOOK: Input: default@test_src@ds=101 -PREHOOK: Input: default@test_src@ds=102 -PREHOOK: Output: default@test1 -POSTHOOK: query: insert overwrite table test1 select key from test_src +PREHOOK: Input: default@test_src_n0 +PREHOOK: Input: default@test_src_n0@ds=101 +PREHOOK: Input: default@test_src_n0@ds=102 +PREHOOK: Output: default@test1_n9 +POSTHOOK: query: insert overwrite table test1_n9 select key from test_src_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_src -POSTHOOK: Input: default@test_src@ds=101 -POSTHOOK: Input: default@test_src@ds=102 -POSTHOOK: Output: default@test1 -POSTHOOK: Lineage: test1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Input: default@test_src_n0 +POSTHOOK: Input: default@test_src_n0@ds=101 +POSTHOOK: Input: default@test_src_n0@ds=102 +POSTHOOK: Output: default@test1_n9 +POSTHOOK: Lineage: test1_n9.key SIMPLE [(test_src_n0)test_src_n0.FieldSchema(name:key, type:string, comment:null), ] diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out index 6fdd3f7ae1..45c3a933f0 100644 --- a/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out +++ b/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out @@ -1,57 +1,57 @@ -PREHOOK: query: create table srcpart_merge_dp like srcpart +PREHOOK: query: create table srcpart_merge_dp_n1 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcpart_merge_dp -POSTHOOK: query: create table srcpart_merge_dp like srcpart +PREHOOK: Output: default@srcpart_merge_dp_n1 +POSTHOOK: query: create table srcpart_merge_dp_n1 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcpart_merge_dp -PREHOOK: query: create table merge_dynamic_part like srcpart +POSTHOOK: Output: default@srcpart_merge_dp_n1 +PREHOOK: query: create table merge_dynamic_part_n1 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@merge_dynamic_part -POSTHOOK: query: create table merge_dynamic_part like srcpart +PREHOOK: Output: default@merge_dynamic_part_n1 +POSTHOOK: query: 
create table merge_dynamic_part_n1 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@merge_dynamic_part -PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11) +POSTHOOK: Output: default@merge_dynamic_part_n1 +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11) PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcpart_merge_dp -POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11) +PREHOOK: Output: default@srcpart_merge_dp_n1 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11) POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcpart_merge_dp -POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11 -PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11) +POSTHOOK: Output: default@srcpart_merge_dp_n1 +POSTHOOK: Output: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11 +PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11) PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11 -POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11) +PREHOOK: Output: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11) POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11 -PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11) +POSTHOOK: Output: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11) PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11 -POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11) +PREHOOK: Output: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11) POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11 -PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11) +POSTHOOK: Output: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11 +PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11) PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11 -POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp 
partition(ds='2008-04-08', hr=11) +PREHOOK: Output: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n1 partition(ds='2008-04-08', hr=11) POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11 PREHOOK: query: explain -insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08' +insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n1 where ds='2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08' +insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n1 where ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -63,7 +63,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: srcpart_merge_dp + alias: srcpart_merge_dp_n1 Statistics: Num rows: 99 Data size: 58120 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), hr (type: string) @@ -76,7 +76,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.merge_dynamic_part + name: default.merge_dynamic_part_n1 Select Operator expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), _col2 (type: string) outputColumnNames: key, value, ds, hr @@ -123,7 +123,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.merge_dynamic_part + name: default.merge_dynamic_part_n1 Stage: Stage-2 Stats Work @@ -131,29 +131,29 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.merge_dynamic_part + Table: default.merge_dynamic_part_n1 -PREHOOK: query: insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08' +PREHOOK: query: insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n1 where ds='2008-04-08' PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_merge_dp -PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11 -PREHOOK: Output: default@merge_dynamic_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08' +PREHOOK: Input: default@srcpart_merge_dp_n1 +PREHOOK: Input: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11 +PREHOOK: Output: default@merge_dynamic_part_n1@ds=2008-04-08 +POSTHOOK: query: insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n1 where ds='2008-04-08' POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_merge_dp -POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11 -POSTHOOK: Output: 
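merge_dynamic_partition.q walks the same insert through three partition specs: mixed static/dynamic, fully static, and (later in the file) fully dynamic. With partition (ds='2008-04-08', hr), ds is fixed and hr is resolved per row from the trailing column of the select list; with partition (ds='2008-04-08', hr=11) no dynamic resolution happens at all. A sketch of the two shapes shown here (the SET lines are assumptions about the test's setup, not part of this diff):

    set hive.exec.dynamic.partition=true;
    set hive.exec.dynamic.partition.mode=nonstrict;

    -- ds static, hr dynamic: hr comes from the trailing select column
    insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr)
    select key, value, hr from srcpart_merge_dp_n1 where ds='2008-04-08';

    -- both static: the select list carries only the data columns
    insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr=11)
    select key, value from srcpart_merge_dp_n1 where ds='2008-04-08';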
default@merge_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from merge_dynamic_part
+POSTHOOK: Input: default@srcpart_merge_dp_n1
+POSTHOOK: Input: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: merge_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp_n1)srcpart_merge_dp_n1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp_n1)srcpart_merge_dp_n1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from merge_dynamic_part_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@merge_dynamic_part
-PREHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
+PREHOOK: Input: default@merge_dynamic_part_n1
+PREHOOK: Input: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
-POSTHOOK: query: select * from merge_dynamic_part
+POSTHOOK: query: select * from merge_dynamic_part_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@merge_dynamic_part
-POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@merge_dynamic_part_n1
+POSTHOOK: Input: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 0 val_0 2008-04-08 11
 0 val_0 2008-04-08 11
@@ -655,11 +655,11 @@ POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
 97 val_97 2008-04-08 11
 98 val_98 2008-04-08 11
 98 val_98 2008-04-08 11
-PREHOOK: query: show table extended like `merge_dynamic_part`
+PREHOOK: query: show table extended like `merge_dynamic_part_n1`
 PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `merge_dynamic_part`
+POSTHOOK: query: show table extended like `merge_dynamic_part_n1`
 POSTHOOK: type: SHOW_TABLESTATUS
-tableName:merge_dynamic_part
+tableName:merge_dynamic_part_n1
 #### A masked pattern was here ####
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -673,10 +673,10 @@ minFileSize:1358
 #### A masked pattern was here ####
 PREHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp where ds='2008-04-08'
+insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp_n1 where ds='2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp where ds='2008-04-08'
+insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp_n1 where ds='2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -693,7 +693,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: srcpart_merge_dp
+            alias: srcpart_merge_dp_n1
             Statistics: Num rows: 99 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
@@ -706,7 +706,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.merge_dynamic_part
+              name: default.merge_dynamic_part_n1
             Select Operator
               expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), '11' (type: string)
               outputColumnNames: key, value, ds, hr
@@ -762,7 +762,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.merge_dynamic_part
+              name: default.merge_dynamic_part_n1
 
   Stage: Stage-2
     Stats Work
@@ -770,7 +770,7 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
-          Table: default.merge_dynamic_part
+          Table: default.merge_dynamic_part_n1
 
   Stage: Stage-3
     Map Reduce
@@ -782,7 +782,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.merge_dynamic_part
+              name: default.merge_dynamic_part_n1
 
   Stage: Stage-5
     Map Reduce
@@ -794,7 +794,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.merge_dynamic_part
+              name: default.merge_dynamic_part_n1
 
   Stage: Stage-6
     Move Operator
@@ -802,27 +802,27 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp where ds='2008-04-08'
+PREHOOK: query: insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp_n1 where ds='2008-04-08'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_merge_dp
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp where ds='2008-04-08'
+PREHOOK: Input: default@srcpart_merge_dp_n1
+PREHOOK: Input: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11
+PREHOOK: Output: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table merge_dynamic_part_n1 partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp_n1 where ds='2008-04-08'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_merge_dp
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from merge_dynamic_part
+POSTHOOK: Input: default@srcpart_merge_dp_n1
+POSTHOOK: Input: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: merge_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp_n1)srcpart_merge_dp_n1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp_n1)srcpart_merge_dp_n1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from merge_dynamic_part_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@merge_dynamic_part
-PREHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
+PREHOOK: Input: default@merge_dynamic_part_n1
+PREHOOK: Input: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
-POSTHOOK: query: select * from merge_dynamic_part
+POSTHOOK: query: select * from merge_dynamic_part_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@merge_dynamic_part
-POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@merge_dynamic_part_n1
+POSTHOOK: Input: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 0 val_0 2008-04-08 11
 0 val_0 2008-04-08 11
@@ -1324,11 +1324,11 @@ POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
 97 val_97 2008-04-08 11
 98 val_98 2008-04-08 11
 98 val_98 2008-04-08 11
-PREHOOK: query: show table extended like `merge_dynamic_part`
+PREHOOK: query: show table extended like `merge_dynamic_part_n1`
 PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `merge_dynamic_part`
+POSTHOOK: query: show table extended like `merge_dynamic_part_n1`
 POSTHOOK: type: SHOW_TABLESTATUS
-tableName:merge_dynamic_part
+tableName:merge_dynamic_part_n1
 #### A masked pattern was here ####
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1342,10 +1342,10 @@ minFileSize:5812
 #### A masked pattern was here ####
 PREHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds='2008-04-08' and hr=11
+insert overwrite table merge_dynamic_part_n1 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n1 where ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds='2008-04-08' and hr=11
+insert overwrite table merge_dynamic_part_n1 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n1 where ds='2008-04-08' and hr=11
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -1362,7 +1362,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: srcpart_merge_dp
+            alias: srcpart_merge_dp_n1
             Statistics: Num rows: 99 Data size: 58120 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string)
@@ -1375,7 +1375,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.merge_dynamic_part
+              name: default.merge_dynamic_part_n1
             Select Operator
               expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
               outputColumnNames: key, value, ds, hr
@@ -1431,7 +1431,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.merge_dynamic_part
+              name: default.merge_dynamic_part_n1
 
   Stage: Stage-2
     Stats Work
@@ -1439,7 +1439,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: string, string
-         Table: default.merge_dynamic_part
+         Table: default.merge_dynamic_part_n1
 
   Stage: Stage-3
     Map Reduce
@@ -1451,7 +1451,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.merge_dynamic_part
+              name: default.merge_dynamic_part_n1
 
   Stage: Stage-5
     Map Reduce
@@ -1463,7 +1463,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.merge_dynamic_part
+              name: default.merge_dynamic_part_n1
 
   Stage: Stage-6
     Move Operator
@@ -1471,27 +1471,27 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds='2008-04-08' and hr=11
+PREHOOK: query: insert overwrite table merge_dynamic_part_n1 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n1 where ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_merge_dp
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: Output: default@merge_dynamic_part
-POSTHOOK: query: insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds='2008-04-08' and hr=11
+PREHOOK: Input: default@srcpart_merge_dp_n1
+PREHOOK: Input: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11
+PREHOOK: Output: default@merge_dynamic_part_n1
+POSTHOOK: query: insert overwrite table merge_dynamic_part_n1 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n1 where ds='2008-04-08' and hr=11
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_merge_dp
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from merge_dynamic_part
+POSTHOOK: Input: default@srcpart_merge_dp_n1
+POSTHOOK: Input: default@srcpart_merge_dp_n1@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: merge_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp_n1)srcpart_merge_dp_n1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp_n1)srcpart_merge_dp_n1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from merge_dynamic_part_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@merge_dynamic_part
-PREHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
+PREHOOK: Input: default@merge_dynamic_part_n1
+PREHOOK: Input: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
-POSTHOOK: query: select * from merge_dynamic_part
+POSTHOOK: query: select * from merge_dynamic_part_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@merge_dynamic_part
-POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@merge_dynamic_part_n1
+POSTHOOK: Input: default@merge_dynamic_part_n1@ds=2008-04-08/hr=11
 #### A masked pattern was here ####
 0 val_0 2008-04-08 11
 0 val_0 2008-04-08 11
@@ -1993,11 +1993,11 @@ POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
 97 val_97 2008-04-08 11
 98 val_98 2008-04-08 11
 98 val_98 2008-04-08 11
-PREHOOK: query: show table extended like `merge_dynamic_part`
+PREHOOK: query: show table extended like `merge_dynamic_part_n1`
 PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `merge_dynamic_part`
+POSTHOOK: query: show table extended like `merge_dynamic_part_n1`
 POSTHOOK: type: SHOW_TABLESTATUS
-tableName:merge_dynamic_part
+tableName:merge_dynamic_part_n1
 #### A masked pattern was here ####
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out
index ba02cfe075..aa87e3b1db 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out
@@ -1,74 +1,74 @@
-PREHOOK: query: create table srcpart_merge_dp like srcpart
+PREHOOK: query: create table srcpart_merge_dp_n0 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: create table srcpart_merge_dp like srcpart
+PREHOOK: Output: default@srcpart_merge_dp_n0
+POSTHOOK: query: create table srcpart_merge_dp_n0 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcpart_merge_dp
-PREHOOK: query: create table merge_dynamic_part like srcpart
+POSTHOOK: Output: default@srcpart_merge_dp_n0
+PREHOOK: query: create table merge_dynamic_part_n0 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@merge_dynamic_part
-POSTHOOK: query: create table merge_dynamic_part like srcpart
+PREHOOK: Output: default@merge_dynamic_part_n0
+POSTHOOK: query: create table merge_dynamic_part_n0 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@merge_dynamic_part
-PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@merge_dynamic_part_n0
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n0
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n0
+POSTHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket0.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket0.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket0.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n0
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket0.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: load data local inpath '../../data/files/srcbucket1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n0
+POSTHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=12
+PREHOOK: query: load data local inpath '../../data/files/srcbucket1.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=12
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket1.txt' INTO TABLE srcpart_merge_dp_n0 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=12
 PREHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08'
+insert overwrite table merge_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n0 where ds='2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08'
+insert overwrite table merge_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n0 where ds='2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -85,7 +85,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: srcpart_merge_dp
+            alias: srcpart_merge_dp_n0
            Statistics: Num rows: 297 Data size: 174150 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string), value (type: string), hr (type: string)
@@ -98,7 +98,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.merge_dynamic_part
+             name: default.merge_dynamic_part_n0
            Select Operator
              expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), _col2 (type: string)
              outputColumnNames: key, value, ds, hr
@@ -154,7 +154,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.merge_dynamic_part
+             name: default.merge_dynamic_part_n0
 
   Stage: Stage-2
     Stats Work
@@ -162,7 +162,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: string, string
-         Table: default.merge_dynamic_part
+         Table: default.merge_dynamic_part_n0
 
   Stage: Stage-3
     Map Reduce
@@ -174,7 +174,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.merge_dynamic_part
+             name: default.merge_dynamic_part_n0
 
   Stage: Stage-5
     Map Reduce
@@ -186,7 +186,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.merge_dynamic_part
+             name: default.merge_dynamic_part_n0
 
   Stage: Stage-6
     Move Operator
@@ -194,28 +194,28 @@ STAGE PLANS:
          hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08'
+PREHOOK: query: insert overwrite table merge_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n0 where ds='2008-04-08'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_merge_dp
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: Output: default@merge_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08'
+PREHOOK: Input: default@srcpart_merge_dp_n0
+PREHOOK: Input: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=12
+PREHOOK: Output: default@merge_dynamic_part_n0@ds=2008-04-08
+POSTHOOK: query: insert overwrite table merge_dynamic_part_n0 partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n0 where ds='2008-04-08'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_merge_dp
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: show table extended like `merge_dynamic_part`
+POSTHOOK: Input: default@srcpart_merge_dp_n0
+POSTHOOK: Input: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart_merge_dp_n0@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@merge_dynamic_part_n0@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@merge_dynamic_part_n0@ds=2008-04-08/hr=12
+POSTHOOK: Lineage: merge_dynamic_part_n0 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp_n0)srcpart_merge_dp_n0.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n0 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp_n0)srcpart_merge_dp_n0.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n0 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_merge_dp_n0)srcpart_merge_dp_n0.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n0 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart_merge_dp_n0)srcpart_merge_dp_n0.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `merge_dynamic_part_n0`
 PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `merge_dynamic_part`
+POSTHOOK: query: show table extended like `merge_dynamic_part_n0`
 POSTHOOK: type: SHOW_TABLESTATUS
-tableName:merge_dynamic_part
+tableName:merge_dynamic_part_n0
 #### A masked pattern was here ####
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out
index 16ed934117..6ffcf4c040 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out
@@ -1,134 +1,134 @@
-PREHOOK: query: create table srcpart_merge_dp like srcpart
+PREHOOK: query: create table srcpart_merge_dp_n2 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: create table srcpart_merge_dp like srcpart
+PREHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: query: create table srcpart_merge_dp_n2 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcpart_merge_dp
-PREHOOK: query: create table merge_dynamic_part like srcpart
+POSTHOOK: Output: default@srcpart_merge_dp_n2
+PREHOOK: query: create table merge_dynamic_part_n2 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@merge_dynamic_part
-POSTHOOK: query: create table merge_dynamic_part like srcpart
+PREHOOK: Output: default@merge_dynamic_part_n2
+POSTHOOK: query: create table merge_dynamic_part_n2 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@merge_dynamic_part
-PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@merge_dynamic_part_n2
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+PREHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-09/hr=11
-PREHOOK: query: load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=11
+PREHOOK: query: load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-09/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-09/hr=11
-PREHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=11
+PREHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-09/hr=12
-PREHOOK: query: load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n2
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=12
+PREHOOK: query: load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-09/hr=12
-POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=12
+POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp_n2 partition(ds='2008-04-09', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-09/hr=12
-PREHOOK: query: show partitions srcpart_merge_dp
+POSTHOOK: Output: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=12
+PREHOOK: query: show partitions srcpart_merge_dp_n2
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@srcpart_merge_dp
-POSTHOOK: query: show partitions srcpart_merge_dp
+PREHOOK: Input: default@srcpart_merge_dp_n2
+POSTHOOK: query: show partitions srcpart_merge_dp_n2
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@srcpart_merge_dp
+POSTHOOK: Input: default@srcpart_merge_dp_n2
 ds=2008-04-08/hr=11
 ds=2008-04-08/hr=12
 ds=2008-04-09/hr=11
 ds=2008-04-09/hr=12
 PREHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds>='2008-04-08'
+insert overwrite table merge_dynamic_part_n2 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n2 where ds>='2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds>='2008-04-08'
+insert overwrite table merge_dynamic_part_n2 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n2 where ds>='2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -145,7 +145,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: srcpart_merge_dp
+            alias: srcpart_merge_dp_n2
            Statistics: Num rows: 594 Data size: 348300 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
@@ -158,7 +158,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.merge_dynamic_part
+             name: default.merge_dynamic_part_n2
            Select Operator
              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
              outputColumnNames: key, value, ds, hr
@@ -214,7 +214,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.merge_dynamic_part
+             name: default.merge_dynamic_part_n2
 
   Stage: Stage-2
     Stats Work
@@ -222,7 +222,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: string, string
-         Table: default.merge_dynamic_part
+         Table: default.merge_dynamic_part_n2
 
   Stage: Stage-3
     Map Reduce
@@ -234,7 +234,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.merge_dynamic_part
+             name: default.merge_dynamic_part_n2
 
   Stage: Stage-5
     Map Reduce
@@ -246,7 +246,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.merge_dynamic_part
+             name: default.merge_dynamic_part_n2
 
   Stage: Stage-6
     Move Operator
@@ -254,58 +254,58 @@ STAGE PLANS:
          hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds>='2008-04-08'
+PREHOOK: query: insert overwrite table merge_dynamic_part_n2 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n2 where ds>='2008-04-08'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_merge_dp
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-09/hr=12
-PREHOOK: Output: default@merge_dynamic_part
-POSTHOOK: query: insert overwrite table merge_dynamic_part partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp where ds>='2008-04-08'
+PREHOOK: Input: default@srcpart_merge_dp_n2
+PREHOOK: Input: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=12
+PREHOOK: Output: default@merge_dynamic_part_n2
+POSTHOOK: query: insert overwrite table merge_dynamic_part_n2 partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n2 where ds>='2008-04-08'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_merge_dp
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select ds, hr, count(1) from merge_dynamic_part where ds>='2008-04-08' group by ds, hr
+POSTHOOK: Input: default@srcpart_merge_dp_n2
+POSTHOOK: Input: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart_merge_dp_n2@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart_merge_dp_n2@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@merge_dynamic_part_n2@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@merge_dynamic_part_n2@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@merge_dynamic_part_n2@ds=2008-04-09/hr=11
+POSTHOOK: Output: default@merge_dynamic_part_n2@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: merge_dynamic_part_n2 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp_n2)srcpart_merge_dp_n2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n2 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp_n2)srcpart_merge_dp_n2.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n2 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_merge_dp_n2)srcpart_merge_dp_n2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n2 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart_merge_dp_n2)srcpart_merge_dp_n2.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n2 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_merge_dp_n2)srcpart_merge_dp_n2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n2 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart_merge_dp_n2)srcpart_merge_dp_n2.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n2 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_merge_dp_n2)srcpart_merge_dp_n2.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_dynamic_part_n2 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart_merge_dp_n2)srcpart_merge_dp_n2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select ds, hr, count(1) from merge_dynamic_part_n2 where ds>='2008-04-08' group by ds, hr
 PREHOOK: type: QUERY
-PREHOOK: Input: default@merge_dynamic_part
-PREHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
-PREHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=12
-PREHOOK: Input: default@merge_dynamic_part@ds=2008-04-09/hr=11
-PREHOOK: Input: default@merge_dynamic_part@ds=2008-04-09/hr=12
+PREHOOK: Input: default@merge_dynamic_part_n2
+PREHOOK: Input: default@merge_dynamic_part_n2@ds=2008-04-08/hr=11
+PREHOOK: Input: default@merge_dynamic_part_n2@ds=2008-04-08/hr=12
+PREHOOK: Input: default@merge_dynamic_part_n2@ds=2008-04-09/hr=11
+PREHOOK: Input: default@merge_dynamic_part_n2@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
-POSTHOOK: query: select ds, hr, count(1) from merge_dynamic_part where ds>='2008-04-08' group by ds, hr
+POSTHOOK: query: select ds, hr, count(1) from merge_dynamic_part_n2 where ds>='2008-04-08' group by ds, hr
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@merge_dynamic_part
-POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@merge_dynamic_part@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@merge_dynamic_part_n2
+POSTHOOK: Input: default@merge_dynamic_part_n2@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@merge_dynamic_part_n2@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@merge_dynamic_part_n2@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@merge_dynamic_part_n2@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 2008-04-08 11 500
 2008-04-08 12 500
 2008-04-09 11 1000
 2008-04-09 12 1000
-PREHOOK: query: show table extended like `merge_dynamic_part`
+PREHOOK: query: show table extended like `merge_dynamic_part_n2`
 PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like `merge_dynamic_part`
+POSTHOOK: query: show table extended like `merge_dynamic_part_n2`
 POSTHOOK: type: SHOW_TABLESTATUS
-tableName:merge_dynamic_part
+tableName:merge_dynamic_part_n2
 #### A masked pattern was here ####
 inputformat:org.apache.hadoop.mapred.TextInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
index 6288584c51..4ede977f9f 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
@@ -1,135 +1,135 @@
-PREHOOK: query: create table srcpart_merge_dp like srcpart
+PREHOOK: query: create table srcpart_merge_dp_n4 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: create table srcpart_merge_dp like srcpart
+PREHOOK: Output: default@srcpart_merge_dp_n4
+POSTHOOK: query: create table srcpart_merge_dp_n4 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcpart_merge_dp
-PREHOOK: query: create table srcpart_merge_dp_rc like srcpart
+POSTHOOK: Output: default@srcpart_merge_dp_n4
+PREHOOK: query: create table srcpart_merge_dp_rc_n1 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcpart_merge_dp_rc
-POSTHOOK: query: create table srcpart_merge_dp_rc like srcpart
+PREHOOK: Output: default@srcpart_merge_dp_rc_n1
+POSTHOOK: query: create table srcpart_merge_dp_rc_n1 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcpart_merge_dp_rc
-PREHOOK: query: alter table srcpart_merge_dp_rc set fileformat RCFILE
+POSTHOOK: Output: default@srcpart_merge_dp_rc_n1
+PREHOOK: query: alter table srcpart_merge_dp_rc_n1 set fileformat RCFILE
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@srcpart_merge_dp_rc
-PREHOOK: Output: default@srcpart_merge_dp_rc
-POSTHOOK: query: alter table srcpart_merge_dp_rc set fileformat RCFILE
+PREHOOK: Input: default@srcpart_merge_dp_rc_n1
+PREHOOK: Output: default@srcpart_merge_dp_rc_n1
+POSTHOOK: query: alter table srcpart_merge_dp_rc_n1 set fileformat RCFILE
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@srcpart_merge_dp_rc
-POSTHOOK: Output: default@srcpart_merge_dp_rc
-PREHOOK: query: create table merge_dynamic_part like srcpart
+POSTHOOK: Input: default@srcpart_merge_dp_rc_n1
+POSTHOOK: Output: default@srcpart_merge_dp_rc_n1
+PREHOOK: query: create table merge_dynamic_part_n3 like srcpart
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@merge_dynamic_part
-POSTHOOK: query: create table merge_dynamic_part like srcpart
+PREHOOK: Output: default@merge_dynamic_part_n3
+POSTHOOK: query: create table merge_dynamic_part_n3 like srcpart
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@merge_dynamic_part
-PREHOOK: query: alter table merge_dynamic_part set fileformat RCFILE
+POSTHOOK: Output: default@merge_dynamic_part_n3
+PREHOOK: query: alter table merge_dynamic_part_n3 set fileformat RCFILE
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@merge_dynamic_part
-PREHOOK: Output: default@merge_dynamic_part
-POSTHOOK: query: alter table merge_dynamic_part set fileformat RCFILE
+PREHOOK: Input: default@merge_dynamic_part_n3
+PREHOOK: Output: default@merge_dynamic_part_n3
+POSTHOOK: query: alter table merge_dynamic_part_n3 set fileformat RCFILE
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@merge_dynamic_part
-POSTHOOK: Output: default@merge_dynamic_part
-PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Input: default@merge_dynamic_part_n3
+POSTHOOK: Output: default@merge_dynamic_part_n3
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n4
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n4
+POSTHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+POSTHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11)
+PREHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=11)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n4
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n4
+POSTHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+POSTHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12)
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12)
+PREHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n4 partition(ds='2008-04-08', hr=12)
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: query: insert overwrite table srcpart_merge_dp_rc partition (ds = '2008-04-08', hr)
-select key, value, hr from srcpart_merge_dp where ds = '2008-04-08'
+POSTHOOK: Output: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+PREHOOK: query: insert overwrite table srcpart_merge_dp_rc_n1 partition (ds = '2008-04-08', hr)
+select key, value, hr from srcpart_merge_dp_n4 where ds = '2008-04-08'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_merge_dp
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-PREHOOK: Output: default@srcpart_merge_dp_rc@ds=2008-04-08
-POSTHOOK: query: insert overwrite table srcpart_merge_dp_rc partition (ds = '2008-04-08', hr)
-select key, value, hr from srcpart_merge_dp where ds = '2008-04-08'
+PREHOOK: Input: default@srcpart_merge_dp_n4
+PREHOOK: Input: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+PREHOOK: Output: default@srcpart_merge_dp_rc_n1@ds=2008-04-08
+POSTHOOK: query: insert overwrite table srcpart_merge_dp_rc_n1 partition (ds = '2008-04-08', hr)
+select key, value, hr from srcpart_merge_dp_n4 where ds = '2008-04-08'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_merge_dp
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_merge_dp@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@srcpart_merge_dp_rc@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@srcpart_merge_dp_rc@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: srcpart_merge_dp_rc PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_merge_dp_rc PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_merge_dp_rc PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_merge_dp_rc PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart_merge_dp)srcpart_merge_dp.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Input: default@srcpart_merge_dp_n4
+POSTHOOK: Input: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart_merge_dp_n4@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@srcpart_merge_dp_rc_n1@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@srcpart_merge_dp_rc_n1@ds=2008-04-08/hr=12
+POSTHOOK: Lineage: srcpart_merge_dp_rc_n1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_merge_dp_n4)srcpart_merge_dp_n4.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpart_merge_dp_rc_n1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart_merge_dp_n4)srcpart_merge_dp_n4.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpart_merge_dp_rc_n1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_merge_dp_n4)srcpart_merge_dp_n4.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpart_merge_dp_rc_n1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart_merge_dp_n4)srcpart_merge_dp_n4.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc where ds = '2008-04-08'
+insert overwrite table merge_dynamic_part_n3 partition (ds = '2008-04-08', hr)
+select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc_n1 where ds = '2008-04-08'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table merge_dynamic_part partition (ds = 
'2008-04-08', hr) -select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc where ds = '2008-04-08' +insert overwrite table merge_dynamic_part_n3 partition (ds = '2008-04-08', hr) +select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc_n1 where ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -146,7 +146,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: srcpart_merge_dp_rc + alias: srcpart_merge_dp_rc_n1 Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 2.0D) = 0.0D), 'a1', 'b1') (type: string) @@ -159,7 +159,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.merge_dynamic_part + name: default.merge_dynamic_part_n3 Select Operator expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), _col2 (type: string) outputColumnNames: key, value, ds, hr @@ -215,7 +215,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.merge_dynamic_part + name: default.merge_dynamic_part_n3 Stage: Stage-2 Stats Work @@ -223,7 +223,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.merge_dynamic_part + Table: default.merge_dynamic_part_n3 Stage: Stage-3 Merge File Operator @@ -245,39 +245,39 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table merge_dynamic_part partition (ds = '2008-04-08', hr) -select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc where ds = '2008-04-08' +PREHOOK: query: insert overwrite table merge_dynamic_part_n3 partition (ds = '2008-04-08', hr) +select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc_n1 where ds = '2008-04-08' PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart_merge_dp_rc -PREHOOK: Input: default@srcpart_merge_dp_rc@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart_merge_dp_rc@ds=2008-04-08/hr=12 -PREHOOK: Output: default@merge_dynamic_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table merge_dynamic_part partition (ds = '2008-04-08', hr) -select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc where ds = '2008-04-08' +PREHOOK: Input: default@srcpart_merge_dp_rc_n1 +PREHOOK: Input: default@srcpart_merge_dp_rc_n1@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart_merge_dp_rc_n1@ds=2008-04-08/hr=12 +PREHOOK: Output: default@merge_dynamic_part_n3@ds=2008-04-08 +POSTHOOK: query: insert overwrite table merge_dynamic_part_n3 partition (ds = '2008-04-08', hr) +select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc_n1 where ds = '2008-04-08' POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart_merge_dp_rc -POSTHOOK: Input: default@srcpart_merge_dp_rc@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart_merge_dp_rc@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=a1 -POSTHOOK: Output: default@merge_dynamic_part@ds=2008-04-08/hr=b1 -POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE 
[(srcpart_merge_dp_rc)srcpart_merge_dp_rc.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart_merge_dp_rc)srcpart_merge_dp_rc.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart_merge_dp_rc)srcpart_merge_dp_rc.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: merge_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart_merge_dp_rc)srcpart_merge_dp_rc.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions merge_dynamic_part +POSTHOOK: Input: default@srcpart_merge_dp_rc_n1 +POSTHOOK: Input: default@srcpart_merge_dp_rc_n1@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart_merge_dp_rc_n1@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@merge_dynamic_part_n3@ds=2008-04-08/hr=a1 +POSTHOOK: Output: default@merge_dynamic_part_n3@ds=2008-04-08/hr=b1 +POSTHOOK: Lineage: merge_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart_merge_dp_rc_n1)srcpart_merge_dp_rc_n1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: merge_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart_merge_dp_rc_n1)srcpart_merge_dp_rc_n1.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: merge_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart_merge_dp_rc_n1)srcpart_merge_dp_rc_n1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: merge_dynamic_part_n3 PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart_merge_dp_rc_n1)srcpart_merge_dp_rc_n1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions merge_dynamic_part_n3 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@merge_dynamic_part -POSTHOOK: query: show partitions merge_dynamic_part +PREHOOK: Input: default@merge_dynamic_part_n3 +POSTHOOK: query: show partitions merge_dynamic_part_n3 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@merge_dynamic_part +POSTHOOK: Input: default@merge_dynamic_part_n3 ds=2008-04-08/hr=a1 ds=2008-04-08/hr=b1 -PREHOOK: query: select count(*) from merge_dynamic_part +PREHOOK: query: select count(*) from merge_dynamic_part_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@merge_dynamic_part +PREHOOK: Input: default@merge_dynamic_part_n3 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from merge_dynamic_part +POSTHOOK: query: select count(*) from merge_dynamic_part_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@merge_dynamic_part +POSTHOOK: Input: default@merge_dynamic_part_n3 #### A masked pattern was here #### 1000 diff --git a/ql/src/test/results/clientpositive/merge_empty.q.out b/ql/src/test/results/clientpositive/merge_empty.q.out index c13cbf4d98..5d851dc181 100644 --- a/ql/src/test/results/clientpositive/merge_empty.q.out +++ b/ql/src/test/results/clientpositive/merge_empty.q.out @@ -1,45 +1,45 @@ -PREHOOK: query: create table dummy (a string) +PREHOOK: query: create table dummy_n3 (a string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dummy -POSTHOOK: query: create table dummy (a string) +PREHOOK: Output: default@dummy_n3 +POSTHOOK: query: create table dummy_n3 (a string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dummy +POSTHOOK: Output: default@dummy_n3 #### A masked pattern was here #### PREHOOK: 
type: QUERY -PREHOOK: Input: default@dummy +PREHOOK: Input: default@dummy_n3 PREHOOK: Input: default@src #### A masked pattern was here #### POSTHOOK: type: QUERY -POSTHOOK: Input: default@dummy +POSTHOOK: Input: default@dummy_n3 POSTHOOK: Input: default@src #### A masked pattern was here #### -PREHOOK: query: create table foo (a bigint, b string) clustered by (a) into 256 buckets +PREHOOK: query: create table foo_n6 (a bigint, b string) clustered by (a) into 256 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@foo -POSTHOOK: query: create table foo (a bigint, b string) clustered by (a) into 256 buckets +PREHOOK: Output: default@foo_n6 +POSTHOOK: query: create table foo_n6 (a bigint, b string) clustered by (a) into 256 buckets POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@foo -PREHOOK: query: create table bar (a bigint, b string) +POSTHOOK: Output: default@foo_n6 +PREHOOK: query: create table bar_n1 (a bigint, b string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bar -POSTHOOK: query: create table bar (a bigint, b string) +PREHOOK: Output: default@bar_n1 +POSTHOOK: query: create table bar_n1 (a bigint, b string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bar -PREHOOK: query: insert overwrite table foo select * from bar +POSTHOOK: Output: default@bar_n1 +PREHOOK: query: insert overwrite table foo_n6 select * from bar_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@bar -PREHOOK: Output: default@foo -POSTHOOK: query: insert overwrite table foo select * from bar +PREHOOK: Input: default@bar_n1 +PREHOOK: Output: default@foo_n6 +POSTHOOK: query: insert overwrite table foo_n6 select * from bar_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bar -POSTHOOK: Output: default@foo -POSTHOOK: Lineage: foo.a SIMPLE [(bar)bar.FieldSchema(name:a, type:bigint, comment:null), ] -POSTHOOK: Lineage: foo.b SIMPLE [(bar)bar.FieldSchema(name:b, type:string, comment:null), ] +POSTHOOK: Input: default@bar_n1 +POSTHOOK: Output: default@foo_n6 +POSTHOOK: Lineage: foo_n6.a SIMPLE [(bar_n1)bar_n1.FieldSchema(name:a, type:bigint, comment:null), ] +POSTHOOK: Lineage: foo_n6.b SIMPLE [(bar_n1)bar_n1.FieldSchema(name:b, type:string, comment:null), ] Found 256 items #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/mergejoin.q.out b/ql/src/test/results/clientpositive/mergejoin.q.out index 5d3474038e..22d826b44b 100644 --- a/ql/src/test/results/clientpositive/mergejoin.q.out +++ b/ql/src/test/results/clientpositive/mergejoin.q.out @@ -127,123 +127,123 @@ POSTHOOK: Input: default@src1 66 val_66 66 val_66 98 val_98 98 val_98 98 val_98 98 val_98 -PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_n16(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@srcbucket_mapjoin_n16 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_n16(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: 
CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin -PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE +POSTHOOK: Output: default@srcbucket_mapjoin_n16 +PREHOOK: query: CREATE TABLE tab_part_n10 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tab_part -POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE +PREHOOK: Output: default@tab_part_n10 +POSTHOOK: query: CREATE TABLE tab_part_n10 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_part -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: Output: default@tab_part_n10 +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n17 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@srcbucket_mapjoin_part_n17 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n17 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n17 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n16 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_n16 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n16 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin -POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_n16 +POSTHOOK: Output: default@srcbucket_mapjoin_n16@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n16 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_n16@ds=2008-04-08 +POSTHOOK: query: load 
data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n16 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_n16@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n17 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n17 +POSTHOOK: Output: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: Output: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: query: load data local inpath 
'../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: Output: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n17 partition(ds='2008-04-08') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part +POSTHOOK: Output: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +PREHOOK: query: insert overwrite table tab_part_n10 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n17 PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part -PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -PREHOOK: Output: default@tab_part@ds=2008-04-08 -POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin_part +PREHOOK: Input: default@srcbucket_mapjoin_part_n17 +PREHOOK: Input: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +PREHOOK: Output: default@tab_part_n10@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab_part_n10 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part_n17 POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part -POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 -POSTHOOK: Output: default@tab_part@ds=2008-04-08 -POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE +POSTHOOK: Input: default@srcbucket_mapjoin_part_n17 +POSTHOOK: Input: default@srcbucket_mapjoin_part_n17@ds=2008-04-08 +POSTHOOK: Output: default@tab_part_n10@ds=2008-04-08 +POSTHOOK: Lineage: tab_part_n10 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part_n17)srcbucket_mapjoin_part_n17.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part_n10 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part_n17)srcbucket_mapjoin_part_n17.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE tab_n9(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tab -POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE +PREHOOK: Output: default@tab_n9 +POSTHOOK: query: CREATE TABLE tab_n9(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab -PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08') -select key,value from srcbucket_mapjoin +POSTHOOK: Output: default@tab_n9 +PREHOOK: query: insert overwrite table tab_n9 partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_n16 PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin 
-PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
-PREHOOK: Output: default@tab@ds=2008-04-08
-POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_n16
+PREHOOK: Input: default@srcbucket_mapjoin_n16@ds=2008-04-08
+PREHOOK: Output: default@tab_n9@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_n9 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n16
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin
-POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
-POSTHOOK: Output: default@tab@ds=2008-04-08
-POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Input: default@srcbucket_mapjoin_n16
+POSTHOOK: Input: default@srcbucket_mapjoin_n16@ds=2008-04-08
+POSTHOOK: Output: default@tab_n9@ds=2008-04-08
+POSTHOOK: Lineage: tab_n9 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n16)srcbucket_mapjoin_n16.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_n9 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n16)srcbucket_mapjoin_n16.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain vectorization detail
 select count(*)
-from tab a join tab_part b on a.key = b.key
+from tab_n9 a join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
 select count(*)
-from tab a join tab_part b on a.key = b.key
+from tab_n9 a join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -373,19 +373,19 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
+PREHOOK: query: select * from tab_n9 a join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
+POSTHOOK: query: select * from tab_n9 a join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
@@ -867,19 +867,19 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
-PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
+PREHOOK: query: select * from tab_n9 a join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
+POSTHOOK: query: select * from tab_n9 a join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
@@ -1363,11 +1363,11 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 PREHOOK: query: explain vectorization detail
 select count(*)
-from tab a left outer join tab_part b on a.key = b.key
+from tab_n9 a left outer join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
 select count(*)
-from tab a left outer join tab_part b on a.key = b.key
+from tab_n9 a left outer join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -1490,29 +1490,29 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: select count(*)
-from tab a left outer join tab_part b on a.key = b.key
+from tab_n9 a left outer join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select count(*)
-from tab a left outer join tab_part b on a.key = b.key
+from tab_n9 a left outer join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 480
 PREHOOK: query: explain vectorization detail
 select count (*)
-from tab a right outer join tab_part b on a.key = b.key
+from tab_n9 a right outer join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
 select count (*)
-from tab a right outer join tab_part b on a.key = b.key
+from tab_n9 a right outer join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -1635,29 +1635,29 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: select count (*)
-from tab a right outer join tab_part b on a.key = b.key
+from tab_n9 a right outer join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select count (*)
-from tab a right outer join tab_part b on a.key = b.key
+from tab_n9 a right outer join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 738
 PREHOOK: query: explain vectorization detail
 select count(*)
-from tab a full outer join tab_part b on a.key = b.key
+from tab_n9 a full outer join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
 select count(*)
-from tab a full outer join tab_part b on a.key = b.key
+from tab_n9 a full outer join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -1780,27 +1780,27 @@ STAGE PLANS:
         ListSink
 
 PREHOOK: query: select count(*)
-from tab a full outer join tab_part b on a.key = b.key
+from tab_n9 a full outer join tab_part_n10 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select count(*)
-from tab a full outer join tab_part b on a.key = b.key
+from tab_n9 a full outer join tab_part_n10 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 738
 PREHOOK: query: explain vectorization detail
-select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -1980,28 +1980,28 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+PREHOOK: query: select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+POSTHOOK: query: select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 40
 PREHOOK: query: explain vectorization detail
-select count(*) from tab a join tab_part b on a.value = b.value
+select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select count(*) from tab a join tab_part b on a.value = b.value
+select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -2131,32 +2131,32 @@ STAGE PLANS:
       Processor Tree:
        ListSink
 
-PREHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value
+PREHOOK: query: select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value
+POSTHOOK: query: select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 480
 PREHOOK: query: explain vectorization detail
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+select count(*) from (select s1.key as key, s1.value as value from tab_n9 s1 join tab_n9 s3 on s1.key=s3.key
 UNION  ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key)
+select s2.key as key, s2.value as value from tab_n9 s2
+) a join tab_part_n10 b on (a.key = b.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+select count(*) from (select s1.key as key, s1.value as value from tab_n9 s1 join tab_n9 s3 on s1.key=s3.key
 UNION  ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key)
+select s2.key as key, s2.value as value from tab_n9 s2
+) a join tab_part_n10 b on (a.key = b.key)
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -2356,10 +2356,10 @@ STAGE PLANS:
        ListSink
 
 PREHOOK: query: explain vectorization detail
-select count(*) from tab a join tab_part b on a.value = b.value
+select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select count(*) from tab a join tab_part b on a.value = b.value
+select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -2489,26 +2489,26 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value
+PREHOOK: query: select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value
+POSTHOOK: query: select count(*) from tab_n9 a join tab_part_n10 b on a.value = b.value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 480
 PREHOOK: query: explain vectorization detail
-select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -2688,34 +2688,34 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+PREHOOK: query: select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+POSTHOOK: query: select count(*) from tab_n9 a join tab_part_n10 b on a.key = b.key join src1 c on a.value = c.value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 40
 PREHOOK: query: explain vectorization detail
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+select count(*) from (select s1.key as key, s1.value as value from tab_n9 s1 join tab_n9 s3 on s1.key=s3.key
 UNION  ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key)
+select s2.key as key, s2.value as value from tab_n9 s2
+) a join tab_part_n10 b on (a.key = b.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+select count(*) from (select s1.key as key, s1.value as value from tab_n9 s1 join tab_n9 s3 on s1.key=s3.key
 UNION  ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key)
+select s2.key as key, s2.value as value from tab_n9 s2
+) a join tab_part_n10 b on (a.key = b.key)
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -2917,19 +2917,19 @@ STAGE PLANS:
 PREHOOK: query: explain vectorization detail
 select count(*) from (select rt1.id from
-(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+(select t1.key as id, t1.value as od from tab_n9 t1 order by id, od) rt1) vt1
 join
 (select rt2.id from
-(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+(select t2.key as id, t2.value as od from tab_part_n10 t2 order by id, od) rt2) vt2
 where vt1.id=vt2.id
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
 select count(*) from (select rt1.id from
-(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+(select t1.key as id, t1.value as od from tab_n9 t1 order by id, od) rt1) vt1
 join
 (select rt2.id from
-(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+(select t2.key as id, t2.value as od from tab_part_n10 t2 order by id, od) rt2) vt2
 where vt1.id=vt2.id
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -3178,45 +3178,45 @@ STAGE PLANS:
 PREHOOK: query: select count(*) from (select rt1.id from
-(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+(select t1.key as id, t1.value as od from tab_n9 t1 order by id, od) rt1) vt1
 join
 (select rt2.id from
-(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+(select t2.key as id, t2.value as od from tab_part_n10 t2 order by id, od) rt2) vt2
 where vt1.id=vt2.id
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select count(*) from (select rt1.id from
-(select t1.key as id, t1.value as od from tab t1 order by id, od) rt1) vt1
+(select t1.key as id, t1.value as od from tab_n9 t1 order by id, od) rt1) vt1
 join
 (select rt2.id from
-(select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
+(select t2.key as id, t2.value as od from tab_part_n10 t2 order by id, od) rt2) vt2
 where vt1.id=vt2.id
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 480
 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: query: select * from (select * from tab_n9 where tab_n9.key = 0)a full outer join (select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: query: select * from (select * from tab_n9 where tab_n9.key = 0)a full outer join (select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 0	val_0	2008-04-08	NULL	NULL	NULL
 0	val_0	2008-04-08	NULL	NULL	NULL
@@ -3224,56 +3224,56 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08
 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: query: select * from (select * from tab_n9 where tab_n9.key = 0)a right outer join (select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
-POSTHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: query: select * from (select * from tab_n9 where tab_n9.key = 0)a right outer join (select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08
 Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b join tab_part_n10 c on a.key = b.key and b.key = c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_part_n10
#### A masked pattern was here ####
 POSTHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b join tab_part_n10 c on a.key = b.key and b.key = c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_part_n10
#### A masked pattern was here ####
 Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b on a.key = b.key join tab_part c on b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key join tab_part_n10 c on b.key = c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b on a.key = b.key join tab_part c on b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key join tab_part_n10 c on b.key = c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
@@ -3282,24 +3282,24 @@ NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b full outer join tab_part_n10 c on a.key = b.key and b.key = c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b full outer join tab_part_n10 c on a.key = b.key and b.key = c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
 0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
@@ -3810,22 +3810,22 @@ NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
 Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b on a.key = b.key full outer join tab_part c on b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key full outer join tab_part_n10 c on b.key = c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b on a.key = b.key full outer join tab_part c on b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b on a.key = b.key full outer join tab_part_n10 c on b.key = c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
@@ -4329,45 +4329,45 @@ NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
 Warning: Shuffle Join JOIN[9][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b join tab_part_n10 c on a.key = b.key and b.key = c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b join tab_part_n10 c on a.key = b.key and b.key = c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 Warning: Shuffle Join JOIN[9][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b full outer join tab_part_n10 c on a.key = b.key and b.key = c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_n9
+PREHOOK: Input: default@tab_n9@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n10
+PREHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 POSTHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n9 where tab_n9.key = 0)a
 join
-(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n10 where tab_part_n10.key = 98)b full outer join tab_part_n10 c on a.key = b.key and b.key = c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_n9
+POSTHOOK: Input: default@tab_n9@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n10
+POSTHOOK: Input: default@tab_part_n10@ds=2008-04-08
#### A masked pattern was here ####
 0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
 0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
diff --git a/ql/src/test/results/clientpositive/mergejoins.q.out b/ql/src/test/results/clientpositive/mergejoins.q.out
index 1e05fe00c9..b34b388273 100644
--- a/ql/src/test/results/clientpositive/mergejoins.q.out
+++ b/ql/src/test/results/clientpositive/mergejoins.q.out
@@ -1,46 +1,46 @@
-PREHOOK: query: create table a (val1 int, val2 int)
+PREHOOK: query: create table a_n0 (val1 int, val2 int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@a
-POSTHOOK: query: create table a (val1 int, val2 int)
+PREHOOK: Output: default@a_n0
+POSTHOOK: query: create table a_n0 (val1 int, val2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a
-PREHOOK: query: create table b (val1 int, val2 int)
+POSTHOOK: Output: default@a_n0
+PREHOOK: query: create table b_n0 (val1 int, val2 int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@b
-POSTHOOK: query: create table b (val1 int, val2 int)
+PREHOOK: Output: default@b_n0
+POSTHOOK: query: create table b_n0 (val1 int, val2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@b
-PREHOOK: query: create table c (val1 int, val2 int)
+POSTHOOK: Output: default@b_n0
+PREHOOK: query: create table c_n0 (val1 int, val2 int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@c
-POSTHOOK: query: create table c (val1 int, val2 int)
+PREHOOK: Output: default@c_n0
+POSTHOOK: query: create table c_n0 (val1 int, val2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@c
-PREHOOK: query: create table d (val1 int, val2 int)
+POSTHOOK: Output: default@c_n0
+PREHOOK: query: create table d_n0 (val1 int, val2 int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@d
-POSTHOOK: query: create table d (val1 int, val2 int)
+PREHOOK: Output: default@d_n0
+POSTHOOK: query: create table d_n0 (val1 int, val2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@d
-PREHOOK: query: create table e (val1 int, val2 int)
+POSTHOOK: Output: default@d_n0
+PREHOOK: query: create table e_n0 (val1 int, val2 int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@e
-POSTHOOK: query: create table e (val1 int, val2 int)
+PREHOOK: Output: default@e_n0
+POSTHOOK: query: create table e_n0 (val1 int, val2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@e
-PREHOOK: query: explain select * from a join b on a.val1=b.val1 join c on a.val1=c.val1 join d on a.val1=d.val1 join e on a.val2=e.val2
+POSTHOOK: Output: default@e_n0
+PREHOOK: query: explain select * from a_n0 join b_n0 on a_n0.val1=b_n0.val1 join c_n0 on a_n0.val1=c_n0.val1 join d_n0 on a_n0.val1=d_n0.val1 join e_n0 on a_n0.val2=e_n0.val2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from a join b on a.val1=b.val1 join c on a.val1=c.val1 join d on a.val1=d.val1 join e on a.val2=e.val2
+POSTHOOK: query: explain select * from a_n0 join b_n0 on a_n0.val1=b_n0.val1 join c_n0 on a_n0.val1=c_n0.val1 join d_n0 on a_n0.val1=d_n0.val1 join e_n0 on a_n0.val2=e_n0.val2
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -52,7 +52,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: a
+            alias: a_n0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
              predicate: (val1 is not null and val2 is not null) (type: boolean)
@@ -68,7 +68,7 @@ STAGE PLANS:
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                value expressions: _col1 (type: int)
          TableScan
-            alias: b
+            alias: b_n0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
              predicate: val1 is not null (type: boolean)
@@ -84,7 +84,7 @@ STAGE PLANS:
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                value expressions: _col1 (type: int)
          TableScan
-            alias: c
+            alias: c_n0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
              predicate: val1 is not null (type: boolean)
@@ -100,7 +100,7 @@ STAGE PLANS:
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                value expressions: _col1 (type: int)
          TableScan
-            alias: d
+            alias: d_n0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
              predicate: val1 is not null (type: boolean)
@@ -146,7 +146,7 @@ STAGE PLANS:
                Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                value expressions: _col0 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int)
          TableScan
-            alias: e
+            alias: e_n0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
              predicate: val2 is not null (type: boolean)
@@ -184,9 +184,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10
+PREHOOK: query: explain select * from src a_n0 join src b_n0 on a_n0.key=b_n0.key left outer join src c_n0 on b_n0.key=c_n0.key and b_n0.key<10
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10
+POSTHOOK: query: explain select * from src a_n0 join src b_n0 on a_n0.key=b_n0.key left outer join src c_n0 on b_n0.key=c_n0.key and b_n0.key<10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -197,7 +197,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: a
+            alias: a_n0
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -213,7 +213,7 @@ STAGE PLANS:
                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col1 (type: string)
          TableScan
-            alias: b
+            alias: b_n0
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -229,7 +229,7 @@ STAGE PLANS:
                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col1 (type: string)
          TableScan
-            alias: c
+            alias: c_n0
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
diff --git a/ql/src/test/results/clientpositive/mergejoins_mixed.q.out b/ql/src/test/results/clientpositive/mergejoins_mixed.q.out
index a5cc23ae9f..6a0088bf11 100644
--- a/ql/src/test/results/clientpositive/mergejoins_mixed.q.out
+++ b/ql/src/test/results/clientpositive/mergejoins_mixed.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table a (key string, value string)
+PREHOOK: query: create table a_n4 (key string, value string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@a
-POSTHOOK: query: create table a (key string, value string)
+PREHOOK: Output: default@a_n4
+POSTHOOK: query: create table a_n4 (key string, value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@a
+POSTHOOK: Output: default@a_n4
 PREHOOK: query: explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key)
+select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.key=c.key) left outer join a_n4 d on (a_n4.key=d.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key)
+select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.key=c.key) left outer join a_n4 d on (a_n4.key=d.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -21,7 +21,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: a
+            alias: a_n4
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -112,10 +112,10 @@ STAGE PLANS:
        ListSink
 
 PREHOOK: query: explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) right outer join a d on (a.key=d.key)
+select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.key=c.key) right outer join a_n4 d on (a_n4.key=d.key)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) right outer join a d on (a.key=d.key)
+select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on
(b.key=c.key) right outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -126,7 +126,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -214,10 +214,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.key=c.key) left outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.key=c.key) left outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -228,7 +228,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -313,10 +313,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.key=c.key) right outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.key=c.key) right outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.key=c.key) right outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.key=c.key) right outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -327,7 +327,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -412,10 +412,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.value=c.key) left outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.value=c.key) left outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -428,7 +428,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -564,10 +564,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.value=c.key) right outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) right 
outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.value=c.key) right outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -580,7 +580,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -713,10 +713,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) full outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) full outer join a_n4 c on (b.value=c.key) full outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) full outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) full outer join a_n4 c on (b.value=c.key) full outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -729,7 +729,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -862,10 +862,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.value=c.key) right outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.value=c.key) right outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -878,7 +878,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -1011,10 +1011,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.value=c.key) full outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.value=c.key) full outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1027,7 +1027,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -1160,10 +1160,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer 
join a_n4 c on (b.value=c.key) left outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.value=c.key) left outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1176,7 +1176,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -1309,10 +1309,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.value=c.key) full outer join a_n4 d on (a_n4.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) right outer join a c on (b.value=c.key) full outer join a d on (a.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) right outer join a_n4 c on (b.value=c.key) full outer join a_n4 d on (a_n4.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1325,7 +1325,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -1458,10 +1458,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain -select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (c.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.value=c.key) left outer join a_n4 d on (c.key=d.key) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (c.key=d.key) +select * from a_n4 join a_n4 b on (a_n4.key=b.key) left outer join a_n4 c on (b.value=c.key) left outer join a_n4 d on (c.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1473,7 +1473,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a + alias: a_n4 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: key is not null (type: boolean) diff --git a/ql/src/test/results/clientpositive/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/metadata_only_queries.q.out index cf3f7c0e4b..b8abe8f7b9 100644 --- a/ql/src/test/results/clientpositive/metadata_only_queries.q.out +++ b/ql/src/test/results/clientpositive/metadata_only_queries.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n12( t tinyint, si smallint, i int, @@ -14,8 +14,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n12 +POSTHOOK: query: create table over10k_n12( t tinyint, si smallint, i int, @@ -31,15 +31,15 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath 
'../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n12 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n12 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n12 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n12 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k +POSTHOOK: Output: default@over10k_n12 PREHOOK: query: create table stats_tbl( t tinyint, si smallint, @@ -100,82 +100,82 @@ POSTHOOK: query: create table stats_tbl_part( POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@stats_tbl_part -PREHOOK: query: insert overwrite table stats_tbl select * from over10k +PREHOOK: query: insert overwrite table stats_tbl select * from over10k_n12 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n12 PREHOOK: Output: default@stats_tbl -POSTHOOK: query: insert overwrite table stats_tbl select * from over10k +POSTHOOK: query: insert overwrite table stats_tbl select * from over10k_n12 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n12 POSTHOOK: Output: default@stats_tbl -POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] -PREHOOK: query: insert into table stats_tbl_part partition (dt='2010') select * from over10k where t>0 and t<30 +POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: 
stats_tbl.s SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: insert into table stats_tbl_part partition (dt='2010') select * from over10k_n12 where t>0 and t<30 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n12 PREHOOK: Output: default@stats_tbl_part@dt=2010 -POSTHOOK: query: insert into table stats_tbl_part partition (dt='2010') select * from over10k where t>0 and t<30 +POSTHOOK: query: insert into table stats_tbl_part partition (dt='2010') select * from over10k_n12 where t>0 and t<30 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n12 POSTHOOK: Output: default@stats_tbl_part@dt=2010 -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] -PREHOOK: query: insert into table stats_tbl_part partition (dt='2011') select * from over10k where t>30 and t<60 +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part 
PARTITION(dt=2010).i SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: insert into table stats_tbl_part partition (dt='2011') select * from over10k_n12 where t>30 and t<60 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n12 PREHOOK: Output: default@stats_tbl_part@dt=2011 -POSTHOOK: query: insert into table stats_tbl_part partition (dt='2011') select * from over10k where t>30 and t<60 +POSTHOOK: query: insert into table stats_tbl_part partition (dt='2011') select * from over10k_n12 where t>30 and t<60 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n12 POSTHOOK: Output: default@stats_tbl_part@dt=2011 -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] -PREHOOK: query: insert into table stats_tbl_part partition (dt='2012') select * from over10k where t>60 +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:dec, 
type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: insert into table stats_tbl_part partition (dt='2012') select * from over10k_n12 where t>60 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n12 PREHOOK: Output: default@stats_tbl_part@dt=2012 -POSTHOOK: query: insert into table stats_tbl_part partition (dt='2012') select * from over10k where t>60 +POSTHOOK: query: insert into table stats_tbl_part partition (dt='2012') select * from over10k_n12 where t>60 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n12 POSTHOOK: Output: default@stats_tbl_part@dt=2012 -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part 
PARTITION(dt=2012).dec SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k_n12)over10k_n12.FieldSchema(name:ts, type:timestamp, comment:null), ] PREHOOK: query: explain select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out b/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out index 244afcf0a2..6be091107d 100644 --- a/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out +++ b/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n23( t tinyint, si smallint, i int, @@ -14,8 +14,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n23 +POSTHOOK: query: create table over10k_n23( t tinyint, si smallint, i int, @@ -31,16 +31,16 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n23 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n23 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n23 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n23 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k -PREHOOK: query: create table stats_tbl_part( +POSTHOOK: Output: default@over10k_n23 +PREHOOK: query: create table stats_tbl_part_n0( t tinyint, si smallint, i int, @@ -54,8 +54,8 @@ PREHOOK: query: create table stats_tbl_part( bin binary) partitioned by (dt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@stats_tbl_part -POSTHOOK: query: create table stats_tbl_part( +PREHOOK: Output: default@stats_tbl_part_n0 +POSTHOOK: query: create table stats_tbl_part_n0( t tinyint, si smallint, i int, @@ -69,92 +69,92 @@ POSTHOOK: query: create table stats_tbl_part( bin binary) partitioned by (dt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@stats_tbl_part -PREHOOK: query: from over10k -insert overwrite table stats_tbl_part 
partition (dt=2010) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t>0 and t<30 -insert overwrite table stats_tbl_part partition (dt=2014) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t > 30 and t<60 +POSTHOOK: Output: default@stats_tbl_part_n0 +PREHOOK: query: from over10k_n23 +insert overwrite table stats_tbl_part_n0 partition (dt=2010) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t>0 and t<30 +insert overwrite table stats_tbl_part_n0 partition (dt=2014) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t > 30 and t<60 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k -PREHOOK: Output: default@stats_tbl_part@dt=2010 -PREHOOK: Output: default@stats_tbl_part@dt=2014 -POSTHOOK: query: from over10k -insert overwrite table stats_tbl_part partition (dt=2010) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t>0 and t<30 -insert overwrite table stats_tbl_part partition (dt=2014) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t > 30 and t<60 +PREHOOK: Input: default@over10k_n23 +PREHOOK: Output: default@stats_tbl_part_n0@dt=2010 +PREHOOK: Output: default@stats_tbl_part_n0@dt=2014 +POSTHOOK: query: from over10k_n23 +insert overwrite table stats_tbl_part_n0 partition (dt=2010) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t>0 and t<30 +insert overwrite table stats_tbl_part_n0 partition (dt=2014) select t,si,i,b,f,d,bo,s,ts,`dec`,bin where t > 30 and t<60 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k -POSTHOOK: Output: default@stats_tbl_part@dt=2010 -POSTHOOK: Output: default@stats_tbl_part@dt=2014 -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part 
PARTITION(dt=2014).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2014).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ] -PREHOOK: query: analyze table stats_tbl_part partition(dt) compute statistics +POSTHOOK: Input: default@over10k_n23 +POSTHOOK: Output: default@stats_tbl_part_n0@dt=2010 +POSTHOOK: Output: default@stats_tbl_part_n0@dt=2014 +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).b SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).bin SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).bo SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).d SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).dec SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).f SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).i SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).s SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).si SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).t SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2010).ts SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).b SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).bin SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).bo SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).d SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).dec SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:dec, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).f SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:f, 
type:float, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).i SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).s SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).si SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).t SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: stats_tbl_part_n0 PARTITION(dt=2014).ts SIMPLE [(over10k_n23)over10k_n23.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: analyze table stats_tbl_part_n0 partition(dt) compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@stats_tbl_part -PREHOOK: Input: default@stats_tbl_part@dt=2010 -PREHOOK: Input: default@stats_tbl_part@dt=2014 -PREHOOK: Output: default@stats_tbl_part -PREHOOK: Output: default@stats_tbl_part@dt=2010 -PREHOOK: Output: default@stats_tbl_part@dt=2014 -POSTHOOK: query: analyze table stats_tbl_part partition(dt) compute statistics +PREHOOK: Input: default@stats_tbl_part_n0 +PREHOOK: Input: default@stats_tbl_part_n0@dt=2010 +PREHOOK: Input: default@stats_tbl_part_n0@dt=2014 +PREHOOK: Output: default@stats_tbl_part_n0 +PREHOOK: Output: default@stats_tbl_part_n0@dt=2010 +PREHOOK: Output: default@stats_tbl_part_n0@dt=2014 +POSTHOOK: query: analyze table stats_tbl_part_n0 partition(dt) compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@stats_tbl_part -POSTHOOK: Input: default@stats_tbl_part@dt=2010 -POSTHOOK: Input: default@stats_tbl_part@dt=2014 -POSTHOOK: Output: default@stats_tbl_part -POSTHOOK: Output: default@stats_tbl_part@dt=2010 -POSTHOOK: Output: default@stats_tbl_part@dt=2014 -PREHOOK: query: analyze table stats_tbl_part partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin +POSTHOOK: Input: default@stats_tbl_part_n0 +POSTHOOK: Input: default@stats_tbl_part_n0@dt=2010 +POSTHOOK: Input: default@stats_tbl_part_n0@dt=2014 +POSTHOOK: Output: default@stats_tbl_part_n0 +POSTHOOK: Output: default@stats_tbl_part_n0@dt=2010 +POSTHOOK: Output: default@stats_tbl_part_n0@dt=2014 +PREHOOK: query: analyze table stats_tbl_part_n0 partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@stats_tbl_part -PREHOOK: Input: default@stats_tbl_part@dt=2010 -PREHOOK: Output: default@stats_tbl_part -PREHOOK: Output: default@stats_tbl_part@dt=2010 +PREHOOK: Input: default@stats_tbl_part_n0 +PREHOOK: Input: default@stats_tbl_part_n0@dt=2010 +PREHOOK: Output: default@stats_tbl_part_n0 +PREHOOK: Output: default@stats_tbl_part_n0@dt=2010 #### A masked pattern was here #### -POSTHOOK: query: analyze table stats_tbl_part partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin +POSTHOOK: query: analyze table stats_tbl_part_n0 partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@stats_tbl_part -POSTHOOK: Input: default@stats_tbl_part@dt=2010 -POSTHOOK: Output: default@stats_tbl_part -POSTHOOK: Output: default@stats_tbl_part@dt=2010 +POSTHOOK: Input: default@stats_tbl_part_n0 +POSTHOOK: Input: default@stats_tbl_part_n0@dt=2010 +POSTHOOK: Output: default@stats_tbl_part_n0 +POSTHOOK: Output: default@stats_tbl_part_n0@dt=2010 #### A masked pattern was here #### -PREHOOK: query: 
analyze table stats_tbl_part partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin +PREHOOK: query: analyze table stats_tbl_part_n0 partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@stats_tbl_part -PREHOOK: Input: default@stats_tbl_part@dt=2014 -PREHOOK: Output: default@stats_tbl_part -PREHOOK: Output: default@stats_tbl_part@dt=2014 +PREHOOK: Input: default@stats_tbl_part_n0 +PREHOOK: Input: default@stats_tbl_part_n0@dt=2014 +PREHOOK: Output: default@stats_tbl_part_n0 +PREHOOK: Output: default@stats_tbl_part_n0@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: analyze table stats_tbl_part partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin +POSTHOOK: query: analyze table stats_tbl_part_n0 partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@stats_tbl_part -POSTHOOK: Input: default@stats_tbl_part@dt=2014 -POSTHOOK: Output: default@stats_tbl_part -POSTHOOK: Output: default@stats_tbl_part@dt=2014 +POSTHOOK: Input: default@stats_tbl_part_n0 +POSTHOOK: Input: default@stats_tbl_part_n0@dt=2014 +POSTHOOK: Output: default@stats_tbl_part_n0 +POSTHOOK: Output: default@stats_tbl_part_n0@dt=2014 #### A masked pattern was here #### PREHOOK: query: explain -select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010 +select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt = 2010 PREHOOK: type: QUERY POSTHOOK: query: explain -select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010 +select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt = 2010 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -166,20 +166,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010 +PREHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt = 2010 PREHOOK: type: QUERY -PREHOOK: Input: default@stats_tbl_part +PREHOOK: Input: default@stats_tbl_part_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010 +POSTHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt = 2010 POSTHOOK: type: QUERY -POSTHOOK: Input: default@stats_tbl_part +POSTHOOK: Input: default@stats_tbl_part_n0 #### A masked pattern was here #### 2322 2322 2322 2322 2322 2322 2322 65791 4294967296 99.98 0.03 PREHOOK: query: explain -select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010 +select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt > 2010 PREHOOK: type: QUERY POSTHOOK: query: explain -select count(*), count(1), sum(1), 
sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010 +select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt > 2010 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -191,42 +191,42 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010 +PREHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt > 2010 PREHOOK: type: QUERY -PREHOOK: Input: default@stats_tbl_part +PREHOOK: Input: default@stats_tbl_part_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010 +POSTHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt > 2010 POSTHOOK: type: QUERY -POSTHOOK: Input: default@stats_tbl_part +POSTHOOK: Input: default@stats_tbl_part_n0 #### A masked pattern was here #### 2219 2219 2219 4438 2219 2219 2219 2219 65791 4294967296 99.96 0.04 -PREHOOK: query: select count(*) from stats_tbl_part +PREHOOK: query: select count(*) from stats_tbl_part_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@stats_tbl_part +PREHOOK: Input: default@stats_tbl_part_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from stats_tbl_part +POSTHOOK: query: select count(*) from stats_tbl_part_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@stats_tbl_part +POSTHOOK: Input: default@stats_tbl_part_n0 #### A masked pattern was here #### 4541 -PREHOOK: query: select count(*)/2 from stats_tbl_part +PREHOOK: query: select count(*)/2 from stats_tbl_part_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@stats_tbl_part -PREHOOK: Input: default@stats_tbl_part@dt=2010 -PREHOOK: Input: default@stats_tbl_part@dt=2014 +PREHOOK: Input: default@stats_tbl_part_n0 +PREHOOK: Input: default@stats_tbl_part_n0@dt=2010 +PREHOOK: Input: default@stats_tbl_part_n0@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: select count(*)/2 from stats_tbl_part +POSTHOOK: query: select count(*)/2 from stats_tbl_part_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@stats_tbl_part -POSTHOOK: Input: default@stats_tbl_part@dt=2010 -POSTHOOK: Input: default@stats_tbl_part@dt=2014 +POSTHOOK: Input: default@stats_tbl_part_n0 +POSTHOOK: Input: default@stats_tbl_part_n0@dt=2010 +POSTHOOK: Input: default@stats_tbl_part_n0@dt=2014 #### A masked pattern was here #### 2270.5 -PREHOOK: query: drop table stats_tbl_part +PREHOOK: query: drop table stats_tbl_part_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@stats_tbl_part -PREHOOK: Output: default@stats_tbl_part -POSTHOOK: query: drop table stats_tbl_part +PREHOOK: Input: default@stats_tbl_part_n0 +PREHOOK: Output: default@stats_tbl_part_n0 +POSTHOOK: query: drop table stats_tbl_part_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@stats_tbl_part -POSTHOOK: Output: default@stats_tbl_part +POSTHOOK: Input: default@stats_tbl_part_n0 +POSTHOOK: Output: default@stats_tbl_part_n0 diff --git a/ql/src/test/results/clientpositive/misc_json.q.out 
b/ql/src/test/results/clientpositive/misc_json.q.out index 29f875abe0..7e98ff748e 100644 --- a/ql/src/test/results/clientpositive/misc_json.q.out +++ b/ql/src/test/results/clientpositive/misc_json.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE IF NOT EXISTS jsontable_n0 (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@jsontable -POSTHOOK: query: CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE +PREHOOK: Output: default@jsontable_n0 +POSTHOOK: query: CREATE TABLE IF NOT EXISTS jsontable_n0 (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@jsontable -PREHOOK: query: ALTER TABLE jsontable ADD COLUMNS (name STRING COMMENT 'a new column') +POSTHOOK: Output: default@jsontable_n0 +PREHOOK: query: ALTER TABLE jsontable_n0 ADD COLUMNS (name STRING COMMENT 'a new column') PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@jsontable -PREHOOK: Output: default@jsontable -POSTHOOK: query: ALTER TABLE jsontable ADD COLUMNS (name STRING COMMENT 'a new column') +PREHOOK: Input: default@jsontable_n0 +PREHOOK: Output: default@jsontable_n0 +POSTHOOK: query: ALTER TABLE jsontable_n0 ADD COLUMNS (name STRING COMMENT 'a new column') POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@jsontable -POSTHOOK: Output: default@jsontable -PREHOOK: query: ALTER TABLE jsontable RENAME TO jsontable2 +POSTHOOK: Input: default@jsontable_n0 +POSTHOOK: Output: default@jsontable_n0 +PREHOOK: query: ALTER TABLE jsontable_n0 RENAME TO jsontable2 PREHOOK: type: ALTERTABLE_RENAME -PREHOOK: Input: default@jsontable -PREHOOK: Output: default@jsontable -POSTHOOK: query: ALTER TABLE jsontable RENAME TO jsontable2 +PREHOOK: Input: default@jsontable_n0 +PREHOOK: Output: default@jsontable_n0 +POSTHOOK: query: ALTER TABLE jsontable_n0 RENAME TO jsontable2 POSTHOOK: type: ALTERTABLE_RENAME -POSTHOOK: Input: default@jsontable -POSTHOOK: Output: default@jsontable +POSTHOOK: Input: default@jsontable_n0 POSTHOOK: Output: default@jsontable2 +POSTHOOK: Output: default@jsontable_n0 PREHOOK: query: SHOW TABLE EXTENDED LIKE jsontable2 PREHOOK: type: SHOW_TABLESTATUS POSTHOOK: query: SHOW TABLE EXTENDED LIKE jsontable2 diff --git a/ql/src/test/results/clientpositive/mm_all.q.out b/ql/src/test/results/clientpositive/mm_all.q.out index d82f85c061..e7df4c0a29 100644 --- a/ql/src/test/results/clientpositive/mm_all.q.out +++ b/ql/src/test/results/clientpositive/mm_all.q.out @@ -1,57 +1,57 @@ -PREHOOK: query: drop table intermediate +PREHOOK: query: drop table intermediate_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table intermediate +POSTHOOK: query: drop table intermediate_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +PREHOOK: query: create table intermediate_n0(key int) partitioned by (p int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@intermediate -POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +PREHOOK: Output: default@intermediate_n0 +POSTHOOK: query: create table intermediate_n0(key int) partitioned by (p int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default -POSTHOOK: Output: default@intermediate -PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +POSTHOOK: Output: default@intermediate_n0 +PREHOOK: query: insert into table intermediate_n0 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@intermediate@p=455 -POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +PREHOOK: Output: default@intermediate_n0@p=455 +POSTHOOK: query: insert into table intermediate_n0 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@intermediate@p=455 +POSTHOOK: Output: default@intermediate_n0@p=455 POSTHOOK: Lineage: ###Masked### -PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +PREHOOK: query: insert into table intermediate_n0 partition(p='456') select distinct key from src where key is not null order by key asc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@intermediate@p=456 -POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +PREHOOK: Output: default@intermediate_n0@p=456 +POSTHOOK: query: insert into table intermediate_n0 partition(p='456') select distinct key from src where key is not null order by key asc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@intermediate@p=456 +POSTHOOK: Output: default@intermediate_n0@p=456 POSTHOOK: Lineage: ###Masked### -PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +PREHOOK: query: insert into table intermediate_n0 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@intermediate@p=457 -POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +PREHOOK: Output: default@intermediate_n0@p=457 +POSTHOOK: query: insert into table intermediate_n0 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@intermediate@p=457 +POSTHOOK: Output: default@intermediate_n0@p=457 POSTHOOK: Lineage: ###Masked### -PREHOOK: query: drop table part_mm +PREHOOK: query: drop table part_mm_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table part_mm +POSTHOOK: query: drop table part_mm_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only") +PREHOOK: query: create table part_mm_n0(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@part_mm -POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", 
"transactional_properties"="insert_only") +PREHOOK: Output: default@part_mm_n0 +POSTHOOK: query: create table part_mm_n0(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_mm -PREHOOK: query: explain insert into table part_mm partition(key_mm=455) select key from intermediate +POSTHOOK: Output: default@part_mm_n0 +PREHOOK: query: explain insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0 PREHOOK: type: QUERY -POSTHOOK: query: explain insert into table part_mm partition(key_mm=455) select key from intermediate +POSTHOOK: query: explain insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -68,7 +68,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: intermediate + alias: intermediate_n0 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) @@ -81,7 +81,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part_mm + name: default.part_mm_n0 Write Type: INSERT Select Operator expressions: _col0 (type: int), UDFToInteger('455') (type: int) @@ -134,7 +134,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part_mm + name: default.part_mm_n0 micromanaged table: true Stage: Stage-2 @@ -143,7 +143,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: int - Table: default.part_mm + Table: default.part_mm_n0 Stage: Stage-3 Merge File Operator @@ -162,62 +162,62 @@ STAGE PLANS: Stage: Stage-6 Dependency Collection -PREHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 -PREHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 -POSTHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: Lineage: ###Masked### -PREHOOK: query: insert into table part_mm partition(key_mm=456) select key from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 -PREHOOK: Output: default@part_mm@key_mm=456 -POSTHOOK: query: insert into table part_mm partition(key_mm=456) select key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 -POSTHOOK: Output: default@part_mm@key_mm=456 -POSTHOOK: Lineage: ###Masked### -PREHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate -PREHOOK: type: 
QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 -PREHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 -POSTHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: Lineage: ###Masked### -PREHOOK: query: select * from part_mm order by key, key_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@part_mm -PREHOOK: Input: default@part_mm@key_mm=455 -PREHOOK: Input: default@part_mm@key_mm=456 -#### A masked pattern was here #### -POSTHOOK: query: select * from part_mm order by key, key_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_mm -POSTHOOK: Input: default@part_mm@key_mm=455 -POSTHOOK: Input: default@part_mm@key_mm=456 +PREHOOK: query: insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0 +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 +PREHOOK: Output: default@part_mm_n0@key_mm=455 +POSTHOOK: query: insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 +POSTHOOK: Output: default@part_mm_n0@key_mm=455 +POSTHOOK: Lineage: ###Masked### +PREHOOK: query: insert into table part_mm_n0 partition(key_mm=456) select key from intermediate_n0 +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 +PREHOOK: Output: default@part_mm_n0@key_mm=456 +POSTHOOK: query: insert into table part_mm_n0 partition(key_mm=456) select key from intermediate_n0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 +POSTHOOK: Output: default@part_mm_n0@key_mm=456 +POSTHOOK: Lineage: ###Masked### +PREHOOK: query: insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0 +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 +PREHOOK: Output: default@part_mm_n0@key_mm=455 +POSTHOOK: query: insert into table part_mm_n0 partition(key_mm=455) select key from intermediate_n0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 +POSTHOOK: Output: default@part_mm_n0@key_mm=455 +POSTHOOK: Lineage: ###Masked### +PREHOOK: query: select * from part_mm_n0 order by key, key_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@part_mm_n0 +PREHOOK: Input: default@part_mm_n0@key_mm=455 +PREHOOK: Input: default@part_mm_n0@key_mm=456 +#### A masked pattern was here #### +POSTHOOK: query: select * 
from part_mm_n0 order by key, key_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_mm_n0 +POSTHOOK: Input: default@part_mm_n0@key_mm=455 +POSTHOOK: Input: default@part_mm_n0@key_mm=456 #### A masked pattern was here #### 0 455 0 455 @@ -237,17 +237,17 @@ POSTHOOK: Input: default@part_mm@key_mm=456 103 455 103 455 103 456 -PREHOOK: query: select * from part_mm order by key, key_mm +PREHOOK: query: select * from part_mm_n0 order by key, key_mm PREHOOK: type: QUERY -PREHOOK: Input: default@part_mm -PREHOOK: Input: default@part_mm@key_mm=455 -PREHOOK: Input: default@part_mm@key_mm=456 +PREHOOK: Input: default@part_mm_n0 +PREHOOK: Input: default@part_mm_n0@key_mm=455 +PREHOOK: Input: default@part_mm_n0@key_mm=456 #### A masked pattern was here #### -POSTHOOK: query: select * from part_mm order by key, key_mm +POSTHOOK: query: select * from part_mm_n0 order by key, key_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_mm -POSTHOOK: Input: default@part_mm@key_mm=455 -POSTHOOK: Input: default@part_mm@key_mm=456 +POSTHOOK: Input: default@part_mm_n0 +POSTHOOK: Input: default@part_mm_n0@key_mm=455 +POSTHOOK: Input: default@part_mm_n0@key_mm=456 #### A masked pattern was here #### 0 455 0 455 @@ -267,34 +267,34 @@ POSTHOOK: Input: default@part_mm@key_mm=456 103 455 103 455 103 456 -PREHOOK: query: truncate table part_mm +PREHOOK: query: truncate table part_mm_n0 PREHOOK: type: TRUNCATETABLE -PREHOOK: Output: default@part_mm@key_mm=455 -PREHOOK: Output: default@part_mm@key_mm=456 -POSTHOOK: query: truncate table part_mm +PREHOOK: Output: default@part_mm_n0@key_mm=455 +PREHOOK: Output: default@part_mm_n0@key_mm=456 +POSTHOOK: query: truncate table part_mm_n0 POSTHOOK: type: TRUNCATETABLE -POSTHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: Output: default@part_mm@key_mm=456 -PREHOOK: query: select * from part_mm order by key, key_mm +POSTHOOK: Output: default@part_mm_n0@key_mm=455 +POSTHOOK: Output: default@part_mm_n0@key_mm=456 +PREHOOK: query: select * from part_mm_n0 order by key, key_mm PREHOOK: type: QUERY -PREHOOK: Input: default@part_mm -PREHOOK: Input: default@part_mm@key_mm=455 -PREHOOK: Input: default@part_mm@key_mm=456 +PREHOOK: Input: default@part_mm_n0 +PREHOOK: Input: default@part_mm_n0@key_mm=455 +PREHOOK: Input: default@part_mm_n0@key_mm=456 #### A masked pattern was here #### -POSTHOOK: query: select * from part_mm order by key, key_mm +POSTHOOK: query: select * from part_mm_n0 order by key, key_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_mm -POSTHOOK: Input: default@part_mm@key_mm=455 -POSTHOOK: Input: default@part_mm@key_mm=456 +POSTHOOK: Input: default@part_mm_n0 +POSTHOOK: Input: default@part_mm_n0@key_mm=455 +POSTHOOK: Input: default@part_mm_n0@key_mm=456 #### A masked pattern was here #### -PREHOOK: query: drop table part_mm +PREHOOK: query: drop table part_mm_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_mm -PREHOOK: Output: default@part_mm -POSTHOOK: query: drop table part_mm +PREHOOK: Input: default@part_mm_n0 +PREHOOK: Output: default@part_mm_n0 +POSTHOOK: query: drop table part_mm_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_mm -POSTHOOK: Output: default@part_mm +POSTHOOK: Input: default@part_mm_n0 +POSTHOOK: Output: default@part_mm_n0 PREHOOK: query: drop table simple_mm PREHOOK: type: DROPTABLE POSTHOOK: query: drop table simple_mm @@ -307,19 +307,19 @@ POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ("t POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@simple_mm -PREHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: query: insert into table simple_mm select key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@simple_mm -POSTHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: query: insert into table simple_mm select key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@simple_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: select * from simple_mm order by key @@ -336,19 +336,19 @@ POSTHOOK: Input: default@simple_mm 98 100 103 -PREHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: query: insert into table simple_mm select key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@simple_mm -POSTHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: query: insert into table simple_mm select key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@simple_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: select * from simple_mm order by key @@ -407,19 +407,19 @@ POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dp_mm -PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate +PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@dp_mm@key1=123 -POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate +POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate_n0 
POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@dp_mm@key1=123/key2=0 POSTHOOK: Output: default@dp_mm@key1=123/key2=10 POSTHOOK: Output: default@dp_mm@key1=123/key2=100 @@ -476,25 +476,25 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@union_mm PREHOOK: query: insert into table union_mm select temps.p from ( -select key as p from intermediate +select key as p from intermediate_n0 union all -select key + 1 as p from intermediate ) temps +select key + 1 as p from intermediate_n0 ) temps PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@union_mm POSTHOOK: query: insert into table union_mm select temps.p from ( -select key as p from intermediate +select key as p from intermediate_n0 union all -select key + 1 as p from intermediate ) temps +select key + 1 as p from intermediate_n0 ) temps POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@union_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: select * from union_mm order by id @@ -520,32 +520,32 @@ POSTHOOK: Input: default@union_mm PREHOOK: query: insert into table union_mm select p from ( -select key + 1 as p from intermediate +select key + 1 as p from intermediate_n0 union all -select key from intermediate +select key from intermediate_n0 ) tab group by p union all -select key + 2 as p from intermediate +select key + 2 as p from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@union_mm POSTHOOK: query: insert into table union_mm select p from ( -select key + 1 as p from intermediate +select key + 1 as p from intermediate_n0 union all -select key from intermediate +select key from intermediate_n0 ) tab group by p union all -select key + 2 as p from intermediate +select key + 2 as p from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@union_mm POSTHOOK: 
Lineage: ###Masked### PREHOOK: query: select * from union_mm order by id @@ -588,46 +588,46 @@ POSTHOOK: Input: default@union_mm PREHOOK: query: insert into table union_mm SELECT p FROM ( - SELECT key + 1 as p FROM intermediate + SELECT key + 1 as p FROM intermediate_n0 UNION ALL SELECT key as p FROM ( SELECT distinct key FROM ( SELECT key FROM ( - SELECT key + 2 as key FROM intermediate + SELECT key + 2 as key FROM intermediate_n0 UNION ALL - SELECT key FROM intermediate + SELECT key FROM intermediate_n0 )t1 group by key)t2 )t3 )t4 group by p PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@union_mm POSTHOOK: query: insert into table union_mm SELECT p FROM ( - SELECT key + 1 as p FROM intermediate + SELECT key + 1 as p FROM intermediate_n0 UNION ALL SELECT key as p FROM ( SELECT distinct key FROM ( SELECT key FROM ( - SELECT key + 2 as key FROM intermediate + SELECT key + 2 as key FROM intermediate_n0 UNION ALL - SELECT key FROM intermediate + SELECT key FROM intermediate_n0 )t1 group by key)t2 )t3 )t4 group by p POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@union_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: select * from union_mm order by id @@ -700,25 +700,25 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@partunion_mm PREHOOK: query: insert into table partunion_mm partition(key) select temps.* from ( -select key as p, key from intermediate +select key as p, key from intermediate_n0 union all -select key + 1 as p, key + 1 from intermediate ) temps +select key + 1 as p, key + 1 from intermediate_n0 ) temps PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@partunion_mm POSTHOOK: query: insert into table partunion_mm partition(key) select temps.* from ( -select key as p, key from intermediate +select key as p, key from intermediate_n0 union all -select key + 1 as p, key + 1 from intermediate ) temps +select key + 1 as p, key + 1 from intermediate_n0 ) temps POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@partunion_mm@key=0 POSTHOOK: Output: default@partunion_mm@key=1 POSTHOOK: Output: default@partunion_mm@key=10 @@ -802,20 +802,20 @@ POSTHOOK: type: CREATETABLE POSTHOOK: 
Output: database:default POSTHOOK: Output: default@skew_mm PREHOOK: query: insert into table skew_mm -select key, key, key from intermediate +select key, key, key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@skew_mm POSTHOOK: query: insert into table skew_mm -select key, key, key from intermediate +select key, key, key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@skew_mm POSTHOOK: Lineage: ###Masked### POSTHOOK: Lineage: ###Masked### @@ -853,24 +853,24 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@skew_dp_union_mm PREHOOK: query: insert into table skew_dp_union_mm partition (k3) -select key as i, key as j, key as k, key as l from intermediate +select key as i, key as j, key as k, key as l from intermediate_n0 union all -select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@skew_dp_union_mm POSTHOOK: query: insert into table skew_dp_union_mm partition (k3) -select key as i, key as j, key as k, key as l from intermediate +select key as i, key as j, key as k, key as l from intermediate_n0 union all -select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@skew_dp_union_mm@k3=0 POSTHOOK: Output: default@skew_dp_union_mm@k3=10 POSTHOOK: Output: default@skew_dp_union_mm@k3=100 @@ -979,19 +979,19 @@ POSTHOOK: query: create table merge0_mm (id int) stored as orc tblproperties("tr POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@merge0_mm -PREHOOK: query: insert into table merge0_mm select key from intermediate +PREHOOK: query: insert into table merge0_mm select key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: 
Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@merge0_mm -POSTHOOK: query: insert into table merge0_mm select key from intermediate +POSTHOOK: query: insert into table merge0_mm select key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@merge0_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: select * from merge0_mm @@ -1008,19 +1008,19 @@ POSTHOOK: Input: default@merge0_mm 10 100 103 -PREHOOK: query: insert into table merge0_mm select key from intermediate +PREHOOK: query: insert into table merge0_mm select key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@merge0_mm -POSTHOOK: query: insert into table merge0_mm select key from intermediate +POSTHOOK: query: insert into table merge0_mm select key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@merge0_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: select * from merge0_mm @@ -1059,19 +1059,19 @@ POSTHOOK: query: create table merge2_mm (id int) tblproperties("transactional"=" POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@merge2_mm -PREHOOK: query: insert into table merge2_mm select key from intermediate +PREHOOK: query: insert into table merge2_mm select key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@merge2_mm -POSTHOOK: query: insert into table merge2_mm select key from intermediate +POSTHOOK: query: insert into table merge2_mm select key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@merge2_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: select * from merge2_mm @@ -1088,19 +1088,19 @@ 
POSTHOOK: Input: default@merge2_mm 10 100 103 -PREHOOK: query: insert into table merge2_mm select key from intermediate +PREHOOK: query: insert into table merge2_mm select key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@merge2_mm -POSTHOOK: query: insert into table merge2_mm select key from intermediate +POSTHOOK: query: insert into table merge2_mm select key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@merge2_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: select * from merge2_mm @@ -1139,19 +1139,19 @@ POSTHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@merge1_mm -PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@merge1_mm -POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@merge1_mm@key=0 POSTHOOK: Output: default@merge1_mm@key=10 POSTHOOK: Output: default@merge1_mm@key=100 @@ -1190,19 +1190,19 @@ POSTHOOK: Input: default@merge1_mm@key=98 98 98 100 100 103 103 -PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@merge1_mm -POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +POSTHOOK: 
query: insert into table merge1_mm partition (key) select key, key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@merge1_mm@key=0 POSTHOOK: Output: default@merge1_mm@key=10 POSTHOOK: Output: default@merge1_mm@key=100 @@ -1259,20 +1259,20 @@ PREHOOK: query: drop table ctas0_mm PREHOOK: type: DROPTABLE POSTHOOK: query: drop table ctas0_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate +PREHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate_n0 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: database:default PREHOOK: Output: default@ctas0_mm -POSTHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate +POSTHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate_n0 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas0_mm POSTHOOK: Lineage: ###Masked### @@ -1304,21 +1304,21 @@ PREHOOK: type: DROPTABLE POSTHOOK: query: drop table ctas1_mm POSTHOOK: type: DROPTABLE PREHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as - select * from intermediate union all select * from intermediate + select * from intermediate_n0 union all select * from intermediate_n0 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: database:default PREHOOK: Output: default@ctas1_mm POSTHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as - select * from intermediate union all select * from intermediate + select * from intermediate_n0 union all select * from intermediate_n0 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: 
default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas1_mm POSTHOOK: Lineage: ###Masked### @@ -1375,24 +1375,24 @@ POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties("tr POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@multi0_2_mm -PREHOOK: query: from intermediate +PREHOOK: query: from intermediate_n0 insert overwrite table multi0_1_mm select key, p insert overwrite table multi0_2_mm select p, key PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@multi0_1_mm PREHOOK: Output: default@multi0_2_mm -POSTHOOK: query: from intermediate +POSTHOOK: query: from intermediate_n0 insert overwrite table multi0_1_mm select key, p insert overwrite table multi0_2_mm select p, key POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@multi0_1_mm POSTHOOK: Output: default@multi0_2_mm POSTHOOK: Lineage: ###Masked### @@ -1427,24 +1427,24 @@ POSTHOOK: Input: default@multi0_2_mm 456 10 457 100 457 103 -PREHOOK: query: from intermediate +PREHOOK: query: from intermediate_n0 insert into table multi0_1_mm select p, key insert overwrite table multi0_2_mm select key, p PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@multi0_1_mm PREHOOK: Output: default@multi0_2_mm -POSTHOOK: query: from intermediate +POSTHOOK: query: from intermediate_n0 insert into table multi0_1_mm select p, key insert overwrite table multi0_2_mm select key, p POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@multi0_1_mm POSTHOOK: Output: default@multi0_2_mm POSTHOOK: Lineage: ###Masked### @@ -1513,24 +1513,24 @@ POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p in POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@multi1_mm -PREHOOK: query: from intermediate +PREHOOK: query: from intermediate_n0 insert into table multi1_mm partition(p=1) select p, key insert into table 
multi1_mm partition(p=2) select key, p PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@multi1_mm@p=1 PREHOOK: Output: default@multi1_mm@p=2 -POSTHOOK: query: from intermediate +POSTHOOK: query: from intermediate_n0 insert into table multi1_mm partition(p=1) select p, key insert into table multi1_mm partition(p=2) select key, p POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@multi1_mm@p=1 POSTHOOK: Output: default@multi1_mm@p=2 POSTHOOK: Lineage: ###Masked### @@ -1561,24 +1561,24 @@ POSTHOOK: Input: default@multi1_mm@p=2 456 10 1 457 100 1 457 103 1 -PREHOOK: query: from intermediate +PREHOOK: query: from intermediate_n0 insert into table multi1_mm partition(p=2) select p, key insert overwrite table multi1_mm partition(p=1) select key, p PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@multi1_mm@p=1 PREHOOK: Output: default@multi1_mm@p=2 -POSTHOOK: query: from intermediate +POSTHOOK: query: from intermediate_n0 insert into table multi1_mm partition(p=2) select p, key insert overwrite table multi1_mm partition(p=1) select key, p POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@multi1_mm@p=1 POSTHOOK: Output: default@multi1_mm@p=2 POSTHOOK: Lineage: ###Masked### @@ -1615,24 +1615,24 @@ POSTHOOK: Input: default@multi1_mm@p=2 456 10 2 457 100 2 457 103 2 -PREHOOK: query: from intermediate +PREHOOK: query: from intermediate_n0 insert into table multi1_mm partition(p) select p, key, p insert into table multi1_mm partition(p=1) select key, p PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@multi1_mm PREHOOK: Output: default@multi1_mm@p=1 -POSTHOOK: query: from intermediate +POSTHOOK: query: from intermediate_n0 insert into table multi1_mm partition(p) select p, key, p insert into table multi1_mm partition(p=1) select key, p POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@multi1_mm@p=1 POSTHOOK: Output: default@multi1_mm@p=455 POSTHOOK: Output: default@multi1_mm@p=456 @@ -1695,24 +1695,24 @@ POSTHOOK: Input: default@multi1_mm@p=457 457 100 457 457 103 2 457 103 457 -PREHOOK: query: from intermediate +PREHOOK: query: from intermediate_n0 insert into table multi1_mm partition(p) select p, key, 1 insert into table multi1_mm partition(p=1) select key, p PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@multi1_mm PREHOOK: Output: default@multi1_mm@p=1 -POSTHOOK: query: from intermediate +POSTHOOK: query: from intermediate_n0 insert into table multi1_mm partition(p) select p, key, 1 insert into table multi1_mm partition(p=1) select key, p POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@multi1_mm@p=1 POSTHOOK: Lineage: ###Masked### POSTHOOK: Lineage: ###Masked### @@ -1798,19 +1798,19 @@ POSTHOOK: query: create table stats_mm(key int) tblproperties("transactional"=" POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@stats_mm -PREHOOK: query: insert into table stats_mm select key from intermediate +PREHOOK: query: insert into table stats_mm select key from intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@stats_mm -POSTHOOK: query: insert into table stats_mm select key from intermediate +POSTHOOK: query: insert into table stats_mm select key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@stats_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: desc formatted stats_mm @@ -1848,19 +1848,19 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: insert into table stats_mm select key from intermediate +PREHOOK: query: insert into table stats_mm select key from intermediate_n0 PREHOOK: type: QUERY 
-PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@stats_mm -POSTHOOK: query: insert into table stats_mm select key from intermediate +POSTHOOK: query: insert into table stats_mm select key from intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@stats_mm POSTHOOK: Lineage: ###Masked### PREHOOK: query: desc formatted stats_mm @@ -2090,19 +2090,19 @@ POSTHOOK: query: CREATE TEMPORARY TABLE temp1 (a int) TBLPROPERTIES ("transactio POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@temp1 -PREHOOK: query: INSERT INTO temp1 SELECT key FROM intermediate +PREHOOK: query: INSERT INTO temp1 SELECT key FROM intermediate_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Input: default@intermediate_n0@p=455 +PREHOOK: Input: default@intermediate_n0@p=456 +PREHOOK: Input: default@intermediate_n0@p=457 PREHOOK: Output: default@temp1 -POSTHOOK: query: INSERT INTO temp1 SELECT key FROM intermediate +POSTHOOK: query: INSERT INTO temp1 SELECT key FROM intermediate_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Input: default@intermediate_n0@p=455 +POSTHOOK: Input: default@intermediate_n0@p=456 +POSTHOOK: Input: default@intermediate_n0@p=457 POSTHOOK: Output: default@temp1 POSTHOOK: Lineage: ###Masked### PREHOOK: query: DESC EXTENDED temp1 @@ -2128,11 +2128,11 @@ POSTHOOK: Input: default@temp1 10 100 103 -PREHOOK: query: drop table intermediate +PREHOOK: query: drop table intermediate_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@intermediate -PREHOOK: Output: default@intermediate -POSTHOOK: query: drop table intermediate +PREHOOK: Input: default@intermediate_n0 +PREHOOK: Output: default@intermediate_n0 +POSTHOOK: query: drop table intermediate_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@intermediate -POSTHOOK: Output: default@intermediate +POSTHOOK: Input: default@intermediate_n0 +POSTHOOK: Output: default@intermediate_n0 diff --git a/ql/src/test/results/clientpositive/mm_buckets.q.out b/ql/src/test/results/clientpositive/mm_buckets.q.out index 2b2c95913f..d37e2f6a54 100644 --- a/ql/src/test/results/clientpositive/mm_buckets.q.out +++ b/ql/src/test/results/clientpositive/mm_buckets.q.out @@ -1,42 +1,42 @@ -PREHOOK: query: drop table intermediate +PREHOOK: query: drop table intermediate_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table intermediate +POSTHOOK: query: drop table intermediate_n2 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table intermediate(key int) 
partitioned by (p int) stored as orc +PREHOOK: query: create table intermediate_n2(key int) partitioned by (p int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@intermediate -POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +PREHOOK: Output: default@intermediate_n2 +POSTHOOK: query: create table intermediate_n2(key int) partitioned by (p int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@intermediate -PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +POSTHOOK: Output: default@intermediate_n2 +PREHOOK: query: insert into table intermediate_n2 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@intermediate@p=455 -POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +PREHOOK: Output: default@intermediate_n2@p=455 +POSTHOOK: query: insert into table intermediate_n2 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@intermediate@p=455 -POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +POSTHOOK: Output: default@intermediate_n2@p=455 +POSTHOOK: Lineage: intermediate_n2 PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate_n2 partition(p='456') select distinct key from src where key is not null order by key asc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@intermediate@p=456 -POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +PREHOOK: Output: default@intermediate_n2@p=456 +POSTHOOK: query: insert into table intermediate_n2 partition(p='456') select distinct key from src where key is not null order by key asc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@intermediate@p=456 -POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +POSTHOOK: Output: default@intermediate_n2@p=456 +POSTHOOK: Lineage: intermediate_n2 PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate_n2 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@intermediate@p=457 -POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +PREHOOK: Output: default@intermediate_n2@p=457 +POSTHOOK: query: insert into table intermediate_n2 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 POSTHOOK: 
type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@intermediate@p=457
-POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Output: default@intermediate_n2@p=457
+POSTHOOK: Lineage: intermediate_n2 PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: drop table bucket0_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table bucket0_mm
@@ -53,22 +53,22 @@ tblproperties("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket0_mm
-PREHOOK: query: insert into table bucket0_mm select key, key from intermediate
+PREHOOK: query: insert into table bucket0_mm select key, key from intermediate_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n2
+PREHOOK: Input: default@intermediate_n2@p=455
+PREHOOK: Input: default@intermediate_n2@p=456
+PREHOOK: Input: default@intermediate_n2@p=457
 PREHOOK: Output: default@bucket0_mm
-POSTHOOK: query: insert into table bucket0_mm select key, key from intermediate
+POSTHOOK: query: insert into table bucket0_mm select key, key from intermediate_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n2
+POSTHOOK: Input: default@intermediate_n2@p=455
+POSTHOOK: Input: default@intermediate_n2@p=456
+POSTHOOK: Input: default@intermediate_n2@p=457
 POSTHOOK: Output: default@bucket0_mm
-POSTHOOK: Lineage: bucket0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket0_mm.id SIMPLE [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket0_mm.key SIMPLE [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from bucket0_mm order by key, id
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket0_mm
@@ -105,22 +105,22 @@ POSTHOOK: Input: default@bucket0_mm
 100	100
 0	0
 103	103
-PREHOOK: query: insert into table bucket0_mm select key, key from intermediate
+PREHOOK: query: insert into table bucket0_mm select key, key from intermediate_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n2
+PREHOOK: Input: default@intermediate_n2@p=455
+PREHOOK: Input: default@intermediate_n2@p=456
+PREHOOK: Input: default@intermediate_n2@p=457
 PREHOOK: Output: default@bucket0_mm
-POSTHOOK: query: insert into table bucket0_mm select key, key from intermediate
+POSTHOOK: query: insert into table bucket0_mm select key, key from intermediate_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n2
+POSTHOOK: Input: default@intermediate_n2@p=455
+POSTHOOK: Input: default@intermediate_n2@p=456
+POSTHOOK: Input: default@intermediate_n2@p=457
 POSTHOOK: Output: default@bucket0_mm
-POSTHOOK: Lineage: bucket0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket0_mm.id SIMPLE [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket0_mm.key SIMPLE [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from bucket0_mm order by key, id
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket0_mm
@@ -194,24 +194,24 @@ POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket1_mm
 PREHOOK: query: insert into table bucket1_mm partition (key2)
-select key + 1, key, key - 1 from intermediate
+select key + 1, key, key - 1 from intermediate_n2
 union all
-select key - 1, key, key + 1 from intermediate
+select key - 1, key, key + 1 from intermediate_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n2
+PREHOOK: Input: default@intermediate_n2@p=455
+PREHOOK: Input: default@intermediate_n2@p=456
+PREHOOK: Input: default@intermediate_n2@p=457
 PREHOOK: Output: default@bucket1_mm
 POSTHOOK: query: insert into table bucket1_mm partition (key2)
-select key + 1, key, key - 1 from intermediate
+select key + 1, key, key - 1 from intermediate_n2
 union all
-select key - 1, key, key + 1 from intermediate
+select key - 1, key, key + 1 from intermediate_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n2
+POSTHOOK: Input: default@intermediate_n2@p=455
+POSTHOOK: Input: default@intermediate_n2@p=456
+POSTHOOK: Input: default@intermediate_n2@p=457
 POSTHOOK: Output: default@bucket1_mm@key2=-1
 POSTHOOK: Output: default@bucket1_mm@key2=1
 POSTHOOK: Output: default@bucket1_mm@key2=101
@@ -223,28 +223,28 @@ POSTHOOK: Output: default@bucket1_mm@key2=96
 POSTHOOK: Output: default@bucket1_mm@key2=97
 POSTHOOK: Output: default@bucket1_mm@key2=98
 POSTHOOK: Output: default@bucket1_mm@key2=99
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).id EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).key EXPRESSION [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from bucket1_mm order by key, id
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket1_mm
@@ -383,22 +383,22 @@ tblproperties("transactional"="true", "transactional_properties"="insert_only")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket2_mm
-PREHOOK: query: insert into table bucket2_mm select key, key from intermediate where key == 0
+PREHOOK: query: insert into table bucket2_mm select key, key from intermediate_n2 where key == 0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n2
+PREHOOK: Input: default@intermediate_n2@p=455
+PREHOOK: Input: default@intermediate_n2@p=456
+PREHOOK: Input: default@intermediate_n2@p=457
 PREHOOK: Output: default@bucket2_mm
-POSTHOOK: query: insert into table bucket2_mm select key, key from intermediate where key == 0
+POSTHOOK: query: insert into table bucket2_mm select key, key from intermediate_n2 where key == 0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n2
+POSTHOOK: Input: default@intermediate_n2@p=455
+POSTHOOK: Input: default@intermediate_n2@p=456
+POSTHOOK: Input: default@intermediate_n2@p=457
 POSTHOOK: Output: default@bucket2_mm
-POSTHOOK: Lineage: bucket2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket2_mm.id SIMPLE [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket2_mm.key SIMPLE [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from bucket2_mm order by key, id
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket2_mm
@@ -424,22 +424,22 @@ POSTHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s ord
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucket2_mm
 #### A masked pattern was here ####
-PREHOOK: query: insert into table bucket2_mm select key, key from intermediate where key in (0, 103)
+PREHOOK: query: insert into table bucket2_mm select key, key from intermediate_n2 where key in (0, 103)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n2
+PREHOOK: Input: default@intermediate_n2@p=455
+PREHOOK: Input: default@intermediate_n2@p=456
+PREHOOK: Input: default@intermediate_n2@p=457
 PREHOOK: Output: default@bucket2_mm
-POSTHOOK: query: insert into table bucket2_mm select key, key from intermediate where key in (0, 103)
+POSTHOOK: query: insert into table bucket2_mm select key, key from intermediate_n2 where key in (0, 103)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n2
+POSTHOOK: Input: default@intermediate_n2@p=455
+POSTHOOK: Input: default@intermediate_n2@p=456
+POSTHOOK: Input: default@intermediate_n2@p=457
 POSTHOOK: Output: default@bucket2_mm
-POSTHOOK: Lineage: bucket2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: bucket2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket2_mm.id SIMPLE [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucket2_mm.key SIMPLE [(intermediate_n2)intermediate_n2.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from bucket2_mm
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket2_mm
@@ -475,11 +475,11 @@ POSTHOOK: query: drop table bucket2_mm
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@bucket2_mm
 POSTHOOK: Output: default@bucket2_mm
-PREHOOK: query: drop table intermediate
+PREHOOK: query: drop table intermediate_n2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@intermediate
-PREHOOK: Output: default@intermediate
-POSTHOOK: query: drop table intermediate
+PREHOOK: Input: default@intermediate_n2
+PREHOOK: Output: default@intermediate_n2
+POSTHOOK: query: drop table intermediate_n2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Output: default@intermediate
+POSTHOOK: Input: default@intermediate_n2
+POSTHOOK: Output: default@intermediate_n2
diff --git a/ql/src/test/results/clientpositive/mm_cttas.q.out b/ql/src/test/results/clientpositive/mm_cttas.q.out
index e5c5761c58..0060fc0d73 100644
--- a/ql/src/test/results/clientpositive/mm_cttas.q.out
+++ b/ql/src/test/results/clientpositive/mm_cttas.q.out
@@ -1,60 +1,60 @@
-PREHOOK: query: drop table intermediate
+PREHOOK: query: drop table intermediate_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermediate
+POSTHOOK: query: drop table intermediate_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
+PREHOOK: query: create table intermediate_n1(key int) partitioned by (p int) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@intermediate
-POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
+PREHOOK: Output: default@intermediate_n1
+POSTHOOK: query: create table intermediate_n1(key int) partitioned by (p int) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@intermediate
-PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
+POSTHOOK: Output: default@intermediate_n1
+PREHOOK: query: insert into table intermediate_n1 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@intermediate@p=455
-POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
+PREHOOK: Output: default@intermediate_n1@p=455
+POSTHOOK: query: insert into table intermediate_n1 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@intermediate@p=455
-POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
+POSTHOOK: Output: default@intermediate_n1@p=455
+POSTHOOK: Lineage: intermediate_n1 PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: insert into table intermediate_n1 partition(p='456') select distinct key from src where key is not null order by key asc limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@intermediate@p=456
-POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
+PREHOOK: Output: default@intermediate_n1@p=456
+POSTHOOK: query: insert into table intermediate_n1 partition(p='456') select distinct key from src where key is not null order by key asc limit 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@intermediate@p=456
-POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
+POSTHOOK: Output: default@intermediate_n1@p=456
+POSTHOOK: Lineage: intermediate_n1 PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: insert into table intermediate_n1 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@intermediate@p=457
-POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
+PREHOOK: Output: default@intermediate_n1@p=457
+POSTHOOK: query: insert into table intermediate_n1 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@intermediate@p=457
-POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Output: default@intermediate_n1@p=457
+POSTHOOK: Lineage: intermediate_n1 PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: drop table cttas1_mm
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table cttas1_mm
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create temporary table cttas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
+PREHOOK: query: create temporary table cttas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate_n1
 PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n1
+PREHOOK: Input: default@intermediate_n1@p=455
+PREHOOK: Input: default@intermediate_n1@p=456
+PREHOOK: Input: default@intermediate_n1@p=457
 PREHOOK: Output: database:default
 PREHOOK: Output: default@cttas1_mm
-POSTHOOK: query: create temporary table cttas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
+POSTHOOK: query: create temporary table cttas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate_n1
 POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n1
+POSTHOOK: Input: default@intermediate_n1@p=455
+POSTHOOK: Input: default@intermediate_n1@p=456
+POSTHOOK: Input: default@intermediate_n1@p=457
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@cttas1_mm
 PREHOOK: query: select * from cttas1_mm
@@ -79,11 +79,11 @@ POSTHOOK: query: drop table cttas1_mm
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@cttas1_mm
 POSTHOOK: Output: default@cttas1_mm
-PREHOOK: query: drop table intermediate
+PREHOOK: query: drop table intermediate_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@intermediate
-PREHOOK: Output: default@intermediate
-POSTHOOK: query: drop table intermediate
+PREHOOK: Input: default@intermediate_n1
+PREHOOK: Output: default@intermediate_n1
+POSTHOOK: query: drop table intermediate_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Output: default@intermediate
+POSTHOOK: Input: default@intermediate_n1
+POSTHOOK: Output: default@intermediate_n1
diff --git a/ql/src/test/results/clientpositive/mm_default.q.out b/ql/src/test/results/clientpositive/mm_default.q.out
index e08ec5fb53..4ba6aa5223 100644
--- a/ql/src/test/results/clientpositive/mm_default.q.out
+++ b/ql/src/test/results/clientpositive/mm_default.q.out
@@ -46,14 +46,14 @@ POSTHOOK: query: create table mm2 like mm0
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@mm2
-PREHOOK: query: create table acid1 like acid0
+PREHOOK: query: create table acid1_n0 like acid0
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@acid1
-POSTHOOK: query: create table acid1 like acid0
+PREHOOK: Output: default@acid1_n0
+POSTHOOK: query: create table acid1_n0 like acid0
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid1
+POSTHOOK: Output: default@acid1_n0
 PREHOOK: query: create table mm3 as select key from src limit 1
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -73,14 +73,14 @@ POSTHOOK: query: create table mm4 (key string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@mm4
-PREHOOK: query: create table acid2 (key string) stored as ORC tblproperties("transactional"="true")
+PREHOOK: query: create table acid2_n0 (key string) stored as ORC tblproperties("transactional"="true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@acid2
-POSTHOOK: query: create table acid2 (key string) stored as ORC tblproperties("transactional"="true")
+PREHOOK: Output: default@acid2_n0
+POSTHOOK: query: create table acid2_n0 (key string) stored as ORC tblproperties("transactional"="true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid2
+POSTHOOK: Output: default@acid2_n0
 PREHOOK: query: create table non_mm1 tblproperties("transactional"="false") as select key from src limit 1
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -270,12 +270,12 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: desc formatted acid1
+PREHOOK: query: desc formatted acid1_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@acid1
-POSTHOOK: query: desc formatted acid1
+PREHOOK: Input: default@acid1_n0
+POSTHOOK: query: desc formatted acid1_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@acid1
+POSTHOOK: Input: default@acid1_n0
 # col_name	data_type	comment
 key	string
@@ -305,12 +305,12 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: desc formatted acid2
+PREHOOK: query: desc formatted acid2_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@acid2
-POSTHOOK: query: desc formatted acid2
+PREHOOK: Input: default@acid2_n0
+POSTHOOK: query: desc formatted acid2_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@acid2
+POSTHOOK: Input: default@acid2_n0
 # col_name	data_type	comment
 key	string
@@ -405,19 +405,19 @@ POSTHOOK: query: drop table acid0
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@acid0
 POSTHOOK: Output: default@acid0
-PREHOOK: query: drop table acid1
+PREHOOK: query: drop table acid1_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@acid1
-PREHOOK: Output: default@acid1
-POSTHOOK: query: drop table acid1
+PREHOOK: Input: default@acid1_n0
+PREHOOK: Output: default@acid1_n0
+POSTHOOK: query: drop table acid1_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@acid1
-POSTHOOK: Output: default@acid1
-PREHOOK: query: drop table acid2
+POSTHOOK: Input: default@acid1_n0
+POSTHOOK: Output: default@acid1_n0
+PREHOOK: query: drop table acid2_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@acid2
-PREHOOK: Output: default@acid2
-POSTHOOK: query: drop table acid2
+PREHOOK: Input: default@acid2_n0
+PREHOOK: Output: default@acid2_n0
+POSTHOOK: query: drop table acid2_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@acid2
-POSTHOOK: Output: default@acid2
+POSTHOOK: Input: default@acid2_n0
+POSTHOOK: Output: default@acid2_n0
diff --git a/ql/src/test/results/clientpositive/msck_repair_0.q.out b/ql/src/test/results/clientpositive/msck_repair_0.q.out
index f48cc78f41..fa6e4a9882 100644
--- a/ql/src/test/results/clientpositive/msck_repair_0.q.out
+++ b/ql/src/test/results/clientpositive/msck_repair_0.q.out
@@ -1,89 +1,80 @@
-PREHOOK: query: DROP TABLE IF EXISTS repairtable
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n5
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS repairtable
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n5
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: query: CREATE TABLE repairtable_n5(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: Output: default@repairtable_n5
+POSTHOOK: query: CREATE TABLE repairtable_n5(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Output: default@repairtable_n5
+PREHOOK: query: MSCK TABLE repairtable_n5
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n5
+POSTHOOK: query: MSCK TABLE repairtable_n5
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n5
+PREHOOK: query: show partitions repairtable_n5
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n5
+POSTHOOK: query: show partitions repairtable_n5
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-PREHOOK: query: MSCK TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n5
+PREHOOK: query: MSCK TABLE default.repairtable_n5
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n5
+POSTHOOK: query: MSCK TABLE default.repairtable_n5
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=c/p2=a
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n5
+PREHOOK: query: show partitions default.repairtable_n5
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n5
+POSTHOOK: query: show partitions default.repairtable_n5
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n5
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n5
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n5
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n5
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=c/p2=a
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n5
+PREHOOK: query: show partitions default.repairtable_n5
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n5
+POSTHOOK: query: show partitions default.repairtable_n5
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=c/p2=a
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Input: default@repairtable_n5
+PREHOOK: query: MSCK TABLE repairtable_n5
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n5
+POSTHOOK: query: MSCK TABLE repairtable_n5
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n5
+PREHOOK: query: show partitions repairtable_n5
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n5
+POSTHOOK: query: show partitions repairtable_n5
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=c/p2=a
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n5
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n5
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n5
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n5
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=e/p2=f
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n5
+PREHOOK: query: show partitions default.repairtable_n5
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n5
+POSTHOOK: query: show partitions default.repairtable_n5
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=c/p2=a
-p1=e/p2=f
-PREHOOK: query: DROP TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n5
+PREHOOK: query: DROP TABLE default.repairtable_n5
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@repairtable
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: DROP TABLE default.repairtable
+PREHOOK: Input: default@repairtable_n5
+PREHOOK: Output: default@repairtable_n5
+POSTHOOK: query: DROP TABLE default.repairtable_n5
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@repairtable
-POSTHOOK: Output: default@repairtable
+POSTHOOK: Input: default@repairtable_n5
+POSTHOOK: Output: default@repairtable_n5
diff --git a/ql/src/test/results/clientpositive/msck_repair_2.q.out b/ql/src/test/results/clientpositive/msck_repair_2.q.out
index c702f37f8b..7fbd934e11 100644
--- a/ql/src/test/results/clientpositive/msck_repair_2.q.out
+++ b/ql/src/test/results/clientpositive/msck_repair_2.q.out
@@ -1,73 +1,68 @@
-PREHOOK: query: DROP TABLE IF EXISTS repairtable
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n2
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS repairtable
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n2
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: query: CREATE TABLE repairtable_n2(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: Output: default@repairtable_n2
+POSTHOOK: query: CREATE TABLE repairtable_n2(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Output: default@repairtable_n2
+PREHOOK: query: MSCK TABLE repairtable_n2
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n2
+POSTHOOK: query: MSCK TABLE repairtable_n2
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n2
+PREHOOK: query: show partitions repairtable_n2
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n2
+POSTHOOK: query: show partitions repairtable_n2
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-PREHOOK: query: MSCK TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n2
+PREHOOK: query: MSCK TABLE default.repairtable_n2
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n2
+POSTHOOK: query: MSCK TABLE default.repairtable_n2
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=c/p2=a
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n2
+PREHOOK: query: show partitions repairtable_n2
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n2
+POSTHOOK: query: show partitions repairtable_n2
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n2
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n2
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n2
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n2
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=c/p2=a
-#### A masked pattern was here ####
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n2
+PREHOOK: query: show partitions repairtable_n2
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n2
+POSTHOOK: query: show partitions repairtable_n2
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=c/p2=a
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Input: default@repairtable_n2
+PREHOOK: query: MSCK TABLE repairtable_n2
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n2
+POSTHOOK: query: MSCK TABLE repairtable_n2
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n2
+PREHOOK: query: show partitions repairtable_n2
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n2
+POSTHOOK: query: show partitions repairtable_n2
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=c/p2=a
-PREHOOK: query: DROP TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n2
+PREHOOK: query: DROP TABLE default.repairtable_n2
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@repairtable
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: DROP TABLE default.repairtable
+PREHOOK: Input: default@repairtable_n2
+PREHOOK: Output: default@repairtable_n2
+POSTHOOK: query: DROP TABLE default.repairtable_n2
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@repairtable
-POSTHOOK: Output: default@repairtable
+POSTHOOK: Input: default@repairtable_n2
+POSTHOOK: Output: default@repairtable_n2
diff --git a/ql/src/test/results/clientpositive/msck_repair_3.q.out b/ql/src/test/results/clientpositive/msck_repair_3.q.out
index c702f37f8b..0e153fbe69 100644
--- a/ql/src/test/results/clientpositive/msck_repair_3.q.out
+++ b/ql/src/test/results/clientpositive/msck_repair_3.q.out
@@ -1,73 +1,68 @@
-PREHOOK: query: DROP TABLE IF EXISTS repairtable
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n3
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS repairtable
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n3
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: query: CREATE TABLE repairtable_n3(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: Output: default@repairtable_n3
+POSTHOOK: query: CREATE TABLE repairtable_n3(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Output: default@repairtable_n3
+PREHOOK: query: MSCK TABLE repairtable_n3
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n3
+POSTHOOK: query: MSCK TABLE repairtable_n3
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n3
+PREHOOK: query: show partitions repairtable_n3
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n3
+POSTHOOK: query: show partitions repairtable_n3
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-PREHOOK: query: MSCK TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n3
+PREHOOK: query: MSCK TABLE default.repairtable_n3
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n3
+POSTHOOK: query: MSCK TABLE default.repairtable_n3
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=c/p2=a
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n3
+PREHOOK: query: show partitions repairtable_n3
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n3
+POSTHOOK: query: show partitions repairtable_n3
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n3
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n3
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n3
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n3
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=c/p2=a
-#### A masked pattern was here ####
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n3
+PREHOOK: query: show partitions repairtable_n3
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n3
+POSTHOOK: query: show partitions repairtable_n3
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=c/p2=a
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Input: default@repairtable_n3
+PREHOOK: query: MSCK TABLE repairtable_n3
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n3
+POSTHOOK: query: MSCK TABLE repairtable_n3
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n3
+PREHOOK: query: show partitions repairtable_n3
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n3
+POSTHOOK: query: show partitions repairtable_n3
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=c/p2=a
-PREHOOK: query: DROP TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n3
+PREHOOK: query: DROP TABLE default.repairtable_n3
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@repairtable
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: DROP TABLE default.repairtable
+PREHOOK: Input: default@repairtable_n3
+PREHOOK: Output: default@repairtable_n3
+POSTHOOK: query: DROP TABLE default.repairtable_n3
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@repairtable
-POSTHOOK: Output: default@repairtable
+POSTHOOK: Input: default@repairtable_n3
+POSTHOOK: Output: default@repairtable_n3
diff --git a/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out b/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out
index aed9271079..ab4b83137d 100644
--- a/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out
+++ b/ql/src/test/results/clientpositive/msck_repair_batchsize.q.out
@@ -1,102 +1,93 @@
-PREHOOK: query: DROP TABLE IF EXISTS repairtable
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS repairtable
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: query: CREATE TABLE repairtable_n0(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: Output: default@repairtable_n0
+POSTHOOK: query: CREATE TABLE repairtable_n0(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Output: default@repairtable_n0
+PREHOOK: query: MSCK TABLE repairtable_n0
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n0
+POSTHOOK: query: MSCK TABLE repairtable_n0
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE default.repairtable
+POSTHOOK: Output: default@repairtable_n0
+PREHOOK: query: MSCK TABLE default.repairtable_n0
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n0
+POSTHOOK: query: MSCK TABLE default.repairtable_n0
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=a/p2=a repairtable:p1=b/p2=a repairtable:p1=c/p2=a
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n0
+PREHOOK: query: show partitions default.repairtable_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n0
+POSTHOOK: query: show partitions default.repairtable_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n0
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n0
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n0
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n0
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=a/p2=a repairtable:p1=b/p2=a repairtable:p1=c/p2=a
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n0
+PREHOOK: query: show partitions default.repairtable_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n0
+POSTHOOK: query: show partitions default.repairtable_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=a/p2=a
-p1=b/p2=a
-p1=c/p2=a
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Input: default@repairtable_n0
+PREHOOK: query: MSCK TABLE repairtable_n0
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n0
+POSTHOOK: query: MSCK TABLE repairtable_n0
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n0
+PREHOOK: query: show partitions repairtable_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n0
+POSTHOOK: query: show partitions repairtable_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=a/p2=a
-p1=b/p2=a
-p1=c/p2=a
-PREHOOK: query: DROP TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n0
+PREHOOK: query: DROP TABLE default.repairtable_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@repairtable
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: DROP TABLE default.repairtable
+PREHOOK: Input: default@repairtable_n0
+PREHOOK: Output: default@repairtable_n0
+POSTHOOK: query: DROP TABLE default.repairtable_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@repairtable
-POSTHOOK: Output: default@repairtable
+POSTHOOK: Input: default@repairtable_n0
+POSTHOOK: Output: default@repairtable_n0
 #### A masked pattern was here ####
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@repairtable
+PREHOOK: Output: default@repairtable_n0
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Output: default@repairtable_n0
+PREHOOK: query: MSCK TABLE repairtable_n0
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n0
+POSTHOOK: query: MSCK TABLE repairtable_n0
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=c/p2=a
-PREHOOK: query: show partitions repairtable
+POSTHOOK: Output: default@repairtable_n0
+Partitions not in metastore: repairtable_n0:p1=c/p2=a
+PREHOOK: query: show partitions repairtable_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions repairtable
+PREHOOK: Input: default@repairtable_n0
+POSTHOOK: query: show partitions repairtable_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-PREHOOK: query: DROP TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n0
+PREHOOK: query: DROP TABLE default.repairtable_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@repairtable
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: DROP TABLE default.repairtable
+PREHOOK: Input: default@repairtable_n0
+PREHOOK: Output: default@repairtable_n0
+POSTHOOK: query: DROP TABLE default.repairtable_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@repairtable
-POSTHOOK: Output: default@repairtable
+POSTHOOK: Input: default@repairtable_n0
+POSTHOOK: Output: default@repairtable_n0
diff --git a/ql/src/test/results/clientpositive/msck_repair_drop.q.out b/ql/src/test/results/clientpositive/msck_repair_drop.q.out
index 6d89d8f4b9..971c138127 100644
--- a/ql/src/test/results/clientpositive/msck_repair_drop.q.out
+++ b/ql/src/test/results/clientpositive/msck_repair_drop.q.out
@@ -1,293 +1,197 @@
-PREHOOK: query: DROP TABLE IF EXISTS repairtable
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS repairtable
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: query: CREATE TABLE repairtable_n1(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: CREATE TABLE repairtable_n1(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: MSCK TABLE default.repairtable_n1
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK TABLE default.repairtable_n1
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=1/p2=11 repairtable:p1=1/p2=12 repairtable:p1=2/p2=21 repairtable:p1=2/p2=210 repairtable:p1=2/p2=22 repairtable:p1=2/p2=23 repairtable:p1=2/p2=24 repairtable:p1=2/p2=25 repairtable:p1=2/p2=26 repairtable:p1=2/p2=27 repairtable:p1=2/p2=28 repairtable:p1=2/p2=29
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=1/p2=11 repairtable:p1=1/p2=12 repairtable:p1=2/p2=21 repairtable:p1=2/p2=210 repairtable:p1=2/p2=22 repairtable:p1=2/p2=23 repairtable:p1=2/p2=24 repairtable:p1=2/p2=25 repairtable:p1=2/p2=26 repairtable:p1=2/p2=27 repairtable:p1=2/p2=28 repairtable:p1=2/p2=29
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n1
+POSTHOOK: query: show partitions default.repairtable_n1
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=1/p2=11
-p1=1/p2=12
-p1=2/p2=21
-p1=2/p2=210
-p1=2/p2=22
-p1=2/p2=23
-p1=2/p2=24
-p1=2/p2=25
-p1=2/p2=26
-p1=2/p2=27
-p1=2/p2=28
-p1=2/p2=29
+POSTHOOK: Input: default@repairtable_n1
 #### A masked pattern was here ####
-PREHOOK: query: MSCK TABLE default.repairtable DROP PARTITIONS
+PREHOOK: query: MSCK TABLE default.repairtable_n1 DROP PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable DROP PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions missing from filesystem: repairtable:p1=2/p2=21 repairtable:p1=2/p2=210 repairtable:p1=2/p2=22 repairtable:p1=2/p2=23 repairtable:p1=2/p2=24 repairtable:p1=2/p2=25 repairtable:p1=2/p2=26 repairtable:p1=2/p2=27 repairtable:p1=2/p2=28 repairtable:p1=2/p2=29
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable DROP PARTITIONS
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable DROP PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions missing from filesystem: repairtable:p1=2/p2=21 repairtable:p1=2/p2=210 repairtable:p1=2/p2=22 repairtable:p1=2/p2=23 repairtable:p1=2/p2=24 repairtable:p1=2/p2=25 repairtable:p1=2/p2=26 repairtable:p1=2/p2=27 repairtable:p1=2/p2=28 repairtable:p1=2/p2=29
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n1
+POSTHOOK: query: show partitions default.repairtable_n1
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=1/p2=11
-p1=1/p2=12
-PREHOOK: query: MSCK TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n1
+PREHOOK: query: MSCK TABLE default.repairtable_n1
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK TABLE default.repairtable_n1
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=2/p2=21 repairtable:p1=2/p2=210 repairtable:p1=2/p2=22 repairtable:p1=2/p2=23 repairtable:p1=2/p2=24 repairtable:p1=2/p2=25 repairtable:p1=2/p2=26 repairtable:p1=2/p2=27 repairtable:p1=2/p2=28 repairtable:p1=2/p2=29
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=2/p2=21 repairtable:p1=2/p2=210 repairtable:p1=2/p2=22 repairtable:p1=2/p2=23 repairtable:p1=2/p2=24 repairtable:p1=2/p2=25 repairtable:p1=2/p2=26 repairtable:p1=2/p2=27 repairtable:p1=2/p2=28 repairtable:p1=2/p2=29
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n1
+POSTHOOK: query: show partitions default.repairtable_n1
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=1/p2=11
-p1=1/p2=12
-p1=2/p2=21
-p1=2/p2=210
-p1=2/p2=22
-p1=2/p2=23
-p1=2/p2=24
-p1=2/p2=25
-p1=2/p2=26
-p1=2/p2=27
-p1=2/p2=28
-p1=2/p2=29
+POSTHOOK: Input: default@repairtable_n1
 #### A masked pattern was here ####
-PREHOOK: query: MSCK TABLE default.repairtable DROP PARTITIONS
+PREHOOK: query: MSCK TABLE default.repairtable_n1 DROP PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable DROP PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions missing from filesystem: repairtable:p1=2/p2=21 repairtable:p1=2/p2=210 repairtable:p1=2/p2=22 repairtable:p1=2/p2=23 repairtable:p1=2/p2=24 repairtable:p1=2/p2=25 repairtable:p1=2/p2=26 repairtable:p1=2/p2=27 repairtable:p1=2/p2=28 repairtable:p1=2/p2=29
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable DROP PARTITIONS
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable DROP PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions missing from filesystem: repairtable:p1=2/p2=21 repairtable:p1=2/p2=210 repairtable:p1=2/p2=22 repairtable:p1=2/p2=23 repairtable:p1=2/p2=24 repairtable:p1=2/p2=25 repairtable:p1=2/p2=26 repairtable:p1=2/p2=27 repairtable:p1=2/p2=28 repairtable:p1=2/p2=29
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n1
+POSTHOOK: query: show partitions default.repairtable_n1
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=1/p2=11
-p1=1/p2=12
-PREHOOK: query: MSCK TABLE default.repairtable
+POSTHOOK: Input: default@repairtable_n1
+PREHOOK: query: MSCK TABLE default.repairtable_n1
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK TABLE default.repairtable_n1
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=3/p2=31 repairtable:p1=3/p2=32
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=3/p2=31 repairtable:p1=3/p2=32
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n1
+POSTHOOK: query: show partitions default.repairtable_n1
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=1/p2=11
-p1=1/p2=12
-p1=3/p2=31
-p1=3/p2=32
+POSTHOOK: Input: default@repairtable_n1
 #### A masked pattern was here ####
-PREHOOK: query: MSCK TABLE default.repairtable ADD PARTITIONS
+PREHOOK: query: MSCK TABLE default.repairtable_n1 ADD PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable ADD PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK TABLE default.repairtable_n1 ADD PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=4/p2=41 repairtable:p1=4/p2=42
-Partitions missing from filesystem: repairtable:p1=3/p2=31 repairtable:p1=3/p2=32
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable ADD PARTITIONS
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 ADD PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable ADD PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 ADD PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=4/p2=41 repairtable:p1=4/p2=42
-Partitions missing from filesystem: repairtable:p1=3/p2=31 repairtable:p1=3/p2=32
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n1
+POSTHOOK: query: show partitions default.repairtable_n1
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=1/p2=11
-p1=1/p2=12
-p1=3/p2=31
-p1=3/p2=32
-p1=4/p2=41
-p1=4/p2=42
+POSTHOOK: Input: default@repairtable_n1
 PREHOOK: query: MSCK TABLE default.repairtable_n1 DROP PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable DROP PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=5/p2=51 repairtable:p1=5/p2=52
-Partitions missing from filesystem: repairtable:p1=3/p2=31 repairtable:p1=3/p2=32
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable DROP PARTITIONS
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable DROP PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=5/p2=51 repairtable:p1=5/p2=52
-Partitions missing from filesystem: repairtable:p1=3/p2=31 repairtable:p1=3/p2=32
-#### A masked pattern was here ####
-PREHOOK: query: show partitions default.repairtable
+POSTHOOK: Output: default@repairtable_n1
+PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@repairtable
-POSTHOOK: query: show partitions default.repairtable
+PREHOOK: Input: default@repairtable_n1
+POSTHOOK: query: show partitions default.repairtable_n1
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@repairtable
-p1=1/p2=11
-p1=1/p2=12
-p1=4/p2=41
-p1=4/p2=42
+POSTHOOK: Input: default@repairtable_n1
 #### A masked pattern was here ####
-PREHOOK: query: MSCK TABLE default.repairtable SYNC PARTITIONS
+PREHOOK: query: MSCK TABLE default.repairtable_n1 SYNC PARTITIONS
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable SYNC PARTITIONS
+PREHOOK: Output: default@repairtable_n1
+POSTHOOK: query: MSCK TABLE default.repairtable_n1 SYNC PARTITIONS
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore: repairtable:p1=5/p2=51 repairtable:p1=5/p2=52
-Partitions missing from filesystem: repairtable:p1=4/p2=41 repairtable:p1=4/p2=42
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable SYNC PARTITIONS
 PREHOOK: type: MSCK
MSCK -PREHOOK: Output: default@repairtable -POSTHOOK: query: MSCK REPAIR TABLE default.repairtable SYNC PARTITIONS +PREHOOK: Output: default@repairtable_n1 +POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 SYNC PARTITIONS POSTHOOK: type: MSCK -POSTHOOK: Output: default@repairtable -Partitions not in metastore: repairtable:p1=5/p2=51 repairtable:p1=5/p2=52 -Partitions missing from filesystem: repairtable:p1=4/p2=41 repairtable:p1=4/p2=42 -#### A masked pattern was here #### -PREHOOK: query: show partitions default.repairtable +POSTHOOK: Output: default@repairtable_n1 +PREHOOK: query: show partitions default.repairtable_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@repairtable -POSTHOOK: query: show partitions default.repairtable +PREHOOK: Input: default@repairtable_n1 +POSTHOOK: query: show partitions default.repairtable_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@repairtable -p1=1/p2=11 -p1=1/p2=12 -p1=5/p2=51 -p1=5/p2=52 +POSTHOOK: Input: default@repairtable_n1 diff --git a/ql/src/test/results/clientpositive/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/multi_insert_gby.q.out index c5f4605452..514e453832 100644 --- a/ql/src/test/results/clientpositive/multi_insert_gby.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_gby.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: create table e1 (key string, count int) +PREHOOK: query: create table e1_n0 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e1 -POSTHOOK: query: create table e1 (key string, count int) +PREHOOK: Output: default@e1_n0 +POSTHOOK: query: create table e1_n0 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e1 -PREHOOK: query: create table e2 (key string, count int) +POSTHOOK: Output: default@e1_n0 +PREHOOK: query: create table e2_n1 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e2 -POSTHOOK: query: create table e2 (key string, count int) +PREHOOK: Output: default@e2_n1 +POSTHOOK: query: create table e2_n1 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e2 +POSTHOOK: Output: default@e2_n1 PREHOOK: query: explain FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) WHERE key>500 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: explain FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) WHERE key>500 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -74,7 +74,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n0 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -110,7 +110,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n1 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: 
key, count @@ -135,7 +135,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n0 Stage: Stage-3 Stats Work @@ -143,7 +143,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e1 + Table: default.e1_n0 Stage: Stage-4 Map Reduce @@ -174,7 +174,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e2 + Table: default.e2_n1 Stage: Stage-1 Move Operator @@ -184,7 +184,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n1 Stage: Stage-6 Map Reduce @@ -210,34 +210,34 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) WHERE key>500 GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e1_n0 +PREHOOK: Output: default@e2_n1 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) WHERE key>500 GROUP BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Lineage: e1.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n0 +POSTHOOK: Output: default@e2_n1 +POSTHOOK: Lineage: e1_n0.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e1_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n1.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e2_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from e1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@e1 +PREHOOK: Input: default@e1_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n0 #### A masked pattern was here #### 452 1 453 1 @@ -276,24 +276,24 @@ POSTHOOK: Input: default@e1 496 1 497 1 498 3 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n1 #### A masked pattern was here #### PREHOOK: query: explain FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) GROUP BY key PREHOOK: 
type: QUERY POSTHOOK: query: explain FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -342,7 +342,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n1 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -378,7 +378,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n0 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -403,7 +403,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n1 Stage: Stage-3 Stats Work @@ -411,7 +411,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e2 + Table: default.e2_n1 Stage: Stage-4 Map Reduce @@ -442,7 +442,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e1 + Table: default.e1_n0 Stage: Stage-0 Move Operator @@ -452,7 +452,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n0 Stage: Stage-6 Map Reduce @@ -478,34 +478,34 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e1_n0 +PREHOOK: Output: default@e2_n1 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n0 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n1 SELECT key, COUNT(*) GROUP BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Lineage: e1.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n0 +POSTHOOK: Output: default@e2_n1 +POSTHOOK: Lineage: e1_n0.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e1_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n1.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e2_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from e1_n0 PREHOOK: type: QUERY -PREHOOK: Input: 
default@e1 +PREHOOK: Input: default@e1_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n0 #### A masked pattern was here #### 452 1 453 1 @@ -544,13 +544,13 @@ POSTHOOK: Input: default@e1 496 1 497 1 498 3 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n1 #### A masked pattern was here #### 0 3 10 1 diff --git a/ql/src/test/results/clientpositive/multi_insert_gby2.q.out b/ql/src/test/results/clientpositive/multi_insert_gby2.q.out index bb229f6621..0b392fe968 100644 --- a/ql/src/test/results/clientpositive/multi_insert_gby2.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_gby2.q.out @@ -6,26 +6,26 @@ POSTHOOK: query: create table e1 (count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 -PREHOOK: query: create table e2 (percentile double) +PREHOOK: query: create table e2_n0 (percentile double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e2 -POSTHOOK: query: create table e2 (percentile double) +PREHOOK: Output: default@e2_n0 +POSTHOOK: query: create table e2_n0 (percentile double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e2 +POSTHOOK: Output: default@e2_n0 PREHOOK: query: explain FROM (select key, cast(key as double) as value from src order by key) a INSERT OVERWRITE TABLE e1 SELECT COUNT(*) -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n0 SELECT percentile_approx(value, 0.5) PREHOOK: type: QUERY POSTHOOK: query: explain FROM (select key, cast(key as double) as value from src order by key) a INSERT OVERWRITE TABLE e1 SELECT COUNT(*) -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n0 SELECT percentile_approx(value, 0.5) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -103,7 +103,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n0 Select Operator expressions: _col0 (type: double) outputColumnNames: percentile @@ -151,7 +151,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n0 Stage: Stage-4 Stats Work @@ -159,28 +159,28 @@ STAGE PLANS: Column Stats Desc: Columns: percentile Column Types: double - Table: default.e2 + Table: default.e2_n0 PREHOOK: query: FROM (select key, cast(key as double) as value from src order by key) a INSERT OVERWRITE TABLE e1 SELECT COUNT(*) -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n0 SELECT percentile_approx(value, 0.5) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e2_n0 POSTHOOK: query: FROM (select key, cast(key as double) as value from src order by key) a INSERT OVERWRITE TABLE e1 SELECT COUNT(*) -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n0 SELECT percentile_approx(value, 0.5) POSTHOOK: 
type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 +POSTHOOK: Output: default@e2_n0 POSTHOOK: Lineage: e1.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e2.percentile EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n0.percentile EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] PREHOOK: query: select * from e1 PREHOOK: type: QUERY PREHOOK: Input: default@e1 @@ -190,12 +190,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@e1 #### A masked pattern was here #### 500 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n0 #### A masked pattern was here #### 255.5 diff --git a/ql/src/test/results/clientpositive/multi_insert_gby3.q.out b/ql/src/test/results/clientpositive/multi_insert_gby3.q.out index 45b8b2df6e..6c75853ef7 100644 --- a/ql/src/test/results/clientpositive/multi_insert_gby3.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_gby3.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: create table e1 (key string, keyD double) +PREHOOK: query: create table e1_n2 (key string, keyD double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e1 -POSTHOOK: query: create table e1 (key string, keyD double) +PREHOOK: Output: default@e1_n2 +POSTHOOK: query: create table e1_n2 (key string, keyD double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e1 -PREHOOK: query: create table e2 (key string, keyD double, value string) +POSTHOOK: Output: default@e1_n2 +PREHOOK: query: create table e2_n3 (key string, keyD double, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e2 -POSTHOOK: query: create table e2 (key string, keyD double, value string) +PREHOOK: Output: default@e2_n3 +POSTHOOK: query: create table e2_n3 (key string, keyD double, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e2 +POSTHOOK: Output: default@e2_n3 PREHOOK: query: create table e3 (key string, keyD double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -24,16 +24,16 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@e3 PREHOOK: query: explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value PREHOOK: type: QUERY POSTHOOK: query: explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -83,7 +83,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n2 Select Operator expressions: _col0 (type: string), _col1 
(type: double) outputColumnNames: key, keyd @@ -116,7 +116,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n3 Select Operator expressions: _col0 (type: string), _col1 (type: double), _col2 (type: string) outputColumnNames: key, keyd, value @@ -141,7 +141,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n2 Stage: Stage-3 Stats Work @@ -149,7 +149,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, keyd Column Types: string, double - Table: default.e1 + Table: default.e1_n2 Stage: Stage-4 Map Reduce @@ -180,7 +180,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, keyd, value Column Types: string, double, string - Table: default.e2 + Table: default.e2_n3 Stage: Stage-1 Move Operator @@ -190,7 +190,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n3 Stage: Stage-6 Map Reduce @@ -217,16 +217,16 @@ STAGE PLANS: PREHOOK: query: explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key PREHOOK: type: QUERY POSTHOOK: query: explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -276,7 +276,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n2 Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: key, keyd @@ -309,7 +309,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n3 Select Operator expressions: _col0 (type: string), _col1 (type: double), _col2 (type: string) outputColumnNames: key, keyd, value @@ -334,7 +334,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n2 Stage: Stage-3 Stats Work @@ -342,7 +342,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, keyd Column Types: string, double - Table: default.e1 + Table: default.e1_n2 Stage: Stage-4 Map Reduce @@ -373,7 +373,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, keyd, value Column Types: string, double, string - Table: default.e2 + Table: default.e2_n3 Stage: Stage-1 Move Operator @@ -383,7 +383,7 @@ STAGE PLANS: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n3 Stage: Stage-6 Map Reduce @@ -409,35 +409,35 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e1_n2 +PREHOOK: Output: default@e2_n3 POSTHOOK: query: FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e1.keyd EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.keyd EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n2 +POSTHOOK: Output: default@e2_n3 +POSTHOOK: Lineage: e1_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e1_n2.keyd EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n3.keyd EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from e1_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@e1 +PREHOOK: Input: default@e1_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n2 #### A masked pattern was here #### 0 1.0 10 1.0 @@ -748,13 +748,13 @@ POSTHOOK: Input: default@e1 96 1.0 97 1.0 98 1.0 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n3 #### A masked pattern was here #### 0 0.0 val_0 10 10.0 val_10 @@ -1066,35 +1066,35 @@ POSTHOOK: Input: default@e2 97 194.0 val_97 98 196.0 val_98 PREHOOK: query: FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, 
COUNT(distinct value) group by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 +PREHOOK: Output: default@e1_n2 +PREHOOK: Output: default@e2_n3 POSTHOOK: query: FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e1.keyd EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.keyd EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n2 +POSTHOOK: Output: default@e2_n3 +POSTHOOK: Lineage: e1_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e1_n2.keyd EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n3.keyd EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from e1_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@e1 +PREHOOK: Input: default@e1_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n2 #### A masked pattern was here #### 0 1.0 10 1.0 @@ -1405,13 +1405,13 @@ POSTHOOK: Input: default@e1 96 1.0 97 1.0 98 1.0 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n3 #### A masked pattern was here #### 0 0.0 val_0 10 10.0 val_10 @@ -1724,14 +1724,14 @@ POSTHOOK: Input: default@e2 98 196.0 val_98 PREHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n2 select key, count(distinct value) group by key insert overwrite table e3 select value, count(distinct key) group by value PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table e1 +insert overwrite table e1_n2 select key, count(distinct value) group by key insert overwrite table e3 select value, count(distinct key) group by value @@ -1802,7 +1802,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n2 Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: key, keyd @@ -1827,7 +1827,7 @@ STAGE PLANS: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n2 Stage: Stage-3 Stats Work @@ -1835,7 +1835,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, keyd Column Types: string, double - Table: default.e1 + Table: default.e1_n2 Stage: Stage-4 Map Reduce @@ -1948,18 +1948,18 @@ STAGE PLANS: PREHOOK: query: explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value INSERT overwrite table e3 SELECT key, COUNT(distinct keyD) group by key, keyD, value PREHOOK: type: QUERY POSTHOOK: query: explain FROM (select key, cast(key as double) as keyD, value from src order by key) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n2 SELECT key, COUNT(distinct value) group by key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n3 SELECT key, sum(keyD), value group by key, value INSERT overwrite table e3 SELECT key, COUNT(distinct keyD) group by key, keyD, value @@ -2026,7 +2026,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n2 Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: key, keyd @@ -2059,7 +2059,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n3 Select Operator expressions: _col0 (type: string), _col1 (type: double), _col2 (type: string) outputColumnNames: key, keyd, value @@ -2084,7 +2084,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n2 Stage: Stage-4 Stats Work @@ -2092,7 +2092,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, keyd Column Types: string, double - Table: default.e1 + Table: default.e1_n2 Stage: Stage-5 Map Reduce @@ -2123,7 +2123,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, keyd, value Column Types: string, double, string - Table: default.e2 + Table: default.e2_n3 Stage: Stage-9 Stats Work @@ -2141,7 +2141,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n3 Stage: Stage-7 Map Reduce diff --git a/ql/src/test/results/clientpositive/multi_insert_gby4.q.out b/ql/src/test/results/clientpositive/multi_insert_gby4.q.out index b080cf4a61..eed39eedf8 100644 --- a/ql/src/test/results/clientpositive/multi_insert_gby4.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_gby4.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table e1 (key string, count int) +PREHOOK: query: create table e1_n4 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e1 -POSTHOOK: query: create table e1 (key string, count int) +PREHOOK: 
Output: default@e1_n4 +POSTHOOK: query: create table e1_n4 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e1 -PREHOOK: query: create table e2 (key string, count int) +POSTHOOK: Output: default@e1_n4 +PREHOOK: query: create table e2_n5 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e2 -POSTHOOK: query: create table e2 (key string, count int) +PREHOOK: Output: default@e2_n5 +POSTHOOK: query: create table e2_n5 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e2 -PREHOOK: query: create table e3 (key string, count int) +POSTHOOK: Output: default@e2_n5 +PREHOOK: query: create table e3_n0 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@e3 -POSTHOOK: query: create table e3 (key string, count int) +PREHOOK: Output: default@e3_n0 +POSTHOOK: query: create table e3_n0 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@e3 +POSTHOOK: Output: default@e3_n0 PREHOOK: query: explain FROM (SELECT key, value FROM src) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n4 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n5 SELECT key, COUNT(*) WHERE key>500 GROUP BY key -INSERT OVERWRITE TABLE e3 +INSERT OVERWRITE TABLE e3_n0 SELECT key, COUNT(*) WHERE key>490 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: explain FROM (SELECT key, value FROM src) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n4 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n5 SELECT key, COUNT(*) WHERE key>500 GROUP BY key -INSERT OVERWRITE TABLE e3 +INSERT OVERWRITE TABLE e3_n0 SELECT key, COUNT(*) WHERE key>490 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -95,7 +95,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -131,7 +131,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -167,7 +167,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e3 + name: default.e3_n0 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, count @@ -192,7 +192,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e1 + name: default.e1_n4 Stage: Stage-4 Stats Work @@ -200,7 +200,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e1 + Table: default.e1_n4 Stage: Stage-5 Map Reduce @@ -231,7 +231,7 @@ STAGE 
PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e2 + Table: default.e2_n5 Stage: Stage-8 Stats Work @@ -239,7 +239,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, count Column Types: string, int - Table: default.e3 + Table: default.e3_n0 Stage: Stage-1 Move Operator @@ -249,7 +249,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e2 + name: default.e2_n5 Stage: Stage-7 Map Reduce @@ -282,7 +282,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.e3 + name: default.e3_n0 Stage: Stage-9 Map Reduce @@ -308,42 +308,42 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM (SELECT key, value FROM src) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n4 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n5 SELECT key, COUNT(*) WHERE key>500 GROUP BY key -INSERT OVERWRITE TABLE e3 +INSERT OVERWRITE TABLE e3_n0 SELECT key, COUNT(*) WHERE key>490 GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@e1 -PREHOOK: Output: default@e2 -PREHOOK: Output: default@e3 +PREHOOK: Output: default@e1_n4 +PREHOOK: Output: default@e2_n5 +PREHOOK: Output: default@e3_n0 POSTHOOK: query: FROM (SELECT key, value FROM src) a -INSERT OVERWRITE TABLE e1 +INSERT OVERWRITE TABLE e1_n4 SELECT key, COUNT(*) WHERE key>450 GROUP BY key -INSERT OVERWRITE TABLE e2 +INSERT OVERWRITE TABLE e2_n5 SELECT key, COUNT(*) WHERE key>500 GROUP BY key -INSERT OVERWRITE TABLE e3 +INSERT OVERWRITE TABLE e3_n0 SELECT key, COUNT(*) WHERE key>490 GROUP BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@e1 -POSTHOOK: Output: default@e2 -POSTHOOK: Output: default@e3 -POSTHOOK: Lineage: e1.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e2.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: e3.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: e3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from e1 +POSTHOOK: Output: default@e1_n4 +POSTHOOK: Output: default@e2_n5 +POSTHOOK: Output: default@e3_n0 +POSTHOOK: Lineage: e1_n4.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e2_n5.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: e3_n0.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: e3_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from e1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@e1 +PREHOOK: Input: default@e1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from e1 +POSTHOOK: query: select * from e1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e1 +POSTHOOK: Input: default@e1_n4 #### A masked pattern was here #### 452 1 453 1 @@ -382,21 +382,21 
@@ POSTHOOK: Input: default@e1 496 1 497 1 498 3 -PREHOOK: query: select * from e2 +PREHOOK: query: select * from e2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@e2 +PREHOOK: Input: default@e2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from e2 +POSTHOOK: query: select * from e2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e2 +POSTHOOK: Input: default@e2_n5 #### A masked pattern was here #### -PREHOOK: query: select * from e3 +PREHOOK: query: select * from e3_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@e3 +PREHOOK: Input: default@e3_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from e3 +POSTHOOK: query: select * from e3_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@e3 +POSTHOOK: Input: default@e3_n0 #### A masked pattern was here #### 491 1 492 2 diff --git a/ql/src/test/results/clientpositive/multi_insert_mixed.q.out b/ql/src/test/results/clientpositive/multi_insert_mixed.q.out index 2cc71bc02a..e9729a4ace 100644 --- a/ql/src/test/results/clientpositive/multi_insert_mixed.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_mixed.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: create table src_multi1 like src +PREHOOK: query: create table src_multi1_n2 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_multi1 -POSTHOOK: query: create table src_multi1 like src +PREHOOK: Output: default@src_multi1_n2 +POSTHOOK: query: create table src_multi1_n2 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_multi1 -PREHOOK: query: create table src_multi2 like src +POSTHOOK: Output: default@src_multi1_n2 +PREHOOK: query: create table src_multi2_n3 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_multi2 -POSTHOOK: query: create table src_multi2 like src +PREHOOK: Output: default@src_multi2_n3 +POSTHOOK: query: create table src_multi2_n3 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_multi2 +POSTHOOK: Output: default@src_multi2_n3 PREHOOK: query: create table src_multi3 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -24,14 +24,14 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@src_multi3 PREHOOK: query: explain from src -insert overwrite table src_multi1 select key, count(1) group by key order by key -insert overwrite table src_multi2 select value, count(1) group by value order by value +insert overwrite table src_multi1_n2 select key, count(1) group by key order by key +insert overwrite table src_multi2_n3 select value, count(1) group by value order by value insert overwrite table src_multi3 select * where key < 10 PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select key, count(1) group by key order by key -insert overwrite table src_multi2 select value, count(1) group by value order by value +insert overwrite table src_multi1_n2 select key, count(1) group by key order by key +insert overwrite table src_multi2_n3 select value, count(1) group by value order by value insert overwrite table src_multi3 select * where key < 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -152,7 +152,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n2 Select Operator 
expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -182,7 +182,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n2 Stage: Stage-5 Stats Work @@ -190,7 +190,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n2 Stage: Stage-6 Map Reduce @@ -239,7 +239,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n3 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -269,7 +269,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n3 Stage: Stage-8 Stats Work @@ -277,7 +277,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n3 Stage: Stage-2 Move Operator @@ -321,36 +321,36 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table src_multi1 select key, count(1) group by key order by key -insert overwrite table src_multi2 select value, count(1) group by value order by value +insert overwrite table src_multi1_n2 select key, count(1) group by key order by key +insert overwrite table src_multi2_n3 select value, count(1) group by value order by value insert overwrite table src_multi3 select * where key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n2 +PREHOOK: Output: default@src_multi2_n3 PREHOOK: Output: default@src_multi3 POSTHOOK: query: from src -insert overwrite table src_multi1 select key, count(1) group by key order by key -insert overwrite table src_multi2 select value, count(1) group by value order by value +insert overwrite table src_multi1_n2 select key, count(1) group by key order by key +insert overwrite table src_multi2_n3 select value, count(1) group by value order by value insert overwrite table src_multi3 select * where key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 +POSTHOOK: Output: default@src_multi1_n2 +POSTHOOK: Output: default@src_multi2_n3 POSTHOOK: Output: default@src_multi3 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: src_multi1_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n2.value EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: src_multi2_n3.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: 
src_multi2_n3.value EXPRESSION [(src)src.null, ] POSTHOOK: Lineage: src_multi3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: src_multi3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +PREHOOK: query: select * from src_multi1_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n2 #### A masked pattern was here #### 0 3 10 1 @@ -661,13 +661,13 @@ POSTHOOK: Input: default@src_multi1 96 1 97 2 98 2 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n3 #### A masked pattern was here #### val_0 3 val_10 1 diff --git a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out index 53f073f159..fa32ee480b 100644 --- a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: create table src_multi1 like src +PREHOOK: query: create table src_multi1_n4 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_multi1 -POSTHOOK: query: create table src_multi1 like src +PREHOOK: Output: default@src_multi1_n4 +POSTHOOK: query: create table src_multi1_n4 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_multi1 -PREHOOK: query: create table src_multi2 like src +POSTHOOK: Output: default@src_multi1_n4 +PREHOOK: query: create table src_multi2_n5 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_multi2 -POSTHOOK: query: create table src_multi2 like src +PREHOOK: Output: default@src_multi2_n5 +POSTHOOK: query: create table src_multi2_n5 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_multi2 +POSTHOOK: Output: default@src_multi2_n5 PREHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -54,7 +54,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - 
name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -82,7 +82,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -123,7 +123,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -131,7 +131,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -141,7 +141,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-5 Stats Work @@ -149,7 +149,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-6 Map Reduce @@ -175,30 +175,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: 
select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -210,13 +210,13 @@ POSTHOOK: Input: default@src_multi1 5 val_5 8 val_8 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 12 val_12 @@ -229,13 +229,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -272,7 +272,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -300,7 +300,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -350,7 +350,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -358,7 +358,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -368,7 +368,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-10 Stats Work @@ -376,7 +376,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-5 Map Reduce @@ -388,7 +388,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-7 Map Reduce @@ -400,7 +400,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-8 Move Operator @@ -432,30 +432,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -467,13 +467,13 @@ POSTHOOK: Input: default@src_multi1 5 val_5 8 val_8 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 12 val_12 @@ -486,13 +486,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 
PREHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -529,7 +529,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -557,7 +557,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -598,7 +598,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -606,7 +606,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -616,7 +616,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-5 Stats Work @@ -624,7 +624,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-10 Conditional Operator @@ -645,7 +645,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-8 Map Reduce @@ -657,7 +657,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-9 Move Operator @@ -689,30 +689,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: 
default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -724,13 +724,13 @@ POSTHOOK: Input: default@src_multi1 5 val_5 8 val_8 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 12 val_12 @@ -743,13 +743,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -791,7 +791,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: 
_col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -819,7 +819,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -869,7 +869,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -877,7 +877,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -887,7 +887,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-10 Stats Work @@ -895,7 +895,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-5 Map Reduce @@ -907,7 +907,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-7 Map Reduce @@ -919,7 +919,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-8 Move Operator @@ -946,7 +946,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-13 Map Reduce @@ -958,7 +958,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-14 Move Operator @@ -990,30 +990,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: 
QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -1025,13 +1025,13 @@ POSTHOOK: Input: default@src_multi1 5 val_5 8 val_8 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 12 val_12 @@ -1044,13 +1044,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -1096,7 +1096,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -1127,7 +1127,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -1155,7 +1155,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -1163,7 +1163,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -1173,7 +1173,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-6 Stats Work @@ -1181,7 +1181,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-5 Map Reduce @@ -1230,30 +1230,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here 
#### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 2 val_2 @@ -1261,13 +1261,13 @@ POSTHOOK: Input: default@src_multi1 5 val_5 8 val_8 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 12 val_12 @@ -1277,13 +1277,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -1339,7 +1339,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -1370,7 +1370,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -1407,7 +1407,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -1415,7 +1415,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -1425,7 +1425,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-11 Stats Work @@ -1433,7 +1433,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-5 Map Reduce @@ -1445,7 +1445,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat 
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-7 Map Reduce @@ -1457,7 +1457,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-8 Move Operator @@ -1507,7 +1507,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-14 Map Reduce @@ -1519,7 +1519,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-15 Move Operator @@ -1551,30 +1551,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: 
query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 2 val_2 @@ -1582,13 +1582,13 @@ POSTHOOK: Input: default@src_multi1 5 val_5 8 val_8 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 12 val_12 @@ -1598,13 +1598,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -1650,7 +1650,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -1681,7 +1681,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -1709,7 +1709,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -1717,7 +1717,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -1727,7 +1727,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-6 Stats Work @@ -1735,7 +1735,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-5 Map Reduce @@ -1784,30 +1784,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table 
src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 2 val_2 @@ -1815,13 +1815,13 @@ POSTHOOK: Input: default@src_multi1 5 val_5 8 val_8 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 12 val_12 @@ -1831,13 +1831,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value PREHOOK: type: QUERY POSTHOOK: query: explain from src -insert overwrite table src_multi1 select * 
where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -1893,7 +1893,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -1924,7 +1924,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -1961,7 +1961,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -1969,7 +1969,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -1979,7 +1979,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-11 Stats Work @@ -1987,7 +1987,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-5 Map Reduce @@ -1999,7 +1999,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-7 Map Reduce @@ -2011,7 +2011,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-8 Move Operator @@ -2061,7 +2061,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-14 Map Reduce @@ -2073,7 +2073,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-15 Move Operator @@ -2105,30 +2105,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 group by key, value 
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from src -insert overwrite table src_multi1 select * where key < 10 group by key, value -insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value +insert overwrite table src_multi1_n4 select * where key < 10 group by key, value +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 2 val_2 @@ -2136,13 +2136,13 @@ POSTHOOK: Input: default@src_multi1 5 val_5 8 val_8 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 12 val_12 @@ -2152,13 +2152,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY POSTHOOK: query: explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table 
src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -2192,7 +2192,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2216,7 +2216,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2251,7 +2251,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2275,7 +2275,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2316,7 +2316,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -2324,7 +2324,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -2334,7 +2334,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-5 Stats Work @@ -2342,7 +2342,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-6 Map Reduce @@ -2368,30 +2368,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite 
table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -2413,13 +2413,13 @@ POSTHOOK: Input: default@src_multi1 8 val_8 9 val_9 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 11 val_11 @@ -2441,13 +2441,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY POSTHOOK: query: explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -2486,7 +2486,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value 
@@ -2510,7 +2510,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2545,7 +2545,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2569,7 +2569,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2619,7 +2619,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -2627,7 +2627,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -2637,7 +2637,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-10 Stats Work @@ -2645,7 +2645,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-5 Map Reduce @@ -2657,7 +2657,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-7 Map Reduce @@ -2669,7 +2669,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-8 Move Operator @@ -2701,30 +2701,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 
select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Output: default@src_multi1_n4 +POSTHOOK: Output: default@src_multi2_n5 +POSTHOOK: Lineage: src_multi1_n4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi1_n4.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_multi2_n5.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_multi1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n4 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -2746,13 +2746,13 @@ POSTHOOK: Input: default@src_multi1 8 val_8 9 val_9 9 val_9 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n5 #### A masked pattern was here #### 11 val_11 11 val_11 @@ -2774,13 +2774,13 @@ POSTHOOK: Input: default@src_multi2 19 val_19 PREHOOK: query: explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY POSTHOOK: query: explain from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -2819,7 +2819,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2843,7 
+2843,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2878,7 +2878,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2902,7 +2902,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -2943,7 +2943,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n4 Stage: Stage-3 Stats Work @@ -2951,7 +2951,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n4 Stage: Stage-1 Move Operator @@ -2961,7 +2961,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-5 Stats Work @@ -2969,7 +2969,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n5 Stage: Stage-10 Conditional Operator @@ -2990,7 +2990,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-8 Map Reduce @@ -3002,7 +3002,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n5 Stage: Stage-9 Move Operator @@ -3034,30 +3034,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select * where key > 10 and key < 20 +insert overwrite table src_multi1_n4 select * where key < 10 +insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 +PREHOOK: Output: default@src_multi1_n4 +PREHOOK: Output: default@src_multi2_n5 POSTHOOK: query: from (select * from src union all select * from src) s -insert overwrite table src_multi1 select * where key < 10 -insert overwrite table src_multi2 select 
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_multi1
-POSTHOOK: Output: default@src_multi2
-POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from src_multi1
+POSTHOOK: Output: default@src_multi1_n4
+POSTHOOK: Output: default@src_multi2_n5
+POSTHOOK: Lineage: src_multi1_n4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1_n4.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi1
+PREHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi1
+POSTHOOK: query: select * from src_multi1_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi1
+POSTHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
 0 val_0
 0 val_0
@@ -3079,13 +3079,13 @@ POSTHOOK: Input: default@src_multi1
 8 val_8
 9 val_9
 9 val_9
-PREHOOK: query: select * from src_multi2
+PREHOOK: query: select * from src_multi2_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi2
+PREHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi2
+POSTHOOK: query: select * from src_multi2_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi2
+POSTHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
 11 val_11
 11 val_11
@@ -3107,13 +3107,13 @@ POSTHOOK: Input: default@src_multi2
 19 val_19
 PREHOOK: query: explain
 from (select * from src union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 from (select * from src union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-2 is a root stage
@@ -3157,7 +3157,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -3181,7 +3181,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -3216,7 +3216,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -3240,7 +3240,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -3290,7 +3290,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-3
 Stats Work
@@ -3298,7 +3298,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi1
+ Table: default.src_multi1_n4

 Stage: Stage-1
 Move Operator
@@ -3308,7 +3308,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-10
 Stats Work
@@ -3316,7 +3316,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi2
+ Table: default.src_multi2_n5

 Stage: Stage-5
 Map Reduce
@@ -3328,7 +3328,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-7
 Map Reduce
@@ -3340,7 +3340,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-8
 Move Operator
@@ -3367,7 +3367,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-13
 Map Reduce
@@ -3379,7 +3379,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-14
 Move Operator
@@ -3411,30 +3411,30 @@ STAGE PLANS:
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 PREHOOK: query: from (select * from src union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@src_multi1
-PREHOOK: Output: default@src_multi2
+PREHOOK: Output: default@src_multi1_n4
+PREHOOK: Output: default@src_multi2_n5
 POSTHOOK: query: from (select * from src union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_multi1
-POSTHOOK: Output: default@src_multi2
-POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from src_multi1
+POSTHOOK: Output: default@src_multi1_n4
+POSTHOOK: Output: default@src_multi2_n5
+POSTHOOK: Lineage: src_multi1_n4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1_n4.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi1
+PREHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi1
+POSTHOOK: query: select * from src_multi1_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi1
+POSTHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
 0 val_0
 0 val_0
@@ -3456,13 +3456,13 @@ POSTHOOK: Input: default@src_multi1
 8 val_8
 9 val_9
 9 val_9
-PREHOOK: query: select * from src_multi2
+PREHOOK: query: select * from src_multi2_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi2
+PREHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi2
+POSTHOOK: query: select * from src_multi2_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi2
+POSTHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
 11 val_11
 11 val_11
@@ -4341,14 +4341,14 @@ Found 2 items
 #### A masked pattern was here ####
 PREHOOK: query: explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -4385,7 +4385,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -4413,7 +4413,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -4463,7 +4463,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-5
 Stats Work
@@ -4471,7 +4471,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi1
+ Table: default.src_multi1_n4

 Stage: Stage-1
 Move Operator
@@ -4481,7 +4481,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-7
 Stats Work
@@ -4489,7 +4489,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi2
+ Table: default.src_multi2_n5

 Stage: Stage-8
 Map Reduce
@@ -4594,34 +4594,34 @@ STAGE PLANS:
 #### A masked pattern was here ####
 PREHOOK: query: from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-PREHOOK: Output: default@src_multi1
-PREHOOK: Output: default@src_multi2
+PREHOOK: Output: default@src_multi1_n4
+PREHOOK: Output: default@src_multi2_n5
 POSTHOOK: query: from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: Output: default@src_multi1
-POSTHOOK: Output: default@src_multi2
-POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from src_multi1
+POSTHOOK: Output: default@src_multi1_n4
+POSTHOOK: Output: default@src_multi2_n5
+POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi1
+PREHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi1
+POSTHOOK: query: select * from src_multi1_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi1
+POSTHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
 0 val_0
 0 val_0
@@ -4633,13 +4633,13 @@ POSTHOOK: Input: default@src_multi1
 5 val_5
 8 val_8
 9 val_9
-PREHOOK: query: select * from src_multi2
+PREHOOK: query: select * from src_multi2_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi2
+PREHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi2
+POSTHOOK: query: select * from src_multi2_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi2
+POSTHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
 11 val_11
 12 val_12
@@ -4654,14 +4654,14 @@ Found 2 items
 #### A masked pattern was here ####
 PREHOOK: query: explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -4703,7 +4703,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -4731,7 +4731,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -4781,7 +4781,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-5
 Stats Work
@@ -4789,7 +4789,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi1
+ Table: default.src_multi1_n4

 Stage: Stage-1
 Move Operator
@@ -4799,7 +4799,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-7
 Stats Work
@@ -4807,7 +4807,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi2
+ Table: default.src_multi2_n5

 Stage: Stage-12
 Conditional Operator
@@ -4828,7 +4828,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-10
 Map Reduce
@@ -4840,7 +4840,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-11
 Move Operator
@@ -4951,34 +4951,34 @@ STAGE PLANS:
 #### A masked pattern was here ####
 PREHOOK: query: from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-PREHOOK: Output: default@src_multi1
-PREHOOK: Output: default@src_multi2
+PREHOOK: Output: default@src_multi1_n4
+PREHOOK: Output: default@src_multi2_n5
 POSTHOOK: query: from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: Output: default@src_multi1
-POSTHOOK: Output: default@src_multi2
-POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from src_multi1
+POSTHOOK: Output: default@src_multi1_n4
+POSTHOOK: Output: default@src_multi2_n5
+POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi1
+PREHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi1
+POSTHOOK: query: select * from src_multi1_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi1
+POSTHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
 0 val_0
 0 val_0
@@ -4990,13 +4990,13 @@ POSTHOOK: Input: default@src_multi1
 5 val_5
 8 val_8
 9 val_9
-PREHOOK: query: select * from src_multi2
+PREHOOK: query: select * from src_multi2_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi2
+PREHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi2
+POSTHOOK: query: select * from src_multi2_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi2
+POSTHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
 11 val_11
 12 val_12
@@ -5011,14 +5011,14 @@ Found 2 items
 #### A masked pattern was here ####
 PREHOOK: query: explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -5060,7 +5060,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -5088,7 +5088,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -5147,7 +5147,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-5
 Stats Work
@@ -5155,7 +5155,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi1
+ Table: default.src_multi1_n4

 Stage: Stage-1
 Move Operator
@@ -5165,7 +5165,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-12
 Stats Work
@@ -5173,7 +5173,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi2
+ Table: default.src_multi2_n5

 Stage: Stage-7
 Map Reduce
@@ -5185,7 +5185,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-9
 Map Reduce
@@ -5197,7 +5197,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-10
 Move Operator
@@ -5308,34 +5308,34 @@ STAGE PLANS:
 #### A masked pattern was here ####
 PREHOOK: query: from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-PREHOOK: Output: default@src_multi1
-PREHOOK: Output: default@src_multi2
+PREHOOK: Output: default@src_multi1_n4
+PREHOOK: Output: default@src_multi2_n5
 POSTHOOK: query: from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: Output: default@src_multi1
-POSTHOOK: Output: default@src_multi2
-POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from src_multi1
+POSTHOOK: Output: default@src_multi1_n4
+POSTHOOK: Output: default@src_multi2_n5
+POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi1
+PREHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi1
+POSTHOOK: query: select * from src_multi1_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi1
+POSTHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
 0 val_0
 0 val_0
@@ -5347,13 +5347,13 @@ POSTHOOK: Input: default@src_multi1
 5 val_5
 8 val_8
 9 val_9
-PREHOOK: query: select * from src_multi2
+PREHOOK: query: select * from src_multi2_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi2
+PREHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi2
+POSTHOOK: query: select * from src_multi2_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi2
+POSTHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
 11 val_11
 12 val_12
@@ -5368,14 +5368,14 @@ Found 2 items
 #### A masked pattern was here ####
 PREHOOK: query: explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -5422,7 +5422,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -5450,7 +5450,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
 outputColumnNames: key, value
@@ -5509,7 +5509,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-5
 Stats Work
@@ -5517,7 +5517,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi1
+ Table: default.src_multi1_n4

 Stage: Stage-1
 Move Operator
@@ -5527,7 +5527,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-12
 Stats Work
@@ -5535,7 +5535,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: key, value
 Column Types: string, string
- Table: default.src_multi2
+ Table: default.src_multi2_n5

 Stage: Stage-7
 Map Reduce
@@ -5547,7 +5547,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-9
 Map Reduce
@@ -5559,7 +5559,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi1
+ name: default.src_multi1_n4

 Stage: Stage-10
 Move Operator
@@ -5586,7 +5586,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-15
 Map Reduce
@@ -5598,7 +5598,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src_multi2
+ name: default.src_multi2_n5

 Stage: Stage-16
 Move Operator
@@ -5709,34 +5709,34 @@ STAGE PLANS:
 #### A masked pattern was here ####
 PREHOOK: query: from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-PREHOOK: Output: default@src_multi1
-PREHOOK: Output: default@src_multi2
+PREHOOK: Output: default@src_multi1_n4
+PREHOOK: Output: default@src_multi2_n5
 POSTHOOK: query: from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 #### A masked pattern was here ####
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: Output: default@src_multi1
-POSTHOOK: Output: default@src_multi2
-POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from src_multi1
+POSTHOOK: Output: default@src_multi1_n4
+POSTHOOK: Output: default@src_multi2_n5
+POSTHOOK: Lineage: src_multi1_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi1
+PREHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi1
+POSTHOOK: query: select * from src_multi1_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi1
+POSTHOOK: Input: default@src_multi1_n4
 #### A masked pattern was here ####
 0 val_0
 0 val_0
@@ -5748,13 +5748,13 @@ POSTHOOK: Input: default@src_multi1
 5 val_5
 8 val_8
 9 val_9
-PREHOOK: query: select * from src_multi2
+PREHOOK: query: select * from src_multi2_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_multi2
+PREHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select * from src_multi2
+POSTHOOK: query: select * from src_multi2_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_multi2
+POSTHOOK: Input: default@src_multi2_n5
 #### A masked pattern was here ####
 11 val_11
 12 val_12
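The multi_insert.q.out hunks above apply one mechanical rename throughout (src_multi1 -> src_multi1_n4, src_multi2 -> src_multi2_n5); the plans themselves are unchanged. For orientation, a minimal HiveQL sketch of the multi-insert shape these golden outputs record, using the renamed tables (the CREATE TABLE ... LIKE src setup comes from the .q file and is assumed here, not shown in this diff):

    -- one scan of the union feeds both insert branches
    from (select * from src union all select * from src) s
    insert overwrite table src_multi1_n4 select * where key < 10
    insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;

Because a single pass over s serves both branches, each EXPLAIN above starts from one root stage (Stage-2) and fans out into separate move and stats stages per target table.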
diff --git a/ql/src/test/results/clientpositive/multi_insert_partitioned.q.out b/ql/src/test/results/clientpositive/multi_insert_partitioned.q.out
index 76b4f74127..72433bdf0d 100644
--- a/ql/src/test/results/clientpositive/multi_insert_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_partitioned.q.out
@@ -1,42 +1,42 @@
-PREHOOK: query: drop table intermediate
+PREHOOK: query: drop table intermediate_n3
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table intermediate
+POSTHOOK: query: drop table intermediate_n3
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
+PREHOOK: query: create table intermediate_n3(key int) partitioned by (p int) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@intermediate
-POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
+PREHOOK: Output: default@intermediate_n3
+POSTHOOK: query: create table intermediate_n3(key int) partitioned by (p int) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@intermediate
-PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
+POSTHOOK: Output: default@intermediate_n3
+PREHOOK: query: insert into table intermediate_n3 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@intermediate@p=455
-POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
+PREHOOK: Output: default@intermediate_n3@p=455
+POSTHOOK: query: insert into table intermediate_n3 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@intermediate@p=455
-POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
+POSTHOOK: Output: default@intermediate_n3@p=455
+POSTHOOK: Lineage: intermediate_n3 PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: insert into table intermediate_n3 partition(p='456') select distinct key from src where key is not null order by key asc limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@intermediate@p=456
-POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
+PREHOOK: Output: default@intermediate_n3@p=456
+POSTHOOK: query: insert into table intermediate_n3 partition(p='456') select distinct key from src where key is not null order by key asc limit 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@intermediate@p=456
-POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
+POSTHOOK: Output: default@intermediate_n3@p=456
+POSTHOOK: Lineage: intermediate_n3 PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: insert into table intermediate_n3 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@intermediate@p=457
-POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
+PREHOOK: Output: default@intermediate_n3@p=457
+POSTHOOK: query: insert into table intermediate_n3 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@intermediate@p=457
-POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Output: default@intermediate_n3@p=457
+POSTHOOK: Lineage: intermediate_n3 PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: drop table multi_partitioned
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table multi_partitioned
@@ -49,30 +49,30 @@ POSTHOOK: query: create table multi_partitioned (key int, key2 int) partitioned
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@multi_partitioned
-PREHOOK: query: from intermediate
+PREHOOK: query: from intermediate_n3
 insert into table multi_partitioned partition(p=1) select p, key
 insert into table multi_partitioned partition(p=2) select key, p
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n3
+PREHOOK: Input: default@intermediate_n3@p=455
+PREHOOK: Input: default@intermediate_n3@p=456
+PREHOOK: Input: default@intermediate_n3@p=457
 PREHOOK: Output: default@multi_partitioned@p=1
 PREHOOK: Output: default@multi_partitioned@p=2
-POSTHOOK: query: from intermediate
+POSTHOOK: query: from intermediate_n3
 insert into table multi_partitioned partition(p=1) select p, key
 insert into table multi_partitioned partition(p=2) select key, p
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n3
+POSTHOOK: Input: default@intermediate_n3@p=455
+POSTHOOK: Input: default@intermediate_n3@p=456
+POSTHOOK: Input: default@intermediate_n3@p=457
 POSTHOOK: Output: default@multi_partitioned@p=1
 POSTHOOK: Output: default@multi_partitioned@p=2
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
 PREHOOK: query: select * from multi_partitioned order by key, key2, p
 PREHOOK: type: QUERY
 PREHOOK: Input: default@multi_partitioned
@@ -137,30 +137,30 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: from intermediate
+PREHOOK: query: from intermediate_n3
 insert overwrite table multi_partitioned partition(p=2) select p, key
 insert overwrite table multi_partitioned partition(p=1) select key, p
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n3
+PREHOOK: Input: default@intermediate_n3@p=455
+PREHOOK: Input: default@intermediate_n3@p=456
+PREHOOK: Input: default@intermediate_n3@p=457
 PREHOOK: Output: default@multi_partitioned@p=1
 PREHOOK: Output: default@multi_partitioned@p=2
-POSTHOOK: query: from intermediate
+POSTHOOK: query: from intermediate_n3
 insert overwrite table multi_partitioned partition(p=2) select p, key
 insert overwrite table multi_partitioned partition(p=1) select key, p
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n3
+POSTHOOK: Input: default@intermediate_n3@p=455
+POSTHOOK: Input: default@intermediate_n3@p=456
+POSTHOOK: Input: default@intermediate_n3@p=457
 POSTHOOK: Output: default@multi_partitioned@p=1
 POSTHOOK: Output: default@multi_partitioned@p=2
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from multi_partitioned order by key, key2, p
 PREHOOK: type: QUERY
 PREHOOK: Input: default@multi_partitioned
@@ -225,30 +225,30 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: from intermediate
+PREHOOK: query: from intermediate_n3
 insert into table multi_partitioned partition(p=2) select p, key
 insert overwrite table multi_partitioned partition(p=1) select key, p
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n3
+PREHOOK: Input: default@intermediate_n3@p=455
+PREHOOK: Input: default@intermediate_n3@p=456
+PREHOOK: Input: default@intermediate_n3@p=457
 PREHOOK: Output: default@multi_partitioned@p=1
 PREHOOK: Output: default@multi_partitioned@p=2
-POSTHOOK: query: from intermediate
+POSTHOOK: query: from intermediate_n3
 insert into table multi_partitioned partition(p=2) select p, key
 insert overwrite table multi_partitioned partition(p=1) select key, p
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n3
+POSTHOOK: Input: default@intermediate_n3@p=455
+POSTHOOK: Input: default@intermediate_n3@p=456
+POSTHOOK: Input: default@intermediate_n3@p=457
 POSTHOOK: Output: default@multi_partitioned@p=1
 POSTHOOK: Output: default@multi_partitioned@p=2
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=2).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from multi_partitioned order by key, key2, p
 PREHOOK: type: QUERY
 PREHOOK: Input: default@multi_partitioned
@@ -319,36 +319,36 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: from intermediate
+PREHOOK: query: from intermediate_n3
 insert into table multi_partitioned partition(p) select p, key, p
 insert into table multi_partitioned partition(p=1) select key, p
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n3
+PREHOOK: Input: default@intermediate_n3@p=455
+PREHOOK: Input: default@intermediate_n3@p=456
+PREHOOK: Input: default@intermediate_n3@p=457
 PREHOOK: Output: default@multi_partitioned
 PREHOOK: Output: default@multi_partitioned@p=1
-POSTHOOK: query: from intermediate
+POSTHOOK: query: from intermediate_n3
 insert into table multi_partitioned partition(p) select p, key, p
 insert into table multi_partitioned partition(p=1) select key, p
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n3
+POSTHOOK: Input: default@intermediate_n3@p=455
+POSTHOOK: Input: default@intermediate_n3@p=456
+POSTHOOK: Input: default@intermediate_n3@p=457
 POSTHOOK: Output: default@multi_partitioned@p=1
 POSTHOOK: Output: default@multi_partitioned@p=455
 POSTHOOK: Output: default@multi_partitioned@p=456
 POSTHOOK: Output: default@multi_partitioned@p=457
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=456).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=457).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=455).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=455).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=456).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=456).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=457).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=457).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select key, key2, p from multi_partitioned order by key, key2, p
 PREHOOK: type: QUERY
 PREHOOK: Input: default@multi_partitioned
@@ -437,29 +437,29 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 serialization.format 1
-PREHOOK: query: from intermediate
+PREHOOK: query: from intermediate_n3
 insert into table multi_partitioned partition(p) select p, key, 1
 insert into table multi_partitioned partition(p=1) select key, p
 PREHOOK: type: QUERY
-PREHOOK: Input: default@intermediate
-PREHOOK: Input: default@intermediate@p=455
-PREHOOK: Input: default@intermediate@p=456
-PREHOOK: Input: default@intermediate@p=457
+PREHOOK: Input: default@intermediate_n3
+PREHOOK: Input: default@intermediate_n3@p=455
+PREHOOK: Input: default@intermediate_n3@p=456
+PREHOOK: Input: default@intermediate_n3@p=457
 PREHOOK: Output: default@multi_partitioned
 PREHOOK: Output: default@multi_partitioned@p=1
-POSTHOOK: query: from intermediate
+POSTHOOK: query: from intermediate_n3
 insert into table multi_partitioned partition(p) select p, key, 1
 insert into table multi_partitioned partition(p=1) select key, p
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Input: default@intermediate@p=455
-POSTHOOK: Input: default@intermediate@p=456
-POSTHOOK: Input: default@intermediate@p=457
+POSTHOOK: Input: default@intermediate_n3
+POSTHOOK: Input: default@intermediate_n3@p=455
+POSTHOOK: Input: default@intermediate_n3@p=456
+POSTHOOK: Input: default@intermediate_n3@p=457
 POSTHOOK: Output: default@multi_partitioned@p=1
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi_partitioned PARTITION(p=1).key2 SIMPLE [(intermediate_n3)intermediate_n3.FieldSchema(name:p, type:int, comment:null), ]
 PREHOOK: query: select key, key2, p from multi_partitioned order by key, key2, p
 PREHOOK: type: QUERY
 PREHOOK: Input: default@multi_partitioned
@@ -568,11 +568,11 @@ POSTHOOK: query: drop table multi_partitioned
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@multi_partitioned
 POSTHOOK: Output: default@multi_partitioned
-PREHOOK: query: drop table intermediate
+PREHOOK: query: drop table intermediate_n3
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@intermediate
-PREHOOK: Output: default@intermediate
-POSTHOOK: query: drop table intermediate
+PREHOOK: Input: default@intermediate_n3
+PREHOOK: Output: default@intermediate_n3
+POSTHOOK: query: drop table intermediate_n3
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@intermediate
-POSTHOOK: Output: default@intermediate
+POSTHOOK: Input: default@intermediate_n3
+POSTHOOK: Output: default@intermediate_n3
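multi_insert_partitioned.q.out above follows the same rename pattern (intermediate -> intermediate_n3). The queries it records mix static and dynamic partition specs in a single multi-insert; a minimal sketch of that shape, taken from the queries visible above (hive.exec.dynamic.partition.mode=nonstrict is assumed for the dynamic branch, as in the test setup):

    from intermediate_n3
    insert into table multi_partitioned partition(p) select p, key, p
    insert into table multi_partitioned partition(p=1) select key, p;

In the dynamic branch the trailing select column supplies p, so each distinct value lands in its own partition; that matches the POSTHOOK: Output lines above for p=455, p=456, and p=457 alongside the static p=1.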
diff --git a/ql/src/test/results/clientpositive/multi_insert_union_src.q.out b/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
index 44b953d264..e8dabfd7cc 100644
--- a/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
@@ -1,52 +1,52 @@
-PREHOOK: query: drop table if exists src2
+PREHOOK: query: drop table if exists src2_n4
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists src2
+POSTHOOK: query: drop table if exists src2_n4
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table if exists src_multi1
+PREHOOK: query: drop table if exists src_multi1_n3
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists src_multi1
+POSTHOOK: query: drop table if exists src_multi1_n3
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table if exists src_multi2
+PREHOOK: query: drop table if exists src_multi2_n4
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists src_multi2
+POSTHOOK: query: drop table if exists src_multi2_n4
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE src2 as SELECT * FROM src
+PREHOOK: query: CREATE TABLE src2_n4 as SELECT * FROM src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@src2
-POSTHOOK: query: CREATE TABLE src2 as SELECT * FROM src
+PREHOOK: Output: default@src2_n4
+POSTHOOK: query: CREATE TABLE src2_n4 as SELECT * FROM src
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src2
-POSTHOOK: Lineage: src2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table src_multi1 like src
+POSTHOOK: Output: default@src2_n4
+POSTHOOK: Lineage: src2_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src2_n4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table src_multi1_n3 like src
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@src_multi1
-POSTHOOK: query: create table src_multi1 like src
+PREHOOK: Output: default@src_multi1_n3
+POSTHOOK: query: create table src_multi1_n3 like src
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_multi1
-PREHOOK: query: create table src_multi2 like src
+POSTHOOK: Output: default@src_multi1_n3
+PREHOOK: query: create table src_multi2_n4 like src
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@src_multi2
-POSTHOOK: query: create table src_multi2 like src
+PREHOOK: Output: default@src_multi2_n4
+POSTHOOK: query: create table src_multi2_n4 like src
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_multi2
+POSTHOOK: Output: default@src_multi2_n4
 PREHOOK: query: explain
-from (select * from src1 where key < 10 union all select * from src2 where key > 100) s
-insert overwrite table src_multi1 select key, value where key < 150 order by key
-insert overwrite table src_multi2 select key, value where key > 400 order by value
+from (select * from src1 where key < 10 union all select * from src2_n4 where key > 100) s
+insert overwrite table src_multi1_n3 select key, value where key < 150 order by key
+insert overwrite table src_multi2_n4 select key, value where key > 400 order by value
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-from (select * from src1 where key < 10 union all select * from src2 where key > 100) s
-insert overwrite table src_multi1 select key, value where key < 150 order by key
-insert overwrite table src_multi2 select key, value where key > 400 order by value
+from (select * from src1 where key < 10 union all select * from src2_n4 where key > 100) s
+insert overwrite table src_multi1_n3 select key, value where key < 150 order by key
+insert overwrite table src_multi2_n4 select key, value where key > 400 order by value
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-2 is a root stage
@@ -90,7 +90,7 @@ STAGE PLANS:
 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 TableScan
- alias: src2
+ alias: src2_n4
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
 predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
@@ -130,7 +130,7 @@ STAGE PLANS:
+130,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n3 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -160,7 +160,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi1 + name: default.src_multi1_n3 Stage: Stage-3 Stats Work @@ -168,7 +168,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi1 + Table: default.src_multi1_n3 Stage: Stage-4 Map Reduce @@ -192,7 +192,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -222,7 +222,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_multi2 + name: default.src_multi2_n4 Stage: Stage-5 Stats Work @@ -230,35 +230,35 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_multi2 + Table: default.src_multi2_n4 -PREHOOK: query: from (select * from src1 where key < 10 union all select * from src2 where key > 100) s -insert overwrite table src_multi1 select key, value where key < 150 order by key -insert overwrite table src_multi2 select key, value where key > 400 order by value +PREHOOK: query: from (select * from src1 where key < 10 union all select * from src2_n4 where key > 100) s +insert overwrite table src_multi1_n3 select key, value where key < 150 order by key +insert overwrite table src_multi2_n4 select key, value where key > 400 order by value PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Input: default@src2 -PREHOOK: Output: default@src_multi1 -PREHOOK: Output: default@src_multi2 -POSTHOOK: query: from (select * from src1 where key < 10 union all select * from src2 where key > 100) s -insert overwrite table src_multi1 select key, value where key < 150 order by key -insert overwrite table src_multi2 select key, value where key > 400 order by value +PREHOOK: Input: default@src2_n4 +PREHOOK: Output: default@src_multi1_n3 +PREHOOK: Output: default@src_multi2_n4 +POSTHOOK: query: from (select * from src1 where key < 10 union all select * from src2_n4 where key > 100) s +insert overwrite table src_multi1_n3 select key, value where key < 150 order by key +insert overwrite table src_multi2_n4 select key, value where key > 400 order by value POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Input: default@src2 -POSTHOOK: Output: default@src_multi1 -POSTHOOK: Output: default@src_multi2 -POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), (src2)src2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), 
(src2)src2.FieldSchema(name:value, type:string, comment:null), ] -POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), (src2)src2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), (src2)src2.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: select * from src_multi1 +POSTHOOK: Input: default@src2_n4 +POSTHOOK: Output: default@src_multi1_n3 +POSTHOOK: Output: default@src_multi2_n4 +POSTHOOK: Lineage: src_multi1_n3.key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), (src2_n4)src2_n4.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: src_multi1_n3.value EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), (src2_n4)src2_n4.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: src_multi2_n4.key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), (src2_n4)src2_n4.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: src_multi2_n4.value EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), (src2_n4)src2_n4.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from src_multi1_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi1 +PREHOOK: Input: default@src_multi1_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi1 +POSTHOOK: query: select * from src_multi1_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi1 +POSTHOOK: Input: default@src_multi1_n3 #### A masked pattern was here #### 103 val_103 103 val_103 @@ -302,13 +302,13 @@ POSTHOOK: Input: default@src_multi1 146 val_146 149 val_149 149 val_149 -PREHOOK: query: select * from src_multi2 +PREHOOK: query: select * from src_multi2_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@src_multi2 +PREHOOK: Input: default@src_multi2_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from src_multi2 +POSTHOOK: query: select * from src_multi2_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_multi2 +POSTHOOK: Input: default@src_multi2_n4 #### A masked pattern was here #### 401 val_401 401 val_401 diff --git a/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out b/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out index 931aa24e52..d74fad2c26 100644 --- a/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out +++ b/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out @@ -6,22 +6,22 @@ POSTHOOK: query: CREATE TABLE TBL(C1 INT, C2 INT, C3 INT, C4 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@TBL -PREHOOK: query: CREATE TABLE DEST1(d1 INT, d2 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n17(d1 INT, d2 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(d1 INT, d2 INT) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n17 +POSTHOOK: query: CREATE TABLE DEST1_n17(d1 INT, d2 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(d1 INT, d2 INT, d3 INT) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n17 +PREHOOK: query: CREATE TABLE DEST2_n15(d1 INT, d2 INT, d3 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: 
Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(d1 INT, d2 INT, d3 INT) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n15 +POSTHOOK: query: CREATE TABLE DEST2_n15(d1 INT, d2 INT, d3 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n15 PREHOOK: query: CREATE TABLE DEST3(d1 INT, d2 INT, d3 INT, d4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -40,13 +40,13 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST4 PREHOOK: query: EXPLAIN FROM TBL -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 +INSERT OVERWRITE TABLE DEST1_n17 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 +INSERT OVERWRITE TABLE DEST2_n15 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM TBL -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 +INSERT OVERWRITE TABLE DEST1_n17 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 +INSERT OVERWRITE TABLE DEST2_n15 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -116,7 +116,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n17 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: d1, d2 @@ -141,7 +141,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n17 Stage: Stage-3 Stats Work @@ -149,7 +149,7 @@ STAGE PLANS: Column Stats Desc: Columns: d1, d2 Column Types: int, int - Table: default.dest1 + Table: default.dest1_n17 Stage: Stage-4 Map Reduce @@ -180,7 +180,7 @@ STAGE PLANS: Column Stats Desc: Columns: d1, d2, d3 Column Types: int, int, int - Table: default.dest2 + Table: default.dest2_n15 Stage: Stage-5 Map Reduce @@ -211,7 +211,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) outputColumnNames: d1, d2, d3 @@ -236,7 +236,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Stage: Stage-7 Map Reduce @@ -263,13 +263,13 @@ STAGE PLANS: PREHOOK: query: EXPLAIN FROM TBL -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C2, TBL.C1 +INSERT OVERWRITE TABLE DEST1_n17 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 +INSERT OVERWRITE TABLE DEST2_n15 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C2, TBL.C1 PREHOOK: type: 
QUERY POSTHOOK: query: EXPLAIN FROM TBL -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C2, TBL.C1 +INSERT OVERWRITE TABLE DEST1_n17 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 +INSERT OVERWRITE TABLE DEST2_n15 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C2, TBL.C1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -339,7 +339,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n17 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: d1, d2 @@ -364,7 +364,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n17 Stage: Stage-3 Stats Work @@ -372,7 +372,7 @@ STAGE PLANS: Column Stats Desc: Columns: d1, d2 Column Types: int, int - Table: default.dest1 + Table: default.dest1_n17 Stage: Stage-4 Map Reduce @@ -403,7 +403,7 @@ STAGE PLANS: Column Stats Desc: Columns: d1, d2, d3 Column Types: int, int, int - Table: default.dest2 + Table: default.dest2_n15 Stage: Stage-5 Map Reduce @@ -434,7 +434,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) outputColumnNames: d1, d2, d3 @@ -459,7 +459,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Stage: Stage-7 Map Reduce @@ -487,12 +487,12 @@ STAGE PLANS: PREHOOK: query: EXPLAIN FROM TBL INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 +INSERT OVERWRITE TABLE DEST2_n15 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM TBL INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 +INSERT OVERWRITE TABLE DEST2_n15 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -626,7 +626,7 @@ STAGE PLANS: Column Stats Desc: Columns: d1, d2, d3 Column Types: int, int, int - Table: default.dest2 + Table: default.dest2_n15 Stage: Stage-5 Map Reduce @@ -657,7 +657,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) outputColumnNames: d1, d2, d3 @@ -682,7 +682,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Stage: Stage-7 Map Reduce @@ -899,14 +899,14 @@ STAGE PLANS: PREHOOK: query: EXPLAIN FROM TBL INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 +INSERT OVERWRITE TABLE DEST2_n15 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 +INSERT OVERWRITE TABLE DEST1_n17 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM TBL INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3 -INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 -INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 +INSERT OVERWRITE TABLE DEST2_n15 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2 +INSERT OVERWRITE TABLE DEST1_n17 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -1060,7 +1060,7 @@ STAGE PLANS: Column Stats Desc: Columns: d1, d2, d3 Column Types: int, int, int - Table: default.dest2 + Table: default.dest2_n15 Stage: Stage-10 Stats Work @@ -1068,7 +1068,7 @@ STAGE PLANS: Column Stats Desc: Columns: d1, d2 Column Types: int, int - Table: default.dest1 + Table: default.dest1_n17 Stage: Stage-6 Map Reduce @@ -1099,7 +1099,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) outputColumnNames: d1, d2, d3 @@ -1124,7 +1124,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n15 Stage: Stage-8 Map Reduce @@ -1178,7 +1178,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n17 Select Operator expressions: _col0 (type: int), _col1 (type: int) outputColumnNames: d1, d2 @@ -1203,7 +1203,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n17 Stage: Stage-11 Map Reduce diff --git a/ql/src/test/results/clientpositive/named_column_join.q.out b/ql/src/test/results/clientpositive/named_column_join.q.out index d32dc20c46..b67b74a75a 100644 --- a/ql/src/test/results/clientpositive/named_column_join.q.out +++ b/ql/src/test/results/clientpositive/named_column_join.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: create table t (a int, b int) +PREHOOK: query: create table t_n8 (a int, b int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (a int, b int) +PREHOOK: Output: default@t_n8 +POSTHOOK: 
query: create table t_n8 (a int, b int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1,2),(2,1),(3,4),(4,3),(3,3),(null,null),(null,1),(2,null) +POSTHOOK: Output: default@t_n8 +PREHOOK: query: insert into t_n8 values (1,2),(2,1),(3,4),(4,3),(3,3),(null,null),(null,1),(2,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1,2),(2,1),(3,4),(4,3),(3,3),(null,null),(null,1),(2,null) +PREHOOK: Output: default@t_n8 +POSTHOOK: query: insert into t_n8 values (1,2),(2,1),(3,4),(4,3),(3,3),(null,null),(null,1),(2,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -POSTHOOK: Lineage: t.b SCRIPT [] -PREHOOK: query: explain select * from t t1 join t t2 using (a) +POSTHOOK: Output: default@t_n8 +POSTHOOK: Lineage: t_n8.a SCRIPT [] +POSTHOOK: Lineage: t_n8.b SCRIPT [] +PREHOOK: query: explain select * from t_n8 t1 join t_n8 t2 using (a) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t t1 join t t2 using (a) +POSTHOOK: query: explain select * from t_n8 t1 join t_n8 t2 using (a) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -87,13 +87,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from t t1 join t t2 using (a) +PREHOOK: query: select * from t_n8 t1 join t_n8 t2 using (a) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from t t1 join t t2 using (a) +POSTHOOK: query: select * from t_n8 t1 join t_n8 t2 using (a) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 2 1 1 @@ -105,26 +105,26 @@ POSTHOOK: Input: default@t 3 4 3 3 4 4 4 3 3 -PREHOOK: query: select * from t t1 join t t2 using (a,b) +PREHOOK: query: select * from t_n8 t1 join t_n8 t2 using (a,b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from t t1 join t t2 using (a,b) +POSTHOOK: query: select * from t_n8 t1 join t_n8 t2 using (a,b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 3 3 3 4 4 3 -PREHOOK: query: select t1.a,t2.b,t1.b,t2.a from t t1 join t t2 using (a) +PREHOOK: query: select t1.a,t2.b,t1.b,t2.a from t_n8 t1 join t_n8 t2 using (a) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select t1.a,t2.b,t1.b,t2.a from t t1 join t t2 using (a) +POSTHOOK: query: select t1.a,t2.b,t1.b,t2.a from t_n8 t1 join t_n8 t2 using (a) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 2 1 1 2 @@ -136,13 +136,13 @@ POSTHOOK: Input: default@t 3 4 3 3 3 4 4 3 4 3 3 4 -PREHOOK: query: select * from t t1 left outer join t t2 using (a,b) +PREHOOK: query: select * from t_n8 t1 left outer join t_n8 t2 using (a,b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from t t1 left outer join t t2 using (a,b) +POSTHOOK: query: select * from t_n8 t1 left outer join t_n8 t2 using (a,b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked 
pattern was here #### 1 2 2 1 @@ -152,13 +152,13 @@ POSTHOOK: Input: default@t 4 3 NULL 1 NULL NULL -PREHOOK: query: select t1.a,t1.b from t t1 right outer join t t2 on (t1.a=t2.a and t1.b=t2.b) +PREHOOK: query: select t1.a,t1.b from t_n8 t1 right outer join t_n8 t2 on (t1.a=t2.a and t1.b=t2.b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select t1.a,t1.b from t t1 right outer join t t2 on (t1.a=t2.a and t1.b=t2.b) +POSTHOOK: query: select t1.a,t1.b from t_n8 t1 right outer join t_n8 t2 on (t1.a=t2.a and t1.b=t2.b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 @@ -168,13 +168,13 @@ POSTHOOK: Input: default@t NULL NULL NULL NULL NULL NULL -PREHOOK: query: select * from t t1 right outer join t t2 using (a,b) +PREHOOK: query: select * from t_n8 t1 right outer join t_n8 t2 using (a,b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from t t1 right outer join t t2 using (a,b) +POSTHOOK: query: select * from t_n8 t1 right outer join t_n8 t2 using (a,b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 @@ -184,26 +184,26 @@ POSTHOOK: Input: default@t 4 3 NULL 1 NULL NULL -PREHOOK: query: select * from t t1 inner join t t2 using (a,b) +PREHOOK: query: select * from t_n8 t1 inner join t_n8 t2 using (a,b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from t t1 inner join t t2 using (a,b) +POSTHOOK: query: select * from t_n8 t1 inner join t_n8 t2 using (a,b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 3 3 3 4 4 3 -PREHOOK: query: select * from t t1 left outer join t t2 using (b) +PREHOOK: query: select * from t_n8 t1 left outer join t_n8 t2 using (b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from t t1 left outer join t t2 using (b) +POSTHOOK: query: select * from t_n8 t1 left outer join t_n8 t2 using (b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 2 NULL @@ -217,13 +217,13 @@ POSTHOOK: Input: default@t 4 3 3 NULL 2 NULL NULL NULL NULL -PREHOOK: query: select * from t t1 right outer join t t2 using (b) +PREHOOK: query: select * from t_n8 t1 right outer join t_n8 t2 using (b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from t t1 right outer join t t2 using (b) +POSTHOOK: query: select * from t_n8 t1 right outer join t_n8 t2 using (b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 2 NULL @@ -237,13 +237,13 @@ POSTHOOK: Input: default@t 4 3 3 NULL NULL 2 NULL NULL NULL -PREHOOK: query: select * from t t1 inner join t t2 using (b) +PREHOOK: query: select * from t_n8 t1 inner join t_n8 t2 using (b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from t t1 inner join t t2 using (b) +POSTHOOK: query: select * from t_n8 t1 inner join t_n8 t2 using (b) POSTHOOK: type: 
QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 2 NULL @@ -255,28 +255,28 @@ POSTHOOK: Input: default@t 3 4 3 3 4 4 4 3 3 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n4 PREHOOK: type: DROPVIEW -POSTHOOK: query: drop view v +POSTHOOK: query: drop view v_n4 POSTHOOK: type: DROPVIEW -PREHOOK: query: create view v as select * from t t1 join t t2 using (a,b) +PREHOOK: query: create view v_n4 as select * from t_n8 t1 join t_n8 t2 using (a,b) PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select * from t t1 join t t2 using (a,b) +PREHOOK: Output: default@v_n4 +POSTHOOK: query: create view v_n4 as select * from t_n8 t1 join t_n8 t2 using (a,b) POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.a SIMPLE [(t)t1.FieldSchema(name:a, type:int, comment:null), ] -POSTHOOK: Lineage: v.b SIMPLE [(t)t1.FieldSchema(name:b, type:int, comment:null), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n4 +POSTHOOK: Lineage: v_n4.a SIMPLE [(t_n8)t1.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: v_n4.b SIMPLE [(t_n8)t1.FieldSchema(name:b, type:int, comment:null), ] +PREHOOK: query: desc formatted v_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n4 +POSTHOOK: query: desc formatted v_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n4 # col_name data_type comment a int b int @@ -300,50 +300,50 @@ Bucket Columns: [] Sort Columns: [] # View Information -View Original Text: select * from t t1 join t t2 using (a,b) -View Expanded Text: select `t1`.`a`, `t1`.`b` from `default`.`t` `t1` join `default`.`t` `t2` using (`a`,`b`) +View Original Text: select * from t_n8 t1 join t_n8 t2 using (a,b) +View Expanded Text: select `t1`.`a`, `t1`.`b` from `default`.`t_n8` `t1` join `default`.`t_n8` `t2` using (`a`,`b`) View Rewrite Enabled: No -PREHOOK: query: select * from v +PREHOOK: query: select * from v_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@t -PREHOOK: Input: default@v +PREHOOK: Input: default@t_n8 +PREHOOK: Input: default@v_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from v +POSTHOOK: query: select * from v_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t -POSTHOOK: Input: default@v +POSTHOOK: Input: default@t_n8 +POSTHOOK: Input: default@v_n4 #### A masked pattern was here #### 1 2 2 1 3 3 3 4 4 3 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n4 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n4 +PREHOOK: Output: default@v_n4 +POSTHOOK: query: drop view v_n4 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as select * from t t1 right outer join t t2 using (b,a) +POSTHOOK: Input: default@v_n4 +POSTHOOK: Output: default@v_n4 +PREHOOK: query: create view v_n4 as select * from t_n8 t1 right outer join t_n8 t2 using (b,a) PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select * from t t1 right outer join t t2 using (b,a) +PREHOOK: Output: 
default@v_n4 +POSTHOOK: query: create view v_n4 as select * from t_n8 t1 right outer join t_n8 t2 using (b,a) POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.a SIMPLE [(t)t2.FieldSchema(name:a, type:int, comment:null), ] -POSTHOOK: Lineage: v.b SIMPLE [(t)t2.FieldSchema(name:b, type:int, comment:null), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n4 +POSTHOOK: Lineage: v_n4.a SIMPLE [(t_n8)t2.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: v_n4.b SIMPLE [(t_n8)t2.FieldSchema(name:b, type:int, comment:null), ] +PREHOOK: query: desc formatted v_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n4 +POSTHOOK: query: desc formatted v_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n4 # col_name data_type comment b int a int @@ -367,18 +367,18 @@ Bucket Columns: [] Sort Columns: [] # View Information -View Original Text: select * from t t1 right outer join t t2 using (b,a) -View Expanded Text: select `t2`.`b`, `t2`.`a` from `default`.`t` `t1` right outer join `default`.`t` `t2` using (`b`,`a`) +View Original Text: select * from t_n8 t1 right outer join t_n8 t2 using (b,a) +View Expanded Text: select `t2`.`b`, `t2`.`a` from `default`.`t_n8` `t1` right outer join `default`.`t_n8` `t2` using (`b`,`a`) View Rewrite Enabled: No -PREHOOK: query: select * from v +PREHOOK: query: select * from v_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@t -PREHOOK: Input: default@v +PREHOOK: Input: default@t_n8 +PREHOOK: Input: default@v_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from v +POSTHOOK: query: select * from v_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t -POSTHOOK: Input: default@v +POSTHOOK: Input: default@t_n8 +POSTHOOK: Input: default@v_n4 #### A masked pattern was here #### 1 2 1 NULL @@ -388,13 +388,13 @@ POSTHOOK: Input: default@v 4 3 NULL 2 NULL NULL -PREHOOK: query: select * from (select t1.b b from t t1 inner join t t2 using (b)) t3 join t t4 using(b) +PREHOOK: query: select * from (select t1.b b from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(b) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from (select t1.b b from t t1 inner join t t2 using (b)) t3 join t t4 using(b) +POSTHOOK: query: select * from (select t1.b b from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 1 2 @@ -414,13 +414,13 @@ POSTHOOK: Input: default@t 3 4 3 4 4 3 -PREHOOK: query: select * from (select t2.a a from t t1 inner join t t2 using (b)) t3 join t t4 using(a) +PREHOOK: query: select * from (select t2.a a from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(a) PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 #### A masked pattern was here #### -POSTHOOK: query: select * from (select t2.a a from t t1 inner join t t2 using (b)) t3 join t t4 using(a) +POSTHOOK: query: select * from (select t2.a a from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(a) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 #### A masked pattern was here #### 1 2 2 1 @@ -435,24 +435,24 @@ POSTHOOK: Input: default@t 3 4 4 3 4 3 
-PREHOOK: query: create table tt as select * from (select t2.a a from t t1 inner join t t2 using (b)) t3 join t t4 using(a) +PREHOOK: query: create table tt_n0 as select * from (select t2.a a from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(a) PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n8 PREHOOK: Output: database:default -PREHOOK: Output: default@tt -POSTHOOK: query: create table tt as select * from (select t2.a a from t t1 inner join t t2 using (b)) t3 join t t4 using(a) +PREHOOK: Output: default@tt_n0 +POSTHOOK: query: create table tt_n0 as select * from (select t2.a a from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(a) POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n8 POSTHOOK: Output: database:default -POSTHOOK: Output: default@tt -POSTHOOK: Lineage: tt.a SIMPLE [(t)t2.FieldSchema(name:a, type:int, comment:null), ] -POSTHOOK: Lineage: tt.b SIMPLE [(t)t4.FieldSchema(name:b, type:int, comment:null), ] -PREHOOK: query: desc formatted tt +POSTHOOK: Output: default@tt_n0 +POSTHOOK: Lineage: tt_n0.a SIMPLE [(t_n8)t2.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: tt_n0.b SIMPLE [(t_n8)t4.FieldSchema(name:b, type:int, comment:null), ] +PREHOOK: query: desc formatted tt_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tt -POSTHOOK: query: desc formatted tt +PREHOOK: Input: default@tt_n0 +POSTHOOK: query: desc formatted tt_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tt +POSTHOOK: Input: default@tt_n0 # col_name data_type comment a int b int diff --git a/ql/src/test/results/clientpositive/nested_column_pruning.q.out b/ql/src/test/results/clientpositive/nested_column_pruning.q.out index 500ab21926..69ddfc0ff2 100644 --- a/ql/src/test/results/clientpositive/nested_column_pruning.q.out +++ b/ql/src/test/results/clientpositive/nested_column_pruning.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: DROP TABLE IF EXISTS dummy +PREHOOK: query: DROP TABLE IF EXISTS dummy_n5 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS dummy +POSTHOOK: query: DROP TABLE IF EXISTS dummy_n5 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE dummy (i int) +PREHOOK: query: CREATE TABLE dummy_n5 (i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dummy -POSTHOOK: query: CREATE TABLE dummy (i int) +PREHOOK: Output: default@dummy_n5 +POSTHOOK: query: CREATE TABLE dummy_n5 (i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dummy -PREHOOK: query: INSERT INTO TABLE dummy VALUES (42) +POSTHOOK: Output: default@dummy_n5 +PREHOOK: query: INSERT INTO TABLE dummy_n5 VALUES (42) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@dummy -POSTHOOK: query: INSERT INTO TABLE dummy VALUES (42) +PREHOOK: Output: default@dummy_n5 +POSTHOOK: query: INSERT INTO TABLE dummy_n5 VALUES (42) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@dummy -POSTHOOK: Lineage: dummy.i SCRIPT [] -PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_1 +POSTHOOK: Output: default@dummy_n5 +POSTHOOK: Lineage: dummy_n5.i SCRIPT [] +PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_1_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_1 +POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_1_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE nested_tbl_1 ( +PREHOOK: query: CREATE TABLE 
nested_tbl_1_n1 ( a int, s1 struct<f1: boolean, f2: string, f3: struct<f4: int, f5: double>, f6: int>, s2 struct<f7: string, f8: struct<f9: boolean, f10: array<int>, f11: map<string, boolean>>>, @@ -34,8 +34,8 @@ PREHOOK: query: CREATE TABLE nested_tbl_1 ( ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@nested_tbl_1 -POSTHOOK: query: CREATE TABLE nested_tbl_1 ( +PREHOOK: Output: default@nested_tbl_1_n1 +POSTHOOK: query: CREATE TABLE nested_tbl_1_n1 ( a int, s1 struct<f1: boolean, f2: string, f3: struct<f4: int, f5: double>, f6: int>, s2 struct<f7: string, f8: struct<f9: boolean, f10: array<int>, f11: map<string, boolean>>>, @@ -46,8 +46,8 @@ POSTHOOK: query: CREATE TABLE nested_tbl_1 ( ) STORED AS PARQUET POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@nested_tbl_1 -PREHOOK: query: INSERT INTO TABLE nested_tbl_1 SELECT +POSTHOOK: Output: default@nested_tbl_1_n1 +PREHOOK: query: INSERT INTO TABLE nested_tbl_1_n1 SELECT 1, named_struct('f1', false, 'f2', 'foo', 'f3', named_struct('f4', 4, 'f5', cast(5.0 as double)), 'f6', 4), named_struct('f7', 'f7', 'f8', named_struct('f9', true, 'f10', array(10, 11), 'f11', map('key1', true, 'key2', false))), named_struct('f12', array(named_struct('f13', 'foo', 'f14', 14), named_struct('f13', 'bar', 'f14', 28))), @@ -55,11 +55,11 @@ PREHOOK: query: INSERT INTO TABLE nested_tbl_1 SELECT named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))), map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))), 'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2))))) -FROM dummy +FROM dummy_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@dummy -PREHOOK: Output: default@nested_tbl_1 -POSTHOOK: query: INSERT INTO TABLE nested_tbl_1 SELECT +PREHOOK: Input: default@dummy_n5 +PREHOOK: Output: default@nested_tbl_1_n1 +POSTHOOK: query: INSERT INTO TABLE nested_tbl_1_n1 SELECT 1, named_struct('f1', false, 'f2', 'foo', 'f3', named_struct('f4', 4, 'f5', cast(5.0 as double)), 'f6', 4), named_struct('f7', 'f7', 'f8', named_struct('f9', true, 'f10', array(10, 11), 'f11', map('key1', true, 'key2', false))), named_struct('f12', array(named_struct('f13', 'foo', 'f14', 14), named_struct('f13', 'bar', 'f14', 28))), @@ -67,30 +67,30 @@ POSTHOOK: query: INSERT INTO TABLE nested_tbl_1 SELECT named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))), map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))), 'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2))))) -FROM dummy +FROM dummy_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dummy -POSTHOOK: Output: default@nested_tbl_1 -POSTHOOK: Lineage: nested_tbl_1.a SIMPLE [] -POSTHOOK: Lineage: nested_tbl_1.s1 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_1.s2 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_1.s3 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_1.s4 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_1.s5 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_1.s6 EXPRESSION [] -PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_2 +POSTHOOK: Input: default@dummy_n5 +POSTHOOK: Output: default@nested_tbl_1_n1 +POSTHOOK: Lineage: nested_tbl_1_n1.a SIMPLE [] +POSTHOOK: Lineage: nested_tbl_1_n1.s1 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_1_n1.s2 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_1_n1.s3 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_1_n1.s4 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_1_n1.s5 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_1_n1.s6 EXPRESSION [] +PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_2_n1
PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_2 +POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_2_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE nested_tbl_2 LIKE nested_tbl_1 +PREHOOK: query: CREATE TABLE nested_tbl_2_n1 LIKE nested_tbl_1_n1 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@nested_tbl_2 -POSTHOOK: query: CREATE TABLE nested_tbl_2 LIKE nested_tbl_1 +PREHOOK: Output: default@nested_tbl_2_n1 +POSTHOOK: query: CREATE TABLE nested_tbl_2_n1 LIKE nested_tbl_1_n1 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@nested_tbl_2 -PREHOOK: query: INSERT INTO TABLE nested_tbl_2 SELECT +POSTHOOK: Output: default@nested_tbl_2_n1 +PREHOOK: query: INSERT INTO TABLE nested_tbl_2_n1 SELECT 2, named_struct('f1', true, 'f2', 'bar', 'f3', named_struct('f4', 4, 'f5', cast(6.5 as double)), 'f6', 4), named_struct('f7', 'f72', 'f8', named_struct('f9', false, 'f10', array(20, 22), 'f11', map('key3', true, 'key4', false))), named_struct('f12', array(named_struct('f13', 'bar', 'f14', 28), named_struct('f13', 'foo', 'f14', 56))), @@ -98,11 +98,11 @@ PREHOOK: query: INSERT INTO TABLE nested_tbl_2 SELECT named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))), map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))), 'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4))))) -FROM dummy +FROM dummy_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@dummy -PREHOOK: Output: default@nested_tbl_2 -POSTHOOK: query: INSERT INTO TABLE nested_tbl_2 SELECT +PREHOOK: Input: default@dummy_n5 +PREHOOK: Output: default@nested_tbl_2_n1 +POSTHOOK: query: INSERT INTO TABLE nested_tbl_2_n1 SELECT 2, named_struct('f1', true, 'f2', 'bar', 'f3', named_struct('f4', 4, 'f5', cast(6.5 as double)), 'f6', 4), named_struct('f7', 'f72', 'f8', named_struct('f9', false, 'f10', array(20, 22), 'f11', map('key3', true, 'key4', false))), named_struct('f12', array(named_struct('f13', 'bar', 'f14', 28), named_struct('f13', 'foo', 'f14', 56))), @@ -110,20 +110,20 @@ POSTHOOK: query: INSERT INTO TABLE nested_tbl_2 SELECT named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))), map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))), 'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4))))) -FROM dummy +FROM dummy_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dummy -POSTHOOK: Output: default@nested_tbl_2 -POSTHOOK: Lineage: nested_tbl_2.a SIMPLE [] -POSTHOOK: Lineage: nested_tbl_2.s1 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_2.s2 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_2.s3 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_2.s4 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_2.s5 EXPRESSION [] -POSTHOOK: Lineage: nested_tbl_2.s6 EXPRESSION [] -PREHOOK: query: EXPLAIN SELECT a FROM nested_tbl_1 +POSTHOOK: Input: default@dummy_n5 +POSTHOOK: Output: default@nested_tbl_2_n1 +POSTHOOK: Lineage: nested_tbl_2_n1.a SIMPLE [] +POSTHOOK: Lineage: nested_tbl_2_n1.s1 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_2_n1.s2 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_2_n1.s3 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_2_n1.s4 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_2_n1.s5 EXPRESSION [] +POSTHOOK: Lineage: nested_tbl_2_n1.s6 EXPRESSION [] +PREHOOK: 
query: EXPLAIN SELECT a FROM nested_tbl_1_n1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT a FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT a FROM nested_tbl_1_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -134,7 +134,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: int) @@ -154,18 +154,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a FROM nested_tbl_1 +PREHOOK: query: SELECT a FROM nested_tbl_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT a FROM nested_tbl_1 +POSTHOOK: query: SELECT a FROM nested_tbl_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 1 -PREHOOK: query: EXPLAIN SELECT s1.f1 FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT s1.f1 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f1 FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT s1.f1 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -176,7 +176,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f1 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -197,18 +197,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f1 FROM nested_tbl_1 +PREHOOK: query: SELECT s1.f1 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f1 FROM nested_tbl_1 +POSTHOOK: query: SELECT s1.f1 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### false -PREHOOK: query: EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -219,7 +219,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f1, s1.f2 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -240,18 +240,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f1, s1.f2 FROM nested_tbl_1 +PREHOOK: query: SELECT s1.f1, s1.f2 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f1, s1.f2 FROM nested_tbl_1 +POSTHOOK: query: SELECT s1.f1, s1.f2 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### false foo -PREHOOK: query: EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT s1.f3, s1.f3.f4 FROM 
nested_tbl_1_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -262,7 +262,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f3 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -283,18 +283,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1 +PREHOOK: query: SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1 +POSTHOOK: query: SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### {"f4":4,"f5":5.0} 4 -PREHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -305,7 +305,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f3.f5 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -326,18 +326,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1 +PREHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1 +POSTHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 5.0 -PREHOOK: query: EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -348,7 +348,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f3.f4, s2.f8.f9 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -369,18 +369,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1 +PREHOOK: query: SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1 +POSTHOOK: query: SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 4 true -PREHOOK: query: EXPLAIN SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE +PREHOOK: query: EXPLAIN SELECT s1.f2 FROM nested_tbl_1_n1 WHERE s1.f1 = FALSE PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE +POSTHOOK: query: EXPLAIN SELECT s1.f2 
FROM nested_tbl_1_n1 WHERE s1.f1 = FALSE POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -391,7 +391,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f2, s1.f1 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Filter Operator @@ -415,18 +415,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE +PREHOOK: query: SELECT s1.f2 FROM nested_tbl_1_n1 WHERE s1.f1 = FALSE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE +POSTHOOK: query: SELECT s1.f2 FROM nested_tbl_1_n1 WHERE s1.f1 = FALSE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### foo -PREHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4 +PREHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n1 WHERE s1.f3.f4 = 4 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4 +POSTHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n1 WHERE s1.f3.f4 = 4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -437,7 +437,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f3.f5, s1.f3.f4 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Filter Operator @@ -461,18 +461,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4 +PREHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1_n1 WHERE s1.f3.f4 = 4 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4 +POSTHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1_n1 WHERE s1.f3.f4 = 4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 5.0 -PREHOOK: query: EXPLAIN SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE +PREHOOK: query: EXPLAIN SELECT s2.f8 FROM nested_tbl_1_n1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE +POSTHOOK: query: EXPLAIN SELECT s2.f8 FROM nested_tbl_1_n1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -483,7 +483,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f2, s2.f8 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Filter Operator @@ -507,20 +507,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE +PREHOOK: query: SELECT s2.f8 FROM nested_tbl_1_n1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was 
here #### -POSTHOOK: query: SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE +POSTHOOK: query: SELECT s2.f8 FROM nested_tbl_1_n1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### {"f9":true,"f10":[10,11],"f11":{"key1":true,"key2":false}} -PREHOOK: query: EXPLAIN SELECT col1, col2 FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT col1, col2 FROM nested_tbl_1_n1 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT col1, col2 FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT col1, col2 FROM nested_tbl_1_n1 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2 POSTHOOK: type: QUERY @@ -533,7 +533,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s2.f8.f10 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Lateral View Forward @@ -644,25 +644,25 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT col1, col2 FROM nested_tbl_1 +PREHOOK: query: SELECT col1, col2 FROM nested_tbl_1_n1 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT col1, col2 FROM nested_tbl_1 +POSTHOOK: query: SELECT col1, col2 FROM nested_tbl_1_n1 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 10 {"f13":"foo","f14":14} 10 {"f13":"bar","f14":28} 11 {"f13":"foo","f14":14} 11 {"f13":"bar","f14":28} -PREHOOK: query: EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -673,7 +673,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s2.f8.f10, s1.f3.f4 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -694,18 +694,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1 +PREHOOK: query: SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1 +POSTHOOK: query: SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 3 -PREHOOK: query: EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5 +PREHOOK: query: EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3.f5 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT 
s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5 +POSTHOOK: query: EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3.f5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -716,7 +716,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f3.f5, s1.f3.f4 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -756,18 +756,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5 +PREHOOK: query: SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3.f5 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5 +POSTHOOK: query: SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3.f5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 5.0 1 -PREHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 +PREHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 +POSTHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -778,7 +778,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f3 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -818,18 +818,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 +PREHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 +POSTHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### {"f4":4,"f5":5.0} 1 -PREHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3 +PREHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 ORDER BY s1.f3 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3 +POSTHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 ORDER BY s1.f3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -841,7 +841,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f3 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -902,22 +902,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3 +PREHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 ORDER BY s1.f3 PREHOOK: 
type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3 +POSTHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 ORDER BY s1.f3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### {"f4":4,"f5":5.0} 1 PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE POSTHOOK: type: QUERY @@ -989,29 +989,29 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 -PREHOOK: Input: default@nested_tbl_2 +PREHOOK: Input: default@nested_tbl_1_n1 +PREHOOK: Input: default@nested_tbl_2_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 -POSTHOOK: Input: default@nested_tbl_2 +POSTHOOK: Input: default@nested_tbl_1_n1 +POSTHOOK: Input: default@nested_tbl_2_n1 #### A masked pattern was here #### 5.0 {"f9":false,"f10":[20,22],"f11":{"key3":true,"key4":false}} PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE POSTHOOK: type: QUERY @@ -1083,26 +1083,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 5.0 {"f9":true,"f10":[10,11],"f11":{"key1":true,"key2":false}} PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1178,25 +1178,25 @@ STAGE 
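The join hunks above all verify the same shape: an equi-join keyed on nested struct members plus a residual filter on a nested boolean (Hive accepts == as a synonym for =). Condensed from the golden files:

SELECT t1.s1.f3.f5, t2.s2.f8
FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2
  ON t1.s1.f3.f4 = t2.s1.f6
WHERE t2.s2.f8.f9 == FALSE;
-- expected: 5.0  {"f9":false,"f10":[20,22],"f11":{"key3":true,"key4":false}}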
PLANS: ListSink PREHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 5.0 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f1 <> t2.s2.f8.f9 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f1 <> t2.s2.f8.f9 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1259,24 +1259,24 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f1 <> t2.s2.f8.f9 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f1 <> t2.s2.f8.f9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 5.0 PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1349,63 +1349,63 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 5.0 -PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_3 +PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_3_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_3 +POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_3_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE nested_tbl_3 (f1 boolean, f2 string) PARTITIONED BY (f3 int) 
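The "cross product" warnings recorded above are expected output, not a regression: a pure non-equality ON condition gives the shuffle join no key to hash on, so the planner falls back to a cross product and applies the predicate as a residual filter. Side by side:

-- warned: <> alone cannot serve as a shuffle key, so this plans as a cross product
SELECT t1.s1.f3.f5
FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2
ON t1.s1.f1 <> t2.s2.f8.f9;

-- not warned: the added equality term restores a keyed join
SELECT t1.s1.f3.f5
FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2
ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9;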
STORED AS PARQUET +PREHOOK: query: CREATE TABLE nested_tbl_3_n1 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@nested_tbl_3 -POSTHOOK: query: CREATE TABLE nested_tbl_3 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS PARQUET +PREHOOK: Output: default@nested_tbl_3_n1 +POSTHOOK: query: CREATE TABLE nested_tbl_3_n1 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS PARQUET POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@nested_tbl_3 -PREHOOK: query: INSERT OVERWRITE TABLE nested_tbl_3 PARTITION(f3) +POSTHOOK: Output: default@nested_tbl_3_n1 +PREHOOK: query: INSERT OVERWRITE TABLE nested_tbl_3_n1 PARTITION(f3) SELECT s1.f1 AS f1, S1.f2 AS f2, s1.f6 AS f3 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 -PREHOOK: Output: default@nested_tbl_3 -POSTHOOK: query: INSERT OVERWRITE TABLE nested_tbl_3 PARTITION(f3) +PREHOOK: Input: default@nested_tbl_1_n1 +PREHOOK: Output: default@nested_tbl_3_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE nested_tbl_3_n1 PARTITION(f3) SELECT s1.f1 AS f1, S1.f2 AS f2, s1.f6 AS f3 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 -POSTHOOK: Output: default@nested_tbl_3@f3=4 -POSTHOOK: Lineage: nested_tbl_3 PARTITION(f3=4).f1 EXPRESSION [(nested_tbl_1)nested_tbl_1.FieldSchema(name:s1, type:struct<f1:boolean,f2:string,f3:struct<f4:int,f5:double>,f6:int>, comment:null), ] -POSTHOOK: Lineage: nested_tbl_3 PARTITION(f3=4).f2 EXPRESSION [(nested_tbl_1)nested_tbl_1.FieldSchema(name:s1, type:struct<f1:boolean,f2:string,f3:struct<f4:int,f5:double>,f6:int>, comment:null), ] -PREHOOK: query: SELECT * FROM nested_tbl_3 +POSTHOOK: Input: default@nested_tbl_1_n1 +POSTHOOK: Output: default@nested_tbl_3_n1@f3=4 +POSTHOOK: Lineage: nested_tbl_3_n1 PARTITION(f3=4).f1 EXPRESSION [(nested_tbl_1_n1)nested_tbl_1_n1.FieldSchema(name:s1, type:struct<f1:boolean,f2:string,f3:struct<f4:int,f5:double>,f6:int>, comment:null), ] +POSTHOOK: Lineage: nested_tbl_3_n1 PARTITION(f3=4).f2 EXPRESSION [(nested_tbl_1_n1)nested_tbl_1_n1.FieldSchema(name:s1, type:struct<f1:boolean,f2:string,f3:struct<f4:int,f5:double>,f6:int>, comment:null), ] +PREHOOK: query: SELECT * FROM nested_tbl_3_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_3 -PREHOOK: Input: default@nested_tbl_3@f3=4 +PREHOOK: Input: default@nested_tbl_3_n1 +PREHOOK: Input: default@nested_tbl_3_n1@f3=4 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM nested_tbl_3 +POSTHOOK: query: SELECT * FROM nested_tbl_3_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_3 -POSTHOOK: Input: default@nested_tbl_3@f3=4 +POSTHOOK: Input: default@nested_tbl_3_n1 +POSTHOOK: Input: default@nested_tbl_3_n1@f3=4 #### A masked pattern was here #### false foo 4 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s3.f12[0].f14 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s3.f12[0].f14 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1417,7 +1417,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s3.f12, s1.f6 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1462,26 +1462,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s3.f12[0].f14 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input:
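The nested_tbl_3_n1 hunks exercise a dynamic-partition insert sourced from nested fields: the partition column f3 is populated from s1.f6 at write time, which is why the POSTHOOK output names the concrete partition f3=4. In isolation (assuming dynamic partitioning is enabled in the session):

INSERT OVERWRITE TABLE nested_tbl_3_n1 PARTITION(f3)
SELECT s1.f1 AS f1, s1.f2 AS f2, s1.f6 AS f3
FROM nested_tbl_1_n1;
SELECT * FROM nested_tbl_3_n1;   -- expected: false  foo  4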
default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s3.f12[0].f14 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 1 14 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s4['key1'].f15 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s4['key1'].f15 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1493,7 +1493,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f6 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1538,26 +1538,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s4['key1'].f15 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s4['key1'].f15 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 1 1 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16[0].f18.f19 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16[0].f18.f19 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1569,7 +1569,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s5.f16, s1.f6 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1614,26 +1614,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16[0].f18.f19 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16[0].f18.f19 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 1 14 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16.f18.f19 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16.f18.f19 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1645,7 +1645,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f6, s5.f16 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1690,26 +1690,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s5.f16.f18.f19 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY 
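Worth noting in the two plans above: the array access s3.f12[0].f14 still contributes to pruning ("Pruned Column Paths: s3.f12, s1.f6"), whereas the map access s4['key1'].f15 leaves only s1.f6 in the list, meaning the map column is read whole. The queries, for reference:

SELECT count(s1.f6), s3.f12[0].f14 FROM nested_tbl_1_n1 GROUP BY s3.f12[0].f14;   -- 1  14
SELECT count(s1.f6), s4['key1'].f15 FROM nested_tbl_1_n1 GROUP BY s4['key1'].f15; -- 1  1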
s5.f16.f18.f19 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 1 [14,28] PREHOOK: query: EXPLAIN SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20[0].f21.f22 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20[0].f21.f22 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1721,7 +1721,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f6 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1766,26 +1766,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20[0].f21.f22 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20[0].f21.f22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 1 1 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20.f21.f22 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20.f21.f22 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1797,7 +1797,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n1 Pruned Column Paths: s1.f6 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1842,15 +1842,15 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20.f21.f22 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n1 GROUP BY s6['key1'].f20.f21.f22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n1 #### A masked pattern was here #### 1 [1] diff --git a/ql/src/test/results/clientpositive/newline.q.out b/ql/src/test/results/clientpositive/newline.q.out index 132946e88d..bea4e6ce1c 100644 --- a/ql/src/test/results/clientpositive/newline.q.out +++ b/ql/src/test/results/clientpositive/newline.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: create table tmp_tmp(key string, value string) stored as rcfile +PREHOOK: query: create table tmp_tmp_n0(key string, value string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: create table tmp_tmp(key string, value string) stored as rcfile +PREHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: query: create table tmp_tmp_n0(key string, value string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp_tmp -PREHOOK: query: insert overwrite table tmp_tmp +POSTHOOK: Output: default@tmp_tmp_n0 +PREHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, 
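The s5 pair above demonstrates Hive's unindexed path semantics: omitting the array index projects the field from every element, so the grouping key becomes an array rather than a scalar. The s6 variants above repeat the same contrast through a map (1  1 versus 1  [1]):

SELECT count(s1.f6), s5.f16[0].f18.f19 FROM nested_tbl_1_n1 GROUP BY s5.f16[0].f18.f19; -- 1  14
SELECT count(s1.f6), s5.f16.f18.f19    FROM nested_tbl_1_n1 GROUP BY s5.f16.f18.f19;    -- 1  [14,28]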
value) USING 'python newline.py' AS key, value FROM src limit 6 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: insert overwrite table tmp_tmp +PREHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python newline.py' AS key, value FROM src limit 6 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmp_tmp -POSTHOOK: Lineage: tmp_tmp.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tmp_tmp.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from tmp_tmp +POSTHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: Lineage: tmp_tmp_n0.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_tmp_n0.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from tmp_tmp +POSTHOOK: query: select * from tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1 2 NULL 1 2 NULL @@ -38,43 +38,43 @@ POSTHOOK: Input: default@tmp_tmp 2 NULL 1 2 NULL -PREHOOK: query: drop table tmp_tmp +PREHOOK: query: drop table tmp_tmp_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tmp_tmp -PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: drop table tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 +PREHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: query: drop table tmp_tmp_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tmp_tmp -POSTHOOK: Output: default@tmp_tmp -PREHOOK: query: create table tmp_tmp(key string, value string) stored as rcfile +POSTHOOK: Input: default@tmp_tmp_n0 +POSTHOOK: Output: default@tmp_tmp_n0 +PREHOOK: query: create table tmp_tmp_n0(key string, value string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: create table tmp_tmp(key string, value string) stored as rcfile +PREHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: query: create table tmp_tmp_n0(key string, value string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp_tmp -PREHOOK: query: insert overwrite table tmp_tmp +POSTHOOK: Output: default@tmp_tmp_n0 +PREHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapednewline.py' AS key, value FROM src limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: insert overwrite table tmp_tmp +PREHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapednewline.py' AS key, value FROM src limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmp_tmp -POSTHOOK: Lineage: tmp_tmp.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: 
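The tmp_tmp_n0 blocks above and below all follow one recipe: write control characters through a TRANSFORM script, then check that the escaped form round-trips through both a plain SELECT and a second TRANSFORM pass. For the newline case:

INSERT OVERWRITE TABLE tmp_tmp_n0
SELECT TRANSFORM(key, value) USING 'python escapednewline.py' AS key, value
FROM src LIMIT 5;
SELECT * FROM tmp_tmp_n0;                                   -- five rows of: 1\n2  NULL
SELECT TRANSFORM(key, value) USING 'cat' AS (key, value)
FROM tmp_tmp_n0;                                            -- same five rows, unchanged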
tmp_tmp.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from tmp_tmp +POSTHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: Lineage: tmp_tmp_n0.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_tmp_n0.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from tmp_tmp +POSTHOOK: query: select * from tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1\n2 NULL 1\n2 NULL @@ -82,41 +82,41 @@ POSTHOOK: Input: default@tmp_tmp 1\n2 NULL 1\n2 NULL PREHOOK: query: SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp +'cat' AS (key, value) FROM tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp +'cat' AS (key, value) FROM tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1\n2 NULL 1\n2 NULL 1\n2 NULL 1\n2 NULL 1\n2 NULL -PREHOOK: query: insert overwrite table tmp_tmp +PREHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapedcarriagereturn.py' AS key, value FROM src limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: insert overwrite table tmp_tmp +PREHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapedcarriagereturn.py' AS key, value FROM src limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmp_tmp -POSTHOOK: Lineage: tmp_tmp.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tmp_tmp.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from tmp_tmp +POSTHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: Lineage: tmp_tmp_n0.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_tmp_n0.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from tmp_tmp +POSTHOOK: query: select * from tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1\r2 NULL 1\r2 NULL @@ -124,41 +124,41 @@ POSTHOOK: Input: default@tmp_tmp 1\r2 NULL 1\r2 NULL PREHOOK: query: SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp +'cat' AS (key, value) 
FROM tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp +'cat' AS (key, value) FROM tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1\r2 NULL 1\r2 NULL 1\r2 NULL 1\r2 NULL 1\r2 NULL -PREHOOK: query: insert overwrite table tmp_tmp +PREHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapedtab.py' AS key, value FROM src limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: insert overwrite table tmp_tmp +PREHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python escapedtab.py' AS key, value FROM src limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmp_tmp -POSTHOOK: Lineage: tmp_tmp.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tmp_tmp.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from tmp_tmp +POSTHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: Lineage: tmp_tmp_n0.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_tmp_n0.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from tmp_tmp +POSTHOOK: query: select * from tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1\t2 NULL 1\t2 NULL @@ -166,41 +166,41 @@ POSTHOOK: Input: default@tmp_tmp 1\t2 NULL 1\t2 NULL PREHOOK: query: SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp +'cat' AS (key, value) FROM tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp +'cat' AS (key, value) FROM tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1\t2 NULL 1\t2 NULL 1\t2 NULL 1\t2 NULL 1\t2 NULL -PREHOOK: query: insert overwrite table tmp_tmp +PREHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python doubleescapedtab.py' AS key, value FROM src limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: insert overwrite table tmp_tmp +PREHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: query: insert overwrite table tmp_tmp_n0 SELECT TRANSFORM(key, value) USING 'python doubleescapedtab.py' AS key, value FROM src limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmp_tmp -POSTHOOK: Lineage: tmp_tmp.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, 
type:string, comment:default), ] -POSTHOOK: Lineage: tmp_tmp.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from tmp_tmp +POSTHOOK: Output: default@tmp_tmp_n0 +POSTHOOK: Lineage: tmp_tmp_n0.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tmp_tmp_n0.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from tmp_tmp +POSTHOOK: query: select * from tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1\ 2 NULL 1\ 2 NULL @@ -208,14 +208,14 @@ POSTHOOK: Input: default@tmp_tmp 1\\t2 NULL 1\\t2 NULL PREHOOK: query: SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp +'cat' AS (key, value) FROM tmp_tmp_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tmp_tmp +PREHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT TRANSFORM(key, value) USING -'cat' AS (key, value) FROM tmp_tmp +'cat' AS (key, value) FROM tmp_tmp_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp_tmp +POSTHOOK: Input: default@tmp_tmp_n0 #### A masked pattern was here #### 1\ 2 NULL 1\ 2 NULL diff --git a/ql/src/test/results/clientpositive/notable_alias1.q.out b/ql/src/test/results/clientpositive/notable_alias1.q.out index 84f70a8fe8..4004bf5a96 100644 --- a/ql/src/test/results/clientpositive/notable_alias1.q.out +++ b/ql/src/test/results/clientpositive/notable_alias1.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n4(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n4 +POSTHOOK: query: CREATE TABLE dest1_n4(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n4 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key +INSERT OVERWRITE TABLE dest1_n4 SELECT '1234', key, count(1) WHERE src.key < 100 group by key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key +INSERT OVERWRITE TABLE dest1_n4 SELECT '1234', key, count(1) WHERE src.key < 100 group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -61,7 +61,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n4 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: double) outputColumnNames: dummy, key, value @@ -86,7 +86,7 @@ STAGE PLANS: input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n4 Stage: Stage-2 Stats Work @@ -94,7 +94,7 @@ STAGE PLANS: Column Stats Desc: Columns: dummy, key, value Column Types: string, int, double - Table: default.dest1 + Table: default.dest1_n4 Stage: Stage-3 Map Reduce @@ -120,25 +120,25 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key +INSERT OVERWRITE TABLE dest1_n4 SELECT '1234', key, count(1) WHERE src.key < 100 group by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n4 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key +INSERT OVERWRITE TABLE dest1_n4 SELECT '1234', key, count(1) WHERE src.key < 100 group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.dummy SIMPLE [] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.null, ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n4 +POSTHOOK: Lineage: dest1_n4.dummy SIMPLE [] +POSTHOOK: Lineage: dest1_n4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n4.value EXPRESSION [(src)src.null, ] +PREHOOK: query: SELECT dest1_n4.* FROM dest1_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n4.* FROM dest1_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n4 #### A masked pattern was here #### 1234 0 3.0 1234 10 1.0 diff --git a/ql/src/test/results/clientpositive/notable_alias2.q.out b/ql/src/test/results/clientpositive/notable_alias2.q.out index 8b7c992584..0f09fd2c2e 100644 --- a/ql/src/test/results/clientpositive/notable_alias2.q.out +++ b/ql/src/test/results/clientpositive/notable_alias2.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n44(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n44 +POSTHOOK: query: CREATE TABLE dest1_n44(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n44 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key +INSERT OVERWRITE TABLE dest1_n44 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key +INSERT OVERWRITE TABLE dest1_n44 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root 
stage @@ -61,7 +61,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n44 Select Operator expressions: _col0 (type: string), _col1 (type: int), _col2 (type: double) outputColumnNames: dummy, key, value @@ -86,7 +86,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n44 Stage: Stage-2 Stats Work @@ -94,7 +94,7 @@ STAGE PLANS: Column Stats Desc: Columns: dummy, key, value Column Types: string, int, double - Table: default.dest1 + Table: default.dest1_n44 Stage: Stage-3 Map Reduce @@ -120,25 +120,25 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key +INSERT OVERWRITE TABLE dest1_n44 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n44 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key +INSERT OVERWRITE TABLE dest1_n44 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.dummy SIMPLE [] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.null, ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n44 +POSTHOOK: Lineage: dest1_n44.dummy SIMPLE [] +POSTHOOK: Lineage: dest1_n44.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n44.value EXPRESSION [(src)src.null, ] +PREHOOK: query: SELECT dest1_n44.* FROM dest1_n44 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n44 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n44.* FROM dest1_n44 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n44 #### A masked pattern was here #### 1234 0 3.0 1234 10 1.0 diff --git a/ql/src/test/results/clientpositive/notable_alias3.q.out b/ql/src/test/results/clientpositive/notable_alias3.q.out index ddc28ddbe3..40d3f64aa7 100644 --- a/ql/src/test/results/clientpositive/notable_alias3.q.out +++ b/ql/src/test/results/clientpositive/notable_alias3.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE TABLE dest1(c string, key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n85(c string, key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c string, key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n85 +POSTHOOK: query: CREATE TABLE dest1_n85(c string, key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n85 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, 
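The notable_alias tests all verify Hive's FROM-first insert form with an unaliased constant column; the lineage entries above record the constant as SIMPLE [] (no source column) and the computed columns as EXPRESSION over src. The core statement:

FROM src
INSERT OVERWRITE TABLE dest1_n44
SELECT '1234', src.key, count(1) WHERE key < 100 GROUP BY src.key;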
sum(src.value) WHERE src.key < 100 group by key +INSERT OVERWRITE TABLE dest1_n85 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest1_n85 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key +INSERT OVERWRITE TABLE dest1_n85 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c SIMPLE [] -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@dest1_n85 +POSTHOOK: Lineage: dest1_n85.c SIMPLE [] +POSTHOOK: Lineage: dest1_n85.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n85.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] diff --git a/ql/src/test/results/clientpositive/nullMap.q.out b/ql/src/test/results/clientpositive/nullMap.q.out index af9b606a53..4d3158c9ca 100644 --- a/ql/src/test/results/clientpositive/nullMap.q.out +++ b/ql/src/test/results/clientpositive/nullMap.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table map_txt ( +PREHOOK: query: create table map_txt_n0 ( id int, content map ) @@ -7,8 +7,8 @@ null defined as '\\N' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@map_txt -POSTHOOK: query: create table map_txt ( +PREHOOK: Output: default@map_txt_n0 +POSTHOOK: query: create table map_txt_n0 ( id int, content map ) @@ -17,30 +17,30 @@ null defined as '\\N' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@map_txt -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/mapNull.txt' INTO TABLE map_txt +POSTHOOK: Output: default@map_txt_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/mapNull.txt' INTO TABLE map_txt_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@map_txt -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/mapNull.txt' INTO TABLE map_txt +PREHOOK: Output: default@map_txt_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/mapNull.txt' INTO TABLE map_txt_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@map_txt -PREHOOK: query: select * from map_txt +POSTHOOK: Output: default@map_txt_n0 +PREHOOK: query: select * from map_txt_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@map_txt +PREHOOK: Input: default@map_txt_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from map_txt +POSTHOOK: query: select * from map_txt_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_txt +POSTHOOK: Input: default@map_txt_n0 #### A masked pattern was here #### 1 NULL -PREHOOK: query: select id, map_keys(content) from map_txt +PREHOOK: query: select id, map_keys(content) from map_txt_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@map_txt +PREHOOK: Input: default@map_txt_n0 #### A masked pattern was here #### -POSTHOOK: query: select id, map_keys(content) from map_txt +POSTHOOK: query: select id, map_keys(content) from map_txt_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_txt +POSTHOOK: Input: default@map_txt_n0 #### A masked pattern 
was here #### 1 [] diff --git a/ql/src/test/results/clientpositive/null_column.q.out b/ql/src/test/results/clientpositive/null_column.q.out index 1ca69ff9a3..a96e8c1cf6 100644 --- a/ql/src/test/results/clientpositive/null_column.q.out +++ b/ql/src/test/results/clientpositive/null_column.q.out @@ -28,31 +28,31 @@ NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: create table tt(a int, b string) +PREHOOK: query: create table tt_n1(a int, b string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tt -POSTHOOK: query: create table tt(a int, b string) +PREHOOK: Output: default@tt_n1 +POSTHOOK: query: create table tt_n1(a int, b string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tt -PREHOOK: query: insert overwrite table tt select null, null from temp_null +POSTHOOK: Output: default@tt_n1 +PREHOOK: query: insert overwrite table tt_n1 select null, null from temp_null PREHOOK: type: QUERY PREHOOK: Input: default@temp_null -PREHOOK: Output: default@tt -POSTHOOK: query: insert overwrite table tt select null, null from temp_null +PREHOOK: Output: default@tt_n1 +POSTHOOK: query: insert overwrite table tt_n1 select null, null from temp_null POSTHOOK: type: QUERY POSTHOOK: Input: default@temp_null -POSTHOOK: Output: default@tt -POSTHOOK: Lineage: tt.a EXPRESSION [] -POSTHOOK: Lineage: tt.b EXPRESSION [] -PREHOOK: query: select * from tt +POSTHOOK: Output: default@tt_n1 +POSTHOOK: Lineage: tt_n1.a EXPRESSION [] +POSTHOOK: Lineage: tt_n1.b EXPRESSION [] +PREHOOK: query: select * from tt_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@tt +PREHOOK: Input: default@tt_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from tt +POSTHOOK: query: select * from tt_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tt +POSTHOOK: Input: default@tt_n1 #### A masked pattern was here #### NULL NULL NULL NULL diff --git a/ql/src/test/results/clientpositive/nullability_transitive_inference.q.out b/ql/src/test/results/clientpositive/nullability_transitive_inference.q.out index 97a7f2ebf4..c745a7d5e9 100644 --- a/ql/src/test/results/clientpositive/nullability_transitive_inference.q.out +++ b/ql/src/test/results/clientpositive/nullability_transitive_inference.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table emps ( +PREHOOK: query: create table emps_n5 ( empid int, deptno int, name varchar(256), @@ -7,8 +7,8 @@ PREHOOK: query: create table emps ( stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@emps -POSTHOOK: query: create table emps ( +PREHOOK: Output: default@emps_n5 +POSTHOOK: query: create table emps_n5 ( empid int, deptno int, name varchar(256), @@ -17,90 +17,90 @@ POSTHOOK: query: create table emps ( stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@emps -PREHOOK: query: insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +POSTHOOK: Output: default@emps_n5 +PREHOOK: query: insert into emps_n5 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@emps -POSTHOOK: query: insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), +PREHOOK: Output: default@emps_n5 
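The tt_n1 hunks check that untyped NULL literals are coerced to the target column types (int, string) on insert and read back as NULL, with lineage recorded as EXPRESSION []:

CREATE TABLE tt_n1 (a int, b string);
INSERT OVERWRITE TABLE tt_n1 SELECT null, null FROM temp_null;
SELECT * FROM tt_n1;   -- NULL  NULL, one row per row of temp_null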
+POSTHOOK: query: insert into emps_n5 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500), (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@emps -POSTHOOK: Lineage: emps.commission SCRIPT [] -POSTHOOK: Lineage: emps.deptno SCRIPT [] -POSTHOOK: Lineage: emps.empid SCRIPT [] -POSTHOOK: Lineage: emps.name SCRIPT [] -POSTHOOK: Lineage: emps.salary SCRIPT [] -PREHOOK: query: analyze table emps compute statistics for columns +POSTHOOK: Output: default@emps_n5 +POSTHOOK: Lineage: emps_n5.commission SCRIPT [] +POSTHOOK: Lineage: emps_n5.deptno SCRIPT [] +POSTHOOK: Lineage: emps_n5.empid SCRIPT [] +POSTHOOK: Lineage: emps_n5.name SCRIPT [] +POSTHOOK: Lineage: emps_n5.salary SCRIPT [] +PREHOOK: query: analyze table emps_n5 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@emps -PREHOOK: Output: default@emps +PREHOOK: Input: default@emps_n5 +PREHOOK: Output: default@emps_n5 #### A masked pattern was here #### -POSTHOOK: query: analyze table emps compute statistics for columns +POSTHOOK: query: analyze table emps_n5 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@emps -POSTHOOK: Output: default@emps +POSTHOOK: Input: default@emps_n5 +POSTHOOK: Output: default@emps_n5 #### A masked pattern was here #### -PREHOOK: query: create table depts ( +PREHOOK: query: create table depts_n4 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@depts -POSTHOOK: query: create table depts ( +PREHOOK: Output: default@depts_n4 +POSTHOOK: query: create table depts_n4 ( deptno int, name varchar(256), locationid int) stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@depts -PREHOOK: query: insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20) +POSTHOOK: Output: default@depts_n4 +PREHOOK: query: insert into depts_n4 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@depts -POSTHOOK: query: insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20) +PREHOOK: Output: default@depts_n4 +POSTHOOK: query: insert into depts_n4 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@depts -POSTHOOK: Lineage: depts.deptno SCRIPT [] -POSTHOOK: Lineage: depts.locationid SCRIPT [] -POSTHOOK: Lineage: depts.name SCRIPT [] -PREHOOK: query: analyze table depts compute statistics for columns +POSTHOOK: Output: default@depts_n4 +POSTHOOK: Lineage: depts_n4.deptno SCRIPT [] +POSTHOOK: Lineage: depts_n4.locationid SCRIPT [] +POSTHOOK: Lineage: depts_n4.name SCRIPT [] +PREHOOK: query: analyze table depts_n4 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@depts -PREHOOK: Output: default@depts +PREHOOK: Input: default@depts_n4 +PREHOOK: Output: default@depts_n4 #### A masked pattern was here #### -POSTHOOK: query: analyze table depts compute statistics for columns +POSTHOOK: query: analyze table depts_n4 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: 
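The ANALYZE steps above are what let the later plans report "Basic stats: COMPLETE Column stats: COMPLETE"; without column statistics the optimizer would generally fall back to "Column stats: NONE" and cruder row estimates:

ANALYZE TABLE emps_n5 COMPUTE STATISTICS FOR COLUMNS;
ANALYZE TABLE depts_n4 COMPUTE STATISTICS FOR COLUMNS;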
default@depts -POSTHOOK: Output: default@depts +POSTHOOK: Input: default@depts_n4 +POSTHOOK: Output: default@depts_n4 #### A masked pattern was here #### -PREHOOK: query: alter table emps add constraint pk1 primary key (empid) disable novalidate rely +PREHOOK: query: alter table emps_n5 add constraint pk1 primary key (empid) disable novalidate rely PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -POSTHOOK: query: alter table emps add constraint pk1 primary key (empid) disable novalidate rely +POSTHOOK: query: alter table emps_n5 add constraint pk1 primary key (empid) disable novalidate rely POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT -PREHOOK: query: alter table depts add constraint pk2 primary key (deptno) disable novalidate rely +PREHOOK: query: alter table depts_n4 add constraint pk2 primary key (deptno) disable novalidate rely PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -POSTHOOK: query: alter table depts add constraint pk2 primary key (deptno) disable novalidate rely +POSTHOOK: query: alter table depts_n4 add constraint pk2 primary key (deptno) disable novalidate rely POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT -PREHOOK: query: alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely +PREHOOK: query: alter table emps_n5 add constraint fk1 foreign key (deptno) references depts_n4(deptno) disable novalidate rely PREHOOK: type: ALTERTABLE_ADDCONSTRAINT -POSTHOOK: query: alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely +POSTHOOK: query: alter table emps_n5 add constraint fk1 foreign key (deptno) references depts_n4(deptno) disable novalidate rely POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT PREHOOK: query: explain -select empid from emps -join depts using (deptno) where depts.deptno >= 20 -group by empid, depts.deptno +select empid from emps_n5 +join depts_n4 using (deptno) where depts_n4.deptno >= 20 +group by empid, depts_n4.deptno PREHOOK: type: QUERY POSTHOOK: query: explain -select empid from emps -join depts using (deptno) where depts.deptno >= 20 -group by empid, depts.deptno +select empid from emps_n5 +join depts_n4 using (deptno) where depts_n4.deptno >= 20 +group by empid, depts_n4.deptno POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -111,7 +111,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: emps + alias: emps_n5 Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (deptno >= 20) (type: boolean) @@ -127,7 +127,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int) TableScan - alias: depts + alias: depts_n4 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (deptno >= 20) (type: boolean) @@ -164,18 +164,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select empid from emps -join depts using (deptno) where depts.deptno >= 20 -group by empid, depts.deptno +PREHOOK: query: select empid from emps_n5 +join depts_n4 using (deptno) where depts_n4.deptno >= 20 +group by empid, depts_n4.deptno PREHOOK: type: QUERY -PREHOOK: Input: default@depts -PREHOOK: Input: default@emps +PREHOOK: Input: default@depts_n4 +PREHOOK: Input: default@emps_n5 #### A masked pattern was here #### -POSTHOOK: query: select empid from emps -join depts using (deptno) where depts.deptno >= 20 -group by empid, depts.deptno +POSTHOOK: query: select empid from emps_n5 +join depts_n4 
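DISABLE NOVALIDATE RELY means the constraints are neither enforced nor checked against existing data, but RELY licenses the optimizer to trust them. Combined with the deptno >= 20 filter, the PK/FK pair enables the transitive inference this test is named for, visible above as the (deptno >= 20) predicate appearing under both TableScans, not just the depts_n4 side:

ALTER TABLE emps_n5 ADD CONSTRAINT fk1 FOREIGN KEY (deptno)
  REFERENCES depts_n4(deptno) DISABLE NOVALIDATE RELY;
EXPLAIN SELECT empid FROM emps_n5
JOIN depts_n4 USING (deptno) WHERE depts_n4.deptno >= 20
GROUP BY empid, depts_n4.deptno;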
using (deptno) where depts_n4.deptno >= 20 +group by empid, depts_n4.deptno POSTHOOK: type: QUERY -POSTHOOK: Input: default@depts -POSTHOOK: Input: default@emps +POSTHOOK: Input: default@depts_n4 +POSTHOOK: Input: default@emps_n5 #### A masked pattern was here #### 200 diff --git a/ql/src/test/results/clientpositive/nullformatCTAS.q.out b/ql/src/test/results/clientpositive/nullformatCTAS.q.out index 76f67233d3..b6b51d5e54 100644 --- a/ql/src/test/results/clientpositive/nullformatCTAS.q.out +++ b/ql/src/test/results/clientpositive/nullformatCTAS.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: DROP TABLE IF EXISTS base_tab +PREHOOK: query: DROP TABLE IF EXISTS base_tab_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS base_tab +POSTHOOK: query: DROP TABLE IF EXISTS base_tab_n2 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE base_tab_n2(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@base_tab -POSTHOOK: query: CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE +PREHOOK: Output: default@base_tab_n2 +POSTHOOK: query: CREATE TABLE base_tab_n2(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@base_tab -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab +POSTHOOK: Output: default@base_tab_n2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@base_tab -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab +PREHOOK: Output: default@base_tab_n2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@base_tab -PREHOOK: query: DESCRIBE EXTENDED base_tab +POSTHOOK: Output: default@base_tab_n2 +PREHOOK: query: DESCRIBE EXTENDED base_tab_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@base_tab -POSTHOOK: query: DESCRIBE EXTENDED base_tab +PREHOOK: Input: default@base_tab_n2 +POSTHOOK: query: DESCRIBE EXTENDED base_tab_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@base_tab +POSTHOOK: Input: default@base_tab_n2 a string b string c string @@ -35,10 +35,10 @@ PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE IF EXISTS null_tab3 POSTHOOK: type: DROPTABLE PREHOOK: query: EXPLAIN CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' - AS SELECT a, b FROM base_tab + AS SELECT a, b FROM base_tab_n2 PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: EXPLAIN CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' - AS SELECT a, b FROM base_tab + AS SELECT a, b FROM base_tab_n2 POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: Stage-1 is a root stage @@ -56,7 +56,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: base_tab + alias: base_tab_n2 Statistics: Num rows: 1 Data size: 1300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) @@ -131,19 +131,19 @@ STAGE PLANS: #### A masked pattern was here #### PREHOOK: query: CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' - AS SELECT a, b FROM base_tab + AS SELECT a, b FROM base_tab_n2 
PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@base_tab +PREHOOK: Input: default@base_tab_n2 PREHOOK: Output: database:default PREHOOK: Output: default@null_tab3 POSTHOOK: query: CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' - AS SELECT a, b FROM base_tab + AS SELECT a, b FROM base_tab_n2 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@base_tab +POSTHOOK: Input: default@base_tab_n2 POSTHOOK: Output: database:default POSTHOOK: Output: default@null_tab3 -POSTHOOK: Lineage: null_tab3.a SIMPLE [(base_tab)base_tab.FieldSchema(name:a, type:string, comment:null), ] -POSTHOOK: Lineage: null_tab3.b SIMPLE [(base_tab)base_tab.FieldSchema(name:b, type:string, comment:null), ] +POSTHOOK: Lineage: null_tab3.a SIMPLE [(base_tab_n2)base_tab_n2.FieldSchema(name:a, type:string, comment:null), ] +POSTHOOK: Lineage: null_tab3.b SIMPLE [(base_tab_n2)base_tab_n2.FieldSchema(name:b, type:string, comment:null), ] PREHOOK: query: DESCRIBE EXTENDED null_tab3 PREHOOK: type: DESCTABLE PREHOOK: Input: default@null_tab3 @@ -238,11 +238,11 @@ POSTHOOK: query: DROP TABLE null_tab3 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@null_tab3 POSTHOOK: Output: default@null_tab3 -PREHOOK: query: DROP TABLE base_tab +PREHOOK: query: DROP TABLE base_tab_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@base_tab -PREHOOK: Output: default@base_tab -POSTHOOK: query: DROP TABLE base_tab +PREHOOK: Input: default@base_tab_n2 +PREHOOK: Output: default@base_tab_n2 +POSTHOOK: query: DROP TABLE base_tab_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@base_tab -POSTHOOK: Output: default@base_tab +POSTHOOK: Input: default@base_tab_n2 +POSTHOOK: Output: default@base_tab_n2 diff --git a/ql/src/test/results/clientpositive/nullformatdir.q.out b/ql/src/test/results/clientpositive/nullformatdir.q.out index 5e6c9869a8..317811020f 100644 --- a/ql/src/test/results/clientpositive/nullformatdir.q.out +++ b/ql/src/test/results/clientpositive/nullformatdir.q.out @@ -1,42 +1,42 @@ -PREHOOK: query: DROP TABLE IF EXISTS base_tab +PREHOOK: query: DROP TABLE IF EXISTS base_tab_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS base_tab +POSTHOOK: query: DROP TABLE IF EXISTS base_tab_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE base_tab_n1(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@base_tab -POSTHOOK: query: CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE +PREHOOK: Output: default@base_tab_n1 +POSTHOOK: query: CREATE TABLE base_tab_n1(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@base_tab -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab +POSTHOOK: Output: default@base_tab_n1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@base_tab -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab +PREHOOK: Output: default@base_tab_n1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@base_tab -PREHOOK: query: DESCRIBE EXTENDED 
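nullformatCTAS and nullformatdir cover the custom null sentinel: NULL DEFINED AS 'fooNull' makes NULLs serialize as that literal token in the text data instead of the default \N, and the CTAS carries the serde property onto the new table:

CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull'
AS SELECT a, b FROM base_tab_n2;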
base_tab +POSTHOOK: Output: default@base_tab_n1 +PREHOOK: query: DESCRIBE EXTENDED base_tab_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@base_tab -POSTHOOK: query: DESCRIBE EXTENDED base_tab +PREHOOK: Input: default@base_tab_n1 +POSTHOOK: query: DESCRIBE EXTENDED base_tab_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@base_tab +POSTHOOK: Input: default@base_tab_n1 a string b string c string d string #### A masked pattern was here #### - ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab + ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@base_tab +PREHOOK: Input: default@base_tab_n1 #### A masked pattern was here #### - ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab + ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@base_tab +POSTHOOK: Input: default@base_tab_n1 #### A masked pattern was here #### 1.01 1.01 @@ -87,11 +87,11 @@ NULL NULL 1.0 1 1.0 1 #### A masked pattern was here #### -PREHOOK: query: DROP TABLE base_tab +PREHOOK: query: DROP TABLE base_tab_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@base_tab -PREHOOK: Output: default@base_tab -POSTHOOK: query: DROP TABLE base_tab +PREHOOK: Input: default@base_tab_n1 +PREHOOK: Output: default@base_tab_n1 +POSTHOOK: query: DROP TABLE base_tab_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@base_tab -POSTHOOK: Output: default@base_tab +POSTHOOK: Input: default@base_tab_n1 +POSTHOOK: Output: default@base_tab_n1 diff --git a/ql/src/test/results/clientpositive/nullgroup3.q.out b/ql/src/test/results/clientpositive/nullgroup3.q.out index a98ea4becd..c17d0cc71d 100644 --- a/ql/src/test/results/clientpositive/nullgroup3.q.out +++ b/ql/src/test/results/clientpositive/nullgroup3.q.out @@ -1,34 +1,34 @@ -PREHOOK: query: CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE tstparttbl_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tstparttbl -POSTHOOK: query: CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE +PREHOOK: Output: default@tstparttbl_n0 +POSTHOOK: query: CREATE TABLE tstparttbl_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstparttbl -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09') +POSTHOOK: Output: default@tstparttbl_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@tstparttbl -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09') +PREHOOK: Output: default@tstparttbl_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@tstparttbl -POSTHOOK: Output: default@tstparttbl@ds=2008-04-09 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08') +POSTHOOK: Output: default@tstparttbl_n0 
+POSTHOOK: Output: default@tstparttbl_n0@ds=2008-04-09
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tstparttbl
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08')
+PREHOOK: Output: default@tstparttbl_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tstparttbl
-POSTHOOK: Output: default@tstparttbl@ds=2008-04-08
+POSTHOOK: Output: default@tstparttbl_n0
+POSTHOOK: Output: default@tstparttbl_n0@ds=2008-04-08
 PREHOOK: query: explain
-select count(1) from tstparttbl
+select count(1) from tstparttbl_n0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select count(1) from tstparttbl
+select count(1) from tstparttbl_n0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -39,7 +39,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: tstparttbl
+            alias: tstparttbl_n0
            Statistics: Num rows: 120 Data size: 58120 Basic stats: PARTIAL Column stats: NONE
            Select Operator
              Statistics: Num rows: 120 Data size: 58120 Basic stats: PARTIAL Column stats: NONE
@@ -73,50 +73,50 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: select count(1) from tstparttbl
+PREHOOK: query: select count(1) from tstparttbl_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tstparttbl
-PREHOOK: Input: default@tstparttbl@ds=2008-04-08
-PREHOOK: Input: default@tstparttbl@ds=2008-04-09
+PREHOOK: Input: default@tstparttbl_n0
+PREHOOK: Input: default@tstparttbl_n0@ds=2008-04-08
+PREHOOK: Input: default@tstparttbl_n0@ds=2008-04-09
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from tstparttbl
+POSTHOOK: query: select count(1) from tstparttbl_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstparttbl
-POSTHOOK: Input: default@tstparttbl@ds=2008-04-08
-POSTHOOK: Input: default@tstparttbl@ds=2008-04-09
+POSTHOOK: Input: default@tstparttbl_n0
+POSTHOOK: Input: default@tstparttbl_n0@ds=2008-04-08
+POSTHOOK: Input: default@tstparttbl_n0@ds=2008-04-09
 #### A masked pattern was here ####
 500
-PREHOOK: query: CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE tstparttbl2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tstparttbl2
-POSTHOOK: query: CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
+PREHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: query: CREATE TABLE tstparttbl2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tstparttbl2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09')
+POSTHOOK: Output: default@tstparttbl2_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-09')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tstparttbl2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09')
+PREHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-09')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tstparttbl2
-POSTHOOK: Output: default@tstparttbl2@ds=2008-04-09
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08')
+POSTHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: Output: default@tstparttbl2_n0@ds=2008-04-09
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tstparttbl2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08')
+PREHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tstparttbl2
-POSTHOOK: Output: default@tstparttbl2@ds=2008-04-08
+POSTHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: Output: default@tstparttbl2_n0@ds=2008-04-08
 PREHOOK: query: explain
-select count(1) from tstparttbl2
+select count(1) from tstparttbl2_n0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select count(1) from tstparttbl2
+select count(1) from tstparttbl2_n0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -127,7 +127,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: tstparttbl2
+            alias: tstparttbl2_n0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Select Operator
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -161,58 +161,58 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: select count(1) from tstparttbl2
+PREHOOK: query: select count(1) from tstparttbl2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tstparttbl2
-PREHOOK: Input: default@tstparttbl2@ds=2008-04-08
-PREHOOK: Input: default@tstparttbl2@ds=2008-04-09
+PREHOOK: Input: default@tstparttbl2_n0
+PREHOOK: Input: default@tstparttbl2_n0@ds=2008-04-08
+PREHOOK: Input: default@tstparttbl2_n0@ds=2008-04-09
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from tstparttbl2
+POSTHOOK: query: select count(1) from tstparttbl2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstparttbl2
-POSTHOOK: Input: default@tstparttbl2@ds=2008-04-08
-POSTHOOK: Input: default@tstparttbl2@ds=2008-04-09
+POSTHOOK: Input: default@tstparttbl2_n0
+POSTHOOK: Input: default@tstparttbl2_n0@ds=2008-04-08
+POSTHOOK: Input: default@tstparttbl2_n0@ds=2008-04-09
 #### A masked pattern was here ####
 0
-PREHOOK: query: DROP TABLE tstparttbl
+PREHOOK: query: DROP TABLE tstparttbl_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tstparttbl
-PREHOOK: Output: default@tstparttbl
-POSTHOOK: query: DROP TABLE tstparttbl
+PREHOOK: Input: default@tstparttbl_n0
+PREHOOK: Output: default@tstparttbl_n0
+POSTHOOK: query: DROP TABLE tstparttbl_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tstparttbl
-POSTHOOK: Output: default@tstparttbl
+POSTHOOK: Input: default@tstparttbl_n0
+POSTHOOK: Output: default@tstparttbl_n0
-PREHOOK: query: CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE tstparttbl_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tstparttbl
-POSTHOOK: query: CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
+PREHOOK: Output: default@tstparttbl_n0
+POSTHOOK: query: CREATE TABLE tstparttbl_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tstparttbl
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09')
+POSTHOOK: Output: default@tstparttbl_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-09')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tstparttbl
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09')
+PREHOOK: Output: default@tstparttbl_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-09')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tstparttbl
-POSTHOOK: Output: default@tstparttbl@ds=2008-04-09
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08')
+POSTHOOK: Output: default@tstparttbl_n0
+POSTHOOK: Output: default@tstparttbl_n0@ds=2008-04-09
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tstparttbl
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08')
+PREHOOK: Output: default@tstparttbl_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tstparttbl
-POSTHOOK: Output: default@tstparttbl@ds=2008-04-08
+POSTHOOK: Output: default@tstparttbl_n0
+POSTHOOK: Output: default@tstparttbl_n0@ds=2008-04-08
 PREHOOK: query: explain
-select count(1) from tstparttbl
+select count(1) from tstparttbl_n0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select count(1) from tstparttbl
+select count(1) from tstparttbl_n0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -223,7 +223,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: tstparttbl
+            alias: tstparttbl_n0
            Statistics: Num rows: 120 Data size: 58120 Basic stats: PARTIAL Column stats: NONE
            Select Operator
              Statistics: Num rows: 120 Data size: 58120 Basic stats: PARTIAL Column stats: NONE
@@ -257,58 +257,58 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: select count(1) from tstparttbl
+PREHOOK: query: select count(1) from tstparttbl_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tstparttbl
-PREHOOK: Input: default@tstparttbl@ds=2008-04-08
-PREHOOK: Input: default@tstparttbl@ds=2008-04-09
+PREHOOK: Input: default@tstparttbl_n0
+PREHOOK: Input: default@tstparttbl_n0@ds=2008-04-08
+PREHOOK: Input: default@tstparttbl_n0@ds=2008-04-09
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from tstparttbl
+POSTHOOK: query: select count(1) from tstparttbl_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstparttbl
-POSTHOOK: Input: default@tstparttbl@ds=2008-04-08
-POSTHOOK: Input: default@tstparttbl@ds=2008-04-09
+POSTHOOK: Input: default@tstparttbl_n0
+POSTHOOK: Input: default@tstparttbl_n0@ds=2008-04-08
+POSTHOOK: Input: default@tstparttbl_n0@ds=2008-04-09
 #### A masked pattern was here ####
 500
-PREHOOK: query: DROP TABLE tstparttbl2
+PREHOOK: query: DROP TABLE tstparttbl2_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tstparttbl2
-PREHOOK: Output: default@tstparttbl2
-POSTHOOK: query: DROP TABLE tstparttbl2
+PREHOOK: Input: default@tstparttbl2_n0
+PREHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: query: DROP TABLE tstparttbl2_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tstparttbl2
-POSTHOOK: Output: default@tstparttbl2
-PREHOOK: query: CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
+POSTHOOK: Input: default@tstparttbl2_n0
+POSTHOOK: Output: default@tstparttbl2_n0
+PREHOOK: query: CREATE TABLE tstparttbl2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tstparttbl2
-POSTHOOK: query: CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
+PREHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: query: CREATE TABLE tstparttbl2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tstparttbl2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09')
+POSTHOOK: Output: default@tstparttbl2_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-09')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tstparttbl2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09')
+PREHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-09')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tstparttbl2
-POSTHOOK: Output: default@tstparttbl2@ds=2008-04-09
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08')
+POSTHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: Output: default@tstparttbl2_n0@ds=2008-04-09
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tstparttbl2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08')
+PREHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tstparttbl2
-POSTHOOK: Output: default@tstparttbl2@ds=2008-04-08
+POSTHOOK: Output: default@tstparttbl2_n0
+POSTHOOK: Output: default@tstparttbl2_n0@ds=2008-04-08
 PREHOOK: query: explain
-select count(1) from tstparttbl2
+select count(1) from tstparttbl2_n0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select count(1) from tstparttbl2
+select count(1) from tstparttbl2_n0
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -319,7 +319,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: tstparttbl2
+            alias: tstparttbl2_n0
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Select Operator
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -353,16 +353,16 @@ STAGE PLANS:
      Processor Tree:
        ListSink

-PREHOOK: query: select count(1) from tstparttbl2
+PREHOOK: query: select count(1) from tstparttbl2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tstparttbl2
-PREHOOK: Input: default@tstparttbl2@ds=2008-04-08
-PREHOOK: Input: default@tstparttbl2@ds=2008-04-09
+PREHOOK: Input: default@tstparttbl2_n0
+PREHOOK: Input: default@tstparttbl2_n0@ds=2008-04-08
+PREHOOK: Input: default@tstparttbl2_n0@ds=2008-04-09
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from tstparttbl2
+POSTHOOK: query: select count(1) from tstparttbl2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstparttbl2
-POSTHOOK: Input: default@tstparttbl2@ds=2008-04-08
-POSTHOOK: Input: default@tstparttbl2@ds=2008-04-09
+POSTHOOK: Input: default@tstparttbl2_n0
+POSTHOOK: Input: default@tstparttbl2_n0@ds=2008-04-08
+POSTHOOK: Input: default@tstparttbl2_n0@ds=2008-04-09
 #### A masked pattern was here ####
 0
diff --git a/ql/src/test/results/clientpositive/optimize_filter_literal.q.out b/ql/src/test/results/clientpositive/optimize_filter_literal.q.out
index bb4f27d832..3e8ca3a7f9 100644
--- a/ql/src/test/results/clientpositive/optimize_filter_literal.q.out
+++ b/ql/src/test/results/clientpositive/optimize_filter_literal.q.out
@@ -1,155 +1,155 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_n21(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@srcbucket_mapjoin_n21
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_n21(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin
-PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE
+POSTHOOK: Output: default@srcbucket_mapjoin_n21
+PREHOOK: query: CREATE TABLE tab_part_n13 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tab_part
-POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE
+PREHOOK: Output: default@tab_part_n13
+POSTHOOK: query: CREATE TABLE tab_part_n13 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tab_part
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: Output: default@tab_part_n13
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n22 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: Output: default@srcbucket_mapjoin_part_n22
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_n22 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n22
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n21 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_n21
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n21 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin
-POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_n21
+POSTHOOK: Output: default@srcbucket_mapjoin_n21@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n21 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_n21@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n21 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_n21@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n22
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n22
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08')
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: Output: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08')
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+PREHOOK: query: insert overwrite table tab_part_n13 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n22
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part
-PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-PREHOOK: Output: default@tab_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part_n22
+PREHOOK: Input: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+PREHOOK: Output: default@tab_part_n13@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_part_n13 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n22
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part
-POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
-POSTHOOK: Output: default@tab_part@ds=2008-04-08
-POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: analyze table tab_part partition (ds='2008-04-08') compute statistics for columns
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n22
+POSTHOOK: Input: default@srcbucket_mapjoin_part_n22@ds=2008-04-08
+POSTHOOK: Output: default@tab_part_n13@ds=2008-04-08
+POSTHOOK: Lineage: tab_part_n13 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part_n22)srcbucket_mapjoin_part_n22.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part_n13 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part_n22)srcbucket_mapjoin_part_n22.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: analyze table tab_part_n13 partition (ds='2008-04-08') compute statistics for columns
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
-PREHOOK: Output: default@tab_part
-PREHOOK: Output: default@tab_part@ds=2008-04-08
+PREHOOK: Input: default@tab_part_n13
+PREHOOK: Input: default@tab_part_n13@ds=2008-04-08
+PREHOOK: Output: default@tab_part_n13
+PREHOOK: Output: default@tab_part_n13@ds=2008-04-08
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table tab_part partition (ds='2008-04-08') compute statistics for columns
+POSTHOOK: query: analyze table tab_part_n13 partition (ds='2008-04-08') compute statistics for columns
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
-POSTHOOK: Output: default@tab_part
-POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Input: default@tab_part_n13
+POSTHOOK: Input: default@tab_part_n13@ds=2008-04-08
+POSTHOOK: Output: default@tab_part_n13
+POSTHOOK: Output: default@tab_part_n13@ds=2008-04-08
 #### A masked pattern was here ####
-PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE
+PREHOOK: query: CREATE TABLE tab_n14(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tab
-POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE
+PREHOOK: Output: default@tab_n14
+POSTHOOK: query: CREATE TABLE tab_n14(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tab
-PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin
+POSTHOOK: Output: default@tab_n14
+PREHOOK: query: insert overwrite table tab_n14 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n21
 PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin
-PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
-PREHOOK: Output: default@tab@ds=2008-04-08
-POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_n21
+PREHOOK: Input: default@srcbucket_mapjoin_n21@ds=2008-04-08
+PREHOOK: Output: default@tab_n14@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_n14 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n21
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin
-POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
-POSTHOOK: Output: default@tab@ds=2008-04-08
-POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: analyze table tab partition (ds='2008-04-08') compute statistics for columns
+POSTHOOK: Input: default@srcbucket_mapjoin_n21
+POSTHOOK: Input: default@srcbucket_mapjoin_n21@ds=2008-04-08
+POSTHOOK: Output: default@tab_n14@ds=2008-04-08
+POSTHOOK: Lineage: tab_n14 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n21)srcbucket_mapjoin_n21.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_n14 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n21)srcbucket_mapjoin_n21.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: analyze table tab_n14 partition (ds='2008-04-08') compute statistics for columns
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Output: default@tab
-PREHOOK: Output: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_n14
+PREHOOK: Input: default@tab_n14@ds=2008-04-08
+PREHOOK: Output: default@tab_n14
+PREHOOK: Output: default@tab_n14@ds=2008-04-08
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table tab partition (ds='2008-04-08') compute statistics for columns
+POSTHOOK: query: analyze table tab_n14 partition (ds='2008-04-08') compute statistics for columns
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Output: default@tab
-POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_n14
+POSTHOOK: Input: default@tab_n14@ds=2008-04-08
+POSTHOOK: Output: default@tab_n14
+POSTHOOK: Output: default@tab_n14@ds=2008-04-08
 #### A masked pattern was here ####
 Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n14 where tab_n14.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n13 where tab_part_n13.key = 98)b join tab_part_n13 c on a.key = b.key and b.key = c.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_n14
+PREHOOK: Input: default@tab_part_n13
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n14 where tab_n14.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+(select * from tab_part_n13 where tab_part_n13.key = 98)b join tab_part_n13 c on a.key = b.key and b.key = c.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_n14
+POSTHOOK: Input: default@tab_part_n13
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/orc_dictionary_threshold.q.out b/ql/src/test/results/clientpositive/orc_dictionary_threshold.q.out
index bde2b5357d..279cc76645 100644
--- a/ql/src/test/results/clientpositive/orc_dictionary_threshold.q.out
+++ b/ql/src/test/results/clientpositive/orc_dictionary_threshold.q.out
@@ -1,33 +1,33 @@
-PREHOOK: query: CREATE TABLE test_orc (key STRING)
+PREHOOK: query: CREATE TABLE test_orc_n5 (key STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: CREATE TABLE test_orc (key STRING)
+PREHOOK: Output: default@test_orc_n5
+POSTHOOK: query: CREATE TABLE test_orc_n5 (key STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT key FROM src TABLESAMPLE (10 ROWS)
+POSTHOOK: Output: default@test_orc_n5
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc_n5 SELECT key FROM src TABLESAMPLE (10 ROWS)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT key FROM src TABLESAMPLE (10 ROWS)
+PREHOOK: Output: default@test_orc_n5
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc_n5 SELECT key FROM src TABLESAMPLE (10 ROWS)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_orc
-POSTHOOK: Lineage: test_orc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM test_orc
+POSTHOOK: Output: default@test_orc_n5
+POSTHOOK: Lineage: test_orc_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM test_orc_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_orc
+PREHOOK: Input: default@test_orc_n5
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_orc
+POSTHOOK: query: SELECT * FROM test_orc_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_orc
+POSTHOOK: Input: default@test_orc_n5
 #### A masked pattern was here ####
 238
 86
@@ -39,14 +39,14 @@ POSTHOOK: Input: default@test_orc
 278
 98
 484
-PREHOOK: query: ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.stripe.size' = '1')
+PREHOOK: query: ALTER TABLE test_orc_n5 SET SERDEPROPERTIES ('orc.stripe.size' = '1')
 PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
-PREHOOK: Input: default@test_orc
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.stripe.size' = '1')
+PREHOOK: Input: default@test_orc_n5
+PREHOOK: Output: default@test_orc_n5
+POSTHOOK: query: ALTER TABLE test_orc_n5 SET SERDEPROPERTIES ('orc.stripe.size' = '1')
 POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES
-POSTHOOK: Input: default@test_orc
-POSTHOOK: Output: default@test_orc
+POSTHOOK: Input: default@test_orc_n5
+POSTHOOK: Output: default@test_orc_n5
 PREHOOK: query: CREATE TABLE src_thousand(key STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -65,7 +65,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1kv2.cogroup.txt'
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@src_thousand
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc_n5
 SELECT key FROM (
 SELECT CONCAT("a", key) AS key FROM src_thousand
 UNION ALL
@@ -91,8 +91,8 @@ SELECT CONCAT("k", key) AS key FROM src_thousand
 ) a ORDER BY key LIMIT 11000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_thousand
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc
+PREHOOK: Output: default@test_orc_n5
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc_n5
 SELECT key FROM (
 SELECT CONCAT("a", key) AS key FROM src_thousand
 UNION ALL
@@ -118,14 +118,14 @@ SELECT CONCAT("k", key) AS key FROM src_thousand
 ) a ORDER BY key LIMIT 11000
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_thousand
-POSTHOOK: Output: default@test_orc
-POSTHOOK: Lineage: test_orc.key EXPRESSION [(src_thousand)src_thousand.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT SUM(HASH(key)) FROM test_orc
+POSTHOOK: Output: default@test_orc_n5
+POSTHOOK: Lineage: test_orc_n5.key EXPRESSION [(src_thousand)src_thousand.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT SUM(HASH(key)) FROM test_orc_n5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_orc
+PREHOOK: Input: default@test_orc_n5
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT SUM(HASH(key)) FROM test_orc
+POSTHOOK: query: SELECT SUM(HASH(key)) FROM test_orc_n5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_orc
+POSTHOOK: Input: default@test_orc_n5
 #### A masked pattern was here ####
 1082202951192
diff --git a/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out b/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out
index 7e8347ac49..90ca247107 100644
--- a/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out
+++ b/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out
@@ -1,57 +1,57 @@
-PREHOOK: query: CREATE TABLE test_orc (key STRING)
+PREHOOK: query: CREATE TABLE test_orc_n0 (key STRING)
 PARTITIONED BY (part STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: CREATE TABLE test_orc (key STRING)
+PREHOOK: Output: default@test_orc_n0
+POSTHOOK: query: CREATE TABLE test_orc_n0 (key STRING)
 PARTITIONED BY (part STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows)
+POSTHOOK: Output: default@test_orc_n0
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc_n0 PARTITION (part = '1') SELECT key FROM src tablesample (5 rows)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_orc@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows)
+PREHOOK: Output: default@test_orc_n0@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc_n0 PARTITION (part = '1') SELECT key FROM src tablesample (5 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_orc@part=1
-POSTHOOK: Lineage: test_orc PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER TABLE test_orc ADD COLUMNS (cnt INT)
+POSTHOOK: Output: default@test_orc_n0@part=1
+POSTHOOK: Lineage: test_orc_n0 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: ALTER TABLE test_orc_n0 ADD COLUMNS (cnt INT)
 PREHOOK: type: ALTERTABLE_ADDCOLS
-PREHOOK: Input: default@test_orc
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: ALTER TABLE test_orc ADD COLUMNS (cnt INT)
+PREHOOK: Input: default@test_orc_n0
+PREHOOK: Output: default@test_orc_n0
+POSTHOOK: query: ALTER TABLE test_orc_n0 ADD COLUMNS (cnt INT)
 POSTHOOK: type: ALTERTABLE_ADDCOLS
-POSTHOOK: Input: default@test_orc
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5
+POSTHOOK: Input: default@test_orc_n0
+POSTHOOK: Output: default@test_orc_n0
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc_n0 PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_orc@part=2
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5
+PREHOOK: Output: default@test_orc_n0@part=2
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc_n0 PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_orc@part=2
-POSTHOOK: Lineage: test_orc PARTITION(part=2).cnt EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: test_orc PARTITION(part=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM test_orc
+POSTHOOK: Output: default@test_orc_n0@part=2
+POSTHOOK: Lineage: test_orc_n0 PARTITION(part=2).cnt EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: test_orc_n0 PARTITION(part=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM test_orc_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_orc
-PREHOOK: Input: default@test_orc@part=1
-PREHOOK: Input: default@test_orc@part=2
+PREHOOK: Input: default@test_orc_n0
+PREHOOK: Input: default@test_orc_n0@part=1
+PREHOOK: Input: default@test_orc_n0@part=2
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_orc
+POSTHOOK: query: SELECT * FROM test_orc_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_orc
-POSTHOOK: Input: default@test_orc@part=1
-POSTHOOK: Input: default@test_orc@part=2
+POSTHOOK: Input: default@test_orc_n0
+POSTHOOK: Input: default@test_orc_n0@part=1
+POSTHOOK: Input: default@test_orc_n0@part=2
 #### A masked pattern was here ####
 0	3	2
 10	1	2
diff --git a/ql/src/test/results/clientpositive/orc_diff_part_cols2.q.out b/ql/src/test/results/clientpositive/orc_diff_part_cols2.q.out
index 7900cb9b55..732deb35e3 100644
--- a/ql/src/test/results/clientpositive/orc_diff_part_cols2.q.out
+++ b/ql/src/test/results/clientpositive/orc_diff_part_cols2.q.out
@@ -1,37 +1,37 @@
-PREHOOK: query: CREATE TABLE test_orc (key STRING)
+PREHOOK: query: CREATE TABLE test_orc_n4 (key STRING)
 STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: CREATE TABLE test_orc (key STRING)
+PREHOOK: Output: default@test_orc_n4
+POSTHOOK: query: CREATE TABLE test_orc_n4 (key STRING)
 STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT key FROM src LIMIT 5
+POSTHOOK: Output: default@test_orc_n4
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc_n4 SELECT key FROM src LIMIT 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT key FROM src LIMIT 5
+PREHOOK: Output: default@test_orc_n4
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc_n4 SELECT key FROM src LIMIT 5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_orc
-POSTHOOK: Lineage: test_orc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER TABLE test_orc ADD COLUMNS (value STRING)
+POSTHOOK: Output: default@test_orc_n4
+POSTHOOK: Lineage: test_orc_n4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: ALTER TABLE test_orc_n4 ADD COLUMNS (value STRING)
 PREHOOK: type: ALTERTABLE_ADDCOLS
-PREHOOK: Input: default@test_orc
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: ALTER TABLE test_orc ADD COLUMNS (value STRING)
+PREHOOK: Input: default@test_orc_n4
+PREHOOK: Output: default@test_orc_n4
+POSTHOOK: query: ALTER TABLE test_orc_n4 ADD COLUMNS (value STRING)
 POSTHOOK: type: ALTERTABLE_ADDCOLS
-POSTHOOK: Input: default@test_orc
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: SELECT * FROM test_orc
+POSTHOOK: Input: default@test_orc_n4
+POSTHOOK: Output: default@test_orc_n4
+PREHOOK: query: SELECT * FROM test_orc_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_orc
+PREHOOK: Input: default@test_orc_n4
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_orc
+POSTHOOK: query: SELECT * FROM test_orc_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_orc
+POSTHOOK: Input: default@test_orc_n4
 #### A masked pattern was here ####
 165	NULL
 238	NULL
diff --git a/ql/src/test/results/clientpositive/orc_empty_files.q.out b/ql/src/test/results/clientpositive/orc_empty_files.q.out
index 78f2b5cbbc..88c4a380a4 100644
--- a/ql/src/test/results/clientpositive/orc_empty_files.q.out
+++ b/ql/src/test/results/clientpositive/orc_empty_files.q.out
@@ -1,35 +1,35 @@
-PREHOOK: query: CREATE TABLE test_orc (key STRING, cnt INT)
+PREHOOK: query: CREATE TABLE test_orc_n2 (key STRING, cnt INT)
 CLUSTERED BY (key) INTO 3 BUCKETS
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: CREATE TABLE test_orc (key STRING, cnt INT)
+PREHOOK: Output: default@test_orc_n2
+POSTHOOK: query: CREATE TABLE test_orc_n2 (key STRING, cnt INT)
 CLUSTERED BY (key) INTO 3 BUCKETS
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one
+POSTHOOK: Output: default@test_orc_n2
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc_n2 SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one
+PREHOOK: Output: default@test_orc_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc_n2 SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_orc
-POSTHOOK: Lineage: test_orc.cnt EXPRESSION []
-POSTHOOK: Lineage: test_orc.key EXPRESSION []
-PREHOOK: query: SELECT count(*) FROM test_orc
+POSTHOOK: Output: default@test_orc_n2
+POSTHOOK: Lineage: test_orc_n2.cnt EXPRESSION []
+POSTHOOK: Lineage: test_orc_n2.key EXPRESSION []
+PREHOOK: query: SELECT count(*) FROM test_orc_n2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_orc
+PREHOOK: Input: default@test_orc_n2
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(*) FROM test_orc
+POSTHOOK: query: SELECT count(*) FROM test_orc_n2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_orc
+POSTHOOK: Input: default@test_orc_n2
 #### A masked pattern was here ####
 1
diff --git a/ql/src/test/results/clientpositive/orc_empty_strings.q.out b/ql/src/test/results/clientpositive/orc_empty_strings.q.out
index 862836dfe9..5e9225b967 100644
--- a/ql/src/test/results/clientpositive/orc_empty_strings.q.out
+++ b/ql/src/test/results/clientpositive/orc_empty_strings.q.out
@@ -1,33 +1,33 @@
-PREHOOK: query: CREATE TABLE test_orc (key STRING)
+PREHOOK: query: CREATE TABLE test_orc_n3 (key STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: CREATE TABLE test_orc (key STRING)
+PREHOOK: Output: default@test_orc_n3
+POSTHOOK: query: CREATE TABLE test_orc_n3 (key STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT '' FROM src tablesample (10 rows)
+POSTHOOK: Output: default@test_orc_n3
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc_n3 SELECT '' FROM src tablesample (10 rows)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT '' FROM src tablesample (10 rows)
+PREHOOK: Output: default@test_orc_n3
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc_n3 SELECT '' FROM src tablesample (10 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_orc
-POSTHOOK: Lineage: test_orc.key SIMPLE []
-PREHOOK: query: SELECT * FROM test_orc
+POSTHOOK: Output: default@test_orc_n3
+POSTHOOK: Lineage: test_orc_n3.key SIMPLE []
+PREHOOK: query: SELECT * FROM test_orc_n3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_orc
+PREHOOK: Input: default@test_orc_n3
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_orc
+POSTHOOK: query: SELECT * FROM test_orc_n3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_orc
+POSTHOOK: Input: default@test_orc_n3
 #### A masked pattern was here ####
@@ -39,22 +39,22 @@ POSTHOOK: Input: default@test_orc
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows)
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc_n3 SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows)
+PREHOOK: Output: default@test_orc_n3
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc_n3 SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_orc
-POSTHOOK: Lineage: test_orc.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM test_orc
+POSTHOOK: Output: default@test_orc_n3
+POSTHOOK: Lineage: test_orc_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM test_orc_n3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_orc
+PREHOOK: Input: default@test_orc_n3
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_orc
+POSTHOOK: query: SELECT * FROM test_orc_n3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_orc
+POSTHOOK: Input: default@test_orc_n3
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/orc_ends_with_nulls.q.out b/ql/src/test/results/clientpositive/orc_ends_with_nulls.q.out
index 84e5252f0f..319e9bcdb0 100644
--- a/ql/src/test/results/clientpositive/orc_ends_with_nulls.q.out
+++ b/ql/src/test/results/clientpositive/orc_ends_with_nulls.q.out
@@ -20,31 +20,31 @@ POSTHOOK: query: ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.row.index.stride
 POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES
 POSTHOOK: Input: default@test_orc
 POSTHOOK: Output: default@test_orc
-PREHOOK: query: CREATE TABLE src_null(a STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE src_null_n0(a STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@src_null
-POSTHOOK: query: CREATE TABLE src_null(a STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@src_null_n0
+POSTHOOK: query: CREATE TABLE src_null_n0(a STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_null
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null
+POSTHOOK: Output: default@src_null_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@src_null
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null
+PREHOOK: Output: default@src_null_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@src_null
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null
+POSTHOOK: Output: default@src_null_n0
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src_null
+PREHOOK: Input: default@src_null_n0
 PREHOOK: Output: default@test_orc
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_null
+POSTHOOK: Input: default@src_null_n0
 POSTHOOK: Output: default@test_orc
-POSTHOOK: Lineage: test_orc.key SIMPLE [(src_null)src_null.FieldSchema(name:a, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc.key SIMPLE [(src_null_n0)src_null_n0.FieldSchema(name:a, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM test_orc LIMIT 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_orc
diff --git a/ql/src/test/results/clientpositive/orc_file_dump.q.out b/ql/src/test/results/clientpositive/orc_file_dump.q.out
index ce8932d7b8..2067145ce5 100644
--- a/ql/src/test/results/clientpositive/orc_file_dump.q.out
+++ b/ql/src/test/results/clientpositive/orc_file_dump.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE staging(t tinyint,
+PREHOOK: query: CREATE TABLE staging_n4(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -13,8 +13,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@staging
-POSTHOOK: query: CREATE TABLE staging(t tinyint,
+PREHOOK: Output: default@staging_n4
+POSTHOOK: query: CREATE TABLE staging_n4(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -29,16 +29,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+POSTHOOK: Output: default@staging_n4
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n4
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+PREHOOK: Output: default@staging_n4
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n4
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@staging
-PREHOOK: query: CREATE TABLE orc_ppd(t tinyint,
+POSTHOOK: Output: default@staging_n4
+PREHOOK: query: CREATE TABLE orc_ppd_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -52,8 +52,8 @@ PREHOOK: query: CREATE TABLE orc_ppd(t tinyint,
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_ppd
-POSTHOOK: query: CREATE TABLE orc_ppd(t tinyint,
+PREHOOK: Output: default@orc_ppd_n0
+POSTHOOK: query: CREATE TABLE orc_ppd_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -67,29 +67,29 @@ POSTHOOK: query: CREATE TABLE orc_ppd(t tinyint,
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_ppd
-PREHOOK: query: insert overwrite table orc_ppd select * from staging
+POSTHOOK: Output: default@orc_ppd_n0
+PREHOOK: query: insert overwrite table orc_ppd_n0 select * from staging_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@staging
-PREHOOK: Output: default@orc_ppd
+POSTHOOK: query: insert overwrite table orc_ppd_n0 select * from staging_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@staging -POSTHOOK: Output: default@orc_ppd -POSTHOOK: Lineage: orc_ppd.b SIMPLE [(staging)staging.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: orc_ppd.bin SIMPLE [(staging)staging.FieldSchema(name:bin, type:binary, comment:null), ] -POSTHOOK: Lineage: orc_ppd.bo SIMPLE [(staging)staging.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: orc_ppd.d SIMPLE [(staging)staging.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: orc_ppd.dec SIMPLE [(staging)staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] -POSTHOOK: Lineage: orc_ppd.f SIMPLE [(staging)staging.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: orc_ppd.i SIMPLE [(staging)staging.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: orc_ppd.s SIMPLE [(staging)staging.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: orc_ppd.si SIMPLE [(staging)staging.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: orc_ppd.t SIMPLE [(staging)staging.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: orc_ppd.ts SIMPLE [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ] -PREHOOK: query: select * from orc_ppd limit 1 +POSTHOOK: Input: default@staging_n4 +POSTHOOK: Output: default@orc_ppd_n0 +POSTHOOK: Lineage: orc_ppd_n0.b SIMPLE [(staging_n4)staging_n4.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.bin SIMPLE [(staging_n4)staging_n4.FieldSchema(name:bin, type:binary, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.bo SIMPLE [(staging_n4)staging_n4.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.d SIMPLE [(staging_n4)staging_n4.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.dec SIMPLE [(staging_n4)staging_n4.FieldSchema(name:dec, type:decimal(4,2), comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.f SIMPLE [(staging_n4)staging_n4.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.i SIMPLE [(staging_n4)staging_n4.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.s SIMPLE [(staging_n4)staging_n4.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.si SIMPLE [(staging_n4)staging_n4.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.t SIMPLE [(staging_n4)staging_n4.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: orc_ppd_n0.ts SIMPLE [(staging_n4)staging_n4.FieldSchema(name:ts, type:timestamp, comment:null), ] +PREHOOK: query: select * from orc_ppd_n0 limit 1 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_ppd +PREHOOK: Input: default@orc_ppd_n0 #### A masked pattern was here #### -- BEGIN ORC FILE DUMP -- #### A masked pattern was here #### @@ -276,17 +276,17 @@ ________________________________________________________________________________ -- END ORC FILE DUMP -- 124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.40 yard duty -PREHOOK: query: alter table orc_ppd set tblproperties("orc.bloom.filter.fpp"="0.01") +PREHOOK: query: alter table orc_ppd_n0 set tblproperties("orc.bloom.filter.fpp"="0.01") PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@orc_ppd -PREHOOK: Output: default@orc_ppd -PREHOOK: query: insert overwrite table orc_ppd select * from 
staging +PREHOOK: Input: default@orc_ppd_n0 +PREHOOK: Output: default@orc_ppd_n0 +PREHOOK: query: insert overwrite table orc_ppd_n0 select * from staging_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@staging -PREHOOK: Output: default@orc_ppd -PREHOOK: query: select * from orc_ppd limit 1 +PREHOOK: Input: default@staging_n4 +PREHOOK: Output: default@orc_ppd_n0 +PREHOOK: query: select * from orc_ppd_n0 limit 1 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_ppd +PREHOOK: Input: default@orc_ppd_n0 #### A masked pattern was here #### -- BEGIN ORC FILE DUMP -- #### A masked pattern was here #### @@ -488,9 +488,9 @@ PARTITIONED BY (ds string, hr int) STORED AS ORC tblproperties("orc.row.index.st PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_ppd_part -PREHOOK: query: insert overwrite table orc_ppd_part partition(ds = "2015", hr = 10) select * from staging +PREHOOK: query: insert overwrite table orc_ppd_part partition(ds = "2015", hr = 10) select * from staging_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@staging +PREHOOK: Input: default@staging_n4 PREHOOK: Output: default@orc_ppd_part@ds=2015/hr=10 PREHOOK: query: select * from orc_ppd_part limit 1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/orc_int_type_promotion.q.out b/ql/src/test/results/clientpositive/orc_int_type_promotion.q.out index 4b7b0b0e89..73d55e7de0 100644 --- a/ql/src/test/results/clientpositive/orc_int_type_promotion.q.out +++ b/ql/src/test/results/clientpositive/orc_int_type_promotion.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table if not exists alltypes ( +PREHOOK: query: create table if not exists alltypes_n0 ( bo boolean, ti tinyint, si smallint, @@ -20,8 +20,8 @@ collection items terminated by ',' map keys terminated by ':' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@alltypes -POSTHOOK: query: create table if not exists alltypes ( +PREHOOK: Output: default@alltypes_n0 +POSTHOOK: query: create table if not exists alltypes_n0 ( bo boolean, ti tinyint, si smallint, @@ -43,8 +43,8 @@ collection items terminated by ',' map keys terminated by ':' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@alltypes -PREHOOK: query: create table if not exists alltypes_orc ( +POSTHOOK: Output: default@alltypes_n0 +PREHOOK: query: create table if not exists alltypes_orc_n0 ( bo boolean, ti tinyint, si smallint, @@ -64,8 +64,8 @@ PREHOOK: query: create table if not exists alltypes_orc ( ) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: create table if not exists alltypes_orc ( +PREHOOK: Output: default@alltypes_orc_n0 +POSTHOOK: query: create table if not exists alltypes_orc_n0 ( bo boolean, ti tinyint, si smallint, @@ -85,96 +85,96 @@ POSTHOOK: query: create table if not exists alltypes_orc ( ) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@alltypes_orc -PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes +POSTHOOK: Output: default@alltypes_orc_n0 +PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@alltypes -POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes +PREHOOK: 
Output: default@alltypes_n0 +POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@alltypes -PREHOOK: query: insert overwrite table alltypes_orc select * from alltypes +POSTHOOK: Output: default@alltypes_n0 +PREHOOK: query: insert overwrite table alltypes_orc_n0 select * from alltypes_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@alltypes -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: insert overwrite table alltypes_orc select * from alltypes +PREHOOK: Input: default@alltypes_n0 +PREHOOK: Output: default@alltypes_orc_n0 +POSTHOOK: query: insert overwrite table alltypes_orc_n0 select * from alltypes_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alltypes -POSTHOOK: Output: default@alltypes_orc -POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ] -POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ] -POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ] -PREHOOK: query: select * from alltypes_orc +POSTHOOK: Input: default@alltypes_n0 +POSTHOOK: Output: default@alltypes_orc_n0 +POSTHOOK: Lineage: alltypes_orc_n0.bi SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:bi, type:bigint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.bo SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.c SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:c, type:char(5), comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.d SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.da SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:da, type:date, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.de SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:de, type:decimal(10,3), comment:null), ] +POSTHOOK: 
Lineage: alltypes_orc_n0.f SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.i SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.l SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:l, type:array, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.m SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:m, type:map, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.s SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.si SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.st SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:st, type:struct, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.ti SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:ti, type:tinyint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.ts SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n0.vc SIMPLE [(alltypes_n0)alltypes_n0.FieldSchema(name:vc, type:varchar(5), comment:null), ] +PREHOOK: query: select * from alltypes_orc_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@alltypes_orc +PREHOOK: Input: default@alltypes_orc_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from alltypes_orc +POSTHOOK: query: select * from alltypes_orc_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alltypes_orc +POSTHOOK: Input: default@alltypes_orc_n0 #### A masked pattern was here #### true 10 100 1000 10000 4.0 20.0 4.222 1969-12-31 15:59:58.174 1970-01-01 string hello hello {"k1":"v1","k2":"v2"} [100,200] {"c1":null,"c2":" \"foo\"}"} false 20 200 2000 20000 8.0 40.0 2.222 1970-12-31 15:59:58.174 1971-01-01 abcd world world {"k3":"v3","k4":"v4"} [200,300] {"c1":null,"c2":" \"bar\"}"} -PREHOOK: query: alter table alltypes_orc change si si int +PREHOOK: query: alter table alltypes_orc_n0 change si si int PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alltypes_orc -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: alter table alltypes_orc change si si int +PREHOOK: Input: default@alltypes_orc_n0 +PREHOOK: Output: default@alltypes_orc_n0 +POSTHOOK: query: alter table alltypes_orc_n0 change si si int POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alltypes_orc -POSTHOOK: Output: default@alltypes_orc -PREHOOK: query: select * from alltypes_orc +POSTHOOK: Input: default@alltypes_orc_n0 +POSTHOOK: Output: default@alltypes_orc_n0 +PREHOOK: query: select * from alltypes_orc_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@alltypes_orc +PREHOOK: Input: default@alltypes_orc_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from alltypes_orc +POSTHOOK: query: select * from alltypes_orc_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alltypes_orc +POSTHOOK: Input: default@alltypes_orc_n0 #### A masked pattern was here #### true 10 100 1000 10000 4.0 20.0 4.222 1969-12-31 15:59:58.174 1970-01-01 string hello hello {"k1":"v1","k2":"v2"} [100,200] {"c1":null,"c2":" \"foo\"}"} false 20 200 2000 20000 8.0 40.0 2.222 1970-12-31 15:59:58.174 1971-01-01 abcd world world {"k3":"v3","k4":"v4"} [200,300] {"c1":null,"c2":" \"bar\"}"} -PREHOOK: query: alter table alltypes_orc change si si bigint +PREHOOK: query: alter table alltypes_orc_n0 change si si bigint PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alltypes_orc -PREHOOK: Output: 
default@alltypes_orc -POSTHOOK: query: alter table alltypes_orc change si si bigint +PREHOOK: Input: default@alltypes_orc_n0 +PREHOOK: Output: default@alltypes_orc_n0 +POSTHOOK: query: alter table alltypes_orc_n0 change si si bigint POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alltypes_orc -POSTHOOK: Output: default@alltypes_orc -PREHOOK: query: alter table alltypes_orc change i i bigint +POSTHOOK: Input: default@alltypes_orc_n0 +POSTHOOK: Output: default@alltypes_orc_n0 +PREHOOK: query: alter table alltypes_orc_n0 change i i bigint PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alltypes_orc -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: alter table alltypes_orc change i i bigint +PREHOOK: Input: default@alltypes_orc_n0 +PREHOOK: Output: default@alltypes_orc_n0 +POSTHOOK: query: alter table alltypes_orc_n0 change i i bigint POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alltypes_orc -POSTHOOK: Output: default@alltypes_orc -PREHOOK: query: select * from alltypes_orc +POSTHOOK: Input: default@alltypes_orc_n0 +POSTHOOK: Output: default@alltypes_orc_n0 +PREHOOK: query: select * from alltypes_orc_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@alltypes_orc +PREHOOK: Input: default@alltypes_orc_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from alltypes_orc +POSTHOOK: query: select * from alltypes_orc_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alltypes_orc +POSTHOOK: Input: default@alltypes_orc_n0 #### A masked pattern was here #### true 10 100 1000 10000 4.0 20.0 4.222 1969-12-31 15:59:58.174 1970-01-01 string hello hello {"k1":"v1","k2":"v2"} [100,200] {"c1":null,"c2":" \"foo\"}"} false 20 200 2000 20000 8.0 40.0 2.222 1970-12-31 15:59:58.174 1971-01-01 abcd world world {"k3":"v3","k4":"v4"} [200,300] {"c1":null,"c2":" \"bar\"}"} -PREHOOK: query: explain select ti, si, i, bi from alltypes_orc +PREHOOK: query: explain select ti, si, i, bi from alltypes_orc_n0 PREHOOK: type: QUERY -POSTHOOK: query: explain select ti, si, i, bi from alltypes_orc +POSTHOOK: query: explain select ti, si, i, bi from alltypes_orc_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -185,7 +185,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: alltypes_orc + alias: alltypes_orc_n0 Statistics: Num rows: 2 Data size: 1908 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ti (type: tinyint), si (type: bigint), i (type: bigint), bi (type: bigint) @@ -206,13 +206,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select ti, si, i, bi from alltypes_orc +PREHOOK: query: select ti, si, i, bi from alltypes_orc_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@alltypes_orc +PREHOOK: Input: default@alltypes_orc_n0 #### A masked pattern was here #### -POSTHOOK: query: select ti, si, i, bi from alltypes_orc +POSTHOOK: query: select ti, si, i, bi from alltypes_orc_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alltypes_orc +POSTHOOK: Input: default@alltypes_orc_n0 #### A masked pattern was here #### 10 100 1000 10000 20 200 2000 20000 diff --git a/ql/src/test/results/clientpositive/orc_merge1.q.out b/ql/src/test/results/clientpositive/orc_merge1.q.out index cad6c00a8e..622e899155 100644 --- a/ql/src/test/results/clientpositive/orc_merge1.q.out +++ b/ql/src/test/results/clientpositive/orc_merge1.q.out @@ -1,52 +1,52 @@ -PREHOOK: query: DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orcfile_merge1 
+POSTHOOK: query: DROP TABLE orcfile_merge1_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE orcfile_merge1b +PREHOOK: query: DROP TABLE orcfile_merge1b_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orcfile_merge1b +POSTHOOK: query: DROP TABLE orcfile_merge1b_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE orcfile_merge1c +PREHOOK: query: DROP TABLE orcfile_merge1c_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orcfile_merge1c +POSTHOOK: query: DROP TABLE orcfile_merge1c_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING) +PREHOOK: query: CREATE TABLE orcfile_merge1_n1 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orcfile_merge1 -POSTHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING) +PREHOOK: Output: default@orcfile_merge1_n1 +POSTHOOK: query: CREATE TABLE orcfile_merge1_n1 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orcfile_merge1 -PREHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING) +POSTHOOK: Output: default@orcfile_merge1_n1 +PREHOOK: query: CREATE TABLE orcfile_merge1b_n1 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orcfile_merge1b -POSTHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING) +PREHOOK: Output: default@orcfile_merge1b_n1 +POSTHOOK: query: CREATE TABLE orcfile_merge1b_n1 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orcfile_merge1b -PREHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) +POSTHOOK: Output: default@orcfile_merge1b_n1 +PREHOOK: query: CREATE TABLE orcfile_merge1c_n1 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orcfile_merge1c -POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) +PREHOOK: Output: default@orcfile_merge1c_n1 +POSTHOOK: query: CREATE TABLE orcfile_merge1c_n1 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orcfile_merge1c +POSTHOOK: Output: default@orcfile_merge1c_n1 PREHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY @@ -73,7 +73,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1 + name: default.orcfile_merge1_n1 Select Operator expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), UDFToString(_col2) (type: string) outputColumnNames: key, value, ds, part @@ -120,7 +120,7 @@ STAGE 
PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1 + name: default.orcfile_merge1_n1 Stage: Stage-2 Stats Work @@ -128,34 +128,34 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.orcfile_merge1 + Table: default.orcfile_merge1_n1 -PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) +PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@orcfile_merge1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) +PREHOOK: Output: default@orcfile_merge1_n1@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0 -POSTHOOK: Output: default@orcfile_merge1@ds=1/part=1 -POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@orcfile_merge1_n1@ds=1/part=0 +POSTHOOK: Output: default@orcfile_merge1_n1@ds=1/part=1 +POSTHOOK: Lineage: orcfile_merge1_n1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1_n1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1_n1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1_n1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 2 items #### A masked pattern was here #### PREHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY @@ -187,7 +187,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1b + name: default.orcfile_merge1b_n1 Select Operator expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), UDFToString(_col2) (type: string) outputColumnNames: key, value, ds, part @@ -243,7 +243,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1b + name: default.orcfile_merge1b_n1 Stage: Stage-2 Stats Work @@ -251,7 +251,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.orcfile_merge1b + Table: default.orcfile_merge1b_n1 Stage: Stage-3 Map Reduce @@ -263,7 +263,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1b + name: default.orcfile_merge1b_n1 Stage: Stage-5 Map Reduce @@ -275,7 +275,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1b + name: default.orcfile_merge1b_n1 Stage: Stage-6 Move Operator @@ -283,32 +283,32 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) +PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@orcfile_merge1b@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) +PREHOOK: Output: default@orcfile_merge1b_n1@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=0 -POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=1 -POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@orcfile_merge1b_n1@ds=1/part=0 +POSTHOOK: Output: default@orcfile_merge1b_n1@ds=1/part=1 +POSTHOOK: Lineage: orcfile_merge1b_n1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1b_n1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1b_n1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1b_n1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### PREHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY @@ -340,7 +340,7 
@@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1c + name: default.orcfile_merge1c_n1 Select Operator expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), UDFToString(_col2) (type: string) outputColumnNames: key, value, ds, part @@ -396,7 +396,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1c + name: default.orcfile_merge1c_n1 Stage: Stage-2 Stats Work @@ -404,7 +404,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.orcfile_merge1c + Table: default.orcfile_merge1c_n1 Stage: Stage-3 Merge File Operator @@ -426,142 +426,142 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) +PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@orcfile_merge1c@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) +PREHOOK: Output: default@orcfile_merge1c_n1@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=0 -POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=1 -POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@orcfile_merge1c_n1@ds=1/part=0 +POSTHOOK: Output: default@orcfile_merge1c_n1@ds=1/part=1 +POSTHOOK: Lineage: orcfile_merge1c_n1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1c_n1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1c_n1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1c_n1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1 WHERE ds='1' + FROM orcfile_merge1_n1 WHERE ds='1' ) t PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1 -PREHOOK: Input: default@orcfile_merge1@ds=1/part=0 -PREHOOK: Input: default@orcfile_merge1@ds=1/part=1 +PREHOOK: Input: default@orcfile_merge1_n1 +PREHOOK: Input: default@orcfile_merge1_n1@ds=1/part=0 +PREHOOK: Input: default@orcfile_merge1_n1@ds=1/part=1 #### A masked 
pattern was here #### POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1 WHERE ds='1' + FROM orcfile_merge1_n1 WHERE ds='1' ) t POSTHOOK: type: QUERY -POSTHOOK: Input: default@orcfile_merge1 -POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0 -POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1 +POSTHOOK: Input: default@orcfile_merge1_n1 +POSTHOOK: Input: default@orcfile_merge1_n1@ds=1/part=0 +POSTHOOK: Input: default@orcfile_merge1_n1@ds=1/part=1 #### A masked pattern was here #### -21975308766 PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1b WHERE ds='1' + FROM orcfile_merge1b_n1 WHERE ds='1' ) t PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1b -PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0 -PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1 +PREHOOK: Input: default@orcfile_merge1b_n1 +PREHOOK: Input: default@orcfile_merge1b_n1@ds=1/part=0 +PREHOOK: Input: default@orcfile_merge1b_n1@ds=1/part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1b WHERE ds='1' + FROM orcfile_merge1b_n1 WHERE ds='1' ) t POSTHOOK: type: QUERY -POSTHOOK: Input: default@orcfile_merge1b -POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0 -POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1 +POSTHOOK: Input: default@orcfile_merge1b_n1 +POSTHOOK: Input: default@orcfile_merge1b_n1@ds=1/part=0 +POSTHOOK: Input: default@orcfile_merge1b_n1@ds=1/part=1 #### A masked pattern was here #### -21975308766 PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1c WHERE ds='1' + FROM orcfile_merge1c_n1 WHERE ds='1' ) t PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1c -PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0 -PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1 +PREHOOK: Input: default@orcfile_merge1c_n1 +PREHOOK: Input: default@orcfile_merge1c_n1@ds=1/part=0 +PREHOOK: Input: default@orcfile_merge1c_n1@ds=1/part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1c WHERE ds='1' + FROM orcfile_merge1c_n1 WHERE ds='1' ) t POSTHOOK: type: QUERY -POSTHOOK: Input: default@orcfile_merge1c -POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0 -POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1 +POSTHOOK: Input: default@orcfile_merge1c_n1 +POSTHOOK: Input: default@orcfile_merge1c_n1@ds=1/part=0 +POSTHOOK: Input: default@orcfile_merge1c_n1@ds=1/part=1 #### A masked pattern was here #### -21975308766 -PREHOOK: query: select count(*) from orcfile_merge1 +PREHOOK: query: select count(*) from orcfile_merge1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1 -PREHOOK: Input: default@orcfile_merge1@ds=1/part=0 -PREHOOK: Input: default@orcfile_merge1@ds=1/part=1 +PREHOOK: Input: default@orcfile_merge1_n1 +PREHOOK: Input: default@orcfile_merge1_n1@ds=1/part=0 +PREHOOK: Input: default@orcfile_merge1_n1@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from orcfile_merge1 +POSTHOOK: query: select count(*) from orcfile_merge1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orcfile_merge1 -POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0 -POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1 +POSTHOOK: Input: default@orcfile_merge1_n1 +POSTHOOK: Input: default@orcfile_merge1_n1@ds=1/part=0 
+POSTHOOK: Input: default@orcfile_merge1_n1@ds=1/part=1 #### A masked pattern was here #### 500 -PREHOOK: query: select count(*) from orcfile_merge1b +PREHOOK: query: select count(*) from orcfile_merge1b_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1b -PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0 -PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1 +PREHOOK: Input: default@orcfile_merge1b_n1 +PREHOOK: Input: default@orcfile_merge1b_n1@ds=1/part=0 +PREHOOK: Input: default@orcfile_merge1b_n1@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from orcfile_merge1b +POSTHOOK: query: select count(*) from orcfile_merge1b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orcfile_merge1b -POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0 -POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1 +POSTHOOK: Input: default@orcfile_merge1b_n1 +POSTHOOK: Input: default@orcfile_merge1b_n1@ds=1/part=0 +POSTHOOK: Input: default@orcfile_merge1b_n1@ds=1/part=1 #### A masked pattern was here #### 500 -PREHOOK: query: select count(*) from orcfile_merge1c +PREHOOK: query: select count(*) from orcfile_merge1c_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1c -PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0 -PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1 +PREHOOK: Input: default@orcfile_merge1c_n1 +PREHOOK: Input: default@orcfile_merge1c_n1@ds=1/part=0 +PREHOOK: Input: default@orcfile_merge1c_n1@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from orcfile_merge1c +POSTHOOK: query: select count(*) from orcfile_merge1c_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orcfile_merge1c -POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0 -POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1 +POSTHOOK: Input: default@orcfile_merge1c_n1 +POSTHOOK: Input: default@orcfile_merge1c_n1@ds=1/part=0 +POSTHOOK: Input: default@orcfile_merge1c_n1@ds=1/part=1 #### A masked pattern was here #### 500 -PREHOOK: query: DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@orcfile_merge1 -PREHOOK: Output: default@orcfile_merge1 -POSTHOOK: query: DROP TABLE orcfile_merge1 +PREHOOK: Input: default@orcfile_merge1_n1 +PREHOOK: Output: default@orcfile_merge1_n1 +POSTHOOK: query: DROP TABLE orcfile_merge1_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@orcfile_merge1 -POSTHOOK: Output: default@orcfile_merge1 -PREHOOK: query: DROP TABLE orcfile_merge1b +POSTHOOK: Input: default@orcfile_merge1_n1 +POSTHOOK: Output: default@orcfile_merge1_n1 +PREHOOK: query: DROP TABLE orcfile_merge1b_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@orcfile_merge1b -PREHOOK: Output: default@orcfile_merge1b -POSTHOOK: query: DROP TABLE orcfile_merge1b +PREHOOK: Input: default@orcfile_merge1b_n1 +PREHOOK: Output: default@orcfile_merge1b_n1 +POSTHOOK: query: DROP TABLE orcfile_merge1b_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@orcfile_merge1b -POSTHOOK: Output: default@orcfile_merge1b -PREHOOK: query: DROP TABLE orcfile_merge1c +POSTHOOK: Input: default@orcfile_merge1b_n1 +POSTHOOK: Output: default@orcfile_merge1b_n1 +PREHOOK: query: DROP TABLE orcfile_merge1c_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@orcfile_merge1c -PREHOOK: Output: default@orcfile_merge1c -POSTHOOK: query: DROP TABLE orcfile_merge1c +PREHOOK: Input: default@orcfile_merge1c_n1 +PREHOOK: Output: default@orcfile_merge1c_n1 +POSTHOOK: query: DROP TABLE orcfile_merge1c_n1 POSTHOOK: 
type: DROPTABLE -POSTHOOK: Input: default@orcfile_merge1c -POSTHOOK: Output: default@orcfile_merge1c +POSTHOOK: Input: default@orcfile_merge1c_n1 +POSTHOOK: Output: default@orcfile_merge1c_n1 diff --git a/ql/src/test/results/clientpositive/orc_merge11.q.out b/ql/src/test/results/clientpositive/orc_merge11.q.out index a63a01878e..a4ec749f60 100644 --- a/ql/src/test/results/clientpositive/orc_merge11.q.out +++ b/ql/src/test/results/clientpositive/orc_merge11.q.out @@ -1,74 +1,74 @@ -PREHOOK: query: DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orcfile_merge1 +POSTHOOK: query: DROP TABLE orcfile_merge1_n2 POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE orc_split_elim +PREHOOK: query: DROP TABLE orc_split_elim_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orc_split_elim +POSTHOOK: query: DROP TABLE orc_split_elim_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orc_split_elim -POSTHOOK: query: create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: Output: default@orc_split_elim_n0 +POSTHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orc_split_elim -PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim +POSTHOOK: Output: default@orc_split_elim_n0 +PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@orc_split_elim -POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim +PREHOOK: Output: default@orc_split_elim_n0 +POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@orc_split_elim -PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim +POSTHOOK: Output: default@orc_split_elim_n0 +PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@orc_split_elim -POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim +PREHOOK: Output: default@orc_split_elim_n0 +POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@orc_split_elim -PREHOOK: query: create table orcfile_merge1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096") +POSTHOOK: Output: default@orc_split_elim_n0 +PREHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc 
tblproperties("orc.compress.size"="4096") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orcfile_merge1 -POSTHOOK: query: create table orcfile_merge1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096") +PREHOOK: Output: default@orcfile_merge1_n2 +POSTHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orcfile_merge1 -PREHOOK: query: insert overwrite table orcfile_merge1 select * from orc_split_elim +POSTHOOK: Output: default@orcfile_merge1_n2 +PREHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_split_elim -PREHOOK: Output: default@orcfile_merge1 -POSTHOOK: query: insert overwrite table orcfile_merge1 select * from orc_split_elim +PREHOOK: Input: default@orc_split_elim_n0 +PREHOOK: Output: default@orcfile_merge1_n2 +POSTHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_split_elim -POSTHOOK: Output: default@orcfile_merge1 -POSTHOOK: Lineage: orcfile_merge1.decimal1 SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: orcfile_merge1.string1 SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:string1, type:string, comment:null), ] -POSTHOOK: Lineage: orcfile_merge1.subtype SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:subtype, type:double, comment:null), ] -POSTHOOK: Lineage: orcfile_merge1.ts SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: orcfile_merge1.userid SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: insert into table orcfile_merge1 select * from orc_split_elim +POSTHOOK: Input: default@orc_split_elim_n0 +POSTHOOK: Output: default@orcfile_merge1_n2 +POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orcfile_merge1_n2.string1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orcfile_merge1_n2.subtype SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orcfile_merge1_n2.ts SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orcfile_merge1_n2.userid SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_split_elim -PREHOOK: Output: default@orcfile_merge1 -POSTHOOK: query: insert into table orcfile_merge1 select * from orc_split_elim +PREHOOK: Input: default@orc_split_elim_n0 +PREHOOK: Output: default@orcfile_merge1_n2 +POSTHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_split_elim -POSTHOOK: Output: default@orcfile_merge1 -POSTHOOK: Lineage: orcfile_merge1.decimal1 SIMPLE 
[(orc_split_elim)orc_split_elim.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: orcfile_merge1.string1 SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:string1, type:string, comment:null), ] -POSTHOOK: Lineage: orcfile_merge1.subtype SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:subtype, type:double, comment:null), ] -POSTHOOK: Lineage: orcfile_merge1.ts SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: orcfile_merge1.userid SIMPLE [(orc_split_elim)orc_split_elim.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Input: default@orc_split_elim_n0 +POSTHOOK: Output: default@orcfile_merge1_n2 +POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orcfile_merge1_n2.string1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orcfile_merge1_n2.subtype SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orcfile_merge1_n2.ts SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orcfile_merge1_n2.userid SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:userid, type:bigint, comment:null), ] Found 2 items #### A masked pattern was here #### -PREHOOK: query: select * from orcfile_merge1 limit 1 +PREHOOK: query: select * from orcfile_merge1_n2 limit 1 PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1 +PREHOOK: Input: default@orcfile_merge1_n2 #### A masked pattern was here #### -- BEGIN ORC FILE DUMP -- #### A masked pattern was here #### @@ -253,25 +253,25 @@ ________________________________________________________________________________ -- END ORC FILE DUMP -- 2 foo 0.8 1 1969-12-31 16:00:00 -PREHOOK: query: ALTER TABLE orcfile_merge1 CONCATENATE +PREHOOK: query: ALTER TABLE orcfile_merge1_n2 CONCATENATE PREHOOK: type: ALTER_TABLE_MERGE -PREHOOK: Input: default@orcfile_merge1 -PREHOOK: Output: default@orcfile_merge1 +PREHOOK: Input: default@orcfile_merge1_n2 +PREHOOK: Output: default@orcfile_merge1_n2 Found 1 items #### A masked pattern was here #### -PREHOOK: query: select count(*) from orc_split_elim +PREHOOK: query: select count(*) from orc_split_elim_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_split_elim +PREHOOK: Input: default@orc_split_elim_n0 #### A masked pattern was here #### 50000 -PREHOOK: query: select count(*) from orcfile_merge1 +PREHOOK: query: select count(*) from orcfile_merge1_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1 +PREHOOK: Input: default@orcfile_merge1_n2 #### A masked pattern was here #### 100000 -PREHOOK: query: select * from orcfile_merge1 limit 1 +PREHOOK: query: select * from orcfile_merge1_n2 limit 1 PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1 +PREHOOK: Input: default@orcfile_merge1_n2 #### A masked pattern was here #### -- BEGIN ORC FILE DUMP -- #### A masked pattern was here #### @@ -430,15 +430,15 @@ ________________________________________________________________________________ -- END ORC FILE DUMP -- 2 foo 0.8 1 1969-12-31 16:00:00 -PREHOOK: query: INSERT OVERWRITE DIRECTORY 'output' stored as orcfile select * from orc_split_elim +PREHOOK: query: INSERT OVERWRITE DIRECTORY 'output' stored as orcfile select * from orc_split_elim_n0 PREHOOK: type: 
QUERY -PREHOOK: Input: default@orc_split_elim +PREHOOK: Input: default@orc_split_elim_n0 PREHOOK: Output: output -PREHOOK: query: DROP TABLE orc_split_elim +PREHOOK: query: DROP TABLE orc_split_elim_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@orc_split_elim -PREHOOK: Output: default@orc_split_elim -PREHOOK: query: DROP TABLE orcfile_merge1 +PREHOOK: Input: default@orc_split_elim_n0 +PREHOOK: Output: default@orc_split_elim_n0 +PREHOOK: query: DROP TABLE orcfile_merge1_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@orcfile_merge1 -PREHOOK: Output: default@orcfile_merge1 +PREHOOK: Input: default@orcfile_merge1_n2 +PREHOOK: Output: default@orcfile_merge1_n2 diff --git a/ql/src/test/results/clientpositive/orc_merge2.q.out b/ql/src/test/results/clientpositive/orc_merge2.q.out index 70788fde9f..2f48619925 100644 --- a/ql/src/test/results/clientpositive/orc_merge2.q.out +++ b/ql/src/test/results/clientpositive/orc_merge2.q.out @@ -1,25 +1,25 @@ -PREHOOK: query: DROP TABLE orcfile_merge2a +PREHOOK: query: DROP TABLE orcfile_merge2a_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orcfile_merge2a +POSTHOOK: query: DROP TABLE orcfile_merge2a_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING) +PREHOOK: query: CREATE TABLE orcfile_merge2a_n0 (key INT, value STRING) PARTITIONED BY (one string, two string, three string) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orcfile_merge2a -POSTHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING) +PREHOOK: Output: default@orcfile_merge2a_n0 +POSTHOOK: query: CREATE TABLE orcfile_merge2a_n0 (key INT, value STRING) PARTITIONED BY (one string, two string, three string) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orcfile_merge2a -PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) +POSTHOOK: Output: default@orcfile_merge2a_n0 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a_n0 PARTITION (one='1', two, three) SELECT key, value, PMOD(HASH(key), 10) as two, PMOD(HASH(value), 10) as three FROM src PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a_n0 PARTITION (one='1', two, three) SELECT key, value, PMOD(HASH(key), 10) as two, PMOD(HASH(value), 10) as three FROM src @@ -52,7 +52,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge2a + name: default.orcfile_merge2a_n0 Select Operator expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), UDFToString(_col2) (type: string), UDFToString(_col3) (type: string) outputColumnNames: key, value, one, two, three @@ -109,7 +109,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge2a + name: default.orcfile_merge2a_n0 Stage: Stage-2 Stats Work @@ -117,7 +117,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.orcfile_merge2a + Table: default.orcfile_merge2a_n0 Stage: Stage-3 Merge File Operator @@ -139,134 +139,134 @@ STAGE PLANS: hdfs directory: true 
#### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a_n0 PARTITION (one='1', two, three)
    SELECT key, value, PMOD(HASH(key), 10) as two, PMOD(HASH(value), 10) as three
    FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@orcfile_merge2a@one=1
-POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+PREHOOK: Output: default@orcfile_merge2a_n0@one=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a_n0 PARTITION (one='1', two, three)
    SELECT key, value, PMOD(HASH(key), 10) as two, PMOD(HASH(value), 10) as three
    FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=2
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=8
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=3
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=9
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=0
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=4
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=1
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=5
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=2
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=6
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=3
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=7
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=4
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=8
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=5
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=9
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=0
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=6
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=1
-POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=7
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=0/three=2
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=0/three=8
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=1/three=3
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=1/three=9
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=2/three=0
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=2/three=4
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=3/three=1
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=3/three=5
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=4/three=2
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=4/three=6
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=5/three=3
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=5/three=7
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=6/three=4
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=6/three=8
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=7/three=5
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=7/three=9
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=8/three=0
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=8/three=6
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=9/three=1
+POSTHOOK: Output: default@orcfile_merge2a_n0@one=1/two=9/three=7
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=0,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=0,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=0,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=0,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=1,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=1,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=1,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=1,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=2,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=2,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=2,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=2,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=3,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=3,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=3,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=3,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=4,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=4,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=4,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=4,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=5,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=5,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=5,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=5,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=6,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=6,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=6,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=6,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=7,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=7,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=7,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=7,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=8,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=8,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=8,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=8,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=9,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=9,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=9,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a_n0 PARTITION(one=1,two=9,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
Found 1 items
#### A masked pattern was here ####
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge2a
+    FROM orcfile_merge2a_n0
) t
PREHOOK: type: QUERY
-PREHOOK: Input: default@orcfile_merge2a
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1
-PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7
+PREHOOK: Input: default@orcfile_merge2a_n0
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=0/three=2
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=0/three=8
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=1/three=3
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=1/three=9
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=2/three=0
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=2/three=4
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=3/three=1
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=3/three=5
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=4/three=2
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=4/three=6
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=5/three=3
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=5/three=7
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=6/three=4
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=6/three=8
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=7/three=5
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=7/three=9
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=8/three=0
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=8/three=6
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=9/three=1
+PREHOOK: Input: default@orcfile_merge2a_n0@one=1/two=9/three=7
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge2a
+    FROM orcfile_merge2a_n0
) t
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcfile_merge2a
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1
-POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7
+POSTHOOK: Input: default@orcfile_merge2a_n0
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=0/three=2
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=0/three=8
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=1/three=3
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=1/three=9
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=2/three=0
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=2/three=4
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=3/three=1
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=3/three=5
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=4/three=2
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=4/three=6
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=5/three=3
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=5/three=7
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=6/three=4
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=6/three=8
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=7/three=5
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=7/three=9
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=8/three=0
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=8/three=6
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=9/three=1
+POSTHOOK: Input: default@orcfile_merge2a_n0@one=1/two=9/three=7
#### A masked pattern was here ####
-4209012844
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
@@ -286,11 +286,11 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
#### A masked pattern was here ####
-4209012844
-PREHOOK: query: DROP TABLE orcfile_merge2a
+PREHOOK: query: DROP TABLE orcfile_merge2a_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@orcfile_merge2a
-PREHOOK: Output: default@orcfile_merge2a
-POSTHOOK: query: DROP TABLE orcfile_merge2a
+PREHOOK: Input: default@orcfile_merge2a_n0
+PREHOOK: Output: default@orcfile_merge2a_n0
+POSTHOOK: query: DROP TABLE orcfile_merge2a_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@orcfile_merge2a
-POSTHOOK: Output: default@orcfile_merge2a
+POSTHOOK: Input: default@orcfile_merge2a_n0
+POSTHOOK: Output: default@orcfile_merge2a_n0
diff --git a/ql/src/test/results/clientpositive/orc_merge3.q.out b/ql/src/test/results/clientpositive/orc_merge3.q.out
index 597b00078e..2ac7c3104b 100644
--- a/ql/src/test/results/clientpositive/orc_merge3.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge3.q.out
@@ -1,58 +1,58 @@
-PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: query: DROP TABLE orcfile_merge3a_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: query: DROP TABLE orcfile_merge3a_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: query: DROP TABLE orcfile_merge3b_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: query: DROP TABLE orcfile_merge3b_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+PREHOOK: query: CREATE TABLE orcfile_merge3a_n0 (key int, value string)
    PARTITIONED BY (ds string) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orcfile_merge3a
-POSTHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+PREHOOK: Output: default@orcfile_merge3a_n0
+POSTHOOK: query: CREATE TABLE orcfile_merge3a_n0 (key int, value string)
    PARTITIONED BY (ds string) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orcfile_merge3a
-PREHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS ORC
+POSTHOOK: Output: default@orcfile_merge3a_n0
+PREHOOK: query: CREATE TABLE orcfile_merge3b_n0 (key int, value string) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orcfile_merge3b
-POSTHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS ORC
+PREHOOK: Output: default@orcfile_merge3b_n0
+POSTHOOK: query: CREATE TABLE orcfile_merge3b_n0 (key int, value string) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orcfile_merge3b
-PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+POSTHOOK: Output: default@orcfile_merge3b_n0
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a_n0 PARTITION (ds='1')
    SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@orcfile_merge3a@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+PREHOOK: Output: default@orcfile_merge3a_n0@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a_n0 PARTITION (ds='1')
    SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@orcfile_merge3a@ds=1
-POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+POSTHOOK: Output: default@orcfile_merge3a_n0@ds=1
+POSTHOOK: Lineage: orcfile_merge3a_n0 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a_n0 PARTITION (ds='2')
    SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@orcfile_merge3a@ds=2
-POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+PREHOOK: Output: default@orcfile_merge3a_n0@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a_n0 PARTITION (ds='2')
    SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@orcfile_merge3a@ds=2
-POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
-    SELECT key, value FROM orcfile_merge3a
+POSTHOOK: Output: default@orcfile_merge3a_n0@ds=2
+POSTHOOK: Lineage: orcfile_merge3a_n0 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a_n0 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b_n0
+    SELECT key, value FROM orcfile_merge3a_n0
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
-    SELECT key, value FROM orcfile_merge3a
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b_n0
+    SELECT key, value FROM orcfile_merge3a_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -69,7 +69,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: orcfile_merge3a
+            alias: orcfile_merge3a_n0
            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: int), value (type: string)
@@ -82,7 +82,7 @@ STAGE PLANS:
                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                    name: default.orcfile_merge3b
+                    name: default.orcfile_merge3b_n0
                Select Operator
                  expressions: _col0 (type: int), _col1 (type: string)
                  outputColumnNames: key, value
@@ -127,7 +127,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.orcfile_merge3b
+              name: default.orcfile_merge3b_n0
  Stage: Stage-2
    Stats Work
@@ -135,7 +135,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.orcfile_merge3b
+          Table: default.orcfile_merge3b_n0
  Stage: Stage-3
    Merge File Operator
@@ -157,71 +157,71 @@ STAGE PLANS:
          hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
-    SELECT key, value FROM orcfile_merge3a
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b_n0
+    SELECT key, value FROM orcfile_merge3a_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@orcfile_merge3a
-PREHOOK: Input: default@orcfile_merge3a@ds=1
-PREHOOK: Input: default@orcfile_merge3a@ds=2
-PREHOOK: Output: default@orcfile_merge3b
-POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
-    SELECT key, value FROM orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a_n0
+PREHOOK: Input: default@orcfile_merge3a_n0@ds=1
+PREHOOK: Input: default@orcfile_merge3a_n0@ds=2
+PREHOOK: Output: default@orcfile_merge3b_n0
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b_n0
+    SELECT key, value FROM orcfile_merge3a_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcfile_merge3a
-POSTHOOK: Input: default@orcfile_merge3a@ds=1
-POSTHOOK: Input: default@orcfile_merge3a@ds=2
-POSTHOOK: Output: default@orcfile_merge3b
-POSTHOOK: Lineage: orcfile_merge3b.key SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: orcfile_merge3b.value SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Input: default@orcfile_merge3a_n0
+POSTHOOK: Input: default@orcfile_merge3a_n0@ds=1
+POSTHOOK: Input: default@orcfile_merge3a_n0@ds=2
+POSTHOOK: Output: default@orcfile_merge3b_n0
+POSTHOOK: Lineage: orcfile_merge3b_n0.key SIMPLE [(orcfile_merge3a_n0)orcfile_merge3a_n0.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge3b_n0.value SIMPLE [(orcfile_merge3a_n0)orcfile_merge3a_n0.FieldSchema(name:value, type:string, comment:null), ]
Found 1 items
#### A masked pattern was here ####
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
    SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
-    FROM orcfile_merge3a
+    FROM orcfile_merge3a_n0
) t
PREHOOK: type: QUERY
-PREHOOK: Input: default@orcfile_merge3a
-PREHOOK: Input: default@orcfile_merge3a@ds=1
-PREHOOK: Input: default@orcfile_merge3a@ds=2
+PREHOOK: Input: default@orcfile_merge3a_n0
+PREHOOK: Input: default@orcfile_merge3a_n0@ds=1
+PREHOOK: Input: default@orcfile_merge3a_n0@ds=2
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
    SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
-    FROM orcfile_merge3a
+    FROM orcfile_merge3a_n0
) t
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcfile_merge3a
-POSTHOOK: Input: default@orcfile_merge3a@ds=1
-POSTHOOK: Input: default@orcfile_merge3a@ds=2
+POSTHOOK: Input: default@orcfile_merge3a_n0
+POSTHOOK: Input: default@orcfile_merge3a_n0@ds=1
+POSTHOOK: Input: default@orcfile_merge3a_n0@ds=2
#### A masked pattern was here ####
14412220296
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
    SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
-    FROM orcfile_merge3b
+    FROM orcfile_merge3b_n0
) t
PREHOOK: type: QUERY
-PREHOOK: Input: default@orcfile_merge3b
+PREHOOK: Input: default@orcfile_merge3b_n0
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
    SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
-    FROM orcfile_merge3b
+    FROM orcfile_merge3b_n0
) t
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcfile_merge3b
+POSTHOOK: Input: default@orcfile_merge3b_n0
#### A masked pattern was here ####
14412220296
-PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: query: DROP TABLE orcfile_merge3a_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@orcfile_merge3a
-PREHOOK: Output: default@orcfile_merge3a
-POSTHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a_n0
+PREHOOK: Output: default@orcfile_merge3a_n0
+POSTHOOK: query: DROP TABLE orcfile_merge3a_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@orcfile_merge3a
-POSTHOOK: Output: default@orcfile_merge3a
-PREHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: Input: default@orcfile_merge3a_n0
+POSTHOOK: Output: default@orcfile_merge3a_n0
+PREHOOK: query: DROP TABLE orcfile_merge3b_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@orcfile_merge3b
-PREHOOK: Output: default@orcfile_merge3b
-POSTHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: Input: default@orcfile_merge3b_n0
+PREHOOK: Output: default@orcfile_merge3b_n0
+POSTHOOK: query: DROP TABLE orcfile_merge3b_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@orcfile_merge3b
-POSTHOOK: Output: default@orcfile_merge3b
+POSTHOOK: Input: default@orcfile_merge3b_n0
+POSTHOOK: Output: default@orcfile_merge3b_n0
diff --git a/ql/src/test/results/clientpositive/orc_merge5.q.out b/ql/src/test/results/clientpositive/orc_merge5.q.out
index 91e89ccbf4..0e87ce6dba 100644
--- a/ql/src/test/results/clientpositive/orc_merge5.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge5.q.out
@@ -1,30 +1,30 @@
-PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: Output: default@orc_merge5_n5
+POSTHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: Output: default@orc_merge5_n5
+PREHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_merge5b
-PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: Output: default@orc_merge5b_n0
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n5
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: Output: default@orc_merge5_n5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n5
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: Output: default@orc_merge5_n5
+PREHOOK: query: explain insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
PREHOOK: type: QUERY
-POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: explain insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -36,7 +36,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: orc_merge5
+            alias: orc_merge5_n5
            filterExpr: (userid <= 13L) (type: boolean)
            Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
@@ -53,7 +53,7 @@ STAGE PLANS:
                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                    name: default.orc_merge5b
+                    name: default.orc_merge5b_n0
                Select Operator
                  expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp)
                  outputColumnNames: userid, string1, subtype, decimal1, ts
@@ -89,7 +89,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.orc_merge5b
+              name: default.orc_merge5b_n0
  Stage: Stage-2
    Stats Work
@@ -97,45 +97,45 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: userid, string1, subtype, decimal1, ts
          Column Types: bigint, string, double, decimal(10,0), timestamp
-          Table: default.orc_merge5b
+          Table: default.orc_merge5b_n0
-PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: Input: default@orc_merge5_n5
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
-POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: analyze table orc_merge5b compute statistics noscan
+POSTHOOK: Input: default@orc_merge5_n5
+POSTHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.userid SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5b_n0 compute statistics noscan
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5b
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: analyze table orc_merge5b compute statistics noscan
+PREHOOK: Input: default@orc_merge5b_n0
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: analyze table orc_merge5b_n0 compute statistics noscan
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5b
-POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Input: default@orc_merge5b_n0
+POSTHOOK: Output: default@orc_merge5b_n0
Found 3 items
#### A masked pattern was here ####
-PREHOOK: query: select * from orc_merge5b
+PREHOOK: query: select * from orc_merge5b_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5b
+PREHOOK: Input: default@orc_merge5b_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: query: select * from orc_merge5b_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Input: default@orc_merge5b_n0
#### A masked pattern was here ####
13 bar 80.0 2 1969-12-31 16:00:05
2 foo 0.8 1 1969-12-31 16:00:00
5 eat 0.8 6 1969-12-31 16:00:20
-PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: query: explain insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
PREHOOK: type: QUERY
-POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: explain insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -152,7 +152,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: orc_merge5
+            alias: orc_merge5_n5
            filterExpr: (userid <= 13L) (type: boolean)
            Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
@@ -169,7 +169,7 @@ STAGE PLANS:
                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                    name: default.orc_merge5b
+                    name: default.orc_merge5b_n0
                Select Operator
                  expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp)
                  outputColumnNames: userid, string1, subtype, decimal1, ts
@@ -214,7 +214,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.orc_merge5b
+              name: default.orc_merge5b_n0
  Stage: Stage-2
    Stats Work
@@ -222,7 +222,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: userid, string1, subtype, decimal1, ts
          Column Types: bigint, string, double, decimal(10,0), timestamp
-          Table: default.orc_merge5b
+          Table: default.orc_merge5b_n0
  Stage: Stage-3
    Merge File Operator
@@ -244,77 +244,77 @@ STAGE PLANS:
          hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: Input: default@orc_merge5_n5
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
-POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: analyze table orc_merge5b compute statistics noscan
+POSTHOOK: Input: default@orc_merge5_n5
+POSTHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.userid SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5b_n0 compute statistics noscan
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5b
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: analyze table orc_merge5b compute statistics noscan
+PREHOOK: Input: default@orc_merge5b_n0
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: analyze table orc_merge5b_n0 compute statistics noscan
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5b
-POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Input: default@orc_merge5b_n0
+POSTHOOK: Output: default@orc_merge5b_n0
Found 1 items
#### A masked pattern was here ####
-PREHOOK: query: select * from orc_merge5b
+PREHOOK: query: select * from orc_merge5b_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5b
+PREHOOK: Input: default@orc_merge5b_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: query: select * from orc_merge5b_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Input: default@orc_merge5b_n0
#### A masked pattern was here ####
13 bar 80.0 2 1969-12-31 16:00:05
2 foo 0.8 1 1969-12-31 16:00:00
5 eat 0.8 6 1969-12-31 16:00:20
-PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: Input: default@orc_merge5_n5
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
-POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: analyze table orc_merge5b compute statistics noscan
+POSTHOOK: Input: default@orc_merge5_n5
+POSTHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.userid SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5b_n0 compute statistics noscan
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5b
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: analyze table orc_merge5b compute statistics noscan
+PREHOOK: Input: default@orc_merge5b_n0
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: analyze table orc_merge5b_n0 compute statistics noscan
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5b
-POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Input: default@orc_merge5b_n0
+POSTHOOK: Output: default@orc_merge5b_n0
Found 3 items
#### A masked pattern was here ####
-PREHOOK: query: select * from orc_merge5b
+PREHOOK: query: select * from orc_merge5b_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5b
+PREHOOK: Input: default@orc_merge5b_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: query: select * from orc_merge5b_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Input: default@orc_merge5b_n0
#### A masked pattern was here ####
13 bar 80.0 2 1969-12-31 16:00:05
2 foo 0.8 1 1969-12-31 16:00:00
5 eat 0.8 6 1969-12-31 16:00:20
-PREHOOK: query: explain alter table orc_merge5b concatenate
+PREHOOK: query: explain alter table orc_merge5b_n0 concatenate
PREHOOK: type: ALTER_TABLE_MERGE
-POSTHOOK: query: explain alter table orc_merge5b concatenate
+POSTHOOK: query: explain alter table orc_merge5b_n0 concatenate
POSTHOOK: type: ALTER_TABLE_MERGE
STAGE DEPENDENCIES:
  Stage-0 is a root stage
@@ -332,37 +332,37 @@ STAGE PLANS:
          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-          name: default.orc_merge5b
+          name: default.orc_merge5b_n0
  Stage: Stage-2
    Stats Work
      Basic Stats Work:
-PREHOOK: query: alter table orc_merge5b concatenate
+PREHOOK: query: alter table orc_merge5b_n0 concatenate
PREHOOK: type: ALTER_TABLE_MERGE
-PREHOOK: Input: default@orc_merge5b
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: alter table orc_merge5b concatenate
+PREHOOK: Input: default@orc_merge5b_n0
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: alter table orc_merge5b_n0 concatenate
POSTHOOK: type: ALTER_TABLE_MERGE
-POSTHOOK: Input: default@orc_merge5b
-POSTHOOK: Output: default@orc_merge5b
-PREHOOK: query: analyze table orc_merge5b compute statistics noscan
+POSTHOOK: Input: default@orc_merge5b_n0
+POSTHOOK: Output: default@orc_merge5b_n0
+PREHOOK: query: analyze table orc_merge5b_n0 compute statistics noscan
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5b
-PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: analyze table orc_merge5b compute statistics noscan
+PREHOOK: Input: default@orc_merge5b_n0
+PREHOOK: Output: default@orc_merge5b_n0
+POSTHOOK: query: analyze table orc_merge5b_n0 compute statistics noscan
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5b
-POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Input: default@orc_merge5b_n0
+POSTHOOK: Output: default@orc_merge5b_n0
Found 1 items
#### A masked pattern was here ####
-PREHOOK: query: select * from orc_merge5b
+PREHOOK: query: select * from orc_merge5b_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5b
+PREHOOK: Input: default@orc_merge5b_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: query: select * from orc_merge5b_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Input: default@orc_merge5b_n0
#### A masked pattern was here ####
13 bar 80.0 2 1969-12-31 16:00:05
2 foo 0.8 1 1969-12-31 16:00:00
diff --git a/ql/src/test/results/clientpositive/orc_merge6.q.out b/ql/src/test/results/clientpositive/orc_merge6.q.out
index 68034dac4c..39813b76f5 100644
--- a/ql/src/test/results/clientpositive/orc_merge6.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge6.q.out
@@ -1,30 +1,30 @@
-PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: Output: default@orc_merge5_n4
+POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+POSTHOOK: Output: default@orc_merge5_n4
+PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_merge5a
-POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+PREHOOK: Output: default@orc_merge5a_n1
+POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_merge5a
-PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: Output: default@orc_merge5a_n1
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n4
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: Output: default@orc_merge5_n4
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n4
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: Output: default@orc_merge5_n4
+PREHOOK: query: explain insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
PREHOOK: type: QUERY
-POSTHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: explain insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -36,7 +36,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: orc_merge5
+            alias: orc_merge5_n4
            filterExpr: (userid <= 13L) (type: boolean)
            Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
@@ -53,7 +53,7 @@ STAGE PLANS:
                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                    name: default.orc_merge5a
+                    name: default.orc_merge5a_n1
                Select Operator
                  expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
                  outputColumnNames: userid, string1, subtype, decimal1, ts, year, hour
@@ -100,7 +100,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.orc_merge5a
+              name: default.orc_merge5a_n1
  Stage: Stage-2
    Stats Work
@@ -108,77 +108,77 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: userid, string1, subtype, decimal1, ts
          Column Types: bigint, string, double, decimal(10,0), timestamp
-          Table: default.orc_merge5a
+          Table: default.orc_merge5a_n1
-PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
-PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
-POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: Input: default@orc_merge5_n4
+PREHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
-POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: Input: default@orc_merge5_n4
+POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
-PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
-POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: Input: default@orc_merge5_n4
+PREHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
-POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: Input: default@orc_merge5_n4
+POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5a
-PREHOOK: Output: default@orc_merge5a
-PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
-POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: Input: default@orc_merge5a_n1
+PREHOOK: Output: default@orc_merge5a_n1
+PREHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
+POSTHOOK: query: analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5a
-POSTHOOK: Output: default@orc_merge5a
-POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
-PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: Input: default@orc_merge5a_n1
+POSTHOOK: Output: default@orc_merge5a_n1
+POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5a
-PREHOOK: Output: default@orc_merge5a
-PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
-POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: Input: default@orc_merge5a_n1
+PREHOOK: Output: default@orc_merge5a_n1
+PREHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5a
-POSTHOOK: Output: default@orc_merge5a
-POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: Input: default@orc_merge5a_n1
+POSTHOOK: Output: default@orc_merge5a_n1
+POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
Found 3 items
#### A masked pattern was here ####
Found 3 items
#### A masked pattern was here ####
-PREHOOK: query: show partitions orc_merge5a
+PREHOOK: query: show partitions orc_merge5a_n1
PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@orc_merge5a
-POSTHOOK: query: show partitions orc_merge5a
+PREHOOK: Input: default@orc_merge5a_n1
+POSTHOOK: query: show partitions orc_merge5a_n1
POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a_n1
year=2000/hour=24
year=2001/hour=24
-PREHOOK: query: select * from orc_merge5a
+PREHOOK: query: select * from orc_merge5a_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5a
-PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
-PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+PREHOOK: Input: default@orc_merge5a_n1
+PREHOOK: Input: default@orc_merge5a_n1@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a_n1@year=2001/hour=24
#### A masked pattern was here ####
-POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: query: select * from orc_merge5a_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5a
-POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
-POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: Input: default@orc_merge5a_n1
+POSTHOOK: Input: default@orc_merge5a_n1@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a_n1@year=2001/hour=24
#### A masked pattern was here ####
13 bar 80.0 2 1969-12-31 16:00:05 2000 24
13 bar 80.0 2 1969-12-31 16:00:05 2001 24
@@ -186,9 +186,9 @@ POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
2 foo 0.8 1 1969-12-31 16:00:00 2001 24
5 eat 0.8 6 1969-12-31 16:00:20 2000 24
5 eat 0.8 6 1969-12-31 16:00:20 2001 24
-PREHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: query: explain insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
PREHOOK: type: QUERY
-POSTHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: explain insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
@@ -205,7 +205,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: orc_merge5
+            alias: orc_merge5_n4
            filterExpr: (userid <= 13L) (type: boolean)
            Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
@@ -222,7 +222,7 @@ STAGE PLANS:
                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                    name: default.orc_merge5a
+                    name: default.orc_merge5a_n1
                Select Operator
                  expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
                  outputColumnNames: userid, string1, subtype, decimal1, ts, year, hour
@@ -278,7 +278,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              name: default.orc_merge5a
+              name: default.orc_merge5a_n1
  Stage: Stage-2
    Stats Work
@@ -286,7 +286,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: userid, string1, subtype, decimal1, ts
          Column Types: bigint, string, double, decimal(10,0), timestamp
-          Table: default.orc_merge5a
+          Table: default.orc_merge5a_n1
  Stage: Stage-3
    Merge File Operator
@@ -308,75 +308,75 @@ STAGE PLANS:
          hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
-PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
-POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: Input: default@orc_merge5_n4
+PREHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
-POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: Input: default@orc_merge5_n4
+POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
PREHOOK: type:
QUERY -PREHOOK: Input: default@orc_merge5 -PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 -POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: Input: default@orc_merge5_n4 +PREHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5 -POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: Input: default@orc_merge5_n4 +POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: query: analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 -PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +PREHOOK: query: analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan PREHOOK: 
type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 -POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +POSTHOOK: query: analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 Found 1 items #### A masked pattern was here #### Found 1 items #### A masked pattern was here #### -PREHOOK: query: show partitions orc_merge5a +PREHOOK: query: show partitions orc_merge5a_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: show partitions orc_merge5a +PREHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: query: show partitions orc_merge5a_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@orc_merge5a +POSTHOOK: Input: default@orc_merge5a_n1 year=2000/hour=24 year=2001/hour=24 -PREHOOK: query: select * from orc_merge5a +PREHOOK: query: select * from orc_merge5a_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Input: default@orc_merge5a@year=2000/hour=24 -PREHOOK: Input: default@orc_merge5a@year=2001/hour=24 +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Input: default@orc_merge5a_n1@year=2000/hour=24 +PREHOOK: Input: default@orc_merge5a_n1@year=2001/hour=24 #### A masked pattern was here #### -POSTHOOK: query: select * from orc_merge5a +POSTHOOK: query: select * from orc_merge5a_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Input: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1@year=2001/hour=24 #### A masked pattern was here #### 13 bar 80.0 2 1969-12-31 16:00:05 2000 24 13 bar 80.0 2 1969-12-31 16:00:05 2001 24 @@ -384,75 +384,75 @@ POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24 2 foo 0.8 1 1969-12-31 16:00:00 2001 24 5 eat 0.8 6 1969-12-31 16:00:20 2000 24 5 eat 0.8 6 1969-12-31 16:00:20 2001 24 -PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5 -PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: Input: default@orc_merge5_n4 +PREHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5 -POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: Lineage: orc_merge5a 
PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: Input: default@orc_merge5_n4 +POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5 -PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 -POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: Input: default@orc_merge5_n4 +PREHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5 -POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: Input: 
default@orc_merge5_n4 +POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: query: analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 -PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +PREHOOK: query: analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 -POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +POSTHOOK: query: analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 Found 3 items #### A masked pattern was here #### Found 3 items #### A masked pattern was here #### -PREHOOK: query: show partitions orc_merge5a +PREHOOK: query: show partitions orc_merge5a_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: show partitions orc_merge5a +PREHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: query: show partitions orc_merge5a_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@orc_merge5a +POSTHOOK: Input: default@orc_merge5a_n1 year=2000/hour=24 year=2001/hour=24 -PREHOOK: query: select * from orc_merge5a +PREHOOK: query: select * from orc_merge5a_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Input: default@orc_merge5a@year=2000/hour=24 
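-- [editor's sketch] The rename-only hunks above and below are easier to audit against the
-- underlying .q script. A minimal reconstruction of the statement cycle being re-recorded,
-- using only queries that appear verbatim in this diff's PREHOOK lines (the _n1/_n4
-- suffixes appear to come from a per-test table-name de-duplication pass):
insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24)
  select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan;
show partitions orc_merge5a_n1;
select * from orc_merge5a_n1;
alter table orc_merge5a_n1 partition(year="2000",hour=24) concatenate;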
-PREHOOK: Input: default@orc_merge5a@year=2001/hour=24 +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Input: default@orc_merge5a_n1@year=2000/hour=24 +PREHOOK: Input: default@orc_merge5a_n1@year=2001/hour=24 #### A masked pattern was here #### -POSTHOOK: query: select * from orc_merge5a +POSTHOOK: query: select * from orc_merge5a_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Input: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1@year=2001/hour=24 #### A masked pattern was here #### 13 bar 80.0 2 1969-12-31 16:00:05 2000 24 13 bar 80.0 2 1969-12-31 16:00:05 2001 24 @@ -460,9 +460,9 @@ POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24 2 foo 0.8 1 1969-12-31 16:00:00 2001 24 5 eat 0.8 6 1969-12-31 16:00:20 2000 24 5 eat 0.8 6 1969-12-31 16:00:20 2001 24 -PREHOOK: query: explain alter table orc_merge5a partition(year="2000",hour=24) concatenate +PREHOOK: query: explain alter table orc_merge5a_n1 partition(year="2000",hour=24) concatenate PREHOOK: type: ALTER_PARTITION_MERGE -POSTHOOK: query: explain alter table orc_merge5a partition(year="2000",hour=24) concatenate +POSTHOOK: query: explain alter table orc_merge5a_n1 partition(year="2000",hour=24) concatenate POSTHOOK: type: ALTER_PARTITION_MERGE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -483,71 +483,71 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orc_merge5a + name: default.orc_merge5a_n1 Stage: Stage-2 Stats Work Basic Stats Work: -PREHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate +PREHOOK: query: alter table orc_merge5a_n1 partition(year="2000",hour=24) concatenate PREHOOK: type: ALTER_PARTITION_MERGE -PREHOOK: Input: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: query: alter table orc_merge5a_n1 partition(year="2000",hour=24) concatenate POSTHOOK: type: ALTER_PARTITION_MERGE -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 -PREHOOK: query: alter table orc_merge5a partition(year="2001",hour=24) concatenate +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +PREHOOK: query: alter table orc_merge5a_n1 partition(year="2001",hour=24) concatenate PREHOOK: type: ALTER_PARTITION_MERGE -PREHOOK: Input: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 -POSTHOOK: query: alter table orc_merge5a partition(year="2001",hour=24) concatenate +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +POSTHOOK: query: alter table orc_merge5a_n1 partition(year="2001",hour=24) concatenate POSTHOOK: type: ALTER_PARTITION_MERGE -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +PREHOOK: query: analyze table orc_merge5a_n1 
partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: query: analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 -PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24 +PREHOOK: query: analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a -PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 -POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1 +PREHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 +POSTHOOK: query: analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a -POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1 +POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24 Found 1 items #### A masked pattern was here #### Found 1 items #### A masked pattern was here #### -PREHOOK: query: show partitions orc_merge5a +PREHOOK: query: show partitions orc_merge5a_n1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@orc_merge5a -POSTHOOK: query: show partitions orc_merge5a +PREHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: query: show partitions orc_merge5a_n1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@orc_merge5a +POSTHOOK: Input: default@orc_merge5a_n1 year=2000/hour=24 year=2001/hour=24 -PREHOOK: query: select * from orc_merge5a +PREHOOK: query: select * from orc_merge5a_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_merge5a -PREHOOK: Input: default@orc_merge5a@year=2000/hour=24 -PREHOOK: Input: default@orc_merge5a@year=2001/hour=24 +PREHOOK: Input: default@orc_merge5a_n1 +PREHOOK: Input: default@orc_merge5a_n1@year=2000/hour=24 +PREHOOK: Input: default@orc_merge5a_n1@year=2001/hour=24 #### A masked pattern was here #### -POSTHOOK: query: select * from orc_merge5a +POSTHOOK: query: select * from orc_merge5a_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_merge5a -POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1 +POSTHOOK: Input: default@orc_merge5a_n1@year=2000/hour=24 +POSTHOOK: Input: default@orc_merge5a_n1@year=2001/hour=24 #### A masked pattern was here #### 13 bar 80.0 2 1969-12-31 16:00:05 2000 24 13 bar 80.0 2 1969-12-31 16:00:05 2001 24 diff --git a/ql/src/test/results/clientpositive/orc_merge8.q.out b/ql/src/test/results/clientpositive/orc_merge8.q.out index f4f4b4a547..ed3b054cfb 100644 --- 
a/ql/src/test/results/clientpositive/orc_merge8.q.out +++ b/ql/src/test/results/clientpositive/orc_merge8.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table if not exists alltypes ( +PREHOOK: query: create table if not exists alltypes_n1 ( bo boolean, ti tinyint, si smallint, @@ -20,8 +20,8 @@ collection items terminated by ',' map keys terminated by ':' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@alltypes -POSTHOOK: query: create table if not exists alltypes ( +PREHOOK: Output: default@alltypes_n1 +POSTHOOK: query: create table if not exists alltypes_n1 ( bo boolean, ti tinyint, si smallint, @@ -43,88 +43,88 @@ collection items terminated by ',' map keys terminated by ':' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@alltypes -PREHOOK: query: create table alltypes_orc like alltypes +POSTHOOK: Output: default@alltypes_n1 +PREHOOK: query: create table alltypes_orc_n1 like alltypes_n1 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: create table alltypes_orc like alltypes +PREHOOK: Output: default@alltypes_orc_n1 +POSTHOOK: query: create table alltypes_orc_n1 like alltypes_n1 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@alltypes_orc -PREHOOK: query: alter table alltypes_orc set fileformat orc +POSTHOOK: Output: default@alltypes_orc_n1 +PREHOOK: query: alter table alltypes_orc_n1 set fileformat orc PREHOOK: type: ALTERTABLE_FILEFORMAT -PREHOOK: Input: default@alltypes_orc -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: alter table alltypes_orc set fileformat orc +PREHOOK: Input: default@alltypes_orc_n1 +PREHOOK: Output: default@alltypes_orc_n1 +POSTHOOK: query: alter table alltypes_orc_n1 set fileformat orc POSTHOOK: type: ALTERTABLE_FILEFORMAT -POSTHOOK: Input: default@alltypes_orc -POSTHOOK: Output: default@alltypes_orc -PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes +POSTHOOK: Input: default@alltypes_orc_n1 +POSTHOOK: Output: default@alltypes_orc_n1 +PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@alltypes -POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes +PREHOOK: Output: default@alltypes_n1 +POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@alltypes -PREHOOK: query: insert overwrite table alltypes_orc select * from alltypes +POSTHOOK: Output: default@alltypes_n1 +PREHOOK: query: insert overwrite table alltypes_orc_n1 select * from alltypes_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@alltypes -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: insert overwrite table alltypes_orc select * from alltypes +PREHOOK: Input: default@alltypes_n1 +PREHOOK: Output: default@alltypes_orc_n1 +POSTHOOK: query: insert overwrite table alltypes_orc_n1 select * from alltypes_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alltypes -POSTHOOK: Output: default@alltypes_orc -POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, 
type:boolean, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ] -POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ] -POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ] -PREHOOK: query: insert into table alltypes_orc select * from alltypes +POSTHOOK: Input: default@alltypes_n1 +POSTHOOK: Output: default@alltypes_orc_n1 +POSTHOOK: Lineage: alltypes_orc_n1.bi SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:bi, type:bigint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.bo SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.c SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:c, type:char(5), comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.d SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.da SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:da, type:date, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.de SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:de, type:decimal(10,3), comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.f SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.i SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.l SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:l, type:array, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.m SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:m, type:map, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.s SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.si SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.st SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:st, type:struct, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.ti SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:ti, type:tinyint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.ts SIMPLE 
[(alltypes_n1)alltypes_n1.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.vc SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:vc, type:varchar(5), comment:null), ] +PREHOOK: query: insert into table alltypes_orc_n1 select * from alltypes_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@alltypes -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: insert into table alltypes_orc select * from alltypes +PREHOOK: Input: default@alltypes_n1 +PREHOOK: Output: default@alltypes_orc_n1 +POSTHOOK: query: insert into table alltypes_orc_n1 select * from alltypes_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alltypes -POSTHOOK: Output: default@alltypes_orc -POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ] -POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ] -POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ] +POSTHOOK: Input: default@alltypes_n1 +POSTHOOK: Output: default@alltypes_orc_n1 +POSTHOOK: Lineage: alltypes_orc_n1.bi SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:bi, type:bigint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.bo SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.c SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:c, type:char(5), comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.d SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.da SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:da, type:date, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.de SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:de, type:decimal(10,3), comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.f SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.i SIMPLE 
[(alltypes_n1)alltypes_n1.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.l SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:l, type:array, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.m SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:m, type:map, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.s SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.si SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.st SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:st, type:struct, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.ti SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:ti, type:tinyint, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.ts SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypes_orc_n1.vc SIMPLE [(alltypes_n1)alltypes_n1.FieldSchema(name:vc, type:varchar(5), comment:null), ] Found 2 items #### A masked pattern was here #### -PREHOOK: query: alter table alltypes_orc concatenate +PREHOOK: query: alter table alltypes_orc_n1 concatenate PREHOOK: type: ALTER_TABLE_MERGE -PREHOOK: Input: default@alltypes_orc -PREHOOK: Output: default@alltypes_orc -POSTHOOK: query: alter table alltypes_orc concatenate +PREHOOK: Input: default@alltypes_orc_n1 +PREHOOK: Output: default@alltypes_orc_n1 +POSTHOOK: query: alter table alltypes_orc_n1 concatenate POSTHOOK: type: ALTER_TABLE_MERGE -POSTHOOK: Input: default@alltypes_orc -POSTHOOK: Output: default@alltypes_orc +POSTHOOK: Input: default@alltypes_orc_n1 +POSTHOOK: Output: default@alltypes_orc_n1 Found 1 items #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out index cad6c00a8e..2b4aec3cdc 100644 --- a/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out +++ b/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out @@ -1,52 +1,52 @@ -PREHOOK: query: DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orcfile_merge1 +POSTHOOK: query: DROP TABLE orcfile_merge1_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE orcfile_merge1b +PREHOOK: query: DROP TABLE orcfile_merge1b_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orcfile_merge1b +POSTHOOK: query: DROP TABLE orcfile_merge1b_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE orcfile_merge1c +PREHOOK: query: DROP TABLE orcfile_merge1c_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE orcfile_merge1c +POSTHOOK: query: DROP TABLE orcfile_merge1c_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING) +PREHOOK: query: CREATE TABLE orcfile_merge1_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orcfile_merge1 -POSTHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING) +PREHOOK: Output: default@orcfile_merge1_n0 +POSTHOOK: query: CREATE TABLE orcfile_merge1_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orcfile_merge1 -PREHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING) +POSTHOOK: Output: default@orcfile_merge1_n0 
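-- [editor's sketch] For the orc_merge_diff_fs.q.out hunks that follow, the renamed
-- statements reduce to this pattern (reconstructed from the PREHOOK query lines in this
-- diff; 'src' is the standard qtest source table, so it is not renamed):
CREATE TABLE orcfile_merge1b_n0 (key INT, value STRING)
    PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part)
    SELECT key, value, PMOD(HASH(key), 2) as part FROM src;
SELECT SUM(HASH(c)) FROM (
    SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
    FROM orcfile_merge1b_n0 WHERE ds='1'
) t;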
+PREHOOK: query: CREATE TABLE orcfile_merge1b_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orcfile_merge1b -POSTHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING) +PREHOOK: Output: default@orcfile_merge1b_n0 +POSTHOOK: query: CREATE TABLE orcfile_merge1b_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orcfile_merge1b -PREHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) +POSTHOOK: Output: default@orcfile_merge1b_n0 +PREHOOK: query: CREATE TABLE orcfile_merge1c_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orcfile_merge1c -POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) +PREHOOK: Output: default@orcfile_merge1c_n0 +POSTHOOK: query: CREATE TABLE orcfile_merge1c_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, part STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orcfile_merge1c +POSTHOOK: Output: default@orcfile_merge1c_n0 PREHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY @@ -73,7 +73,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1 + name: default.orcfile_merge1_n0 Select Operator expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), UDFToString(_col2) (type: string) outputColumnNames: key, value, ds, part @@ -120,7 +120,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1 + name: default.orcfile_merge1_n0 Stage: Stage-2 Stats Work @@ -128,34 +128,34 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.orcfile_merge1 + Table: default.orcfile_merge1_n0 -PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) +PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@orcfile_merge1@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) +PREHOOK: Output: default@orcfile_merge1_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0 -POSTHOOK: Output: default@orcfile_merge1@ds=1/part=1 -POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] -POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@orcfile_merge1_n0@ds=1/part=0 +POSTHOOK: Output: default@orcfile_merge1_n0@ds=1/part=1 +POSTHOOK: Lineage: orcfile_merge1_n0 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1_n0 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1_n0 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1_n0 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 2 items #### A masked pattern was here #### PREHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY @@ -187,7 +187,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1b + name: default.orcfile_merge1b_n0 Select Operator expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), UDFToString(_col2) (type: string) outputColumnNames: key, value, ds, part @@ -243,7 +243,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1b + name: default.orcfile_merge1b_n0 Stage: Stage-2 Stats Work @@ -251,7 +251,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.orcfile_merge1b + Table: default.orcfile_merge1b_n0 Stage: Stage-3 Map Reduce @@ -263,7 +263,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1b + name: default.orcfile_merge1b_n0 Stage: Stage-5 Map Reduce @@ -275,7 +275,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1b + name: default.orcfile_merge1b_n0 Stage: Stage-6 Move Operator @@ -283,32 +283,32 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) +PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: 
default@orcfile_merge1b@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) +PREHOOK: Output: default@orcfile_merge1b_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=0 -POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=1 -POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@orcfile_merge1b_n0@ds=1/part=0 +POSTHOOK: Output: default@orcfile_merge1b_n0@ds=1/part=1 +POSTHOOK: Lineage: orcfile_merge1b_n0 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1b_n0 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1b_n0 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1b_n0 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### PREHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN - INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) + INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY @@ -340,7 +340,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1c + name: default.orcfile_merge1c_n0 Select Operator expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), UDFToString(_col2) (type: string) outputColumnNames: key, value, ds, part @@ -396,7 +396,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge1c + name: default.orcfile_merge1c_n0 Stage: Stage-2 Stats Work @@ -404,7 +404,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.orcfile_merge1c + Table: default.orcfile_merge1c_n0 Stage: Stage-3 Merge File Operator @@ -426,142 +426,142 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) +PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: 
default@orcfile_merge1c@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) +PREHOOK: Output: default@orcfile_merge1c_n0@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=0 -POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=1 -POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@orcfile_merge1c_n0@ds=1/part=0 +POSTHOOK: Output: default@orcfile_merge1c_n0@ds=1/part=1 +POSTHOOK: Lineage: orcfile_merge1c_n0 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1c_n0 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1c_n0 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: orcfile_merge1c_n0 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1 WHERE ds='1' + FROM orcfile_merge1_n0 WHERE ds='1' ) t PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1 -PREHOOK: Input: default@orcfile_merge1@ds=1/part=0 -PREHOOK: Input: default@orcfile_merge1@ds=1/part=1 +PREHOOK: Input: default@orcfile_merge1_n0 +PREHOOK: Input: default@orcfile_merge1_n0@ds=1/part=0 +PREHOOK: Input: default@orcfile_merge1_n0@ds=1/part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1 WHERE ds='1' + FROM orcfile_merge1_n0 WHERE ds='1' ) t POSTHOOK: type: QUERY -POSTHOOK: Input: default@orcfile_merge1 -POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0 -POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1 +POSTHOOK: Input: default@orcfile_merge1_n0 +POSTHOOK: Input: default@orcfile_merge1_n0@ds=1/part=0 +POSTHOOK: Input: default@orcfile_merge1_n0@ds=1/part=1 #### A masked pattern was here #### -21975308766 PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1b WHERE ds='1' + FROM orcfile_merge1b_n0 WHERE ds='1' ) t PREHOOK: type: QUERY -PREHOOK: Input: default@orcfile_merge1b -PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0 -PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1 +PREHOOK: Input: default@orcfile_merge1b_n0 +PREHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=0 +PREHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) - FROM orcfile_merge1b WHERE ds='1' + FROM orcfile_merge1b_n0 WHERE ds='1' ) t POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@orcfile_merge1b
-POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
-POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+POSTHOOK: Input: default@orcfile_merge1b_n0
+POSTHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=1
#### A masked pattern was here ####
-21975308766
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
- FROM orcfile_merge1c WHERE ds='1'
+ FROM orcfile_merge1c_n0 WHERE ds='1'
) t
PREHOOK: type: QUERY
-PREHOOK: Input: default@orcfile_merge1c
-PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
-PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+PREHOOK: Input: default@orcfile_merge1c_n0
+PREHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=1
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
- FROM orcfile_merge1c WHERE ds='1'
+ FROM orcfile_merge1c_n0 WHERE ds='1'
) t
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcfile_merge1c
-POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
-POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+POSTHOOK: Input: default@orcfile_merge1c_n0
+POSTHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=1
#### A masked pattern was here ####
-21975308766
-PREHOOK: query: select count(*) from orcfile_merge1
+PREHOOK: query: select count(*) from orcfile_merge1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@orcfile_merge1
-PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
-PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+PREHOOK: Input: default@orcfile_merge1_n0
+PREHOOK: Input: default@orcfile_merge1_n0@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1_n0@ds=1/part=1
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from orcfile_merge1
+POSTHOOK: query: select count(*) from orcfile_merge1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcfile_merge1
-POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
-POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+POSTHOOK: Input: default@orcfile_merge1_n0
+POSTHOOK: Input: default@orcfile_merge1_n0@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1_n0@ds=1/part=1
#### A masked pattern was here ####
500
-PREHOOK: query: select count(*) from orcfile_merge1b
+PREHOOK: query: select count(*) from orcfile_merge1b_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@orcfile_merge1b
-PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
-PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+PREHOOK: Input: default@orcfile_merge1b_n0
+PREHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=1
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from orcfile_merge1b
+POSTHOOK: query: select count(*) from orcfile_merge1b_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcfile_merge1b
-POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
-POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+POSTHOOK: Input: default@orcfile_merge1b_n0
+POSTHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=1
#### A masked pattern was here ####
500
-PREHOOK: query: select count(*) from orcfile_merge1c
+PREHOOK: query: select count(*) from orcfile_merge1c_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@orcfile_merge1c
-PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
-PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+PREHOOK: Input: default@orcfile_merge1c_n0
+PREHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=1
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from orcfile_merge1c
+POSTHOOK: query: select count(*) from orcfile_merge1c_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orcfile_merge1c
-POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
-POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+POSTHOOK: Input: default@orcfile_merge1c_n0
+POSTHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=1
#### A masked pattern was here ####
500
-PREHOOK: query: DROP TABLE orcfile_merge1
+PREHOOK: query: DROP TABLE orcfile_merge1_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@orcfile_merge1
-PREHOOK: Output: default@orcfile_merge1
-POSTHOOK: query: DROP TABLE orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1_n0
+PREHOOK: Output: default@orcfile_merge1_n0
+POSTHOOK: query: DROP TABLE orcfile_merge1_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@orcfile_merge1
-POSTHOOK: Output: default@orcfile_merge1
-PREHOOK: query: DROP TABLE orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1_n0
+POSTHOOK: Output: default@orcfile_merge1_n0
+PREHOOK: query: DROP TABLE orcfile_merge1b_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@orcfile_merge1b
-PREHOOK: Output: default@orcfile_merge1b
-POSTHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b_n0
+PREHOOK: Output: default@orcfile_merge1b_n0
+POSTHOOK: query: DROP TABLE orcfile_merge1b_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@orcfile_merge1b
-POSTHOOK: Output: default@orcfile_merge1b
-PREHOOK: query: DROP TABLE orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1b_n0
+POSTHOOK: Output: default@orcfile_merge1b_n0
+PREHOOK: query: DROP TABLE orcfile_merge1c_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@orcfile_merge1c
-PREHOOK: Output: default@orcfile_merge1c
-POSTHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c_n0
+PREHOOK: Output: default@orcfile_merge1c_n0
+POSTHOOK: query: DROP TABLE orcfile_merge1c_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@orcfile_merge1c
-POSTHOOK: Output: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c_n0
+POSTHOOK: Output: default@orcfile_merge1c_n0
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
index d9cb7877d3..5a1b00b4d1 100644
--- a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: Output: default@orc_merge5_n3
+POSTHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5_n3
PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -14,17 +14,17 @@ POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtyp
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5b
-PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n3
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: Output: default@orc_merge5_n3
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n3
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: Output: default@orc_merge5_n3
+PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
PREHOOK: type: QUERY
-POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -36,7 +36,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: orc_merge5
+ alias: orc_merge5_n3
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (userid <= 13L) (type: boolean)
@@ -98,84 +98,84 @@ STAGE PLANS:
Column Types: bigint, string, double, decimal(10,0), timestamp
Table: default.orc_merge5b
-PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
+PREHOOK: Input: default@orc_merge5_n3
PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
+PREHOOK: Input: default@orc_merge5_n3
PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
+PREHOOK: Input: default@orc_merge5_n3
PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_merge5
+PREHOOK: Input: default@orc_merge5_n3
PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:userid, type:bigint, comment:null), ]
PREHOOK: query: analyze table orc_merge5b compute statistics noscan
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_merge5b
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat_schema.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat_schema.q.out
index c11bac431f..5e76eaae10 100644
--- a/ql/src/test/results/clientpositive/orc_merge_incompat_schema.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat_schema.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE orc_create_staging (
+PREHOOK: query: CREATE TABLE orc_create_staging_n2 (
str STRING,
mp MAP,
lst ARRAY,
@@ -9,8 +9,8 @@ PREHOOK: query: CREATE TABLE orc_create_staging (
MAP KEYS TERMINATED BY ':'
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_create_staging
-POSTHOOK: query: CREATE TABLE orc_create_staging (
+PREHOOK: Output: default@orc_create_staging_n2
+POSTHOOK: query: CREATE TABLE orc_create_staging_n2 (
str STRING,
mp MAP,
lst ARRAY,
@@ -21,16 +21,16 @@ POSTHOOK: query: CREATE TABLE orc_create_staging (
MAP KEYS TERMINATED BY ':'
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_create_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging
+POSTHOOK: Output: default@orc_create_staging_n2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n2
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@orc_create_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging
+PREHOOK: Output: default@orc_create_staging_n2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n2
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@orc_create_staging
-PREHOOK: query: CREATE TABLE orc_create_complex (
+POSTHOOK: Output: default@orc_create_staging_n2
+PREHOOK: query: CREATE TABLE orc_create_complex_n2 (
str STRING,
mp MAP,
lst ARRAY,
@@ -39,8 +39,8 @@ PREHOOK: query: CREATE TABLE orc_create_complex (
) STORED AS ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="1000", "orc.compress.size"="10000")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@orc_create_complex
-POSTHOOK: query: CREATE TABLE orc_create_complex (
+PREHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: query: CREATE TABLE orc_create_complex_n2 (
str STRING,
mp MAP,
lst ARRAY,
@@ -49,113 +49,113 @@ POSTHOOK: query: CREATE TABLE orc_create_complex (
) STORED AS ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="1000", "orc.compress.size"="10000")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@orc_create_complex
-PREHOOK: query: INSERT OVERWRITE TABLE orc_create_complex SELECT str,mp,lst,strct,0 FROM orc_create_staging
+POSTHOOK: Output: default@orc_create_complex_n2
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_complex_n2 SELECT str,mp,lst,strct,0 FROM orc_create_staging_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_create_staging
-PREHOOK: Output: default@orc_create_complex
-POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_complex SELECT str,mp,lst,strct,0 FROM orc_create_staging
+PREHOOK: Input: default@orc_create_staging_n2
+PREHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_complex_n2 SELECT str,mp,lst,strct,0 FROM orc_create_staging_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_create_staging
-POSTHOOK: Output: default@orc_create_complex
-POSTHOOK: Lineage: orc_create_complex.lst SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:lst, type:array, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.mp SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:mp, type:map, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.str SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:str, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.strct SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:strct, type:struct, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.val SIMPLE []
-PREHOOK: query: INSERT INTO TABLE orc_create_complex SELECT str,mp,lst,strct,0 FROM orc_create_staging
+POSTHOOK: Input: default@orc_create_staging_n2
+POSTHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: Lineage: orc_create_complex_n2.lst SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:lst, type:array, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.mp SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:mp, type:map, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.str SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:str, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.strct SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:strct, type:struct, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.val SIMPLE []
+PREHOOK: query: INSERT INTO TABLE orc_create_complex_n2 SELECT str,mp,lst,strct,0 FROM orc_create_staging_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_create_staging
-PREHOOK: Output: default@orc_create_complex
-POSTHOOK: query: INSERT INTO TABLE orc_create_complex SELECT str,mp,lst,strct,0 FROM orc_create_staging
+PREHOOK: Input: default@orc_create_staging_n2
+PREHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: query: INSERT INTO TABLE orc_create_complex_n2 SELECT str,mp,lst,strct,0 FROM orc_create_staging_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_create_staging
-POSTHOOK: Output: default@orc_create_complex
-POSTHOOK: Lineage: orc_create_complex.lst SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:lst, type:array, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.mp SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:mp, type:map, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.str SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:str, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.strct SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:strct, type:struct, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.val SIMPLE []
+POSTHOOK: Input: default@orc_create_staging_n2
+POSTHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: Lineage: orc_create_complex_n2.lst SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:lst, type:array, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.mp SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:mp, type:map, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.str SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:str, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.strct SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:strct, type:struct, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.val SIMPLE []
Found 2 items
#### A masked pattern was here ####
-PREHOOK: query: select sum(hash(*)) from orc_create_complex
+PREHOOK: query: select sum(hash(*)) from orc_create_complex_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_create_complex
+PREHOOK: Input: default@orc_create_complex_n2
#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from orc_create_complex
+POSTHOOK: query: select sum(hash(*)) from orc_create_complex_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_create_complex
+POSTHOOK: Input: default@orc_create_complex_n2
#### A masked pattern was here ####
953053114
-PREHOOK: query: ALTER TABLE orc_create_complex CONCATENATE
+PREHOOK: query: ALTER TABLE orc_create_complex_n2 CONCATENATE
PREHOOK: type: ALTER_TABLE_MERGE
-PREHOOK: Input: default@orc_create_complex
-PREHOOK: Output: default@orc_create_complex
-POSTHOOK: query: ALTER TABLE orc_create_complex CONCATENATE
+PREHOOK: Input: default@orc_create_complex_n2
+PREHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: query: ALTER TABLE orc_create_complex_n2 CONCATENATE
POSTHOOK: type: ALTER_TABLE_MERGE
-POSTHOOK: Input: default@orc_create_complex
-POSTHOOK: Output: default@orc_create_complex
+POSTHOOK: Input: default@orc_create_complex_n2
+POSTHOOK: Output: default@orc_create_complex_n2
Found 1 items
#### A masked pattern was here ####
-PREHOOK: query: select sum(hash(*)) from orc_create_complex
+PREHOOK: query: select sum(hash(*)) from orc_create_complex_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_create_complex
+PREHOOK: Input: default@orc_create_complex_n2
#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from orc_create_complex
+POSTHOOK: query: select sum(hash(*)) from orc_create_complex_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_create_complex
+POSTHOOK: Input: default@orc_create_complex_n2
#### A masked pattern was here ####
953053114
-PREHOOK: query: ALTER TABLE orc_create_complex
+PREHOOK: query: ALTER TABLE orc_create_complex_n2
CHANGE COLUMN strct strct STRUCT
PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@orc_create_complex
-PREHOOK: Output: default@orc_create_complex
-POSTHOOK: query: ALTER TABLE orc_create_complex
+PREHOOK: Input: default@orc_create_complex_n2
+PREHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: query: ALTER TABLE orc_create_complex_n2
CHANGE COLUMN strct strct STRUCT
POSTHOOK: type: ALTERTABLE_RENAMECOL
-POSTHOOK: Input: default@orc_create_complex
-POSTHOOK: Output: default@orc_create_complex
-PREHOOK: query: INSERT INTO TABLE orc_create_complex SELECT str,mp,lst,NAMED_STRUCT('A',strct.A,'B',strct.B,'C','c'),0 FROM orc_create_staging
+POSTHOOK: Input: default@orc_create_complex_n2
+POSTHOOK: Output: default@orc_create_complex_n2
+PREHOOK: query: INSERT INTO TABLE orc_create_complex_n2 SELECT str,mp,lst,NAMED_STRUCT('A',strct.A,'B',strct.B,'C','c'),0 FROM orc_create_staging_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_create_staging
-PREHOOK: Output: default@orc_create_complex
-POSTHOOK: query: INSERT INTO TABLE orc_create_complex SELECT str,mp,lst,NAMED_STRUCT('A',strct.A,'B',strct.B,'C','c'),0 FROM orc_create_staging
+PREHOOK: Input: default@orc_create_staging_n2
+PREHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: query: INSERT INTO TABLE orc_create_complex_n2 SELECT str,mp,lst,NAMED_STRUCT('A',strct.A,'B',strct.B,'C','c'),0 FROM orc_create_staging_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_create_staging
-POSTHOOK: Output: default@orc_create_complex
-POSTHOOK: Lineage: orc_create_complex.lst SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:lst, type:array, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.mp SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:mp, type:map, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.str SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:str, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.strct EXPRESSION [(orc_create_staging)orc_create_staging.FieldSchema(name:strct, type:struct, comment:null), ]
-POSTHOOK: Lineage: orc_create_complex.val SIMPLE []
+POSTHOOK: Input: default@orc_create_staging_n2
+POSTHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: Lineage: orc_create_complex_n2.lst SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:lst, type:array, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.mp SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:mp, type:map, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.str SIMPLE [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:str, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.strct EXPRESSION [(orc_create_staging_n2)orc_create_staging_n2.FieldSchema(name:strct, type:struct, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex_n2.val SIMPLE []
Found 2 items
#### A masked pattern was here ####
-PREHOOK: query: select sum(hash(*)) from orc_create_complex
+PREHOOK: query: select sum(hash(*)) from orc_create_complex_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_create_complex
+PREHOOK: Input: default@orc_create_complex_n2
#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from orc_create_complex
+POSTHOOK: query: select sum(hash(*)) from orc_create_complex_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_create_complex
+POSTHOOK: Input: default@orc_create_complex_n2
#### A masked pattern was here ####
4334574594
-PREHOOK: query: ALTER TABLE orc_create_complex CONCATENATE
+PREHOOK: query: ALTER TABLE orc_create_complex_n2 CONCATENATE
PREHOOK: type: ALTER_TABLE_MERGE
-PREHOOK: Input: default@orc_create_complex
-PREHOOK: Output: default@orc_create_complex
-POSTHOOK: query: ALTER TABLE orc_create_complex CONCATENATE
+PREHOOK: Input: default@orc_create_complex_n2
+PREHOOK: Output: default@orc_create_complex_n2
+POSTHOOK: query: ALTER TABLE orc_create_complex_n2 CONCATENATE
POSTHOOK: type: ALTER_TABLE_MERGE
-POSTHOOK: Input: default@orc_create_complex
-POSTHOOK: Output: default@orc_create_complex
+POSTHOOK: Input: default@orc_create_complex_n2
+POSTHOOK: Output: default@orc_create_complex_n2
Found 2 items
#### A masked pattern was here ####
-PREHOOK: query: select sum(hash(*)) from orc_create_complex
+PREHOOK: query: select sum(hash(*)) from orc_create_complex_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@orc_create_complex
+PREHOOK: Input: default@orc_create_complex_n2
#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from orc_create_complex
+POSTHOOK: query: select sum(hash(*)) from orc_create_complex_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@orc_create_complex
+POSTHOOK: Input: default@orc_create_complex_n2
#### A masked pattern was here ####
4334574594
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat_writer_version.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat_writer_version.q.out
index 109f7b1a38..a12661b062 100644
--- a/ql/src/test/results/clientpositive/orc_merge_incompat_writer_version.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat_writer_version.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: DROP TABLE part_orc
+PREHOOK: query: DROP TABLE part_orc_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part_orc
+POSTHOOK: query: DROP TABLE part_orc_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part_orc(
+PREHOOK: query: CREATE TABLE part_orc_n0(
p_partkey int,
p_name string,
p_mfgr string,
@@ -16,8 +16,8 @@ PREHOOK: query: CREATE TABLE part_orc(
STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@part_orc
-POSTHOOK: query: CREATE TABLE part_orc(
+PREHOOK: Output: default@part_orc_n0
+POSTHOOK: query: CREATE TABLE part_orc_n0(
p_partkey int,
p_name string,
p_mfgr string,
@@ -31,78 +31,78 @@ POSTHOOK: query: CREATE TABLE part_orc(
STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part_orc
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part.orc' OVERWRITE INTO TABLE part_orc
+POSTHOOK: Output: default@part_orc_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part.orc' OVERWRITE INTO TABLE part_orc_n0
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@part_orc
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part.orc' OVERWRITE INTO TABLE part_orc
+PREHOOK: Output: default@part_orc_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part.orc' OVERWRITE INTO TABLE part_orc_n0
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@part_orc
-PREHOOK: query: create table part_orc_staging as select * from part_orc
+POSTHOOK: Output: default@part_orc_n0
+PREHOOK: query: create table part_orc_staging as select * from part_orc_n0
PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@part_orc
+PREHOOK: Input: default@part_orc_n0
PREHOOK: Output: database:default
PREHOOK: Output: default@part_orc_staging
-POSTHOOK: query: create table part_orc_staging as select * from part_orc
+POSTHOOK: query: create table part_orc_staging as select * from part_orc_n0
POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@part_orc
+POSTHOOK: Input: default@part_orc_n0
POSTHOOK: Output: database:default
POSTHOOK: Output: default@part_orc_staging
-POSTHOOK: Lineage: part_orc_staging.p_brand SIMPLE [(part_orc)part_orc.FieldSchema(name:p_brand, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc_staging.p_comment SIMPLE [(part_orc)part_orc.FieldSchema(name:p_comment, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc_staging.p_container SIMPLE [(part_orc)part_orc.FieldSchema(name:p_container, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc_staging.p_mfgr SIMPLE [(part_orc)part_orc.FieldSchema(name:p_mfgr, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc_staging.p_name SIMPLE [(part_orc)part_orc.FieldSchema(name:p_name, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc_staging.p_partkey SIMPLE [(part_orc)part_orc.FieldSchema(name:p_partkey, type:int, comment:null), ]
-POSTHOOK: Lineage: part_orc_staging.p_retailprice SIMPLE [(part_orc)part_orc.FieldSchema(name:p_retailprice, type:double, comment:null), ]
-POSTHOOK: Lineage: part_orc_staging.p_size SIMPLE [(part_orc)part_orc.FieldSchema(name:p_size, type:int, comment:null), ]
-POSTHOOK: Lineage: part_orc_staging.p_type SIMPLE [(part_orc)part_orc.FieldSchema(name:p_type, type:string, comment:null), ]
-PREHOOK: query: insert into table part_orc select * from part_orc_staging
+POSTHOOK: Lineage: part_orc_staging.p_brand SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_brand, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_staging.p_comment SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_comment, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_staging.p_container SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_container, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_staging.p_mfgr SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_mfgr, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_staging.p_name SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_name, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_staging.p_partkey SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_partkey, type:int, comment:null), ]
+POSTHOOK: Lineage: part_orc_staging.p_retailprice SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_retailprice, type:double, comment:null), ]
+POSTHOOK: Lineage: part_orc_staging.p_size SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_size, type:int, comment:null), ]
+POSTHOOK: Lineage: part_orc_staging.p_type SIMPLE [(part_orc_n0)part_orc_n0.FieldSchema(name:p_type, type:string, comment:null), ]
+PREHOOK: query: insert into table part_orc_n0 select * from part_orc_staging
PREHOOK: type: QUERY
PREHOOK: Input: default@part_orc_staging
-PREHOOK: Output: default@part_orc
-POSTHOOK: query: insert into table part_orc select * from part_orc_staging
+PREHOOK: Output: default@part_orc_n0
+POSTHOOK: query: insert into table part_orc_n0 select * from part_orc_staging
POSTHOOK: type: QUERY
POSTHOOK: Input: default@part_orc_staging
-POSTHOOK: Output: default@part_orc
-POSTHOOK: Lineage: part_orc.p_brand SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_brand, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc.p_comment SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_comment, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc.p_container SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_container, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc.p_mfgr SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_mfgr, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc.p_name SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_name, type:string, comment:null), ]
-POSTHOOK: Lineage: part_orc.p_partkey SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_partkey, type:int, comment:null), ]
-POSTHOOK: Lineage: part_orc.p_retailprice SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_retailprice, type:double, comment:null), ]
-POSTHOOK: Lineage: part_orc.p_size SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_size, type:int, comment:null), ]
-POSTHOOK: Lineage: part_orc.p_type SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_type, type:string, comment:null), ]
+POSTHOOK: Output: default@part_orc_n0
+POSTHOOK: Lineage: part_orc_n0.p_brand SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_brand, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_n0.p_comment SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_comment, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_n0.p_container SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_container, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_n0.p_mfgr SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_mfgr, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_n0.p_name SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_name, type:string, comment:null), ]
+POSTHOOK: Lineage: part_orc_n0.p_partkey SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_partkey, type:int, comment:null), ]
+POSTHOOK: Lineage: part_orc_n0.p_retailprice SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_retailprice, type:double, comment:null), ]
+POSTHOOK: Lineage: part_orc_n0.p_size SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_size, type:int, comment:null), ]
+POSTHOOK: Lineage: part_orc_n0.p_type SIMPLE [(part_orc_staging)part_orc_staging.FieldSchema(name:p_type, type:string, comment:null), ]
Found 2 items
#### A masked pattern was here ####
-PREHOOK: query: select sum(hash(*)) from part_orc
+PREHOOK: query: select sum(hash(*)) from part_orc_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@part_orc
+PREHOOK: Input: default@part_orc_n0
#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from part_orc
+POSTHOOK: query: select sum(hash(*)) from part_orc_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_orc
+POSTHOOK: Input: default@part_orc_n0
#### A masked pattern was here ####
26132451616
-PREHOOK: query: ALTER TABLE part_orc CONCATENATE
+PREHOOK: query: ALTER TABLE part_orc_n0 CONCATENATE
PREHOOK: type: ALTER_TABLE_MERGE
-PREHOOK: Input: default@part_orc
-PREHOOK: Output: default@part_orc
-POSTHOOK: query: ALTER TABLE part_orc CONCATENATE
+PREHOOK: Input: default@part_orc_n0
+PREHOOK: Output: default@part_orc_n0
+POSTHOOK: query: ALTER TABLE part_orc_n0 CONCATENATE
POSTHOOK: type: ALTER_TABLE_MERGE
-POSTHOOK: Input: default@part_orc
-POSTHOOK: Output: default@part_orc
+POSTHOOK: Input: default@part_orc_n0
+POSTHOOK: Output: default@part_orc_n0
Found 2 items
#### A masked pattern was here ####
-PREHOOK: query: select sum(hash(*)) from part_orc
+PREHOOK: query: select sum(hash(*)) from part_orc_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@part_orc
+PREHOOK: Input: default@part_orc_n0
#### A masked pattern was here ####
-POSTHOOK: query: select sum(hash(*)) from part_orc
+POSTHOOK: query: select sum(hash(*)) from part_orc_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_orc
+POSTHOOK: Input: default@part_orc_n0
#### A masked pattern was here ####
26132451616
diff --git a/ql/src/test/results/clientpositive/orc_min_max.q.out b/ql/src/test/results/clientpositive/orc_min_max.q.out
index 59dcba5fde..8d991368b0 100644
--- a/ql/src/test/results/clientpositive/orc_min_max.q.out
+++ b/ql/src/test/results/clientpositive/orc_min_max.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table if not exists alltypes (
+PREHOOK: query: create table if not exists alltypes_n2 (
bo boolean,
ti tinyint,
si smallint,
@@ -20,8 +20,8 @@ collection items terminated by ','
map keys terminated by ':'
stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@alltypes
-POSTHOOK: query: create table if not exists alltypes (
+PREHOOK: Output: default@alltypes_n2
+POSTHOOK: query: create table if not exists alltypes_n2 (
bo boolean,
ti tinyint,
si smallint,
@@ -43,70 +43,70 @@ collection items terminated by ','
map keys terminated by ':'
stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@alltypes
-PREHOOK: query: create table alltypes_orc like alltypes
+POSTHOOK: Output: default@alltypes_n2
+PREHOOK: query: create table alltypes_orc_n3 like alltypes_n2
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@alltypes_orc
-POSTHOOK: query: create table alltypes_orc like alltypes
+PREHOOK: Output: default@alltypes_orc_n3
+POSTHOOK: query: create table alltypes_orc_n3 like alltypes_n2
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@alltypes_orc
-PREHOOK: query: alter table alltypes_orc set fileformat orc
+POSTHOOK: Output: default@alltypes_orc_n3
+PREHOOK: query: alter table alltypes_orc_n3 set fileformat orc
PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@alltypes_orc
-PREHOOK: Output: default@alltypes_orc
-POSTHOOK: query: alter table alltypes_orc set fileformat orc
+PREHOOK: Input: default@alltypes_orc_n3
+PREHOOK: Output: default@alltypes_orc_n3
+POSTHOOK: query: alter table alltypes_orc_n3 set fileformat orc
POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@alltypes_orc
-POSTHOOK: Output: default@alltypes_orc
-PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes
+POSTHOOK: Input: default@alltypes_orc_n3
+POSTHOOK: Output: default@alltypes_orc_n3
+PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n2
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@alltypes
-POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes
+PREHOOK: Output: default@alltypes_n2
+POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n2
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@alltypes
-PREHOOK: query: insert overwrite table alltypes_orc select * from alltypes
+POSTHOOK: Output: default@alltypes_n2
+PREHOOK: query: insert overwrite table alltypes_orc_n3 select * from alltypes_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypes
-PREHOOK: Output: default@alltypes_orc
-POSTHOOK: query: insert overwrite table alltypes_orc select * from alltypes
+PREHOOK: Input: default@alltypes_n2
+PREHOOK: Output: default@alltypes_orc_n3
+POSTHOOK: query: insert overwrite table alltypes_orc_n3 select * from alltypes_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypes
-POSTHOOK: Output: default@alltypes_orc
-POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ]
-PREHOOK: query: select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes
+POSTHOOK: Input: default@alltypes_n2
+POSTHOOK: Output: default@alltypes_orc_n3
+POSTHOOK: Lineage: alltypes_orc_n3.bi SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:bi, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.bo SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.c SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:c, type:char(5), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.d SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.da SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:da, type:date, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.de SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:de, type:decimal(10,3), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.f SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.i SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.l SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:l, type:array, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.m SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:m, type:map, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.s SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.si SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.st SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:st, type:struct, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.ti SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:ti, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.ts SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc_n3.vc SIMPLE [(alltypes_n2)alltypes_n2.FieldSchema(name:vc, type:varchar(5), comment:null), ]
+PREHOOK: query: select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypes
+PREHOOK: Input: default@alltypes_n2
#### A masked pattern was here ####
-POSTHOOK: query: select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes
+POSTHOOK: query: select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypes
+POSTHOOK: Input: default@alltypes_n2
#### A masked pattern was here ####
false true 10 20 100 200 1000 2000 10000 20000 4.0 8.0 20.0 40.0 2.222 4.222 1969-12-31 15:59:58.174 1970-12-31 15:59:58.174 1970-01-01 1971-01-01 abcd string hello world hello world
-PREHOOK: query: select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc
+PREHOOK: query: select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc_n3
PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypes_orc
+PREHOOK: Input: default@alltypes_orc_n3
#### A masked pattern was here ####
-POSTHOOK: query: select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc
+POSTHOOK: query: select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc_n3
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Input: default@alltypes_orc_n3
#### A masked pattern was here ####
false true 10 20 100 200 1000 2000 10000 20000 4.0 8.0 20.0 40.0 2.222 4.222 1969-12-31 15:59:58.174 1970-12-31 15:59:58.174 1970-01-01 1971-01-01 abcd string hello world hello world
diff --git a/ql/src/test/results/clientpositive/orc_nested_column_pruning.q.out b/ql/src/test/results/clientpositive/orc_nested_column_pruning.q.out
index 55a8463f44..3deb0b98b5 100644
--- a/ql/src/test/results/clientpositive/orc_nested_column_pruning.q.out
+++ b/ql/src/test/results/clientpositive/orc_nested_column_pruning.q.out
@@ -1,29 +1,29 @@
-PREHOOK: query: DROP TABLE IF EXISTS dummy
+PREHOOK: query: DROP TABLE IF EXISTS dummy_n4
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS dummy
+POSTHOOK: query: DROP TABLE IF EXISTS dummy_n4
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE dummy (i int)
+PREHOOK: query: CREATE TABLE dummy_n4 (i int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy
-POSTHOOK: query: CREATE TABLE dummy (i int)
+PREHOOK: Output: default@dummy_n4
+POSTHOOK: query: CREATE TABLE dummy_n4 (i int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy
-PREHOOK: query: INSERT INTO TABLE dummy VALUES (42)
+POSTHOOK: Output: default@dummy_n4
+PREHOOK: query: INSERT INTO TABLE dummy_n4 VALUES (42)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@dummy
-POSTHOOK: query: INSERT INTO TABLE dummy VALUES (42)
+PREHOOK: Output: default@dummy_n4
+POSTHOOK: query: INSERT INTO TABLE dummy_n4 VALUES (42)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@dummy
-POSTHOOK: Lineage: dummy.i SCRIPT []
-PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_1
+POSTHOOK: Output: default@dummy_n4
+POSTHOOK: Lineage: dummy_n4.i SCRIPT []
+PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_1_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_1
+POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_1_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE nested_tbl_1 (
+PREHOOK: query: CREATE TABLE nested_tbl_1_n0 (
a int,
s1 struct, f6: int>,
s2 struct, f11: map>>,
@@ -34,8 +34,8 @@ PREHOOK: query: CREATE TABLE nested_tbl_1 (
) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@nested_tbl_1
-POSTHOOK: query: CREATE TABLE nested_tbl_1 (
+PREHOOK: Output: default@nested_tbl_1_n0
+POSTHOOK: query: CREATE TABLE nested_tbl_1_n0 (
a int,
s1 struct, f6: int>,
s2 struct, f11: map>>,
@@ -46,8 +46,8 @@ POSTHOOK: query: CREATE TABLE nested_tbl_1 (
) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nested_tbl_1
-PREHOOK: query: INSERT INTO TABLE nested_tbl_1 SELECT
+POSTHOOK: Output: default@nested_tbl_1_n0
+PREHOOK: query: INSERT INTO TABLE nested_tbl_1_n0 SELECT
1,
named_struct('f1', false, 'f2', 'foo', 'f3', named_struct('f4', 4, 'f5', cast(5.0 as double)), 'f6', 4),
named_struct('f7', 'f7', 'f8', named_struct('f9', true, 'f10', array(10, 11), 'f11', map('key1', true, 'key2', false))),
named_struct('f12', array(named_struct('f13', 'foo', 'f14', 14), named_struct('f13', 'bar', 'f14', 28))),
@@ -55,11 +55,11 @@ PREHOOK: query: INSERT INTO TABLE nested_tbl_1 SELECT
named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))),
map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))), 'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2)))))
-FROM dummy
+FROM dummy_n4
PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@nested_tbl_1
-POSTHOOK: query: INSERT INTO TABLE nested_tbl_1 SELECT
+PREHOOK: Input: default@dummy_n4
+PREHOOK: Output: default@nested_tbl_1_n0
+POSTHOOK: query: INSERT INTO TABLE nested_tbl_1_n0 SELECT
1,
named_struct('f1', false, 'f2', 'foo', 'f3', named_struct('f4', 4, 'f5', cast(5.0 as double)), 'f6', 4),
named_struct('f7', 'f7', 'f8', named_struct('f9', true, 'f10', array(10, 11), 'f11', map('key1', true, 'key2', false))),
named_struct('f12', array(named_struct('f13', 'foo', 'f14', 14), named_struct('f13', 'bar', 'f14', 28))),
@@ -67,30 +67,30 @@ POSTHOOK: query: INSERT INTO TABLE nested_tbl_1 SELECT
named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))),
map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))), 'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2)))))
-FROM dummy
+FROM dummy_n4
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@nested_tbl_1
-POSTHOOK: Lineage: nested_tbl_1.a SIMPLE []
-POSTHOOK: Lineage: nested_tbl_1.s1 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_1.s2 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_1.s3 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_1.s4 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_1.s5 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_1.s6 EXPRESSION []
-PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_2
+POSTHOOK: Input: default@dummy_n4
+POSTHOOK: Output: default@nested_tbl_1_n0
+POSTHOOK: Lineage: nested_tbl_1_n0.a SIMPLE []
+POSTHOOK: Lineage: nested_tbl_1_n0.s1 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_1_n0.s2 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_1_n0.s3 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_1_n0.s4 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_1_n0.s5 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_1_n0.s6 EXPRESSION []
+PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_2_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_2
+POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_2_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE nested_tbl_2 LIKE nested_tbl_1
+PREHOOK: query: CREATE TABLE nested_tbl_2_n0 LIKE nested_tbl_1_n0
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@nested_tbl_2
-POSTHOOK: query: CREATE TABLE nested_tbl_2 LIKE nested_tbl_1
+PREHOOK: Output: default@nested_tbl_2_n0
+POSTHOOK: query: CREATE TABLE nested_tbl_2_n0 LIKE nested_tbl_1_n0
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nested_tbl_2
-PREHOOK: query: INSERT INTO TABLE nested_tbl_2 SELECT
+POSTHOOK: Output: default@nested_tbl_2_n0
+PREHOOK: query: INSERT INTO TABLE nested_tbl_2_n0 SELECT
2,
named_struct('f1', true, 'f2', 'bar', 'f3', named_struct('f4', 4, 'f5', cast(6.5 as double)), 'f6', 4),
named_struct('f7', 'f72', 'f8', named_struct('f9', false, 'f10', array(20, 22), 'f11', map('key3', true, 'key4', false))),
named_struct('f12', array(named_struct('f13', 'bar', 'f14', 28), named_struct('f13', 'foo', 'f14', 56))),
@@ -98,11 +98,11 @@ PREHOOK: query: INSERT INTO TABLE nested_tbl_2 SELECT
named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))),
map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))), 'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4)))))
-FROM dummy
+FROM dummy_n4
PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy
-PREHOOK: Output: default@nested_tbl_2
-POSTHOOK: query: INSERT INTO TABLE nested_tbl_2 SELECT
+PREHOOK: Input: default@dummy_n4
+PREHOOK: Output: default@nested_tbl_2_n0
+POSTHOOK: query: INSERT INTO TABLE nested_tbl_2_n0 SELECT
2,
named_struct('f1', true, 'f2', 'bar', 'f3', named_struct('f4', 4, 'f5', cast(6.5 as double)), 'f6', 4),
named_struct('f7', 'f72', 'f8', named_struct('f9', false, 'f10', array(20, 22), 'f11', map('key3', true, 'key4', false))),
named_struct('f12', array(named_struct('f13', 'bar', 'f14', 28), named_struct('f13', 'foo', 'f14', 56))),
@@ -110,20 +110,20 @@ POSTHOOK: query: INSERT INTO TABLE nested_tbl_2 SELECT
named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))),
map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))), 'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4)))))
-FROM dummy
+FROM dummy_n4
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy
-POSTHOOK: Output: default@nested_tbl_2
-POSTHOOK: Lineage: nested_tbl_2.a SIMPLE []
-POSTHOOK: Lineage: nested_tbl_2.s1 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_2.s2 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_2.s3 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_2.s4 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_2.s5 EXPRESSION []
-POSTHOOK: Lineage: nested_tbl_2.s6 EXPRESSION []
-PREHOOK: query: EXPLAIN SELECT a FROM nested_tbl_1
+POSTHOOK: Input: default@dummy_n4
+POSTHOOK: Output: default@nested_tbl_2_n0
+POSTHOOK: Lineage: nested_tbl_2_n0.a SIMPLE []
+POSTHOOK: Lineage: nested_tbl_2_n0.s1 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_2_n0.s2 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_2_n0.s3 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_2_n0.s4 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_2_n0.s5 EXPRESSION []
+POSTHOOK: Lineage: nested_tbl_2_n0.s6 EXPRESSION []
+PREHOOK: query: EXPLAIN SELECT a FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a FROM nested_tbl_1
+POSTHOOK: query: EXPLAIN SELECT a FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -134,7 +134,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: nested_tbl_1
+ alias: nested_tbl_1_n0
Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: a (type: int)
@@ -154,18 +154,18 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT a FROM nested_tbl_1
+PREHOOK: query: SELECT a FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@nested_tbl_1
+PREHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM nested_tbl_1
+POSTHOOK: query: SELECT a FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nested_tbl_1
+POSTHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
1
-PREHOOK: query: EXPLAIN SELECT s1.f1 FROM nested_tbl_1
+PREHOOK: query: EXPLAIN SELECT s1.f1 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT s1.f1 FROM nested_tbl_1
+POSTHOOK: query: EXPLAIN SELECT s1.f1 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -176,7 +176,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: nested_tbl_1
+ alias: nested_tbl_1_n0
Pruned Column Paths: s1.f1
Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -197,18 +197,18 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT s1.f1 FROM nested_tbl_1
+PREHOOK: query: SELECT s1.f1 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@nested_tbl_1
+PREHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT s1.f1 FROM nested_tbl_1
+POSTHOOK: query: SELECT s1.f1 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nested_tbl_1
+POSTHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
false
-PREHOOK: query: EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1
+PREHOOK: query: EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1
+POSTHOOK: query: EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -219,7 +219,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: nested_tbl_1
+ alias: nested_tbl_1_n0
Pruned Column Paths: s1.f1, s1.f2
Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -240,18 +240,18 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT s1.f1, s1.f2 FROM nested_tbl_1
+PREHOOK: query: SELECT s1.f1, s1.f2 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@nested_tbl_1
+PREHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT s1.f1, s1.f2 FROM nested_tbl_1
+POSTHOOK: query: SELECT s1.f1, s1.f2 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nested_tbl_1
+POSTHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
false foo
-PREHOOK: query: EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1
+PREHOOK: query: EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1
+POSTHOOK: query: EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -262,7 +262,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: nested_tbl_1
+ alias: nested_tbl_1_n0
Pruned Column Paths: s1.f3
Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -283,18 +283,18 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1
+PREHOOK: query: SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@nested_tbl_1
+PREHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1
+POSTHOOK: query: SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nested_tbl_1
+POSTHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
{"f4":4,"f5":5.0} 4
-PREHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1
+PREHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1
+POSTHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -305,7 +305,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: nested_tbl_1
+ alias: nested_tbl_1_n0
Pruned Column Paths: s1.f3.f5
Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -326,18 +326,18 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1
+PREHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@nested_tbl_1
+PREHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1
+POSTHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nested_tbl_1
+POSTHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
5.0
-PREHOOK: query: EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1
+PREHOOK: query: EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1
+POSTHOOK: query: EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -348,7 +348,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: nested_tbl_1
+ alias: nested_tbl_1_n0
Pruned Column Paths: s1.f3.f4, s2.f8.f9
Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -369,18 +369,18 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1
+PREHOOK: query: SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@nested_tbl_1
+PREHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1
+POSTHOOK: query: SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nested_tbl_1
+POSTHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
4 true
-PREHOOK: query: EXPLAIN SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE
+PREHOOK: query: EXPLAIN SELECT s1.f2 FROM nested_tbl_1_n0 WHERE s1.f1 = FALSE
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE
+POSTHOOK: query: EXPLAIN SELECT s1.f2 FROM nested_tbl_1_n0 WHERE s1.f1 = FALSE
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -391,7 +391,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: nested_tbl_1
+ alias: nested_tbl_1_n0
Pruned Column Paths: s1.f2, s1.f1
Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
Filter Operator
@@ -415,18 +415,18 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE
+PREHOOK: query: SELECT s1.f2 FROM nested_tbl_1_n0 WHERE s1.f1 = FALSE
PREHOOK: type: QUERY
-PREHOOK: Input: default@nested_tbl_1
+PREHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE
+POSTHOOK: query: SELECT s1.f2 FROM nested_tbl_1_n0 WHERE s1.f1 = FALSE
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nested_tbl_1
+POSTHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
foo
-PREHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4
+PREHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n0 WHERE s1.f3.f4 = 4
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4
+POSTHOOK: query: EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n0 WHERE s1.f3.f4 = 4
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -437,7 +437,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: nested_tbl_1
+ alias: nested_tbl_1_n0
Pruned Column Paths: s1.f3.f5, s1.f3.f4
Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
Filter Operator
@@ -461,18 +461,18 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4
+PREHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1_n0 WHERE s1.f3.f4 = 4
PREHOOK: type: QUERY
-PREHOOK: Input: default@nested_tbl_1
+PREHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4
+POSTHOOK: query: SELECT s1.f3.f5 FROM nested_tbl_1_n0 WHERE s1.f3.f4 = 4
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nested_tbl_1
+POSTHOOK: Input: default@nested_tbl_1_n0
#### A masked pattern was here ####
5.0
-PREHOOK: query: EXPLAIN SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE
+PREHOOK: query: EXPLAIN SELECT s2.f8 FROM nested_tbl_1_n0 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE
PREHOOK:
type: QUERY -POSTHOOK: query: EXPLAIN SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE +POSTHOOK: query: EXPLAIN SELECT s2.f8 FROM nested_tbl_1_n0 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -483,7 +483,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s1.f2, s2.f8 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Filter Operator @@ -507,20 +507,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE +PREHOOK: query: SELECT s2.f8 FROM nested_tbl_1_n0 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE +POSTHOOK: query: SELECT s2.f8 FROM nested_tbl_1_n0 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### {"f9":true,"f10":[10,11],"f11":{"key1":true,"key2":false}} -PREHOOK: query: EXPLAIN SELECT col1, col2 FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT col1, col2 FROM nested_tbl_1_n0 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT col1, col2 FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT col1, col2 FROM nested_tbl_1_n0 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2 POSTHOOK: type: QUERY @@ -533,7 +533,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s2.f8.f10 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Lateral View Forward @@ -644,25 +644,25 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT col1, col2 FROM nested_tbl_1 +PREHOOK: query: SELECT col1, col2 FROM nested_tbl_1_n0 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT col1, col2 FROM nested_tbl_1 +POSTHOOK: query: SELECT col1, col2 FROM nested_tbl_1_n0 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1 LATERAL VIEW explode(s3.f12) tbl2 AS col2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 10 {"f13":"foo","f14":14} 10 {"f13":"bar","f14":28} 11 {"f13":"foo","f14":14} 11 {"f13":"bar","f14":28} -PREHOOK: query: EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1 +PREHOOK: query: EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n0 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1 +POSTHOOK: query: EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -673,7 +673,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: 
nested_tbl_1_n0 Pruned Column Paths: s2.f8.f10, s1.f3.f4 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -694,18 +694,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1 +PREHOOK: query: SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1 +POSTHOOK: query: SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 3 -PREHOOK: query: EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5 +PREHOOK: query: EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3.f5 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5 +POSTHOOK: query: EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3.f5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -716,7 +716,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s1.f3.f5, s1.f3.f4 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -756,18 +756,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5 +PREHOOK: query: SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3.f5 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5 +POSTHOOK: query: SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3.f5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 5.0 1 -PREHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 +PREHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 +POSTHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -778,7 +778,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s1.f3 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -818,18 +818,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 +PREHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 +POSTHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here 
#### {"f4":4,"f5":5.0} 1 -PREHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3 +PREHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 ORDER BY s1.f3 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3 +POSTHOOK: query: EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 ORDER BY s1.f3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -841,7 +841,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s1.f3 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -902,22 +902,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3 +PREHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 ORDER BY s1.f3 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3 +POSTHOOK: query: SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 ORDER BY s1.f3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### {"f4":4,"f5":5.0} 1 PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_2_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_2_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE POSTHOOK: type: QUERY @@ -989,29 +989,29 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_2_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 -PREHOOK: Input: default@nested_tbl_2 +PREHOOK: Input: default@nested_tbl_1_n0 +PREHOOK: Input: default@nested_tbl_2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_2_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == FALSE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 -POSTHOOK: Input: default@nested_tbl_2 +POSTHOOK: Input: default@nested_tbl_1_n0 +POSTHOOK: Input: default@nested_tbl_2_n0 #### A masked pattern was here #### 5.0 {"f9":false,"f10":[20,22],"f11":{"key3":true,"key4":false}} PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE POSTHOOK: type: QUERY @@ -1083,26 +1083,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == 
TRUE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5, t2.s2.f8 -FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 WHERE t2.s2.f8.f9 == TRUE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 5.0 {"f9":true,"f10":[10,11],"f11":{"key1":true,"key2":false}} PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1178,25 +1178,25 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 5.0 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f1 <> t2.s2.f8.f9 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f1 <> t2.s2.f8.f9 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1259,24 +1259,24 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f1 <> t2.s2.f8.f9 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f1 <> t2.s2.f8.f9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 5.0 PREHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1349,63 +1349,63 @@ STAGE PLANS: 
ListSink PREHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT t1.s1.f3.f5 -FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2 +FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 5.0 -PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_3 +PREHOOK: query: DROP TABLE IF EXISTS nested_tbl_3_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_3 +POSTHOOK: query: DROP TABLE IF EXISTS nested_tbl_3_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE nested_tbl_3 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS ORC +PREHOOK: query: CREATE TABLE nested_tbl_3_n0 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@nested_tbl_3 -POSTHOOK: query: CREATE TABLE nested_tbl_3 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS ORC +PREHOOK: Output: default@nested_tbl_3_n0 +POSTHOOK: query: CREATE TABLE nested_tbl_3_n0 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@nested_tbl_3 -PREHOOK: query: INSERT OVERWRITE TABLE nested_tbl_3 PARTITION(f3) +POSTHOOK: Output: default@nested_tbl_3_n0 +PREHOOK: query: INSERT OVERWRITE TABLE nested_tbl_3_n0 PARTITION(f3) SELECT s1.f1 AS f1, S1.f2 AS f2, s1.f6 AS f3 -FROM nested_tbl_1 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 -PREHOOK: Output: default@nested_tbl_3 -POSTHOOK: query: INSERT OVERWRITE TABLE nested_tbl_3 PARTITION(f3) +PREHOOK: Input: default@nested_tbl_1_n0 +PREHOOK: Output: default@nested_tbl_3_n0 +POSTHOOK: query: INSERT OVERWRITE TABLE nested_tbl_3_n0 PARTITION(f3) SELECT s1.f1 AS f1, S1.f2 AS f2, s1.f6 AS f3 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 -POSTHOOK: Output: default@nested_tbl_3@f3=4 -POSTHOOK: Lineage: nested_tbl_3 PARTITION(f3=4).f1 EXPRESSION [(nested_tbl_1)nested_tbl_1.FieldSchema(name:s1, type:struct<f1:boolean,f2:string,f3:struct<f4:int,f5:double>,f6:int>, comment:null), ] -POSTHOOK: Lineage: nested_tbl_3 PARTITION(f3=4).f2 EXPRESSION [(nested_tbl_1)nested_tbl_1.FieldSchema(name:s1, type:struct<f1:boolean,f2:string,f3:struct<f4:int,f5:double>,f6:int>, comment:null), ] -PREHOOK: query: SELECT * FROM nested_tbl_3 +POSTHOOK: Input: default@nested_tbl_1_n0 +POSTHOOK: Output: default@nested_tbl_3_n0@f3=4 +POSTHOOK: Lineage: nested_tbl_3_n0 PARTITION(f3=4).f1 EXPRESSION [(nested_tbl_1_n0)nested_tbl_1_n0.FieldSchema(name:s1, type:struct<f1:boolean,f2:string,f3:struct<f4:int,f5:double>,f6:int>, comment:null), ] +POSTHOOK: Lineage: nested_tbl_3_n0 PARTITION(f3=4).f2 EXPRESSION [(nested_tbl_1_n0)nested_tbl_1_n0.FieldSchema(name:s1, type:struct<f1:boolean,f2:string,f3:struct<f4:int,f5:double>,f6:int>, comment:null), ] +PREHOOK: query: SELECT * FROM nested_tbl_3_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_3 -PREHOOK: Input: default@nested_tbl_3@f3=4 +PREHOOK: Input: default@nested_tbl_3_n0 +PREHOOK: Input: default@nested_tbl_3_n0@f3=4 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM nested_tbl_3 +POSTHOOK: query: SELECT * FROM nested_tbl_3_n0 POSTHOOK: type: QUERY -POSTHOOK:
Input: default@nested_tbl_3 -POSTHOOK: Input: default@nested_tbl_3@f3=4 +POSTHOOK: Input: default@nested_tbl_3_n0 +POSTHOOK: Input: default@nested_tbl_3_n0@f3=4 #### A masked pattern was here #### false foo 4 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s3.f12[0].f14 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s3.f12[0].f14 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1417,7 +1417,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s3.f12, s1.f6 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1462,26 +1462,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s3.f12[0].f14 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s3.f12[0].f14 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s3.f12[0].f14 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 1 14 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s4['key1'].f15 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s4['key1'].f15 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1493,7 +1493,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s1.f6 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1538,26 +1538,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s4['key1'].f15 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s4['key1'].f15 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s4['key1'].f15 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 1 1 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16[0].f18.f19 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16[0].f18.f19 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1569,7 +1569,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s5.f16, s1.f6 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1614,26 +1614,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16[0].f18.f19 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s5.f16[0].f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16[0].f18.f19 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: 
default@nested_tbl_1_n0 #### A masked pattern was here #### 1 14 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16.f18.f19 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16.f18.f19 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1645,7 +1645,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s1.f6, s5.f16 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1690,26 +1690,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16.f18.f19 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s5.f16.f18.f19 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s5.f16.f18.f19 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 1 [14,28] PREHOOK: query: EXPLAIN SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20[0].f21.f22 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20[0].f21.f22 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1721,7 +1721,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s1.f6 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1766,26 +1766,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20[0].f21.f22 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s6['key1'].f20[0].f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20[0].f21.f22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### 1 1 PREHOOK: query: EXPLAIN SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20.f21.f22 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20.f21.f22 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1797,7 +1797,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: nested_tbl_1 + alias: nested_tbl_1_n0 Pruned Column Paths: s1.f6 Statistics: Num rows: 1 Data size: 1125 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -1842,15 +1842,15 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20.f21.f22 PREHOOK: type: QUERY -PREHOOK: Input: default@nested_tbl_1 +PREHOOK: Input: default@nested_tbl_1_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT count(s1.f6), s6['key1'].f20.f21.f22 -FROM nested_tbl_1 +FROM nested_tbl_1_n0 GROUP BY s6['key1'].f20.f21.f22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@nested_tbl_1 +POSTHOOK: Input: default@nested_tbl_1_n0 
#### A masked pattern was here #### 1 [1] diff --git a/ql/src/test/results/clientpositive/orc_ppd_boolean.q.out b/ql/src/test/results/clientpositive/orc_ppd_boolean.q.out index b0b8a32672..02c4c1e96a 100644 --- a/ql/src/test/results/clientpositive/orc_ppd_boolean.q.out +++ b/ql/src/test/results/clientpositive/orc_ppd_boolean.q.out @@ -1,92 +1,92 @@ -PREHOOK: query: create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as orc tblproperties("orc.stripe.size"="16777216") +PREHOOK: query: create table newtypesorc_n0(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as orc tblproperties("orc.stripe.size"="16777216") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@newtypesorc -POSTHOOK: query: create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as orc tblproperties("orc.stripe.size"="16777216") +PREHOOK: Output: default@newtypesorc_n0 +POSTHOOK: query: create table newtypesorc_n0(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as orc tblproperties("orc.stripe.size"="16777216") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@newtypesorc -PREHOOK: query: insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl +POSTHOOK: Output: default@newtypesorc_n0 +PREHOOK: query: insert overwrite table newtypesorc_n0 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@newtypesorc -POSTHOOK: query: insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl +PREHOOK: Output: default@newtypesorc_n0 +POSTHOOK: query: insert overwrite table newtypesorc_n0 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@newtypesorc -POSTHOOK: Lineage: newtypesorc.b EXPRESSION [] -POSTHOOK: Lineage: newtypesorc.c EXPRESSION [] -POSTHOOK: Lineage: newtypesorc.d EXPRESSION [] -POSTHOOK: Lineage: newtypesorc.v EXPRESSION [] -PREHOOK: query: select sum(hash(*)) from newtypesorc where b=true +POSTHOOK: Output: default@newtypesorc_n0 +POSTHOOK: Lineage: newtypesorc_n0.b EXPRESSION [] +POSTHOOK: Lineage: newtypesorc_n0.c EXPRESSION [] +POSTHOOK: Lineage: newtypesorc_n0.d EXPRESSION [] +POSTHOOK: Lineage: newtypesorc_n0.v EXPRESSION [] +PREHOOK: query: select sum(hash(*)) from newtypesorc_n0 where b=true PREHOOK: type: QUERY -PREHOOK: Input: default@newtypesorc +PREHOOK: Input: default@newtypesorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(*)) from newtypesorc where b=true +POSTHOOK: query: select sum(hash(*)) from newtypesorc_n0 where b=true POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypesorc +POSTHOOK: Input: default@newtypesorc_n0 #### A masked pattern was here #### -252951953500 -PREHOOK: 
query: select sum(hash(*)) from newtypesorc where b=false +PREHOOK: query: select sum(hash(*)) from newtypesorc_n0 where b=false PREHOOK: type: QUERY -PREHOOK: Input: default@newtypesorc +PREHOOK: Input: default@newtypesorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(*)) from newtypesorc where b=false +POSTHOOK: query: select sum(hash(*)) from newtypesorc_n0 where b=false POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypesorc +POSTHOOK: Input: default@newtypesorc_n0 #### A masked pattern was here #### 334427776000 -PREHOOK: query: select sum(hash(*)) from newtypesorc where b!=true +PREHOOK: query: select sum(hash(*)) from newtypesorc_n0 where b!=true PREHOOK: type: QUERY -PREHOOK: Input: default@newtypesorc +PREHOOK: Input: default@newtypesorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(*)) from newtypesorc where b!=true +POSTHOOK: query: select sum(hash(*)) from newtypesorc_n0 where b!=true POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypesorc +POSTHOOK: Input: default@newtypesorc_n0 #### A masked pattern was here #### 334427776000 -PREHOOK: query: select sum(hash(*)) from newtypesorc where b!=false +PREHOOK: query: select sum(hash(*)) from newtypesorc_n0 where b!=false PREHOOK: type: QUERY -PREHOOK: Input: default@newtypesorc +PREHOOK: Input: default@newtypesorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(*)) from newtypesorc where b!=false +POSTHOOK: query: select sum(hash(*)) from newtypesorc_n0 where b!=false POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypesorc +POSTHOOK: Input: default@newtypesorc_n0 #### A masked pattern was here #### -252951953500 -PREHOOK: query: select sum(hash(*)) from newtypesorc where b 0 PREHOOK: type: QUERY PREHOOK: Input: default@unique_2 diff --git a/ql/src/test/results/clientpositive/orc_ppd_schema_evol_2a.q.out b/ql/src/test/results/clientpositive/orc_ppd_schema_evol_2a.q.out index 5dead1ccf1..18fcc644aa 100644 --- a/ql/src/test/results/clientpositive/orc_ppd_schema_evol_2a.q.out +++ b/ql/src/test/results/clientpositive/orc_ppd_schema_evol_2a.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table unique_1( +PREHOOK: query: create table unique_1_n2( i int, d string, s string) @@ -7,8 +7,8 @@ fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@unique_1 -POSTHOOK: query: create table unique_1( +PREHOOK: Output: default@unique_1_n2 +POSTHOOK: query: create table unique_1_n2( i int, d string, s string) @@ -17,54 +17,54 @@ fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@unique_1 -PREHOOK: query: load data local inpath '../../data/files/unique_1.txt' into table unique_1 +POSTHOOK: Output: default@unique_1_n2 +PREHOOK: query: load data local inpath '../../data/files/unique_1.txt' into table unique_1_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@unique_1 -POSTHOOK: query: load data local inpath '../../data/files/unique_1.txt' into table unique_1 +PREHOOK: Output: default@unique_1_n2 +POSTHOOK: query: load data local inpath '../../data/files/unique_1.txt' into table unique_1_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@unique_1 -PREHOOK: query: create table test1 stored as orc as select * from unique_1 order by d +POSTHOOK: Output: default@unique_1_n2 +PREHOOK: query: create table test1_n11 stored as orc as select * from 
unique_1_n2 order by d PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@unique_1 +PREHOOK: Input: default@unique_1_n2 PREHOOK: Output: database:default -PREHOOK: Output: default@test1 -POSTHOOK: query: create table test1 stored as orc as select * from unique_1 order by d +PREHOOK: Output: default@test1_n11 +POSTHOOK: query: create table test1_n11 stored as orc as select * from unique_1_n2 order by d POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@unique_1 +POSTHOOK: Input: default@unique_1_n2 POSTHOOK: Output: database:default -POSTHOOK: Output: default@test1 -POSTHOOK: Lineage: test1.d SIMPLE [(unique_1)unique_1.FieldSchema(name:d, type:string, comment:null), ] -POSTHOOK: Lineage: test1.i SIMPLE [(unique_1)unique_1.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: test1.s SIMPLE [(unique_1)unique_1.FieldSchema(name:s, type:string, comment:null), ] -unique_1.i unique_1.d unique_1.s -PREHOOK: query: alter table test1 change column d d double +POSTHOOK: Output: default@test1_n11 +POSTHOOK: Lineage: test1_n11.d SIMPLE [(unique_1_n2)unique_1_n2.FieldSchema(name:d, type:string, comment:null), ] +POSTHOOK: Lineage: test1_n11.i SIMPLE [(unique_1_n2)unique_1_n2.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: test1_n11.s SIMPLE [(unique_1_n2)unique_1_n2.FieldSchema(name:s, type:string, comment:null), ] +unique_1_n2.i unique_1_n2.d unique_1_n2.s +PREHOOK: query: alter table test1_n11 change column d d double PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@test1 -PREHOOK: Output: default@test1 -PREHOOK: query: select s from test1 where d = -4996703.42 +PREHOOK: Input: default@test1_n11 +PREHOOK: Output: default@test1_n11 +PREHOOK: query: select s from test1_n11 where d = -4996703.42 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 +PREHOOK: Input: default@test1_n11 #### A masked pattern was here #### s luke quirinius -PREHOOK: query: select s from test1 where d = -4996703.42 +PREHOOK: query: select s from test1_n11 where d = -4996703.42 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 +PREHOOK: Input: default@test1_n11 #### A masked pattern was here #### s luke quirinius -PREHOOK: query: select s from test1 where d = -4996703.42 +PREHOOK: query: select s from test1_n11 where d = -4996703.42 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 +PREHOOK: Input: default@test1_n11 #### A masked pattern was here #### s luke quirinius -PREHOOK: query: select s from test1 where d = -4996703.42 +PREHOOK: query: select s from test1_n11 where d = -4996703.42 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 +PREHOOK: Input: default@test1_n11 #### A masked pattern was here #### s luke quirinius diff --git a/ql/src/test/results/clientpositive/orc_ppd_schema_evol_2b.q.out b/ql/src/test/results/clientpositive/orc_ppd_schema_evol_2b.q.out index b85ec989b3..916400021d 100644 --- a/ql/src/test/results/clientpositive/orc_ppd_schema_evol_2b.q.out +++ b/ql/src/test/results/clientpositive/orc_ppd_schema_evol_2b.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table unique_1( +PREHOOK: query: create table unique_1_n1( i int, d string, s string) @@ -7,8 +7,8 @@ fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@unique_1 -POSTHOOK: query: create table unique_1( +PREHOOK: Output: default@unique_1_n1 +POSTHOOK: query: create table unique_1_n1( i int, d string, s string) @@ -17,16 +17,16 @@ fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE 
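-- For orientation amid the renames: the orc_ppd_schema_evol_2a golden output above reduces
-- to the short sequence below (statements taken from the visible diff; the identical SELECT is
-- repeated in the golden file, presumably under different predicate-pushdown settings, which
-- the output does not echo). A sketch, not part of the golden output:
  create table test1_n11 stored as orc as select * from unique_1_n2 order by d;
  alter table test1_n11 change column d d double;  -- metadata-only type change; existing ORC stripes still carry string statistics for d
  select s from test1_n11 where d = -4996703.42;   -- PPD must convert the old string stats instead of skipping the stripe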
POSTHOOK: Output: database:default -POSTHOOK: Output: default@unique_1 -PREHOOK: query: load data local inpath '../../data/files/unique_1.txt' into table unique_1 +POSTHOOK: Output: default@unique_1_n1 +PREHOOK: query: load data local inpath '../../data/files/unique_1.txt' into table unique_1_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@unique_1 -POSTHOOK: query: load data local inpath '../../data/files/unique_1.txt' into table unique_1 +PREHOOK: Output: default@unique_1_n1 +POSTHOOK: query: load data local inpath '../../data/files/unique_1.txt' into table unique_1_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@unique_1 -PREHOOK: query: create table unique_2( +POSTHOOK: Output: default@unique_1_n1 +PREHOOK: query: create table unique_2_n0( i int, d string, s string) @@ -35,8 +35,8 @@ fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@unique_2 -POSTHOOK: query: create table unique_2( +PREHOOK: Output: default@unique_2_n0 +POSTHOOK: query: create table unique_2_n0( i int, d string, s string) @@ -45,80 +45,80 @@ fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@unique_2 -PREHOOK: query: load data local inpath '../../data/files/unique_2.txt' into table unique_2 +POSTHOOK: Output: default@unique_2_n0 +PREHOOK: query: load data local inpath '../../data/files/unique_2.txt' into table unique_2_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@unique_2 -POSTHOOK: query: load data local inpath '../../data/files/unique_2.txt' into table unique_2 +PREHOOK: Output: default@unique_2_n0 +POSTHOOK: query: load data local inpath '../../data/files/unique_2.txt' into table unique_2_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@unique_2 -PREHOOK: query: create table test_two_files( +POSTHOOK: Output: default@unique_2_n0 +PREHOOK: query: create table test_two_files_n0( i int, d string, s string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_two_files -POSTHOOK: query: create table test_two_files( +PREHOOK: Output: default@test_two_files_n0 +POSTHOOK: query: create table test_two_files_n0( i int, d string, s string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_two_files -PREHOOK: query: insert into table test_two_files select * from unique_1 where cast(d as double) <= 0 order by cast(d as double) +POSTHOOK: Output: default@test_two_files_n0 +PREHOOK: query: insert into table test_two_files_n0 select * from unique_1_n1 where cast(d as double) <= 0 order by cast(d as double) PREHOOK: type: QUERY -PREHOOK: Input: default@unique_1 -PREHOOK: Output: default@test_two_files -POSTHOOK: query: insert into table test_two_files select * from unique_1 where cast(d as double) <= 0 order by cast(d as double) +PREHOOK: Input: default@unique_1_n1 +PREHOOK: Output: default@test_two_files_n0 +POSTHOOK: query: insert into table test_two_files_n0 select * from unique_1_n1 where cast(d as double) <= 0 order by cast(d as double) POSTHOOK: type: QUERY -POSTHOOK: Input: default@unique_1 -POSTHOOK: Output: default@test_two_files -POSTHOOK: Lineage: test_two_files.d SIMPLE [(unique_1)unique_1.FieldSchema(name:d, type:string, comment:null), ] -POSTHOOK: Lineage: test_two_files.i SIMPLE 
[(unique_1)unique_1.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: test_two_files.s SIMPLE [(unique_1)unique_1.FieldSchema(name:s, type:string, comment:null), ] -unique_1.i unique_1.d unique_1.s -PREHOOK: query: insert into table test_two_files select * from unique_2 where cast(d as double) > 0 order by cast(d as double) +POSTHOOK: Input: default@unique_1_n1 +POSTHOOK: Output: default@test_two_files_n0 +POSTHOOK: Lineage: test_two_files_n0.d SIMPLE [(unique_1_n1)unique_1_n1.FieldSchema(name:d, type:string, comment:null), ] +POSTHOOK: Lineage: test_two_files_n0.i SIMPLE [(unique_1_n1)unique_1_n1.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: test_two_files_n0.s SIMPLE [(unique_1_n1)unique_1_n1.FieldSchema(name:s, type:string, comment:null), ] +unique_1_n1.i unique_1_n1.d unique_1_n1.s +PREHOOK: query: insert into table test_two_files_n0 select * from unique_2_n0 where cast(d as double) > 0 order by cast(d as double) PREHOOK: type: QUERY -PREHOOK: Input: default@unique_2 -PREHOOK: Output: default@test_two_files -POSTHOOK: query: insert into table test_two_files select * from unique_2 where cast(d as double) > 0 order by cast(d as double) +PREHOOK: Input: default@unique_2_n0 +PREHOOK: Output: default@test_two_files_n0 +POSTHOOK: query: insert into table test_two_files_n0 select * from unique_2_n0 where cast(d as double) > 0 order by cast(d as double) POSTHOOK: type: QUERY -POSTHOOK: Input: default@unique_2 -POSTHOOK: Output: default@test_two_files -POSTHOOK: Lineage: test_two_files.d SIMPLE [(unique_2)unique_2.FieldSchema(name:d, type:string, comment:null), ] -POSTHOOK: Lineage: test_two_files.i SIMPLE [(unique_2)unique_2.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: test_two_files.s SIMPLE [(unique_2)unique_2.FieldSchema(name:s, type:string, comment:null), ] -unique_2.i unique_2.d unique_2.s -PREHOOK: query: alter table test_two_files change column d d double +POSTHOOK: Input: default@unique_2_n0 +POSTHOOK: Output: default@test_two_files_n0 +POSTHOOK: Lineage: test_two_files_n0.d SIMPLE [(unique_2_n0)unique_2_n0.FieldSchema(name:d, type:string, comment:null), ] +POSTHOOK: Lineage: test_two_files_n0.i SIMPLE [(unique_2_n0)unique_2_n0.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: test_two_files_n0.s SIMPLE [(unique_2_n0)unique_2_n0.FieldSchema(name:s, type:string, comment:null), ] +unique_2_n0.i unique_2_n0.d unique_2_n0.s +PREHOOK: query: alter table test_two_files_n0 change column d d double PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@test_two_files -PREHOOK: Output: default@test_two_files -PREHOOK: query: select s from test_two_files where d = -4996703.42 +PREHOOK: Input: default@test_two_files_n0 +PREHOOK: Output: default@test_two_files_n0 +PREHOOK: query: select s from test_two_files_n0 where d = -4996703.42 PREHOOK: type: QUERY -PREHOOK: Input: default@test_two_files +PREHOOK: Input: default@test_two_files_n0 #### A masked pattern was here #### s luke quirinius -PREHOOK: query: select s from test_two_files where d = -4996703.42 +PREHOOK: query: select s from test_two_files_n0 where d = -4996703.42 PREHOOK: type: QUERY -PREHOOK: Input: default@test_two_files +PREHOOK: Input: default@test_two_files_n0 #### A masked pattern was here #### s luke quirinius -PREHOOK: query: select s from test_two_files where d = -4996703.42 +PREHOOK: query: select s from test_two_files_n0 where d = -4996703.42 PREHOOK: type: QUERY -PREHOOK: Input: default@test_two_files +PREHOOK: Input: default@test_two_files_n0 
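-- The _2b variant applies the same probe to a table backed by two ORC files written by
-- separate inserts, so each file carries its own string-encoded statistics for column d and
-- predicate conversion has to hold per file. A sketch assembled from the statements above:
  create table test_two_files_n0 (i int, d string, s string) stored as orc;
  insert into table test_two_files_n0 select * from unique_1_n1 where cast(d as double) <= 0 order by cast(d as double);
  insert into table test_two_files_n0 select * from unique_2_n0 where cast(d as double) > 0 order by cast(d as double);
  alter table test_two_files_n0 change column d d double;
  select s from test_two_files_n0 where d = -4996703.42;  -- matching row sits in the file written by the first insert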
#### A masked pattern was here #### s luke quirinius -PREHOOK: query: select s from test_two_files where d = -4996703.42 +PREHOOK: query: select s from test_two_files_n0 where d = -4996703.42 PREHOOK: type: QUERY -PREHOOK: Input: default@test_two_files +PREHOOK: Input: default@test_two_files_n0 #### A masked pattern was here #### s luke quirinius diff --git a/ql/src/test/results/clientpositive/orc_ppd_str_conversion.q.out b/ql/src/test/results/clientpositive/orc_ppd_str_conversion.q.out index 71ff84d7e5..88e372ca6f 100644 --- a/ql/src/test/results/clientpositive/orc_ppd_str_conversion.q.out +++ b/ql/src/test/results/clientpositive/orc_ppd_str_conversion.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: create table orc_test( col1 varchar(15), col2 char(10)) stored as orc +PREHOOK: query: create table orc_test_n0( col1 varchar(15), col2 char(10)) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@orc_test -POSTHOOK: query: create table orc_test( col1 varchar(15), col2 char(10)) stored as orc +PREHOOK: Output: default@orc_test_n0 +POSTHOOK: query: create table orc_test_n0( col1 varchar(15), col2 char(10)) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@orc_test +POSTHOOK: Output: default@orc_test_n0 PREHOOK: query: create table text_test( col1 varchar(15), col2 char(10)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -14,26 +14,26 @@ POSTHOOK: query: create table text_test( col1 varchar(15), col2 char(10)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@text_test -PREHOOK: query: insert into orc_test values ('val1', '1') +PREHOOK: query: insert into orc_test_n0 values ('val1', '1') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@orc_test -POSTHOOK: query: insert into orc_test values ('val1', '1') +PREHOOK: Output: default@orc_test_n0 +POSTHOOK: query: insert into orc_test_n0 values ('val1', '1') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@orc_test -POSTHOOK: Lineage: orc_test.col1 SCRIPT [] -POSTHOOK: Lineage: orc_test.col2 SCRIPT [] -PREHOOK: query: insert overwrite table text_test select * from orc_test +POSTHOOK: Output: default@orc_test_n0 +POSTHOOK: Lineage: orc_test_n0.col1 SCRIPT [] +POSTHOOK: Lineage: orc_test_n0.col2 SCRIPT [] +PREHOOK: query: insert overwrite table text_test select * from orc_test_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@orc_test +PREHOOK: Input: default@orc_test_n0 PREHOOK: Output: default@text_test -POSTHOOK: query: insert overwrite table text_test select * from orc_test +POSTHOOK: query: insert overwrite table text_test select * from orc_test_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_test +POSTHOOK: Input: default@orc_test_n0 POSTHOOK: Output: default@text_test -POSTHOOK: Lineage: text_test.col1 SIMPLE [(orc_test)orc_test.FieldSchema(name:col1, type:varchar(15), comment:null), ] -POSTHOOK: Lineage: text_test.col2 SIMPLE [(orc_test)orc_test.FieldSchema(name:col2, type:char(10), comment:null), ] +POSTHOOK: Lineage: text_test.col1 SIMPLE [(orc_test_n0)orc_test_n0.FieldSchema(name:col1, type:varchar(15), comment:null), ] +POSTHOOK: Lineage: text_test.col2 SIMPLE [(orc_test_n0)orc_test_n0.FieldSchema(name:col2, type:char(10), comment:null), ] PREHOOK: query: explain select * from text_test where col2='1' PREHOOK: type: QUERY POSTHOOK: query: explain select * from text_test where col2='1' @@ -79,21 
+79,21 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@text_test #### A masked pattern was here #### val1 1 -PREHOOK: query: select * from orc_test where col2='1' +PREHOOK: query: select * from orc_test_n0 where col2='1' PREHOOK: type: QUERY -PREHOOK: Input: default@orc_test +PREHOOK: Input: default@orc_test_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from orc_test where col2='1' +POSTHOOK: query: select * from orc_test_n0 where col2='1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_test +POSTHOOK: Input: default@orc_test_n0 #### A masked pattern was here #### val1 1 -PREHOOK: query: select * from orc_test where col2='1' +PREHOOK: query: select * from orc_test_n0 where col2='1' PREHOOK: type: QUERY -PREHOOK: Input: default@orc_test +PREHOOK: Input: default@orc_test_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from orc_test where col2='1' +POSTHOOK: query: select * from orc_test_n0 where col2='1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@orc_test +POSTHOOK: Input: default@orc_test_n0 #### A masked pattern was here #### val1 1 diff --git a/ql/src/test/results/clientpositive/orc_schema_evolution.q.out b/ql/src/test/results/clientpositive/orc_schema_evolution.q.out index b2f4d9df34..aa84313fc8 100644 --- a/ql/src/test/results/clientpositive/orc_schema_evolution.q.out +++ b/ql/src/test/results/clientpositive/orc_schema_evolution.q.out @@ -1,11 +1,11 @@ -PREHOOK: query: create table src_orc (key smallint, val string) stored as orc +PREHOOK: query: create table src_orc_n3 (key smallint, val string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_orc -POSTHOOK: query: create table src_orc (key smallint, val string) stored as orc +PREHOOK: Output: default@src_orc_n3 +POSTHOOK: query: create table src_orc_n3 (key smallint, val string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_orc +POSTHOOK: Output: default@src_orc_n3 PREHOOK: query: create table src_orc2 (key smallint, val string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -14,74 +14,74 @@ POSTHOOK: query: create table src_orc2 (key smallint, val string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc2 -PREHOOK: query: insert overwrite table src_orc select * from src +PREHOOK: query: insert overwrite table src_orc_n3 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_orc -POSTHOOK: query: insert overwrite table src_orc select * from src +PREHOOK: Output: default@src_orc_n3 +POSTHOOK: query: insert overwrite table src_orc_n3 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_orc -POSTHOOK: Lineage: src_orc.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_orc.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select sum(hash(*)) from src_orc +POSTHOOK: Output: default@src_orc_n3 +POSTHOOK: Lineage: src_orc_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_orc_n3.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select sum(hash(*)) from src_orc_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@src_orc +PREHOOK: Input: default@src_orc_n3 #### A masked pattern was here #### -POSTHOOK: query: 
select sum(hash(*)) from src_orc +POSTHOOK: query: select sum(hash(*)) from src_orc_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_orc +POSTHOOK: Input: default@src_orc_n3 #### A masked pattern was here #### 36214430891 -PREHOOK: query: alter table src_orc change key key smallint +PREHOOK: query: alter table src_orc_n3 change key key smallint PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@src_orc -PREHOOK: Output: default@src_orc -POSTHOOK: query: alter table src_orc change key key smallint +PREHOOK: Input: default@src_orc_n3 +PREHOOK: Output: default@src_orc_n3 +POSTHOOK: query: alter table src_orc_n3 change key key smallint POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@src_orc -POSTHOOK: Output: default@src_orc -PREHOOK: query: select sum(hash(*)) from src_orc +POSTHOOK: Input: default@src_orc_n3 +POSTHOOK: Output: default@src_orc_n3 +PREHOOK: query: select sum(hash(*)) from src_orc_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@src_orc +PREHOOK: Input: default@src_orc_n3 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(*)) from src_orc +POSTHOOK: query: select sum(hash(*)) from src_orc_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_orc +POSTHOOK: Input: default@src_orc_n3 #### A masked pattern was here #### 36214430891 -PREHOOK: query: alter table src_orc change key key int +PREHOOK: query: alter table src_orc_n3 change key key int PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@src_orc -PREHOOK: Output: default@src_orc -POSTHOOK: query: alter table src_orc change key key int +PREHOOK: Input: default@src_orc_n3 +PREHOOK: Output: default@src_orc_n3 +POSTHOOK: query: alter table src_orc_n3 change key key int POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@src_orc -POSTHOOK: Output: default@src_orc -PREHOOK: query: select sum(hash(*)) from src_orc +POSTHOOK: Input: default@src_orc_n3 +POSTHOOK: Output: default@src_orc_n3 +PREHOOK: query: select sum(hash(*)) from src_orc_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@src_orc +PREHOOK: Input: default@src_orc_n3 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(*)) from src_orc +POSTHOOK: query: select sum(hash(*)) from src_orc_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_orc +POSTHOOK: Input: default@src_orc_n3 #### A masked pattern was here #### 36214430891 -PREHOOK: query: alter table src_orc change key key bigint +PREHOOK: query: alter table src_orc_n3 change key key bigint PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@src_orc -PREHOOK: Output: default@src_orc -POSTHOOK: query: alter table src_orc change key key bigint +PREHOOK: Input: default@src_orc_n3 +PREHOOK: Output: default@src_orc_n3 +POSTHOOK: query: alter table src_orc_n3 change key key bigint POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@src_orc -POSTHOOK: Output: default@src_orc -PREHOOK: query: select sum(hash(*)) from src_orc +POSTHOOK: Input: default@src_orc_n3 +POSTHOOK: Output: default@src_orc_n3 +PREHOOK: query: select sum(hash(*)) from src_orc_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@src_orc +PREHOOK: Input: default@src_orc_n3 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(*)) from src_orc +POSTHOOK: query: select sum(hash(*)) from src_orc_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_orc +POSTHOOK: Input: default@src_orc_n3 #### A masked pattern was here #### 36214430891 PREHOOK: query: insert overwrite table src_orc2 select * from src diff --git 
index fcb7657907..8f10810200 100644
--- a/ql/src/test/results/clientpositive/orc_wide_table.q.out
+++ b/ql/src/test/results/clientpositive/orc_wide_table.q.out
@@ -2,9 +2,9 @@ PREHOOK: query: drop table if exists test_txt
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table if exists test_txt
POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table if exists test_orc
+PREHOOK: query: drop table if exists test_orc_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists test_orc
+POSTHOOK: query: drop table if exists test_orc_n1
POSTHOOK: type: DROPTABLE
PREHOOK: query: create table test_txt(
c1 varchar(64),
@@ -4024,2030 +4024,2030 @@ POSTHOOK: query: load data local inpath '../../data/files/2000_cols_data.csv' ov
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@test_txt
-PREHOOK: query: create table test_orc like test_txt
+PREHOOK: query: create table test_orc_n1 like test_txt
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: create table test_orc like test_txt
+PREHOOK: Output: default@test_orc_n1
+POSTHOOK: query: create table test_orc_n1 like test_txt
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: alter table test_orc set fileformat orc
+POSTHOOK: Output: default@test_orc_n1
+PREHOOK: query: alter table test_orc_n1 set fileformat orc
PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@test_orc
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: alter table test_orc set fileformat orc
+PREHOOK: Input: default@test_orc_n1
+PREHOOK: Output: default@test_orc_n1
+POSTHOOK: query: alter table test_orc_n1 set fileformat orc
POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@test_orc
-POSTHOOK: Output: default@test_orc
-PREHOOK: query: insert into table test_orc select * from test_txt
+POSTHOOK: Input: default@test_orc_n1
+POSTHOOK: Output: default@test_orc_n1
+PREHOOK: query: insert into table test_orc_n1 select * from test_txt
PREHOOK: type: QUERY
PREHOOK: Input: default@test_txt
-PREHOOK: Output: default@test_orc
-POSTHOOK: query: insert into table test_orc select * from test_txt
+PREHOOK: Output: default@test_orc_n1
+POSTHOOK: query: insert into table test_orc_n1 select * from test_txt
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_txt
-POSTHOOK: Output: default@test_orc
-POSTHOOK: Lineage: test_orc.c1 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c10 SIMPLE [(test_txt)test_txt.FieldSchema(name:c10, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c100 SIMPLE [(test_txt)test_txt.FieldSchema(name:c100, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1000 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1000, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1001 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1001, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1002 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1002, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1003 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1003, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1004 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1004, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1005 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1005, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1006 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1006, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1007 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1007, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1008 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1008, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1009 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1009, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c101 SIMPLE [(test_txt)test_txt.FieldSchema(name:c101, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1010 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1010, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1011 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1011, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1012 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1012, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1013 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1013, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1014 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1014, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1015 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1015, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1016 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1016, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1017 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1017, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1018 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1018, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1019 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1019, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c102 SIMPLE [(test_txt)test_txt.FieldSchema(name:c102, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1020 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1020, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1021 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1021, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1022 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1022, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1023 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1023, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1024 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1024, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1025 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1025, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1026 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1026, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1027 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1027, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1028 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1028, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1029 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1029, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c103 SIMPLE [(test_txt)test_txt.FieldSchema(name:c103, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1030 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1030, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1031 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1031, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1032 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1032, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1033 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1033, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1034 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1034, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1035 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1035, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1036 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1036, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1037 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1037, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1038 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1038, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1039 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1039, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c104 SIMPLE [(test_txt)test_txt.FieldSchema(name:c104, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1040 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1040, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1041 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1041, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1042 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1042, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1043 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1043, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1044 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1044, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1045 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1045, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1046 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1046, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1047 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1047, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1048 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1048, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1049 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1049, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c105 SIMPLE [(test_txt)test_txt.FieldSchema(name:c105, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1050 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1050, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1051 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1051, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1052 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1052, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1053 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1053, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1054 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1054, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1055 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1055, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1056 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1056, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1057 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1057, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1058 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1058, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1059 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1059, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c106 SIMPLE [(test_txt)test_txt.FieldSchema(name:c106, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1060 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1060, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1061 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1061, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1062 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1062, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1063 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1063, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1064 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1064, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1065 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1065, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1066 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1066, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1067 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1067, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1068 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1068, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1069 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1069, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c107 SIMPLE [(test_txt)test_txt.FieldSchema(name:c107, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1070 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1070, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1071 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1071, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1072 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1072, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1073 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1073, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1074 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1074, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1075 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1075, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1076 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1076, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1077 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1077, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1078 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1078, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1079 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1079, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c108 SIMPLE [(test_txt)test_txt.FieldSchema(name:c108, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1080 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1080, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1081 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1081, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1082 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1082, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1083 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1083, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1084 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1084, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1085 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1085, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1086 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1086, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1087 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1087, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1088 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1088, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1089 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1089, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c109 SIMPLE [(test_txt)test_txt.FieldSchema(name:c109, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1090 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1090, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1091 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1091, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1092 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1092, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1093 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1093, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1094 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1094, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1095 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1095, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1096 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1096, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1097 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1097, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1098 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1098, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1099 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1099, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c11 SIMPLE [(test_txt)test_txt.FieldSchema(name:c11, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c110 SIMPLE [(test_txt)test_txt.FieldSchema(name:c110, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1100 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1100, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1101 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1101, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1102 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1102, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1103 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1103, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1104 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1104, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1105 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1105, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1106 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1106, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1107 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1107, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1108 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1108, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1109 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1109, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c111 SIMPLE [(test_txt)test_txt.FieldSchema(name:c111, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1110 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1110, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1111 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1111, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1112 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1112, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1113 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1113, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1114 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1114, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1115 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1115, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1116 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1116, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1117 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1117, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1118 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1118, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1119 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1119, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c112 SIMPLE [(test_txt)test_txt.FieldSchema(name:c112, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1120 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1120, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1121 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1121, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1122 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1122, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1123 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1123, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1124 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1124, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1125 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1125, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1126 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1126, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1127 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1127, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1128 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1128, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1129 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1129, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c113 SIMPLE [(test_txt)test_txt.FieldSchema(name:c113, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1130 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1130, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1131 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1131, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1132 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1132, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1133 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1133, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1134 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1134, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1135 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1135, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1136 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1136, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1137 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1137, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1138 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1138, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1139 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1139, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c114 SIMPLE [(test_txt)test_txt.FieldSchema(name:c114, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1140 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1140, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1141 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1141, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1142 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1142, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1143 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1143, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1144 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1144, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1145 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1145, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1146 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1146, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1147 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1147, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1148 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1148, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1149 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1149, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c115 SIMPLE [(test_txt)test_txt.FieldSchema(name:c115, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1150 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1150, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1151 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1151, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1152 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1152, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1153 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1153, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1154 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1154, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1155 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1155, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1156 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1156, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1157 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1157, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1158 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1158, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1159 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1159, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c116 SIMPLE [(test_txt)test_txt.FieldSchema(name:c116, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1160 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1160, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1161 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1161, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1162 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1162, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1163 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1163, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1164 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1164, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1165 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1165, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1166 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1166, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1167 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1167, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1168 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1168, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1169 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1169, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c117 SIMPLE [(test_txt)test_txt.FieldSchema(name:c117, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1170 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1170, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1171 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1171, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1172 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1172, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1173 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1173, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1174 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1174, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1175 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1175, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1176 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1176, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1177 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1177, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1178 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1178, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1179 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1179, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c118 SIMPLE [(test_txt)test_txt.FieldSchema(name:c118, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1180 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1180, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1181 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1181, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1182 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1182, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1183 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1183, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1184 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1184, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1185 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1185, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1186 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1186, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1187 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1187, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1188 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1188, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1189 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1189, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c119 SIMPLE [(test_txt)test_txt.FieldSchema(name:c119, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1190 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1190, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1191 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1191, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1192 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1192, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1193 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1193, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1194 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1194, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1195 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1195, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1196 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1196, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1197 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1197, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1198 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1198, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1199 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1199, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c12 SIMPLE [(test_txt)test_txt.FieldSchema(name:c12, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c120 SIMPLE [(test_txt)test_txt.FieldSchema(name:c120, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1200 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1200, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1201 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1201, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1202 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1202, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1203 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1203, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1204 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1204, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1205 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1205, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1206 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1206, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1207 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1207, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1208 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1208, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1209 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1209, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c121 SIMPLE [(test_txt)test_txt.FieldSchema(name:c121, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1210 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1210, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1211 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1211, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1212 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1212, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1213 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1213, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1214 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1214, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1215 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1215, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1216 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1216, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1217 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1217, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1218 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1218, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1219 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1219, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c122 SIMPLE [(test_txt)test_txt.FieldSchema(name:c122, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1220 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1220, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1221 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1221, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1222 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1222, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1223 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1223, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1224 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1224, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1225 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1225, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1226 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1226, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1227 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1227, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1228 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1228, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1229 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1229, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c123 SIMPLE [(test_txt)test_txt.FieldSchema(name:c123, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1230 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1230, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1231 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1231, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1232 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1232, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1233 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1233, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1234 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1234, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1235 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1235, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1236 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1236, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1237 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1237, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1238 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1238, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1239 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1239, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c124 SIMPLE [(test_txt)test_txt.FieldSchema(name:c124, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1240 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1240, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1241 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1241, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1242 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1242, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1243 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1243, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1244 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1244, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1245 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1245, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1246 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1246, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1247 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1247, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1248 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1248, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1249 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1249, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c125 SIMPLE [(test_txt)test_txt.FieldSchema(name:c125, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1250 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1250, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1251 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1251, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1252 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1252, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1253 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1253, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1254 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1254, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1255 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1255, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1256 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1256, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1257 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1257, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1258 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1258, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1259 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1259, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c126 SIMPLE [(test_txt)test_txt.FieldSchema(name:c126, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1260 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1260, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1261 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1261, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1262 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1262, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1263 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1263, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1264 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1264, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1265 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1265, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1266 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1266, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1267 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1267, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1268 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1268, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1269 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1269, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c127 SIMPLE [(test_txt)test_txt.FieldSchema(name:c127, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1270 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1270, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1271 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1271, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1272 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1272, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1273 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1273, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1274 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1274, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1275 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1275, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1276 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1276, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1277 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1277, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1278 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1278, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1279 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1279, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c128 SIMPLE [(test_txt)test_txt.FieldSchema(name:c128, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1280 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1280, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1281 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1281, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1282 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1282, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1283 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1283, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1284 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1284, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1285 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1285, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1286 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1286, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1287 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1287, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1288 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1288, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1289 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1289, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c129 SIMPLE [(test_txt)test_txt.FieldSchema(name:c129, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1290 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1290, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1291 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1291, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1292 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1292, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1293 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1293, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1294 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1294, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1295 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1295, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1296 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1296, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1297 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1297, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1298 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1298, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1299 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1299, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c13 SIMPLE [(test_txt)test_txt.FieldSchema(name:c13, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c130 SIMPLE [(test_txt)test_txt.FieldSchema(name:c130, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1300 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1300, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1301 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1301, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1302 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1302, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1303 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1303, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1304 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1304, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1305 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1305, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1306 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1306, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1307 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1307, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1308 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1308, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1309 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1309, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c131 SIMPLE [(test_txt)test_txt.FieldSchema(name:c131, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1310 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1310, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1311 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1311, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1312 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1312, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1313 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1313, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1314 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1314, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1315 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1315, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1316 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1316, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1317 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1317, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1318 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1318, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1319 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1319, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c132 SIMPLE [(test_txt)test_txt.FieldSchema(name:c132, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1320 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1320, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1321 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1321, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1322 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1322, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1323 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1323, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1324 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1324, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1325 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1325, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1326 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1326, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1327 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1327, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1328 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1328, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1329 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1329, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c133 SIMPLE [(test_txt)test_txt.FieldSchema(name:c133, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1330 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1330, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1331 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1331, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1332 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1332, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1333 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1333, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1334 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1334, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1335 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1335, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1336 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1336, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1337 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1337, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1338 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1338, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1339 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1339, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c134 SIMPLE [(test_txt)test_txt.FieldSchema(name:c134, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1340 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1340, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1341 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1341, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1342 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1342, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1343 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1343, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1344 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1344, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1345 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1345, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1346 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1346, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1347 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1347, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1348 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1348, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1349 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1349, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c135 SIMPLE [(test_txt)test_txt.FieldSchema(name:c135, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1350 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1350, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1351 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1351, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1352 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1352, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1353 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1353, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1354 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1354, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1355 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1355, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1356 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1356, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1357 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1357, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1358 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1358, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1359 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1359, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c136 SIMPLE [(test_txt)test_txt.FieldSchema(name:c136, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1360 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1360, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1361 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1361, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1362 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1362, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1363 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1363, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1364 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1364, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1365 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1365, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1366 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1366, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1367 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1367, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1368 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1368, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1369 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1369, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c137 SIMPLE [(test_txt)test_txt.FieldSchema(name:c137, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1370 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1370, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1371 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1371, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1372 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1372, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1373 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1373, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1374 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1374, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1375 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1375, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1376 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1376, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1377 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1377, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1378 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1378, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1379 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1379, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c138 SIMPLE [(test_txt)test_txt.FieldSchema(name:c138, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1380 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1380, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1381 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1381, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1382 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1382, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1383 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1383, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1384 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1384, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1385 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1385, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1386 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1386, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1387 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1387, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1388 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1388, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1389 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1389, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c139 SIMPLE [(test_txt)test_txt.FieldSchema(name:c139, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1390 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1390, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1391 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1391, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1392 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1392, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1393 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1393, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1394 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1394, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1395 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1395, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1396 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1396, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1397 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1397, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1398 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1398, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1399 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1399, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c14 SIMPLE [(test_txt)test_txt.FieldSchema(name:c14, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c140 SIMPLE [(test_txt)test_txt.FieldSchema(name:c140, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1400 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1400, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1401 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1401, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1402 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1402, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1403 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1403, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1404 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1404, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1405 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1405, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1406 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1406, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1407 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1407, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1408 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1408, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1409 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1409, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c141 SIMPLE [(test_txt)test_txt.FieldSchema(name:c141, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1410 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1410, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1411 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1411, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1412 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1412, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1413 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1413, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1414 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1414, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1415 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1415, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1416 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1416, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1417 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1417, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1418 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1418, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1419 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1419, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c142 SIMPLE [(test_txt)test_txt.FieldSchema(name:c142, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1420 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1420, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1421 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1421, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1422 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1422, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1423 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1423, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1424 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1424, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1425 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1425, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1426 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1426, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1427 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1427, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1428 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1428, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1429 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1429, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c143 SIMPLE [(test_txt)test_txt.FieldSchema(name:c143, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1430 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1430, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1431 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1431, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1432 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1432, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1433 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1433, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1434 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1434, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1435 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1435, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1436 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1436, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1437 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1437, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1438 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1438, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1439 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1439, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c144 SIMPLE [(test_txt)test_txt.FieldSchema(name:c144, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1440 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1440, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1441 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1441, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1442 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1442, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1443 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1443, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1444 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1444, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1445 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1445, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1446 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1446, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1447 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1447, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1448 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1448, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1449 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1449, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c145 SIMPLE [(test_txt)test_txt.FieldSchema(name:c145, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1450 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1450, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1451 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1451, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1452 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1452, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1453 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1453, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1454 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1454, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1455 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1455, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1456 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1456, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1457 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1457, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1458 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1458, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1459 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1459, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c146 SIMPLE [(test_txt)test_txt.FieldSchema(name:c146, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1460 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1460, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1461 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1461, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1462 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1462, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1463 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1463, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1464 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1464, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1465 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1465, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1466 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1466, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1467 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1467, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1468 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1468, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1469 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1469, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c147 SIMPLE [(test_txt)test_txt.FieldSchema(name:c147, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1470 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1470, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1471 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1471, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1472 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1472, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1473 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1473, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1474 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1474, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1475 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1475, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1476 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1476, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1477 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1477, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1478 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1478, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1479 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1479, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c148 SIMPLE [(test_txt)test_txt.FieldSchema(name:c148, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1480 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1480, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1481 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1481, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1482 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1482, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1483 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1483, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1484 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1484, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1485 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1485, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1486 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1486, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1487 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1487, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1488 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1488, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1489 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1489, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c149 SIMPLE [(test_txt)test_txt.FieldSchema(name:c149, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1490 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1490, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1491 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1491, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1492 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1492, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1493 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1493, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1494 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1494, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1495 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1495, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1496 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1496, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1497 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1497, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1498
SIMPLE [(test_txt)test_txt.FieldSchema(name:c1498, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1499 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1499, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c15 SIMPLE [(test_txt)test_txt.FieldSchema(name:c15, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c150 SIMPLE [(test_txt)test_txt.FieldSchema(name:c150, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1500 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1500, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1501 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1501, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1502 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1502, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1503 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1503, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1504 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1504, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1505 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1505, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1506 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1506, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1507 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1507, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1508 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1508, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1509 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1509, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c151 SIMPLE [(test_txt)test_txt.FieldSchema(name:c151, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1510 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1510, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1511 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1511, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1512 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1512, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1513 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1513, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1514 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1514, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1515 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1515, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1516 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1516, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1517 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1517, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1518 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1518, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1519 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1519, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c152 SIMPLE [(test_txt)test_txt.FieldSchema(name:c152, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1520 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1520, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1521 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1521, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1522 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1522, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1523 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1523, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1524 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1524, type:varchar(64), comment:null), ] 
-POSTHOOK: Lineage: test_orc.c1525 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1525, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1526 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1526, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1527 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1527, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1528 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1528, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1529 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1529, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c153 SIMPLE [(test_txt)test_txt.FieldSchema(name:c153, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1530 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1530, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1531 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1531, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1532 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1532, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1533 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1533, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1534 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1534, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1535 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1535, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1536 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1536, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1537 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1537, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1538 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1538, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1539 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1539, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c154 SIMPLE [(test_txt)test_txt.FieldSchema(name:c154, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1540 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1540, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1541 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1541, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1542 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1542, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1543 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1543, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1544 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1544, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1545 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1545, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1546 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1546, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1547 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1547, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1548 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1548, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1549 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1549, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c155 SIMPLE [(test_txt)test_txt.FieldSchema(name:c155, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1550 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1550, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1551 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1551, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1552 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c1552, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1553 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1553, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1554 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1554, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1555 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1555, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1556 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1556, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1557 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1557, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1558 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1558, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1559 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1559, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c156 SIMPLE [(test_txt)test_txt.FieldSchema(name:c156, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1560 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1560, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1561 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1561, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1562 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1562, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1563 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1563, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1564 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1564, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1565 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1565, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1566 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1566, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1567 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1567, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1568 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1568, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1569 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1569, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c157 SIMPLE [(test_txt)test_txt.FieldSchema(name:c157, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1570 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1570, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1571 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1571, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1572 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1572, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1573 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1573, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1574 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1574, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1575 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1575, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1576 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1576, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1577 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1577, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1578 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1578, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1579 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1579, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c158 SIMPLE [(test_txt)test_txt.FieldSchema(name:c158, 
type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1580 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1580, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1581 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1581, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1582 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1582, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1583 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1583, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1584 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1584, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1585 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1585, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1586 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1586, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1587 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1587, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1588 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1588, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1589 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1589, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c159 SIMPLE [(test_txt)test_txt.FieldSchema(name:c159, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1590 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1590, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1591 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1591, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1592 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1592, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1593 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1593, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1594 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1594, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1595 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1595, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1596 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1596, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1597 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1597, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1598 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1598, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1599 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1599, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c16 SIMPLE [(test_txt)test_txt.FieldSchema(name:c16, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c160 SIMPLE [(test_txt)test_txt.FieldSchema(name:c160, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1600 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1600, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1601 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1601, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1602 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1602, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1603 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1603, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1604 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1604, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1605 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1605, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1606 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1606, type:int, comment:null), ] -POSTHOOK: Lineage: 
test_orc.c1607 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1607, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1608 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1608, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1609 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1609, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c161 SIMPLE [(test_txt)test_txt.FieldSchema(name:c161, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1610 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1610, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1611 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1611, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1612 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1612, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1613 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1613, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1614 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1614, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1615 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1615, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1616 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1616, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1617 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1617, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1618 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1618, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1619 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1619, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c162 SIMPLE [(test_txt)test_txt.FieldSchema(name:c162, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1620 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1620, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1621 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1621, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1622 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1622, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1623 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1623, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1624 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1624, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1625 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1625, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1626 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1626, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1627 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1627, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1628 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1628, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1629 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1629, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c163 SIMPLE [(test_txt)test_txt.FieldSchema(name:c163, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1630 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1630, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1631 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1631, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1632 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1632, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1633 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1633, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1634 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c1634, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1635 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1635, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1636 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1636, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1637 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1637, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1638 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1638, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1639 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1639, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c164 SIMPLE [(test_txt)test_txt.FieldSchema(name:c164, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1640 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1640, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1641 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1641, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1642 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1642, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1643 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1643, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1644 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1644, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1645 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1645, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1646 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1646, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1647 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1647, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1648 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1648, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1649 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1649, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c165 SIMPLE [(test_txt)test_txt.FieldSchema(name:c165, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1650 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1650, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1651 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1651, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1652 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1652, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1653 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1653, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1654 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1654, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1655 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1655, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1656 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1656, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1657 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1657, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1658 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1658, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1659 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1659, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c166 SIMPLE [(test_txt)test_txt.FieldSchema(name:c166, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1660 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1660, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1661 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1661, 
type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1662 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1662, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1663 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1663, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1664 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1664, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1665 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1665, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1666 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1666, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1667 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1667, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1668 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1668, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1669 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1669, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c167 SIMPLE [(test_txt)test_txt.FieldSchema(name:c167, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1670 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1670, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1671 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1671, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1672 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1672, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1673 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1673, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1674 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1674, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1675 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1675, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1676 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1676, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1677 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1677, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1678 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1678, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1679 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1679, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c168 SIMPLE [(test_txt)test_txt.FieldSchema(name:c168, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1680 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1680, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1681 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1681, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1682 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1682, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1683 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1683, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1684 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1684, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1685 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1685, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1686 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1686, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1687 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1687, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1688 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1688, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1689 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1689, type:int, comment:null), 
] -POSTHOOK: Lineage: test_orc.c169 SIMPLE [(test_txt)test_txt.FieldSchema(name:c169, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1690 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1690, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1691 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1691, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1692 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1692, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1693 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1693, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1694 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1694, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1695 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1695, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1696 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1696, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1697 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1697, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1698 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1698, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1699 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1699, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c17 SIMPLE [(test_txt)test_txt.FieldSchema(name:c17, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c170 SIMPLE [(test_txt)test_txt.FieldSchema(name:c170, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1700 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1700, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1701 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1701, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1702 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1702, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1703 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1703, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1704 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1704, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1705 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1705, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1706 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1706, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1707 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1707, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1708 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1708, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1709 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1709, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c171 SIMPLE [(test_txt)test_txt.FieldSchema(name:c171, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1710 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1710, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1711 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1711, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1712 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1712, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1713 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1713, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1714 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1714, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1715 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1715, type:int, comment:null), ] -POSTHOOK: Lineage: 
test_orc.c1716 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1716, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1717 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1717, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1718 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1718, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1719 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1719, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c172 SIMPLE [(test_txt)test_txt.FieldSchema(name:c172, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1720 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1720, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1721 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1721, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1722 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1722, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1723 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1723, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1724 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1724, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1725 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1725, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1726 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1726, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1727 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1727, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1728 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1728, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1729 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1729, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c173 SIMPLE [(test_txt)test_txt.FieldSchema(name:c173, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1730 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1730, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1731 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1731, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1732 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1732, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1733 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1733, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1734 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1734, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1735 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1735, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1736 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1736, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1737 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1737, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1738 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1738, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1739 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1739, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c174 SIMPLE [(test_txt)test_txt.FieldSchema(name:c174, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1740 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1740, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1741 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1741, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1742 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1742, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1743 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c1743, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1744 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1744, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1745 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1745, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1746 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1746, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1747 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1747, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1748 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1748, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1749 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1749, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c175 SIMPLE [(test_txt)test_txt.FieldSchema(name:c175, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1750 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1750, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1751 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1751, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1752 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1752, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1753 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1753, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1754 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1754, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1755 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1755, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1756 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1756, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1757 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1757, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1758 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1758, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1759 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1759, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c176 SIMPLE [(test_txt)test_txt.FieldSchema(name:c176, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1760 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1760, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1761 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1761, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1762 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1762, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1763 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1763, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1764 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1764, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1765 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1765, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1766 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1766, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1767 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1767, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1768 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1768, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1769 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1769, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c177 SIMPLE [(test_txt)test_txt.FieldSchema(name:c177, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1770 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1770, type:varchar(64), 
comment:null), ] -POSTHOOK: Lineage: test_orc.c1771 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1771, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1772 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1772, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1773 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1773, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1774 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1774, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1775 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1775, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1776 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1776, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1777 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1777, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1778 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1778, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1779 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1779, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c178 SIMPLE [(test_txt)test_txt.FieldSchema(name:c178, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1780 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1780, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1781 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1781, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1782 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1782, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1783 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1783, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1784 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1784, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1785 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1785, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1786 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1786, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1787 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1787, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1788 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1788, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1789 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1789, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c179 SIMPLE [(test_txt)test_txt.FieldSchema(name:c179, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1790 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1790, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1791 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1791, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1792 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1792, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1793 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1793, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1794 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1794, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1795 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1795, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1796 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1796, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1797 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1797, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1798 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1798, type:char(4), comment:null), ] -POSTHOOK: 
Lineage: test_orc.c1799 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1799, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c18 SIMPLE [(test_txt)test_txt.FieldSchema(name:c18, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c180 SIMPLE [(test_txt)test_txt.FieldSchema(name:c180, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1800 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1800, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1801 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1801, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1802 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1802, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1803 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1803, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1804 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1804, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1805 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1805, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1806 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1806, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1807 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1807, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1808 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1808, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1809 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1809, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c181 SIMPLE [(test_txt)test_txt.FieldSchema(name:c181, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1810 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1810, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1811 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1811, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1812 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1812, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1813 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1813, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1814 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1814, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1815 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1815, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1816 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1816, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1817 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1817, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1818 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1818, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1819 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1819, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c182 SIMPLE [(test_txt)test_txt.FieldSchema(name:c182, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1820 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1820, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1821 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1821, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1822 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1822, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c1823 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1823, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1824 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1824, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1825 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1825, 
type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1826 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1826, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1827 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1827, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1828 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1828, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1829 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1829, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c183 SIMPLE [(test_txt)test_txt.FieldSchema(name:c183, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1830 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1830, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1831 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1831, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1832 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1832, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1833 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1833, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1834 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1834, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1835 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1835, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1836 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1836, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1837 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1837, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1838 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1838, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c1839 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1839, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c184 SIMPLE [(test_txt)test_txt.FieldSchema(name:c184, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1840 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1840, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1841 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1841, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c1842 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1842, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1843 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1843, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1844 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1844, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c1845 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1845, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1846 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1846, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1847 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1847, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c1848 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1848, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1849 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1849, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c185 SIMPLE [(test_txt)test_txt.FieldSchema(name:c185, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1850 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1850, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c1851 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1851, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c1852 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1852, type:float, 
comment:null), ]
-POSTHOOK: Lineage: test_orc.c1853 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1853, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1854 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1854, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1855 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1855, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1856 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1856, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1857 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1857, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1858 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1858, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1859 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1859, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c186 SIMPLE [(test_txt)test_txt.FieldSchema(name:c186, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1860 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1860, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1861 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1861, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1862 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1862, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1863 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1863, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1864 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1864, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1865 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1865, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1866 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1866, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1867 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1867, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1868 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1868, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1869 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1869, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c187 SIMPLE [(test_txt)test_txt.FieldSchema(name:c187, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1870 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1870, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1871 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1871, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1872 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1872, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1873 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1873, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1874 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1874, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1875 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1875, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1876 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1876, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1877 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1877, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1878 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1878, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1879 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1879, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c188 SIMPLE [(test_txt)test_txt.FieldSchema(name:c188, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1880 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1880, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1881 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1881, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1882 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1882, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1883 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1883, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1884 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1884, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1885 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1885, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1886 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1886, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1887 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1887, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1888 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1888, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1889 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1889, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c189 SIMPLE [(test_txt)test_txt.FieldSchema(name:c189, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1890 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1890, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1891 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1891, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1892 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1892, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1893 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1893, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1894 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1894, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1895 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1895, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1896 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1896, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1897 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1897, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1898 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1898, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1899 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1899, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c19 SIMPLE [(test_txt)test_txt.FieldSchema(name:c19, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c190 SIMPLE [(test_txt)test_txt.FieldSchema(name:c190, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1900 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1900, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1901 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1901, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1902 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1902, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1903 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1903, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1904 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1904, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1905 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1905, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1906 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1906, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1907 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1907, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1908 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1908, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1909 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1909, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c191 SIMPLE [(test_txt)test_txt.FieldSchema(name:c191, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1910 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1910, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1911 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1911, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1912 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1912, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1913 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1913, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1914 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1914, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1915 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1915, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1916 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1916, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1917 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1917, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1918 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1918, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1919 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1919, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c192 SIMPLE [(test_txt)test_txt.FieldSchema(name:c192, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1920 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1920, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1921 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1921, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1922 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1922, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1923 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1923, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1924 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1924, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1925 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1925, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1926 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1926, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1927 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1927, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1928 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1928, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1929 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1929, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c193 SIMPLE [(test_txt)test_txt.FieldSchema(name:c193, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1930 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1930, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1931 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1931, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1932 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1932, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1933 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1933, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1934 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1934, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1935 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1935, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1936 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1936, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1937 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1937, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1938 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1938, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1939 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1939, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c194 SIMPLE [(test_txt)test_txt.FieldSchema(name:c194, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1940 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1940, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1941 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1941, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1942 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1942, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1943 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1943, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1944 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1944, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1945 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1945, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1946 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1946, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1947 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1947, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1948 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1948, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1949 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1949, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c195 SIMPLE [(test_txt)test_txt.FieldSchema(name:c195, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1950 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1950, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1951 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1951, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1952 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1952, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1953 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1953, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1954 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1954, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1955 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1955, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1956 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1956, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1957 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1957, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1958 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1958, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1959 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1959, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c196 SIMPLE [(test_txt)test_txt.FieldSchema(name:c196, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1960 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1960, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1961 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1961, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1962 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1962, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1963 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1963, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1964 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1964, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1965 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1965, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1966 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1966, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1967 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1967, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1968 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1968, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1969 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1969, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c197 SIMPLE [(test_txt)test_txt.FieldSchema(name:c197, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1970 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1970, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1971 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1971, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1972 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1972, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1973 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1973, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1974 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1974, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1975 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1975, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1976 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1976, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1977 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1977, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1978 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1978, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1979 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1979, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c198 SIMPLE [(test_txt)test_txt.FieldSchema(name:c198, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1980 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1980, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1981 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1981, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1982 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1982, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1983 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1983, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1984 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1984, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1985 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1985, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1986 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1986, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1987 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1987, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1988 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1988, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1989 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1989, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c199 SIMPLE [(test_txt)test_txt.FieldSchema(name:c199, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1990 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1990, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1991 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1991, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1992 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1992, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1993 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1993, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1994 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1994, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1995 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1995, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1996 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1996, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1997 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1997, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c1998 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1998, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c1999 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1999, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c2 SIMPLE [(test_txt)test_txt.FieldSchema(name:c2, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c20 SIMPLE [(test_txt)test_txt.FieldSchema(name:c20, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c200 SIMPLE [(test_txt)test_txt.FieldSchema(name:c200, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c2000 SIMPLE [(test_txt)test_txt.FieldSchema(name:c2000, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c201 SIMPLE [(test_txt)test_txt.FieldSchema(name:c201, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c202 SIMPLE [(test_txt)test_txt.FieldSchema(name:c202, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c203 SIMPLE [(test_txt)test_txt.FieldSchema(name:c203, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c204 SIMPLE [(test_txt)test_txt.FieldSchema(name:c204, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c205 SIMPLE [(test_txt)test_txt.FieldSchema(name:c205, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c206 SIMPLE [(test_txt)test_txt.FieldSchema(name:c206, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c207 SIMPLE [(test_txt)test_txt.FieldSchema(name:c207, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c208 SIMPLE [(test_txt)test_txt.FieldSchema(name:c208, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c209 SIMPLE [(test_txt)test_txt.FieldSchema(name:c209, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c21 SIMPLE [(test_txt)test_txt.FieldSchema(name:c21, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c210 SIMPLE [(test_txt)test_txt.FieldSchema(name:c210, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c211 SIMPLE [(test_txt)test_txt.FieldSchema(name:c211, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c212 SIMPLE [(test_txt)test_txt.FieldSchema(name:c212, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c213 SIMPLE [(test_txt)test_txt.FieldSchema(name:c213, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c214 SIMPLE [(test_txt)test_txt.FieldSchema(name:c214, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c215 SIMPLE [(test_txt)test_txt.FieldSchema(name:c215, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c216 SIMPLE [(test_txt)test_txt.FieldSchema(name:c216, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c217 SIMPLE [(test_txt)test_txt.FieldSchema(name:c217, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c218 SIMPLE [(test_txt)test_txt.FieldSchema(name:c218, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c219 SIMPLE [(test_txt)test_txt.FieldSchema(name:c219, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c22 SIMPLE [(test_txt)test_txt.FieldSchema(name:c22, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c220 SIMPLE [(test_txt)test_txt.FieldSchema(name:c220, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c221 SIMPLE [(test_txt)test_txt.FieldSchema(name:c221, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c222 SIMPLE [(test_txt)test_txt.FieldSchema(name:c222, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c223 SIMPLE [(test_txt)test_txt.FieldSchema(name:c223, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c224 SIMPLE [(test_txt)test_txt.FieldSchema(name:c224, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c225 SIMPLE [(test_txt)test_txt.FieldSchema(name:c225, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c226 SIMPLE [(test_txt)test_txt.FieldSchema(name:c226, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c227 SIMPLE [(test_txt)test_txt.FieldSchema(name:c227, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c228 SIMPLE [(test_txt)test_txt.FieldSchema(name:c228, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c229 SIMPLE [(test_txt)test_txt.FieldSchema(name:c229, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c23 SIMPLE [(test_txt)test_txt.FieldSchema(name:c23, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c230 SIMPLE [(test_txt)test_txt.FieldSchema(name:c230, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c231 SIMPLE [(test_txt)test_txt.FieldSchema(name:c231, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c232 SIMPLE [(test_txt)test_txt.FieldSchema(name:c232, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c233 SIMPLE [(test_txt)test_txt.FieldSchema(name:c233, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c234 SIMPLE [(test_txt)test_txt.FieldSchema(name:c234, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c235 SIMPLE [(test_txt)test_txt.FieldSchema(name:c235, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c236 SIMPLE [(test_txt)test_txt.FieldSchema(name:c236, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c237 SIMPLE [(test_txt)test_txt.FieldSchema(name:c237, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c238 SIMPLE [(test_txt)test_txt.FieldSchema(name:c238, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c239 SIMPLE [(test_txt)test_txt.FieldSchema(name:c239, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c24 SIMPLE [(test_txt)test_txt.FieldSchema(name:c24, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c240 SIMPLE [(test_txt)test_txt.FieldSchema(name:c240, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c241 SIMPLE [(test_txt)test_txt.FieldSchema(name:c241, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c242 SIMPLE [(test_txt)test_txt.FieldSchema(name:c242, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c243 SIMPLE [(test_txt)test_txt.FieldSchema(name:c243, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c244 SIMPLE [(test_txt)test_txt.FieldSchema(name:c244, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c245 SIMPLE [(test_txt)test_txt.FieldSchema(name:c245, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c246 SIMPLE [(test_txt)test_txt.FieldSchema(name:c246, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c247 SIMPLE [(test_txt)test_txt.FieldSchema(name:c247, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c248 SIMPLE [(test_txt)test_txt.FieldSchema(name:c248, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c249 SIMPLE [(test_txt)test_txt.FieldSchema(name:c249, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c25 SIMPLE [(test_txt)test_txt.FieldSchema(name:c25, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c250 SIMPLE [(test_txt)test_txt.FieldSchema(name:c250, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c251 SIMPLE [(test_txt)test_txt.FieldSchema(name:c251, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c252 SIMPLE [(test_txt)test_txt.FieldSchema(name:c252, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c253 SIMPLE [(test_txt)test_txt.FieldSchema(name:c253, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c254 SIMPLE [(test_txt)test_txt.FieldSchema(name:c254, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c255 SIMPLE [(test_txt)test_txt.FieldSchema(name:c255, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c256 SIMPLE [(test_txt)test_txt.FieldSchema(name:c256, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c257 SIMPLE [(test_txt)test_txt.FieldSchema(name:c257, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c258 SIMPLE [(test_txt)test_txt.FieldSchema(name:c258, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c259 SIMPLE [(test_txt)test_txt.FieldSchema(name:c259, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c26 SIMPLE [(test_txt)test_txt.FieldSchema(name:c26, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c260 SIMPLE [(test_txt)test_txt.FieldSchema(name:c260, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c261 SIMPLE [(test_txt)test_txt.FieldSchema(name:c261, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c262 SIMPLE [(test_txt)test_txt.FieldSchema(name:c262, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c263 SIMPLE [(test_txt)test_txt.FieldSchema(name:c263, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c264 SIMPLE [(test_txt)test_txt.FieldSchema(name:c264, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c265 SIMPLE [(test_txt)test_txt.FieldSchema(name:c265, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c266 SIMPLE [(test_txt)test_txt.FieldSchema(name:c266, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c267 SIMPLE [(test_txt)test_txt.FieldSchema(name:c267, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c268 SIMPLE [(test_txt)test_txt.FieldSchema(name:c268, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c269 SIMPLE [(test_txt)test_txt.FieldSchema(name:c269, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c27 SIMPLE [(test_txt)test_txt.FieldSchema(name:c27, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c270 SIMPLE [(test_txt)test_txt.FieldSchema(name:c270, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c271 SIMPLE [(test_txt)test_txt.FieldSchema(name:c271, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c272 SIMPLE [(test_txt)test_txt.FieldSchema(name:c272, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c273 SIMPLE [(test_txt)test_txt.FieldSchema(name:c273, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c274 SIMPLE [(test_txt)test_txt.FieldSchema(name:c274, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c275 SIMPLE [(test_txt)test_txt.FieldSchema(name:c275, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c276 SIMPLE [(test_txt)test_txt.FieldSchema(name:c276, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c277 SIMPLE [(test_txt)test_txt.FieldSchema(name:c277, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c278 SIMPLE [(test_txt)test_txt.FieldSchema(name:c278, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c279 SIMPLE [(test_txt)test_txt.FieldSchema(name:c279, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c28 SIMPLE [(test_txt)test_txt.FieldSchema(name:c28, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c280 SIMPLE [(test_txt)test_txt.FieldSchema(name:c280, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c281 SIMPLE [(test_txt)test_txt.FieldSchema(name:c281, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c282 SIMPLE [(test_txt)test_txt.FieldSchema(name:c282, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c283 SIMPLE [(test_txt)test_txt.FieldSchema(name:c283, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c284 SIMPLE [(test_txt)test_txt.FieldSchema(name:c284, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c285 SIMPLE [(test_txt)test_txt.FieldSchema(name:c285, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c286 SIMPLE [(test_txt)test_txt.FieldSchema(name:c286, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c287 SIMPLE [(test_txt)test_txt.FieldSchema(name:c287, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c288 SIMPLE [(test_txt)test_txt.FieldSchema(name:c288, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c289 SIMPLE [(test_txt)test_txt.FieldSchema(name:c289, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c29 SIMPLE [(test_txt)test_txt.FieldSchema(name:c29, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c290 SIMPLE [(test_txt)test_txt.FieldSchema(name:c290, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c291 SIMPLE [(test_txt)test_txt.FieldSchema(name:c291, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c292 SIMPLE [(test_txt)test_txt.FieldSchema(name:c292, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c293 SIMPLE [(test_txt)test_txt.FieldSchema(name:c293, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c294 SIMPLE [(test_txt)test_txt.FieldSchema(name:c294, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c295 SIMPLE [(test_txt)test_txt.FieldSchema(name:c295, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c296 SIMPLE [(test_txt)test_txt.FieldSchema(name:c296, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c297 SIMPLE [(test_txt)test_txt.FieldSchema(name:c297, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c298 SIMPLE [(test_txt)test_txt.FieldSchema(name:c298, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c299 SIMPLE [(test_txt)test_txt.FieldSchema(name:c299, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c3 SIMPLE [(test_txt)test_txt.FieldSchema(name:c3, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c30 SIMPLE [(test_txt)test_txt.FieldSchema(name:c30, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c300 SIMPLE [(test_txt)test_txt.FieldSchema(name:c300, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c301 SIMPLE [(test_txt)test_txt.FieldSchema(name:c301, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c302 SIMPLE [(test_txt)test_txt.FieldSchema(name:c302, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c303 SIMPLE [(test_txt)test_txt.FieldSchema(name:c303, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c304 SIMPLE [(test_txt)test_txt.FieldSchema(name:c304, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c305 SIMPLE [(test_txt)test_txt.FieldSchema(name:c305, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c306 SIMPLE [(test_txt)test_txt.FieldSchema(name:c306, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c307 SIMPLE [(test_txt)test_txt.FieldSchema(name:c307, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c308 SIMPLE [(test_txt)test_txt.FieldSchema(name:c308, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c309 SIMPLE [(test_txt)test_txt.FieldSchema(name:c309, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c31 SIMPLE [(test_txt)test_txt.FieldSchema(name:c31, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c310 SIMPLE [(test_txt)test_txt.FieldSchema(name:c310, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c311 SIMPLE [(test_txt)test_txt.FieldSchema(name:c311, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c312 SIMPLE [(test_txt)test_txt.FieldSchema(name:c312, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c313 SIMPLE [(test_txt)test_txt.FieldSchema(name:c313, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c314 SIMPLE [(test_txt)test_txt.FieldSchema(name:c314, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c315 SIMPLE [(test_txt)test_txt.FieldSchema(name:c315, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c316 SIMPLE [(test_txt)test_txt.FieldSchema(name:c316, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c317 SIMPLE [(test_txt)test_txt.FieldSchema(name:c317, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c318 SIMPLE [(test_txt)test_txt.FieldSchema(name:c318, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c319 SIMPLE [(test_txt)test_txt.FieldSchema(name:c319, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c32 SIMPLE [(test_txt)test_txt.FieldSchema(name:c32, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c320 SIMPLE [(test_txt)test_txt.FieldSchema(name:c320, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c321 SIMPLE [(test_txt)test_txt.FieldSchema(name:c321, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c322 SIMPLE [(test_txt)test_txt.FieldSchema(name:c322, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c323 SIMPLE [(test_txt)test_txt.FieldSchema(name:c323, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c324 SIMPLE [(test_txt)test_txt.FieldSchema(name:c324, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c325 SIMPLE [(test_txt)test_txt.FieldSchema(name:c325, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c326 SIMPLE [(test_txt)test_txt.FieldSchema(name:c326, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c327 SIMPLE [(test_txt)test_txt.FieldSchema(name:c327, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c328 SIMPLE [(test_txt)test_txt.FieldSchema(name:c328, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c329 SIMPLE [(test_txt)test_txt.FieldSchema(name:c329, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c33 SIMPLE [(test_txt)test_txt.FieldSchema(name:c33, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c330 SIMPLE [(test_txt)test_txt.FieldSchema(name:c330, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c331 SIMPLE [(test_txt)test_txt.FieldSchema(name:c331, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c332 SIMPLE [(test_txt)test_txt.FieldSchema(name:c332, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c333 SIMPLE [(test_txt)test_txt.FieldSchema(name:c333, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c334 SIMPLE [(test_txt)test_txt.FieldSchema(name:c334, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c335 SIMPLE [(test_txt)test_txt.FieldSchema(name:c335, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c336 SIMPLE [(test_txt)test_txt.FieldSchema(name:c336, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c337 SIMPLE [(test_txt)test_txt.FieldSchema(name:c337, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c338 SIMPLE [(test_txt)test_txt.FieldSchema(name:c338, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c339 SIMPLE [(test_txt)test_txt.FieldSchema(name:c339, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c34 SIMPLE [(test_txt)test_txt.FieldSchema(name:c34, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c340 SIMPLE [(test_txt)test_txt.FieldSchema(name:c340, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c341 SIMPLE [(test_txt)test_txt.FieldSchema(name:c341, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c342 SIMPLE [(test_txt)test_txt.FieldSchema(name:c342, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c343 SIMPLE [(test_txt)test_txt.FieldSchema(name:c343, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c344 SIMPLE [(test_txt)test_txt.FieldSchema(name:c344, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c345 SIMPLE [(test_txt)test_txt.FieldSchema(name:c345, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c346 SIMPLE [(test_txt)test_txt.FieldSchema(name:c346, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c347 SIMPLE [(test_txt)test_txt.FieldSchema(name:c347, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c348 SIMPLE [(test_txt)test_txt.FieldSchema(name:c348, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c349 SIMPLE [(test_txt)test_txt.FieldSchema(name:c349, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c35 SIMPLE [(test_txt)test_txt.FieldSchema(name:c35, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c350 SIMPLE [(test_txt)test_txt.FieldSchema(name:c350, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c351 SIMPLE [(test_txt)test_txt.FieldSchema(name:c351, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c352 SIMPLE [(test_txt)test_txt.FieldSchema(name:c352, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c353 SIMPLE [(test_txt)test_txt.FieldSchema(name:c353, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c354 SIMPLE [(test_txt)test_txt.FieldSchema(name:c354, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c355 SIMPLE [(test_txt)test_txt.FieldSchema(name:c355, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c356 SIMPLE [(test_txt)test_txt.FieldSchema(name:c356, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c357 SIMPLE [(test_txt)test_txt.FieldSchema(name:c357, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c358 SIMPLE [(test_txt)test_txt.FieldSchema(name:c358, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c359 SIMPLE [(test_txt)test_txt.FieldSchema(name:c359, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c36 SIMPLE [(test_txt)test_txt.FieldSchema(name:c36, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c360 SIMPLE [(test_txt)test_txt.FieldSchema(name:c360, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c361 SIMPLE [(test_txt)test_txt.FieldSchema(name:c361, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c362 SIMPLE [(test_txt)test_txt.FieldSchema(name:c362, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c363 SIMPLE [(test_txt)test_txt.FieldSchema(name:c363, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c364 SIMPLE [(test_txt)test_txt.FieldSchema(name:c364, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c365 SIMPLE [(test_txt)test_txt.FieldSchema(name:c365, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c366 SIMPLE [(test_txt)test_txt.FieldSchema(name:c366, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c367 SIMPLE [(test_txt)test_txt.FieldSchema(name:c367, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c368 SIMPLE [(test_txt)test_txt.FieldSchema(name:c368, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c369 SIMPLE [(test_txt)test_txt.FieldSchema(name:c369, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c37 SIMPLE [(test_txt)test_txt.FieldSchema(name:c37, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c370 SIMPLE [(test_txt)test_txt.FieldSchema(name:c370, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c371 SIMPLE [(test_txt)test_txt.FieldSchema(name:c371, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c372 SIMPLE [(test_txt)test_txt.FieldSchema(name:c372, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c373 SIMPLE [(test_txt)test_txt.FieldSchema(name:c373, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c374 SIMPLE [(test_txt)test_txt.FieldSchema(name:c374, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c375 SIMPLE [(test_txt)test_txt.FieldSchema(name:c375, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c376 SIMPLE [(test_txt)test_txt.FieldSchema(name:c376, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c377 SIMPLE [(test_txt)test_txt.FieldSchema(name:c377, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c378 SIMPLE [(test_txt)test_txt.FieldSchema(name:c378, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c379 SIMPLE [(test_txt)test_txt.FieldSchema(name:c379, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c38 SIMPLE [(test_txt)test_txt.FieldSchema(name:c38, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c380 SIMPLE [(test_txt)test_txt.FieldSchema(name:c380, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c381 SIMPLE [(test_txt)test_txt.FieldSchema(name:c381, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c382 SIMPLE [(test_txt)test_txt.FieldSchema(name:c382, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c383 SIMPLE [(test_txt)test_txt.FieldSchema(name:c383, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c384 SIMPLE [(test_txt)test_txt.FieldSchema(name:c384, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c385 SIMPLE [(test_txt)test_txt.FieldSchema(name:c385, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c386 SIMPLE [(test_txt)test_txt.FieldSchema(name:c386, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c387 SIMPLE [(test_txt)test_txt.FieldSchema(name:c387, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c388 SIMPLE [(test_txt)test_txt.FieldSchema(name:c388, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c389 SIMPLE [(test_txt)test_txt.FieldSchema(name:c389, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c39 SIMPLE [(test_txt)test_txt.FieldSchema(name:c39, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c390 SIMPLE [(test_txt)test_txt.FieldSchema(name:c390, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c391 SIMPLE [(test_txt)test_txt.FieldSchema(name:c391, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c392 SIMPLE [(test_txt)test_txt.FieldSchema(name:c392, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c393 SIMPLE [(test_txt)test_txt.FieldSchema(name:c393, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c394 SIMPLE [(test_txt)test_txt.FieldSchema(name:c394, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c395 SIMPLE [(test_txt)test_txt.FieldSchema(name:c395, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c396 SIMPLE [(test_txt)test_txt.FieldSchema(name:c396, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c397 SIMPLE [(test_txt)test_txt.FieldSchema(name:c397, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c398 SIMPLE [(test_txt)test_txt.FieldSchema(name:c398, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c399 SIMPLE [(test_txt)test_txt.FieldSchema(name:c399, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c4 SIMPLE [(test_txt)test_txt.FieldSchema(name:c4, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c40 SIMPLE [(test_txt)test_txt.FieldSchema(name:c40, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c400 SIMPLE [(test_txt)test_txt.FieldSchema(name:c400, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c401 SIMPLE [(test_txt)test_txt.FieldSchema(name:c401, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c402 SIMPLE [(test_txt)test_txt.FieldSchema(name:c402, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c403 SIMPLE [(test_txt)test_txt.FieldSchema(name:c403, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c404 SIMPLE [(test_txt)test_txt.FieldSchema(name:c404, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c405 SIMPLE [(test_txt)test_txt.FieldSchema(name:c405, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c406 SIMPLE [(test_txt)test_txt.FieldSchema(name:c406, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c407 SIMPLE [(test_txt)test_txt.FieldSchema(name:c407, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c408 SIMPLE [(test_txt)test_txt.FieldSchema(name:c408, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c409 SIMPLE [(test_txt)test_txt.FieldSchema(name:c409, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c41 SIMPLE [(test_txt)test_txt.FieldSchema(name:c41, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c410 SIMPLE [(test_txt)test_txt.FieldSchema(name:c410, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c411 SIMPLE [(test_txt)test_txt.FieldSchema(name:c411, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c412 SIMPLE [(test_txt)test_txt.FieldSchema(name:c412, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c413 SIMPLE [(test_txt)test_txt.FieldSchema(name:c413, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c414 SIMPLE [(test_txt)test_txt.FieldSchema(name:c414, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c415 SIMPLE [(test_txt)test_txt.FieldSchema(name:c415, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c416 SIMPLE [(test_txt)test_txt.FieldSchema(name:c416, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c417 SIMPLE [(test_txt)test_txt.FieldSchema(name:c417, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c418 SIMPLE [(test_txt)test_txt.FieldSchema(name:c418, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c419 SIMPLE [(test_txt)test_txt.FieldSchema(name:c419, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c42 SIMPLE [(test_txt)test_txt.FieldSchema(name:c42, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c420 SIMPLE [(test_txt)test_txt.FieldSchema(name:c420, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c421 SIMPLE [(test_txt)test_txt.FieldSchema(name:c421, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c422 SIMPLE [(test_txt)test_txt.FieldSchema(name:c422, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c423 SIMPLE [(test_txt)test_txt.FieldSchema(name:c423, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c424 SIMPLE [(test_txt)test_txt.FieldSchema(name:c424, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c425 SIMPLE [(test_txt)test_txt.FieldSchema(name:c425, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c426 SIMPLE [(test_txt)test_txt.FieldSchema(name:c426, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c427 SIMPLE [(test_txt)test_txt.FieldSchema(name:c427, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c428 SIMPLE [(test_txt)test_txt.FieldSchema(name:c428, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c429 SIMPLE [(test_txt)test_txt.FieldSchema(name:c429, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c43 SIMPLE [(test_txt)test_txt.FieldSchema(name:c43, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c430 SIMPLE [(test_txt)test_txt.FieldSchema(name:c430, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c431 SIMPLE [(test_txt)test_txt.FieldSchema(name:c431, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c432 SIMPLE [(test_txt)test_txt.FieldSchema(name:c432, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c433 SIMPLE [(test_txt)test_txt.FieldSchema(name:c433, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c434 SIMPLE [(test_txt)test_txt.FieldSchema(name:c434, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c435 SIMPLE [(test_txt)test_txt.FieldSchema(name:c435, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c436 SIMPLE [(test_txt)test_txt.FieldSchema(name:c436, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c437 SIMPLE [(test_txt)test_txt.FieldSchema(name:c437, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c438 SIMPLE [(test_txt)test_txt.FieldSchema(name:c438, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c439 SIMPLE [(test_txt)test_txt.FieldSchema(name:c439, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c44 SIMPLE [(test_txt)test_txt.FieldSchema(name:c44, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c440 SIMPLE [(test_txt)test_txt.FieldSchema(name:c440, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c441 SIMPLE [(test_txt)test_txt.FieldSchema(name:c441, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c442 SIMPLE [(test_txt)test_txt.FieldSchema(name:c442, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c443 SIMPLE [(test_txt)test_txt.FieldSchema(name:c443, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c444 SIMPLE [(test_txt)test_txt.FieldSchema(name:c444, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c445 SIMPLE [(test_txt)test_txt.FieldSchema(name:c445, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c446 SIMPLE [(test_txt)test_txt.FieldSchema(name:c446, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c447 SIMPLE [(test_txt)test_txt.FieldSchema(name:c447, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c448 SIMPLE [(test_txt)test_txt.FieldSchema(name:c448, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c449 SIMPLE [(test_txt)test_txt.FieldSchema(name:c449, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c45 SIMPLE [(test_txt)test_txt.FieldSchema(name:c45, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c450 SIMPLE [(test_txt)test_txt.FieldSchema(name:c450, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c451 SIMPLE [(test_txt)test_txt.FieldSchema(name:c451, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c452 SIMPLE [(test_txt)test_txt.FieldSchema(name:c452, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c453 SIMPLE [(test_txt)test_txt.FieldSchema(name:c453, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c454 SIMPLE [(test_txt)test_txt.FieldSchema(name:c454, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c455 SIMPLE [(test_txt)test_txt.FieldSchema(name:c455, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c456 SIMPLE [(test_txt)test_txt.FieldSchema(name:c456, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c457 SIMPLE [(test_txt)test_txt.FieldSchema(name:c457, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c458 SIMPLE [(test_txt)test_txt.FieldSchema(name:c458, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c459 SIMPLE [(test_txt)test_txt.FieldSchema(name:c459, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c46 SIMPLE [(test_txt)test_txt.FieldSchema(name:c46, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c460 SIMPLE [(test_txt)test_txt.FieldSchema(name:c460, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c461 SIMPLE [(test_txt)test_txt.FieldSchema(name:c461, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c462 SIMPLE [(test_txt)test_txt.FieldSchema(name:c462, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c463 SIMPLE [(test_txt)test_txt.FieldSchema(name:c463, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c464 SIMPLE [(test_txt)test_txt.FieldSchema(name:c464, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c465 SIMPLE [(test_txt)test_txt.FieldSchema(name:c465, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c466 SIMPLE [(test_txt)test_txt.FieldSchema(name:c466, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c467 SIMPLE [(test_txt)test_txt.FieldSchema(name:c467, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c468 SIMPLE [(test_txt)test_txt.FieldSchema(name:c468, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c469 SIMPLE [(test_txt)test_txt.FieldSchema(name:c469, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c47 SIMPLE [(test_txt)test_txt.FieldSchema(name:c47, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c470 SIMPLE [(test_txt)test_txt.FieldSchema(name:c470, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c471 SIMPLE [(test_txt)test_txt.FieldSchema(name:c471, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c472 SIMPLE [(test_txt)test_txt.FieldSchema(name:c472, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c473 SIMPLE [(test_txt)test_txt.FieldSchema(name:c473, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c474 SIMPLE [(test_txt)test_txt.FieldSchema(name:c474, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c475 SIMPLE [(test_txt)test_txt.FieldSchema(name:c475, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c476 SIMPLE [(test_txt)test_txt.FieldSchema(name:c476, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c477 SIMPLE [(test_txt)test_txt.FieldSchema(name:c477, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c478 SIMPLE [(test_txt)test_txt.FieldSchema(name:c478, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c479 SIMPLE [(test_txt)test_txt.FieldSchema(name:c479, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c48 SIMPLE [(test_txt)test_txt.FieldSchema(name:c48, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c480 SIMPLE [(test_txt)test_txt.FieldSchema(name:c480, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c481 SIMPLE [(test_txt)test_txt.FieldSchema(name:c481, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c482 SIMPLE [(test_txt)test_txt.FieldSchema(name:c482, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c483 SIMPLE [(test_txt)test_txt.FieldSchema(name:c483, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c484 SIMPLE [(test_txt)test_txt.FieldSchema(name:c484, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c485 SIMPLE [(test_txt)test_txt.FieldSchema(name:c485, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c486 SIMPLE [(test_txt)test_txt.FieldSchema(name:c486, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c487 SIMPLE [(test_txt)test_txt.FieldSchema(name:c487, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c488 SIMPLE [(test_txt)test_txt.FieldSchema(name:c488, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c489 SIMPLE [(test_txt)test_txt.FieldSchema(name:c489, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c49 SIMPLE [(test_txt)test_txt.FieldSchema(name:c49, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c490 SIMPLE [(test_txt)test_txt.FieldSchema(name:c490, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c491 SIMPLE [(test_txt)test_txt.FieldSchema(name:c491, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c492 SIMPLE [(test_txt)test_txt.FieldSchema(name:c492, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c493 SIMPLE [(test_txt)test_txt.FieldSchema(name:c493, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c494 SIMPLE [(test_txt)test_txt.FieldSchema(name:c494, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c495 SIMPLE [(test_txt)test_txt.FieldSchema(name:c495, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c496 SIMPLE [(test_txt)test_txt.FieldSchema(name:c496, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c497 SIMPLE [(test_txt)test_txt.FieldSchema(name:c497, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c498 SIMPLE [(test_txt)test_txt.FieldSchema(name:c498, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c499 SIMPLE [(test_txt)test_txt.FieldSchema(name:c499, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c5 SIMPLE [(test_txt)test_txt.FieldSchema(name:c5, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c50 SIMPLE [(test_txt)test_txt.FieldSchema(name:c50, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c500 SIMPLE [(test_txt)test_txt.FieldSchema(name:c500, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c501 SIMPLE [(test_txt)test_txt.FieldSchema(name:c501, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c502 SIMPLE [(test_txt)test_txt.FieldSchema(name:c502, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c503 SIMPLE [(test_txt)test_txt.FieldSchema(name:c503, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c504 SIMPLE [(test_txt)test_txt.FieldSchema(name:c504, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c505 SIMPLE [(test_txt)test_txt.FieldSchema(name:c505, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c506 SIMPLE [(test_txt)test_txt.FieldSchema(name:c506, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c507 SIMPLE [(test_txt)test_txt.FieldSchema(name:c507, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c508 SIMPLE [(test_txt)test_txt.FieldSchema(name:c508, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c509 SIMPLE [(test_txt)test_txt.FieldSchema(name:c509, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c51 SIMPLE [(test_txt)test_txt.FieldSchema(name:c51, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c510 SIMPLE [(test_txt)test_txt.FieldSchema(name:c510, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c511 SIMPLE [(test_txt)test_txt.FieldSchema(name:c511, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c512 SIMPLE [(test_txt)test_txt.FieldSchema(name:c512, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c513 SIMPLE [(test_txt)test_txt.FieldSchema(name:c513, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c514 SIMPLE [(test_txt)test_txt.FieldSchema(name:c514, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c515 SIMPLE [(test_txt)test_txt.FieldSchema(name:c515, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c516 SIMPLE [(test_txt)test_txt.FieldSchema(name:c516, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c517 SIMPLE [(test_txt)test_txt.FieldSchema(name:c517, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c518 SIMPLE [(test_txt)test_txt.FieldSchema(name:c518, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c519 SIMPLE [(test_txt)test_txt.FieldSchema(name:c519, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c52 SIMPLE [(test_txt)test_txt.FieldSchema(name:c52, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c520 SIMPLE [(test_txt)test_txt.FieldSchema(name:c520, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c521 SIMPLE [(test_txt)test_txt.FieldSchema(name:c521, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c522 SIMPLE [(test_txt)test_txt.FieldSchema(name:c522, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c523 SIMPLE [(test_txt)test_txt.FieldSchema(name:c523, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c524 SIMPLE [(test_txt)test_txt.FieldSchema(name:c524, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c525 SIMPLE [(test_txt)test_txt.FieldSchema(name:c525, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c526 SIMPLE [(test_txt)test_txt.FieldSchema(name:c526, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c527 SIMPLE [(test_txt)test_txt.FieldSchema(name:c527, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c528 SIMPLE [(test_txt)test_txt.FieldSchema(name:c528, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c529 SIMPLE [(test_txt)test_txt.FieldSchema(name:c529, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c53 SIMPLE [(test_txt)test_txt.FieldSchema(name:c53, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c530 SIMPLE [(test_txt)test_txt.FieldSchema(name:c530, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c531 SIMPLE [(test_txt)test_txt.FieldSchema(name:c531, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c532 SIMPLE [(test_txt)test_txt.FieldSchema(name:c532, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c533 SIMPLE [(test_txt)test_txt.FieldSchema(name:c533, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c534 SIMPLE [(test_txt)test_txt.FieldSchema(name:c534, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c535 SIMPLE [(test_txt)test_txt.FieldSchema(name:c535, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c536 SIMPLE [(test_txt)test_txt.FieldSchema(name:c536, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c537 SIMPLE [(test_txt)test_txt.FieldSchema(name:c537, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c538 SIMPLE [(test_txt)test_txt.FieldSchema(name:c538, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c539 SIMPLE [(test_txt)test_txt.FieldSchema(name:c539, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c54 SIMPLE [(test_txt)test_txt.FieldSchema(name:c54, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c540 SIMPLE [(test_txt)test_txt.FieldSchema(name:c540, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c541 SIMPLE [(test_txt)test_txt.FieldSchema(name:c541, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c542 SIMPLE [(test_txt)test_txt.FieldSchema(name:c542, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c543 SIMPLE [(test_txt)test_txt.FieldSchema(name:c543, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c544 SIMPLE [(test_txt)test_txt.FieldSchema(name:c544, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c545 SIMPLE [(test_txt)test_txt.FieldSchema(name:c545, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c546 SIMPLE [(test_txt)test_txt.FieldSchema(name:c546, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c547 SIMPLE [(test_txt)test_txt.FieldSchema(name:c547, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c548 SIMPLE [(test_txt)test_txt.FieldSchema(name:c548, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c549 SIMPLE [(test_txt)test_txt.FieldSchema(name:c549, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c55 SIMPLE [(test_txt)test_txt.FieldSchema(name:c55, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c550 SIMPLE [(test_txt)test_txt.FieldSchema(name:c550, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c551 SIMPLE [(test_txt)test_txt.FieldSchema(name:c551, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c552 SIMPLE [(test_txt)test_txt.FieldSchema(name:c552, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c553 SIMPLE [(test_txt)test_txt.FieldSchema(name:c553, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c554 SIMPLE [(test_txt)test_txt.FieldSchema(name:c554, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c555 SIMPLE [(test_txt)test_txt.FieldSchema(name:c555, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c556 SIMPLE [(test_txt)test_txt.FieldSchema(name:c556, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c557 SIMPLE [(test_txt)test_txt.FieldSchema(name:c557, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c558 SIMPLE [(test_txt)test_txt.FieldSchema(name:c558, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c559 SIMPLE [(test_txt)test_txt.FieldSchema(name:c559, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c56 SIMPLE [(test_txt)test_txt.FieldSchema(name:c56, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c560 SIMPLE [(test_txt)test_txt.FieldSchema(name:c560, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c561 SIMPLE [(test_txt)test_txt.FieldSchema(name:c561, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c562 SIMPLE [(test_txt)test_txt.FieldSchema(name:c562, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c563 SIMPLE [(test_txt)test_txt.FieldSchema(name:c563, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c564 SIMPLE [(test_txt)test_txt.FieldSchema(name:c564, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c565 SIMPLE [(test_txt)test_txt.FieldSchema(name:c565, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c566 SIMPLE [(test_txt)test_txt.FieldSchema(name:c566, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c567 SIMPLE [(test_txt)test_txt.FieldSchema(name:c567, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c568 SIMPLE [(test_txt)test_txt.FieldSchema(name:c568, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c569 SIMPLE [(test_txt)test_txt.FieldSchema(name:c569, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c57 SIMPLE [(test_txt)test_txt.FieldSchema(name:c57, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c570 SIMPLE [(test_txt)test_txt.FieldSchema(name:c570, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c571 SIMPLE [(test_txt)test_txt.FieldSchema(name:c571, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c572 SIMPLE [(test_txt)test_txt.FieldSchema(name:c572, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c573 SIMPLE [(test_txt)test_txt.FieldSchema(name:c573, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c574 SIMPLE [(test_txt)test_txt.FieldSchema(name:c574, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c575 SIMPLE [(test_txt)test_txt.FieldSchema(name:c575, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c576 SIMPLE [(test_txt)test_txt.FieldSchema(name:c576, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c577 SIMPLE [(test_txt)test_txt.FieldSchema(name:c577, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c578 SIMPLE [(test_txt)test_txt.FieldSchema(name:c578, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c579 SIMPLE [(test_txt)test_txt.FieldSchema(name:c579, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c58 SIMPLE [(test_txt)test_txt.FieldSchema(name:c58, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c580 SIMPLE [(test_txt)test_txt.FieldSchema(name:c580, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c581 SIMPLE [(test_txt)test_txt.FieldSchema(name:c581, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c582 SIMPLE [(test_txt)test_txt.FieldSchema(name:c582, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c583 SIMPLE [(test_txt)test_txt.FieldSchema(name:c583, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c584 SIMPLE [(test_txt)test_txt.FieldSchema(name:c584, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c585 SIMPLE [(test_txt)test_txt.FieldSchema(name:c585, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c586 SIMPLE [(test_txt)test_txt.FieldSchema(name:c586, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c587 SIMPLE [(test_txt)test_txt.FieldSchema(name:c587, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c588 SIMPLE [(test_txt)test_txt.FieldSchema(name:c588, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c589 SIMPLE [(test_txt)test_txt.FieldSchema(name:c589, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c59 SIMPLE [(test_txt)test_txt.FieldSchema(name:c59, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c590 SIMPLE [(test_txt)test_txt.FieldSchema(name:c590, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c591 SIMPLE [(test_txt)test_txt.FieldSchema(name:c591, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c592 SIMPLE [(test_txt)test_txt.FieldSchema(name:c592, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c593 SIMPLE [(test_txt)test_txt.FieldSchema(name:c593, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c594 SIMPLE [(test_txt)test_txt.FieldSchema(name:c594, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c595 SIMPLE [(test_txt)test_txt.FieldSchema(name:c595, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c596 SIMPLE [(test_txt)test_txt.FieldSchema(name:c596, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c597 SIMPLE [(test_txt)test_txt.FieldSchema(name:c597, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c598 SIMPLE [(test_txt)test_txt.FieldSchema(name:c598, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c599 SIMPLE [(test_txt)test_txt.FieldSchema(name:c599, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c6 SIMPLE [(test_txt)test_txt.FieldSchema(name:c6, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c60 SIMPLE [(test_txt)test_txt.FieldSchema(name:c60, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c600 SIMPLE [(test_txt)test_txt.FieldSchema(name:c600, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c601 SIMPLE [(test_txt)test_txt.FieldSchema(name:c601, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c602 SIMPLE [(test_txt)test_txt.FieldSchema(name:c602, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c603 SIMPLE [(test_txt)test_txt.FieldSchema(name:c603, type:string, comment:null), ]
-POSTHOOK: Lineage: test_orc.c604 SIMPLE [(test_txt)test_txt.FieldSchema(name:c604, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c605 SIMPLE [(test_txt)test_txt.FieldSchema(name:c605, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c606 SIMPLE [(test_txt)test_txt.FieldSchema(name:c606, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c607 SIMPLE [(test_txt)test_txt.FieldSchema(name:c607, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c608 SIMPLE [(test_txt)test_txt.FieldSchema(name:c608, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c609 SIMPLE [(test_txt)test_txt.FieldSchema(name:c609, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c61 SIMPLE [(test_txt)test_txt.FieldSchema(name:c61, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c610 SIMPLE [(test_txt)test_txt.FieldSchema(name:c610, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c611 SIMPLE [(test_txt)test_txt.FieldSchema(name:c611, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c612 SIMPLE [(test_txt)test_txt.FieldSchema(name:c612, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c613 SIMPLE [(test_txt)test_txt.FieldSchema(name:c613, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c614 SIMPLE [(test_txt)test_txt.FieldSchema(name:c614, type:boolean, comment:null), ]
-POSTHOOK: Lineage: test_orc.c615 SIMPLE [(test_txt)test_txt.FieldSchema(name:c615, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c616 SIMPLE [(test_txt)test_txt.FieldSchema(name:c616, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c617 SIMPLE [(test_txt)test_txt.FieldSchema(name:c617, type:float, comment:null), ]
-POSTHOOK: Lineage: test_orc.c618 SIMPLE [(test_txt)test_txt.FieldSchema(name:c618, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c619 SIMPLE [(test_txt)test_txt.FieldSchema(name:c619, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c62 SIMPLE [(test_txt)test_txt.FieldSchema(name:c62, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c620 SIMPLE [(test_txt)test_txt.FieldSchema(name:c620, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c621 SIMPLE [(test_txt)test_txt.FieldSchema(name:c621, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c622 SIMPLE [(test_txt)test_txt.FieldSchema(name:c622, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c623 SIMPLE [(test_txt)test_txt.FieldSchema(name:c623, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c624 SIMPLE [(test_txt)test_txt.FieldSchema(name:c624, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c625 SIMPLE [(test_txt)test_txt.FieldSchema(name:c625, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c626 SIMPLE [(test_txt)test_txt.FieldSchema(name:c626, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c627 SIMPLE [(test_txt)test_txt.FieldSchema(name:c627, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c628 SIMPLE [(test_txt)test_txt.FieldSchema(name:c628, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c629 SIMPLE [(test_txt)test_txt.FieldSchema(name:c629, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c63 SIMPLE [(test_txt)test_txt.FieldSchema(name:c63, type:int, comment:null), ]
-POSTHOOK: Lineage: test_orc.c630 SIMPLE [(test_txt)test_txt.FieldSchema(name:c630, type:char(4), comment:null), ]
-POSTHOOK: Lineage: test_orc.c631 SIMPLE [(test_txt)test_txt.FieldSchema(name:c631, type:decimal(16,10), comment:null), ]
-POSTHOOK: Lineage: test_orc.c632 SIMPLE [(test_txt)test_txt.FieldSchema(name:c632, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c633 SIMPLE [(test_txt)test_txt.FieldSchema(name:c633, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c634 SIMPLE [(test_txt)test_txt.FieldSchema(name:c634, type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: 
test_orc.c635 SIMPLE [(test_txt)test_txt.FieldSchema(name:c635, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c636 SIMPLE [(test_txt)test_txt.FieldSchema(name:c636, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c637 SIMPLE [(test_txt)test_txt.FieldSchema(name:c637, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c638 SIMPLE [(test_txt)test_txt.FieldSchema(name:c638, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c639 SIMPLE [(test_txt)test_txt.FieldSchema(name:c639, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c64 SIMPLE [(test_txt)test_txt.FieldSchema(name:c64, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c640 SIMPLE [(test_txt)test_txt.FieldSchema(name:c640, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c641 SIMPLE [(test_txt)test_txt.FieldSchema(name:c641, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c642 SIMPLE [(test_txt)test_txt.FieldSchema(name:c642, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c643 SIMPLE [(test_txt)test_txt.FieldSchema(name:c643, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c644 SIMPLE [(test_txt)test_txt.FieldSchema(name:c644, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c645 SIMPLE [(test_txt)test_txt.FieldSchema(name:c645, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c646 SIMPLE [(test_txt)test_txt.FieldSchema(name:c646, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c647 SIMPLE [(test_txt)test_txt.FieldSchema(name:c647, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c648 SIMPLE [(test_txt)test_txt.FieldSchema(name:c648, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c649 SIMPLE [(test_txt)test_txt.FieldSchema(name:c649, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c65 SIMPLE [(test_txt)test_txt.FieldSchema(name:c65, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c650 SIMPLE [(test_txt)test_txt.FieldSchema(name:c650, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c651 SIMPLE [(test_txt)test_txt.FieldSchema(name:c651, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c652 SIMPLE [(test_txt)test_txt.FieldSchema(name:c652, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c653 SIMPLE [(test_txt)test_txt.FieldSchema(name:c653, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c654 SIMPLE [(test_txt)test_txt.FieldSchema(name:c654, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c655 SIMPLE [(test_txt)test_txt.FieldSchema(name:c655, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c656 SIMPLE [(test_txt)test_txt.FieldSchema(name:c656, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c657 SIMPLE [(test_txt)test_txt.FieldSchema(name:c657, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c658 SIMPLE [(test_txt)test_txt.FieldSchema(name:c658, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c659 SIMPLE [(test_txt)test_txt.FieldSchema(name:c659, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c66 SIMPLE [(test_txt)test_txt.FieldSchema(name:c66, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c660 SIMPLE [(test_txt)test_txt.FieldSchema(name:c660, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c661 SIMPLE [(test_txt)test_txt.FieldSchema(name:c661, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c662 SIMPLE [(test_txt)test_txt.FieldSchema(name:c662, type:string, comment:null), ] -POSTHOOK: Lineage: 
test_orc.c663 SIMPLE [(test_txt)test_txt.FieldSchema(name:c663, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c664 SIMPLE [(test_txt)test_txt.FieldSchema(name:c664, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c665 SIMPLE [(test_txt)test_txt.FieldSchema(name:c665, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c666 SIMPLE [(test_txt)test_txt.FieldSchema(name:c666, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c667 SIMPLE [(test_txt)test_txt.FieldSchema(name:c667, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c668 SIMPLE [(test_txt)test_txt.FieldSchema(name:c668, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c669 SIMPLE [(test_txt)test_txt.FieldSchema(name:c669, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c67 SIMPLE [(test_txt)test_txt.FieldSchema(name:c67, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c670 SIMPLE [(test_txt)test_txt.FieldSchema(name:c670, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c671 SIMPLE [(test_txt)test_txt.FieldSchema(name:c671, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c672 SIMPLE [(test_txt)test_txt.FieldSchema(name:c672, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c673 SIMPLE [(test_txt)test_txt.FieldSchema(name:c673, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c674 SIMPLE [(test_txt)test_txt.FieldSchema(name:c674, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c675 SIMPLE [(test_txt)test_txt.FieldSchema(name:c675, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c676 SIMPLE [(test_txt)test_txt.FieldSchema(name:c676, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c677 SIMPLE [(test_txt)test_txt.FieldSchema(name:c677, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c678 SIMPLE [(test_txt)test_txt.FieldSchema(name:c678, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c679 SIMPLE [(test_txt)test_txt.FieldSchema(name:c679, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c68 SIMPLE [(test_txt)test_txt.FieldSchema(name:c68, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c680 SIMPLE [(test_txt)test_txt.FieldSchema(name:c680, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c681 SIMPLE [(test_txt)test_txt.FieldSchema(name:c681, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c682 SIMPLE [(test_txt)test_txt.FieldSchema(name:c682, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c683 SIMPLE [(test_txt)test_txt.FieldSchema(name:c683, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c684 SIMPLE [(test_txt)test_txt.FieldSchema(name:c684, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c685 SIMPLE [(test_txt)test_txt.FieldSchema(name:c685, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c686 SIMPLE [(test_txt)test_txt.FieldSchema(name:c686, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c687 SIMPLE [(test_txt)test_txt.FieldSchema(name:c687, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c688 SIMPLE [(test_txt)test_txt.FieldSchema(name:c688, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c689 SIMPLE [(test_txt)test_txt.FieldSchema(name:c689, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c69 SIMPLE [(test_txt)test_txt.FieldSchema(name:c69, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c690 SIMPLE [(test_txt)test_txt.FieldSchema(name:c690, type:string, comment:null), ] -POSTHOOK: 
Lineage: test_orc.c691 SIMPLE [(test_txt)test_txt.FieldSchema(name:c691, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c692 SIMPLE [(test_txt)test_txt.FieldSchema(name:c692, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c693 SIMPLE [(test_txt)test_txt.FieldSchema(name:c693, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c694 SIMPLE [(test_txt)test_txt.FieldSchema(name:c694, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c695 SIMPLE [(test_txt)test_txt.FieldSchema(name:c695, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c696 SIMPLE [(test_txt)test_txt.FieldSchema(name:c696, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c697 SIMPLE [(test_txt)test_txt.FieldSchema(name:c697, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c698 SIMPLE [(test_txt)test_txt.FieldSchema(name:c698, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c699 SIMPLE [(test_txt)test_txt.FieldSchema(name:c699, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c7 SIMPLE [(test_txt)test_txt.FieldSchema(name:c7, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c70 SIMPLE [(test_txt)test_txt.FieldSchema(name:c70, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c700 SIMPLE [(test_txt)test_txt.FieldSchema(name:c700, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c701 SIMPLE [(test_txt)test_txt.FieldSchema(name:c701, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c702 SIMPLE [(test_txt)test_txt.FieldSchema(name:c702, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c703 SIMPLE [(test_txt)test_txt.FieldSchema(name:c703, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c704 SIMPLE [(test_txt)test_txt.FieldSchema(name:c704, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c705 SIMPLE [(test_txt)test_txt.FieldSchema(name:c705, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c706 SIMPLE [(test_txt)test_txt.FieldSchema(name:c706, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c707 SIMPLE [(test_txt)test_txt.FieldSchema(name:c707, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c708 SIMPLE [(test_txt)test_txt.FieldSchema(name:c708, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c709 SIMPLE [(test_txt)test_txt.FieldSchema(name:c709, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c71 SIMPLE [(test_txt)test_txt.FieldSchema(name:c71, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c710 SIMPLE [(test_txt)test_txt.FieldSchema(name:c710, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c711 SIMPLE [(test_txt)test_txt.FieldSchema(name:c711, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c712 SIMPLE [(test_txt)test_txt.FieldSchema(name:c712, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c713 SIMPLE [(test_txt)test_txt.FieldSchema(name:c713, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c714 SIMPLE [(test_txt)test_txt.FieldSchema(name:c714, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c715 SIMPLE [(test_txt)test_txt.FieldSchema(name:c715, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c716 SIMPLE [(test_txt)test_txt.FieldSchema(name:c716, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c717 SIMPLE [(test_txt)test_txt.FieldSchema(name:c717, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c718 SIMPLE [(test_txt)test_txt.FieldSchema(name:c718, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c719 
SIMPLE [(test_txt)test_txt.FieldSchema(name:c719, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c72 SIMPLE [(test_txt)test_txt.FieldSchema(name:c72, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c720 SIMPLE [(test_txt)test_txt.FieldSchema(name:c720, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c721 SIMPLE [(test_txt)test_txt.FieldSchema(name:c721, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c722 SIMPLE [(test_txt)test_txt.FieldSchema(name:c722, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c723 SIMPLE [(test_txt)test_txt.FieldSchema(name:c723, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c724 SIMPLE [(test_txt)test_txt.FieldSchema(name:c724, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c725 SIMPLE [(test_txt)test_txt.FieldSchema(name:c725, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c726 SIMPLE [(test_txt)test_txt.FieldSchema(name:c726, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c727 SIMPLE [(test_txt)test_txt.FieldSchema(name:c727, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c728 SIMPLE [(test_txt)test_txt.FieldSchema(name:c728, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c729 SIMPLE [(test_txt)test_txt.FieldSchema(name:c729, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c73 SIMPLE [(test_txt)test_txt.FieldSchema(name:c73, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c730 SIMPLE [(test_txt)test_txt.FieldSchema(name:c730, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c731 SIMPLE [(test_txt)test_txt.FieldSchema(name:c731, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c732 SIMPLE [(test_txt)test_txt.FieldSchema(name:c732, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c733 SIMPLE [(test_txt)test_txt.FieldSchema(name:c733, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c734 SIMPLE [(test_txt)test_txt.FieldSchema(name:c734, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c735 SIMPLE [(test_txt)test_txt.FieldSchema(name:c735, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c736 SIMPLE [(test_txt)test_txt.FieldSchema(name:c736, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c737 SIMPLE [(test_txt)test_txt.FieldSchema(name:c737, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c738 SIMPLE [(test_txt)test_txt.FieldSchema(name:c738, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c739 SIMPLE [(test_txt)test_txt.FieldSchema(name:c739, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c74 SIMPLE [(test_txt)test_txt.FieldSchema(name:c74, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c740 SIMPLE [(test_txt)test_txt.FieldSchema(name:c740, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c741 SIMPLE [(test_txt)test_txt.FieldSchema(name:c741, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c742 SIMPLE [(test_txt)test_txt.FieldSchema(name:c742, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c743 SIMPLE [(test_txt)test_txt.FieldSchema(name:c743, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c744 SIMPLE [(test_txt)test_txt.FieldSchema(name:c744, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c745 SIMPLE [(test_txt)test_txt.FieldSchema(name:c745, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c746 SIMPLE [(test_txt)test_txt.FieldSchema(name:c746, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c747 
SIMPLE [(test_txt)test_txt.FieldSchema(name:c747, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c748 SIMPLE [(test_txt)test_txt.FieldSchema(name:c748, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c749 SIMPLE [(test_txt)test_txt.FieldSchema(name:c749, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c75 SIMPLE [(test_txt)test_txt.FieldSchema(name:c75, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c750 SIMPLE [(test_txt)test_txt.FieldSchema(name:c750, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c751 SIMPLE [(test_txt)test_txt.FieldSchema(name:c751, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c752 SIMPLE [(test_txt)test_txt.FieldSchema(name:c752, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c753 SIMPLE [(test_txt)test_txt.FieldSchema(name:c753, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c754 SIMPLE [(test_txt)test_txt.FieldSchema(name:c754, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c755 SIMPLE [(test_txt)test_txt.FieldSchema(name:c755, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c756 SIMPLE [(test_txt)test_txt.FieldSchema(name:c756, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c757 SIMPLE [(test_txt)test_txt.FieldSchema(name:c757, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c758 SIMPLE [(test_txt)test_txt.FieldSchema(name:c758, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c759 SIMPLE [(test_txt)test_txt.FieldSchema(name:c759, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c76 SIMPLE [(test_txt)test_txt.FieldSchema(name:c76, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c760 SIMPLE [(test_txt)test_txt.FieldSchema(name:c760, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c761 SIMPLE [(test_txt)test_txt.FieldSchema(name:c761, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c762 SIMPLE [(test_txt)test_txt.FieldSchema(name:c762, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c763 SIMPLE [(test_txt)test_txt.FieldSchema(name:c763, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c764 SIMPLE [(test_txt)test_txt.FieldSchema(name:c764, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c765 SIMPLE [(test_txt)test_txt.FieldSchema(name:c765, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c766 SIMPLE [(test_txt)test_txt.FieldSchema(name:c766, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c767 SIMPLE [(test_txt)test_txt.FieldSchema(name:c767, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c768 SIMPLE [(test_txt)test_txt.FieldSchema(name:c768, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c769 SIMPLE [(test_txt)test_txt.FieldSchema(name:c769, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c77 SIMPLE [(test_txt)test_txt.FieldSchema(name:c77, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c770 SIMPLE [(test_txt)test_txt.FieldSchema(name:c770, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c771 SIMPLE [(test_txt)test_txt.FieldSchema(name:c771, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c772 SIMPLE [(test_txt)test_txt.FieldSchema(name:c772, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c773 SIMPLE [(test_txt)test_txt.FieldSchema(name:c773, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c774 SIMPLE [(test_txt)test_txt.FieldSchema(name:c774, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c775 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c775, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c776 SIMPLE [(test_txt)test_txt.FieldSchema(name:c776, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c777 SIMPLE [(test_txt)test_txt.FieldSchema(name:c777, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c778 SIMPLE [(test_txt)test_txt.FieldSchema(name:c778, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c779 SIMPLE [(test_txt)test_txt.FieldSchema(name:c779, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c78 SIMPLE [(test_txt)test_txt.FieldSchema(name:c78, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c780 SIMPLE [(test_txt)test_txt.FieldSchema(name:c780, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c781 SIMPLE [(test_txt)test_txt.FieldSchema(name:c781, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c782 SIMPLE [(test_txt)test_txt.FieldSchema(name:c782, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c783 SIMPLE [(test_txt)test_txt.FieldSchema(name:c783, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c784 SIMPLE [(test_txt)test_txt.FieldSchema(name:c784, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c785 SIMPLE [(test_txt)test_txt.FieldSchema(name:c785, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c786 SIMPLE [(test_txt)test_txt.FieldSchema(name:c786, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c787 SIMPLE [(test_txt)test_txt.FieldSchema(name:c787, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c788 SIMPLE [(test_txt)test_txt.FieldSchema(name:c788, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c789 SIMPLE [(test_txt)test_txt.FieldSchema(name:c789, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c79 SIMPLE [(test_txt)test_txt.FieldSchema(name:c79, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c790 SIMPLE [(test_txt)test_txt.FieldSchema(name:c790, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c791 SIMPLE [(test_txt)test_txt.FieldSchema(name:c791, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c792 SIMPLE [(test_txt)test_txt.FieldSchema(name:c792, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c793 SIMPLE [(test_txt)test_txt.FieldSchema(name:c793, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c794 SIMPLE [(test_txt)test_txt.FieldSchema(name:c794, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c795 SIMPLE [(test_txt)test_txt.FieldSchema(name:c795, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c796 SIMPLE [(test_txt)test_txt.FieldSchema(name:c796, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c797 SIMPLE [(test_txt)test_txt.FieldSchema(name:c797, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c798 SIMPLE [(test_txt)test_txt.FieldSchema(name:c798, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c799 SIMPLE [(test_txt)test_txt.FieldSchema(name:c799, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c8 SIMPLE [(test_txt)test_txt.FieldSchema(name:c8, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c80 SIMPLE [(test_txt)test_txt.FieldSchema(name:c80, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c800 SIMPLE [(test_txt)test_txt.FieldSchema(name:c800, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c801 SIMPLE [(test_txt)test_txt.FieldSchema(name:c801, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c802 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c802, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c803 SIMPLE [(test_txt)test_txt.FieldSchema(name:c803, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c804 SIMPLE [(test_txt)test_txt.FieldSchema(name:c804, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c805 SIMPLE [(test_txt)test_txt.FieldSchema(name:c805, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c806 SIMPLE [(test_txt)test_txt.FieldSchema(name:c806, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c807 SIMPLE [(test_txt)test_txt.FieldSchema(name:c807, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c808 SIMPLE [(test_txt)test_txt.FieldSchema(name:c808, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c809 SIMPLE [(test_txt)test_txt.FieldSchema(name:c809, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c81 SIMPLE [(test_txt)test_txt.FieldSchema(name:c81, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c810 SIMPLE [(test_txt)test_txt.FieldSchema(name:c810, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c811 SIMPLE [(test_txt)test_txt.FieldSchema(name:c811, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c812 SIMPLE [(test_txt)test_txt.FieldSchema(name:c812, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c813 SIMPLE [(test_txt)test_txt.FieldSchema(name:c813, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c814 SIMPLE [(test_txt)test_txt.FieldSchema(name:c814, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c815 SIMPLE [(test_txt)test_txt.FieldSchema(name:c815, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c816 SIMPLE [(test_txt)test_txt.FieldSchema(name:c816, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c817 SIMPLE [(test_txt)test_txt.FieldSchema(name:c817, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c818 SIMPLE [(test_txt)test_txt.FieldSchema(name:c818, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c819 SIMPLE [(test_txt)test_txt.FieldSchema(name:c819, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c82 SIMPLE [(test_txt)test_txt.FieldSchema(name:c82, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c820 SIMPLE [(test_txt)test_txt.FieldSchema(name:c820, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c821 SIMPLE [(test_txt)test_txt.FieldSchema(name:c821, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c822 SIMPLE [(test_txt)test_txt.FieldSchema(name:c822, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c823 SIMPLE [(test_txt)test_txt.FieldSchema(name:c823, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c824 SIMPLE [(test_txt)test_txt.FieldSchema(name:c824, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c825 SIMPLE [(test_txt)test_txt.FieldSchema(name:c825, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c826 SIMPLE [(test_txt)test_txt.FieldSchema(name:c826, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c827 SIMPLE [(test_txt)test_txt.FieldSchema(name:c827, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c828 SIMPLE [(test_txt)test_txt.FieldSchema(name:c828, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c829 SIMPLE [(test_txt)test_txt.FieldSchema(name:c829, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c83 SIMPLE [(test_txt)test_txt.FieldSchema(name:c83, type:int, comment:null), ] -POSTHOOK: Lineage: 
test_orc.c830 SIMPLE [(test_txt)test_txt.FieldSchema(name:c830, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c831 SIMPLE [(test_txt)test_txt.FieldSchema(name:c831, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c832 SIMPLE [(test_txt)test_txt.FieldSchema(name:c832, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c833 SIMPLE [(test_txt)test_txt.FieldSchema(name:c833, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c834 SIMPLE [(test_txt)test_txt.FieldSchema(name:c834, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c835 SIMPLE [(test_txt)test_txt.FieldSchema(name:c835, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c836 SIMPLE [(test_txt)test_txt.FieldSchema(name:c836, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c837 SIMPLE [(test_txt)test_txt.FieldSchema(name:c837, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c838 SIMPLE [(test_txt)test_txt.FieldSchema(name:c838, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c839 SIMPLE [(test_txt)test_txt.FieldSchema(name:c839, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c84 SIMPLE [(test_txt)test_txt.FieldSchema(name:c84, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c840 SIMPLE [(test_txt)test_txt.FieldSchema(name:c840, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c841 SIMPLE [(test_txt)test_txt.FieldSchema(name:c841, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c842 SIMPLE [(test_txt)test_txt.FieldSchema(name:c842, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c843 SIMPLE [(test_txt)test_txt.FieldSchema(name:c843, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c844 SIMPLE [(test_txt)test_txt.FieldSchema(name:c844, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c845 SIMPLE [(test_txt)test_txt.FieldSchema(name:c845, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c846 SIMPLE [(test_txt)test_txt.FieldSchema(name:c846, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c847 SIMPLE [(test_txt)test_txt.FieldSchema(name:c847, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c848 SIMPLE [(test_txt)test_txt.FieldSchema(name:c848, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c849 SIMPLE [(test_txt)test_txt.FieldSchema(name:c849, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c85 SIMPLE [(test_txt)test_txt.FieldSchema(name:c85, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c850 SIMPLE [(test_txt)test_txt.FieldSchema(name:c850, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c851 SIMPLE [(test_txt)test_txt.FieldSchema(name:c851, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c852 SIMPLE [(test_txt)test_txt.FieldSchema(name:c852, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c853 SIMPLE [(test_txt)test_txt.FieldSchema(name:c853, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c854 SIMPLE [(test_txt)test_txt.FieldSchema(name:c854, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c855 SIMPLE [(test_txt)test_txt.FieldSchema(name:c855, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c856 SIMPLE [(test_txt)test_txt.FieldSchema(name:c856, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c857 SIMPLE [(test_txt)test_txt.FieldSchema(name:c857, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c858 SIMPLE [(test_txt)test_txt.FieldSchema(name:c858, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: 
test_orc.c859 SIMPLE [(test_txt)test_txt.FieldSchema(name:c859, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c86 SIMPLE [(test_txt)test_txt.FieldSchema(name:c86, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c860 SIMPLE [(test_txt)test_txt.FieldSchema(name:c860, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c861 SIMPLE [(test_txt)test_txt.FieldSchema(name:c861, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c862 SIMPLE [(test_txt)test_txt.FieldSchema(name:c862, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c863 SIMPLE [(test_txt)test_txt.FieldSchema(name:c863, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c864 SIMPLE [(test_txt)test_txt.FieldSchema(name:c864, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c865 SIMPLE [(test_txt)test_txt.FieldSchema(name:c865, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c866 SIMPLE [(test_txt)test_txt.FieldSchema(name:c866, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c867 SIMPLE [(test_txt)test_txt.FieldSchema(name:c867, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c868 SIMPLE [(test_txt)test_txt.FieldSchema(name:c868, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c869 SIMPLE [(test_txt)test_txt.FieldSchema(name:c869, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c87 SIMPLE [(test_txt)test_txt.FieldSchema(name:c87, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c870 SIMPLE [(test_txt)test_txt.FieldSchema(name:c870, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c871 SIMPLE [(test_txt)test_txt.FieldSchema(name:c871, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c872 SIMPLE [(test_txt)test_txt.FieldSchema(name:c872, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c873 SIMPLE [(test_txt)test_txt.FieldSchema(name:c873, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c874 SIMPLE [(test_txt)test_txt.FieldSchema(name:c874, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c875 SIMPLE [(test_txt)test_txt.FieldSchema(name:c875, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c876 SIMPLE [(test_txt)test_txt.FieldSchema(name:c876, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c877 SIMPLE [(test_txt)test_txt.FieldSchema(name:c877, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c878 SIMPLE [(test_txt)test_txt.FieldSchema(name:c878, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c879 SIMPLE [(test_txt)test_txt.FieldSchema(name:c879, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c88 SIMPLE [(test_txt)test_txt.FieldSchema(name:c88, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c880 SIMPLE [(test_txt)test_txt.FieldSchema(name:c880, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c881 SIMPLE [(test_txt)test_txt.FieldSchema(name:c881, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c882 SIMPLE [(test_txt)test_txt.FieldSchema(name:c882, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c883 SIMPLE [(test_txt)test_txt.FieldSchema(name:c883, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c884 SIMPLE [(test_txt)test_txt.FieldSchema(name:c884, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c885 SIMPLE [(test_txt)test_txt.FieldSchema(name:c885, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c886 SIMPLE [(test_txt)test_txt.FieldSchema(name:c886, type:char(4), comment:null), ] -POSTHOOK: 
Lineage: test_orc.c887 SIMPLE [(test_txt)test_txt.FieldSchema(name:c887, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c888 SIMPLE [(test_txt)test_txt.FieldSchema(name:c888, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c889 SIMPLE [(test_txt)test_txt.FieldSchema(name:c889, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c89 SIMPLE [(test_txt)test_txt.FieldSchema(name:c89, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c890 SIMPLE [(test_txt)test_txt.FieldSchema(name:c890, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c891 SIMPLE [(test_txt)test_txt.FieldSchema(name:c891, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c892 SIMPLE [(test_txt)test_txt.FieldSchema(name:c892, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c893 SIMPLE [(test_txt)test_txt.FieldSchema(name:c893, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c894 SIMPLE [(test_txt)test_txt.FieldSchema(name:c894, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c895 SIMPLE [(test_txt)test_txt.FieldSchema(name:c895, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c896 SIMPLE [(test_txt)test_txt.FieldSchema(name:c896, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c897 SIMPLE [(test_txt)test_txt.FieldSchema(name:c897, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c898 SIMPLE [(test_txt)test_txt.FieldSchema(name:c898, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c899 SIMPLE [(test_txt)test_txt.FieldSchema(name:c899, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c9 SIMPLE [(test_txt)test_txt.FieldSchema(name:c9, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c90 SIMPLE [(test_txt)test_txt.FieldSchema(name:c90, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c900 SIMPLE [(test_txt)test_txt.FieldSchema(name:c900, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c901 SIMPLE [(test_txt)test_txt.FieldSchema(name:c901, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c902 SIMPLE [(test_txt)test_txt.FieldSchema(name:c902, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c903 SIMPLE [(test_txt)test_txt.FieldSchema(name:c903, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c904 SIMPLE [(test_txt)test_txt.FieldSchema(name:c904, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c905 SIMPLE [(test_txt)test_txt.FieldSchema(name:c905, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c906 SIMPLE [(test_txt)test_txt.FieldSchema(name:c906, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c907 SIMPLE [(test_txt)test_txt.FieldSchema(name:c907, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c908 SIMPLE [(test_txt)test_txt.FieldSchema(name:c908, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c909 SIMPLE [(test_txt)test_txt.FieldSchema(name:c909, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c91 SIMPLE [(test_txt)test_txt.FieldSchema(name:c91, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c910 SIMPLE [(test_txt)test_txt.FieldSchema(name:c910, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c911 SIMPLE [(test_txt)test_txt.FieldSchema(name:c911, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c912 SIMPLE [(test_txt)test_txt.FieldSchema(name:c912, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c913 SIMPLE [(test_txt)test_txt.FieldSchema(name:c913, type:char(4), comment:null), ] 
-POSTHOOK: Lineage: test_orc.c914 SIMPLE [(test_txt)test_txt.FieldSchema(name:c914, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c915 SIMPLE [(test_txt)test_txt.FieldSchema(name:c915, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c916 SIMPLE [(test_txt)test_txt.FieldSchema(name:c916, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c917 SIMPLE [(test_txt)test_txt.FieldSchema(name:c917, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c918 SIMPLE [(test_txt)test_txt.FieldSchema(name:c918, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c919 SIMPLE [(test_txt)test_txt.FieldSchema(name:c919, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c92 SIMPLE [(test_txt)test_txt.FieldSchema(name:c92, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c920 SIMPLE [(test_txt)test_txt.FieldSchema(name:c920, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c921 SIMPLE [(test_txt)test_txt.FieldSchema(name:c921, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c922 SIMPLE [(test_txt)test_txt.FieldSchema(name:c922, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c923 SIMPLE [(test_txt)test_txt.FieldSchema(name:c923, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c924 SIMPLE [(test_txt)test_txt.FieldSchema(name:c924, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c925 SIMPLE [(test_txt)test_txt.FieldSchema(name:c925, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c926 SIMPLE [(test_txt)test_txt.FieldSchema(name:c926, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c927 SIMPLE [(test_txt)test_txt.FieldSchema(name:c927, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c928 SIMPLE [(test_txt)test_txt.FieldSchema(name:c928, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c929 SIMPLE [(test_txt)test_txt.FieldSchema(name:c929, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c93 SIMPLE [(test_txt)test_txt.FieldSchema(name:c93, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c930 SIMPLE [(test_txt)test_txt.FieldSchema(name:c930, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c931 SIMPLE [(test_txt)test_txt.FieldSchema(name:c931, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c932 SIMPLE [(test_txt)test_txt.FieldSchema(name:c932, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c933 SIMPLE [(test_txt)test_txt.FieldSchema(name:c933, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c934 SIMPLE [(test_txt)test_txt.FieldSchema(name:c934, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c935 SIMPLE [(test_txt)test_txt.FieldSchema(name:c935, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c936 SIMPLE [(test_txt)test_txt.FieldSchema(name:c936, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c937 SIMPLE [(test_txt)test_txt.FieldSchema(name:c937, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c938 SIMPLE [(test_txt)test_txt.FieldSchema(name:c938, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c939 SIMPLE [(test_txt)test_txt.FieldSchema(name:c939, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c94 SIMPLE [(test_txt)test_txt.FieldSchema(name:c94, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c940 SIMPLE [(test_txt)test_txt.FieldSchema(name:c940, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c941 SIMPLE [(test_txt)test_txt.FieldSchema(name:c941, type:char(4), comment:null), ] 
-POSTHOOK: Lineage: test_orc.c942 SIMPLE [(test_txt)test_txt.FieldSchema(name:c942, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c943 SIMPLE [(test_txt)test_txt.FieldSchema(name:c943, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c944 SIMPLE [(test_txt)test_txt.FieldSchema(name:c944, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c945 SIMPLE [(test_txt)test_txt.FieldSchema(name:c945, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c946 SIMPLE [(test_txt)test_txt.FieldSchema(name:c946, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c947 SIMPLE [(test_txt)test_txt.FieldSchema(name:c947, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c948 SIMPLE [(test_txt)test_txt.FieldSchema(name:c948, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c949 SIMPLE [(test_txt)test_txt.FieldSchema(name:c949, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c95 SIMPLE [(test_txt)test_txt.FieldSchema(name:c95, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c950 SIMPLE [(test_txt)test_txt.FieldSchema(name:c950, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c951 SIMPLE [(test_txt)test_txt.FieldSchema(name:c951, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c952 SIMPLE [(test_txt)test_txt.FieldSchema(name:c952, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c953 SIMPLE [(test_txt)test_txt.FieldSchema(name:c953, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c954 SIMPLE [(test_txt)test_txt.FieldSchema(name:c954, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c955 SIMPLE [(test_txt)test_txt.FieldSchema(name:c955, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c956 SIMPLE [(test_txt)test_txt.FieldSchema(name:c956, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c957 SIMPLE [(test_txt)test_txt.FieldSchema(name:c957, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c958 SIMPLE [(test_txt)test_txt.FieldSchema(name:c958, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c959 SIMPLE [(test_txt)test_txt.FieldSchema(name:c959, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c96 SIMPLE [(test_txt)test_txt.FieldSchema(name:c96, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c960 SIMPLE [(test_txt)test_txt.FieldSchema(name:c960, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c961 SIMPLE [(test_txt)test_txt.FieldSchema(name:c961, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c962 SIMPLE [(test_txt)test_txt.FieldSchema(name:c962, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c963 SIMPLE [(test_txt)test_txt.FieldSchema(name:c963, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c964 SIMPLE [(test_txt)test_txt.FieldSchema(name:c964, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c965 SIMPLE [(test_txt)test_txt.FieldSchema(name:c965, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c966 SIMPLE [(test_txt)test_txt.FieldSchema(name:c966, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c967 SIMPLE [(test_txt)test_txt.FieldSchema(name:c967, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c968 SIMPLE [(test_txt)test_txt.FieldSchema(name:c968, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c969 SIMPLE [(test_txt)test_txt.FieldSchema(name:c969, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c97 SIMPLE [(test_txt)test_txt.FieldSchema(name:c97, type:int, comment:null), ] 
-POSTHOOK: Lineage: test_orc.c970 SIMPLE [(test_txt)test_txt.FieldSchema(name:c970, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c971 SIMPLE [(test_txt)test_txt.FieldSchema(name:c971, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c972 SIMPLE [(test_txt)test_txt.FieldSchema(name:c972, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c973 SIMPLE [(test_txt)test_txt.FieldSchema(name:c973, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c974 SIMPLE [(test_txt)test_txt.FieldSchema(name:c974, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c975 SIMPLE [(test_txt)test_txt.FieldSchema(name:c975, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c976 SIMPLE [(test_txt)test_txt.FieldSchema(name:c976, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c977 SIMPLE [(test_txt)test_txt.FieldSchema(name:c977, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c978 SIMPLE [(test_txt)test_txt.FieldSchema(name:c978, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c979 SIMPLE [(test_txt)test_txt.FieldSchema(name:c979, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c98 SIMPLE [(test_txt)test_txt.FieldSchema(name:c98, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c980 SIMPLE [(test_txt)test_txt.FieldSchema(name:c980, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c981 SIMPLE [(test_txt)test_txt.FieldSchema(name:c981, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c982 SIMPLE [(test_txt)test_txt.FieldSchema(name:c982, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c983 SIMPLE [(test_txt)test_txt.FieldSchema(name:c983, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c984 SIMPLE [(test_txt)test_txt.FieldSchema(name:c984, type:varchar(64), comment:null), ] -POSTHOOK: Lineage: test_orc.c985 SIMPLE [(test_txt)test_txt.FieldSchema(name:c985, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c986 SIMPLE [(test_txt)test_txt.FieldSchema(name:c986, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c987 SIMPLE [(test_txt)test_txt.FieldSchema(name:c987, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c988 SIMPLE [(test_txt)test_txt.FieldSchema(name:c988, type:string, comment:null), ] -POSTHOOK: Lineage: test_orc.c989 SIMPLE [(test_txt)test_txt.FieldSchema(name:c989, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c99 SIMPLE [(test_txt)test_txt.FieldSchema(name:c99, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c990 SIMPLE [(test_txt)test_txt.FieldSchema(name:c990, type:float, comment:null), ] -POSTHOOK: Lineage: test_orc.c991 SIMPLE [(test_txt)test_txt.FieldSchema(name:c991, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c992 SIMPLE [(test_txt)test_txt.FieldSchema(name:c992, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c993 SIMPLE [(test_txt)test_txt.FieldSchema(name:c993, type:char(4), comment:null), ] -POSTHOOK: Lineage: test_orc.c994 SIMPLE [(test_txt)test_txt.FieldSchema(name:c994, type:decimal(16,10), comment:null), ] -POSTHOOK: Lineage: test_orc.c995 SIMPLE [(test_txt)test_txt.FieldSchema(name:c995, type:boolean, comment:null), ] -POSTHOOK: Lineage: test_orc.c996 SIMPLE [(test_txt)test_txt.FieldSchema(name:c996, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c997 SIMPLE [(test_txt)test_txt.FieldSchema(name:c997, type:int, comment:null), ] -POSTHOOK: Lineage: test_orc.c998 SIMPLE [(test_txt)test_txt.FieldSchema(name:c998, 
type:varchar(64), comment:null), ]
-POSTHOOK: Lineage: test_orc.c999 SIMPLE [(test_txt)test_txt.FieldSchema(name:c999, type:char(4), comment:null), ]
+POSTHOOK: Output: default@test_orc_n1
+POSTHOOK: Lineage: test_orc_n1.c1 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c10 SIMPLE [(test_txt)test_txt.FieldSchema(name:c10, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c100 SIMPLE [(test_txt)test_txt.FieldSchema(name:c100, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1000 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1000, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1001 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1001, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1002 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1002, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1003 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1003, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1004 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1004, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1005 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1005, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1006 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1006, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1007 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1007, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1008 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1008, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1009 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1009, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c101 SIMPLE [(test_txt)test_txt.FieldSchema(name:c101, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1010 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1010, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1011 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1011, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1012 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1012, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1013 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1013, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1014 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1014, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1015 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1015, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1016 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1016, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1017 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1017, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1018 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1018, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1019 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1019, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c102 SIMPLE [(test_txt)test_txt.FieldSchema(name:c102, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1020 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1020, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1021 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1021, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1022 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1022, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1023 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1023, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1024 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1024, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1025 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1025, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1026 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1026, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1027 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1027, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1028 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1028, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1029 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1029, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c103 SIMPLE [(test_txt)test_txt.FieldSchema(name:c103, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1030 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1030, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1031 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1031, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1032 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1032, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1033 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1033, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1034 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1034, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1035 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1035, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1036 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1036, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1037 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1037, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1038 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1038, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1039 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1039, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c104 SIMPLE [(test_txt)test_txt.FieldSchema(name:c104, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1040 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1040, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1041 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1041, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1042 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1042, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1043 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1043, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1044 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1044, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1045 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1045, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1046 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1046, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1047 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1047, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1048 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1048, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1049 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1049, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c105 SIMPLE [(test_txt)test_txt.FieldSchema(name:c105, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1050 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1050, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1051 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1051, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1052 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1052, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1053 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1053, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1054 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1054, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1055 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1055, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1056 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1056, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1057 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1057, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1058 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1058, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1059 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1059, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c106 SIMPLE [(test_txt)test_txt.FieldSchema(name:c106, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1060 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1060, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1061 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1061, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1062 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1062, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1063 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1063, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1064 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1064, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1065 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1065, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1066 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1066, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1067 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1067, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1068 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1068, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1069 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1069, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c107 SIMPLE [(test_txt)test_txt.FieldSchema(name:c107, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1070 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1070, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1071 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1071, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1072 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1072, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1073 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1073, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1074 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1074, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1075 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1075, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1076 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1076, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1077 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1077, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1078 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1078, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1079 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1079, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c108 SIMPLE [(test_txt)test_txt.FieldSchema(name:c108, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1080 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1080, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1081 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1081, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1082 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1082, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1083 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1083, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1084 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1084, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1085 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1085, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1086 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1086, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1087 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1087, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1088 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1088, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1089 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1089, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c109 SIMPLE [(test_txt)test_txt.FieldSchema(name:c109, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1090 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1090, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1091 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1091, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1092 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1092, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1093 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1093, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1094 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1094, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1095 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1095, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1096 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1096, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1097 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1097, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1098 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1098, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1099 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1099, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c11 SIMPLE [(test_txt)test_txt.FieldSchema(name:c11, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c110 SIMPLE [(test_txt)test_txt.FieldSchema(name:c110, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1100 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1100, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1101 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1101, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1102 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1102, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1103 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1103, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1104 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1104, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1105 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1105, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1106 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1106, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1107 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1107, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1108 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1108, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1109 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1109, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c111 SIMPLE [(test_txt)test_txt.FieldSchema(name:c111, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1110 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1110, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1111 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1111, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1112 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1112, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1113 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1113, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1114 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1114, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1115 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1115, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1116 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1116, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1117 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1117, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1118 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1118, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1119 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1119, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c112 SIMPLE [(test_txt)test_txt.FieldSchema(name:c112, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1120 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1120, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1121 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1121, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1122 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1122, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1123 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1123, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1124 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1124, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1125 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1125, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1126 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1126, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1127 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1127, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1128 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1128, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1129 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1129, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c113 SIMPLE [(test_txt)test_txt.FieldSchema(name:c113, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1130 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1130, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1131 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1131, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1132 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1132, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1133 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1133, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1134 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1134, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1135 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1135, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1136 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1136, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1137 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1137, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1138 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1138, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1139 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1139, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c114 SIMPLE [(test_txt)test_txt.FieldSchema(name:c114, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1140 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1140, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1141 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1141, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1142 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1142, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1143 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1143, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1144 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1144, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1145 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1145, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1146 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1146, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1147 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1147, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1148 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1148, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1149 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1149, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c115 SIMPLE [(test_txt)test_txt.FieldSchema(name:c115, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1150 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1150, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1151 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1151, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1152 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1152, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1153 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1153, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1154 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1154, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1155 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1155, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1156 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1156, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1157 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1157, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1158 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1158, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1159 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1159, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c116 SIMPLE [(test_txt)test_txt.FieldSchema(name:c116, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1160 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1160, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1161 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1161, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1162 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1162, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1163 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1163, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1164 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1164, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1165 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1165, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1166 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1166, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1167 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1167, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1168 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1168, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1169 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1169, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c117 SIMPLE [(test_txt)test_txt.FieldSchema(name:c117, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1170 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1170, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1171 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1171, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1172 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1172, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1173 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1173, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1174 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1174, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1175 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1175, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1176 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1176, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1177 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1177, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1178 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1178, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1179 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1179, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c118 SIMPLE [(test_txt)test_txt.FieldSchema(name:c118, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1180 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1180, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1181 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1181, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1182 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1182, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1183 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1183, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1184 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1184, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1185 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1185, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1186 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1186, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1187 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1187, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1188 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1188, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1189 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1189, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c119 SIMPLE [(test_txt)test_txt.FieldSchema(name:c119, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1190 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1190, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1191 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1191, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1192 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1192, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1193 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1193, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1194 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1194, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1195 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1195, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1196 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1196, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1197 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1197, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1198 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1198, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1199 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1199, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c12 SIMPLE [(test_txt)test_txt.FieldSchema(name:c12, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c120 SIMPLE [(test_txt)test_txt.FieldSchema(name:c120, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1200 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1200, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1201 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1201, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1202 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1202, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1203 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1203, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1204 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1204, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1205 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1205, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1206 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1206, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1207 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1207, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1208 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1208, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1209 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1209, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c121 SIMPLE [(test_txt)test_txt.FieldSchema(name:c121, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1210 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1210, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1211 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1211, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1212 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1212, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1213 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1213, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1214 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1214, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1215 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1215, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1216 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1216, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1217 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1217, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1218 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1218, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1219 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1219, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c122 SIMPLE [(test_txt)test_txt.FieldSchema(name:c122, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1220 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1220, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1221 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1221, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1222 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1222, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1223 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1223, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1224 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1224, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1225 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1225, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1226 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1226, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1227 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1227, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1228 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1228, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1229 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1229, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c123 SIMPLE [(test_txt)test_txt.FieldSchema(name:c123, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1230 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1230, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1231 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1231, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1232 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1232, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1233 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1233, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1234 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1234, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1235 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1235, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1236 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1236, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1237 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1237, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1238 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1238, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1239 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1239, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c124 SIMPLE [(test_txt)test_txt.FieldSchema(name:c124, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1240 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1240, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1241 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1241, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1242 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1242, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1243 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1243, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1244 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1244, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1245 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1245, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1246 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1246, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1247 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1247, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1248 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1248, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1249 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1249, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c125 SIMPLE [(test_txt)test_txt.FieldSchema(name:c125, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1250 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1250, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1251 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1251, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1252 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1252, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1253 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1253, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1254 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1254, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1255 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1255, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1256 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1256, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1257 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1257, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1258 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1258, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1259 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1259, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c126 SIMPLE [(test_txt)test_txt.FieldSchema(name:c126, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1260 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1260, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1261 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1261, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1262 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1262, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1263 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1263, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1264 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1264, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1265 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1265, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1266 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1266, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1267 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1267, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1268 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1268, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1269 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1269, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c127 SIMPLE [(test_txt)test_txt.FieldSchema(name:c127, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1270 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1270, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1271 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1271, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1272 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1272, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1273 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1273, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1274 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1274, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1275 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1275, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1276 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1276, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1277 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1277, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1278 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1278, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1279 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1279, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c128 SIMPLE [(test_txt)test_txt.FieldSchema(name:c128, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1280 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1280, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1281 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1281, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1282 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1282, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1283 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1283, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1284 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1284, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1285 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1285, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1286 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1286, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1287 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1287, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1288 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1288, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1289 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1289, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c129 SIMPLE [(test_txt)test_txt.FieldSchema(name:c129, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1290 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1290, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1291 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1291, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1292 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1292, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1293 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1293, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1294 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1294, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1295 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1295, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1296 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1296, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1297 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1297, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1298 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1298, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1299 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1299, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c13 SIMPLE [(test_txt)test_txt.FieldSchema(name:c13, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c130 SIMPLE [(test_txt)test_txt.FieldSchema(name:c130, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1300 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1300, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1301 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1301, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1302 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1302, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1303 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1303, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1304 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1304, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1305 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1305, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1306 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1306, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1307 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1307, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1308 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1308, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1309 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1309, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c131 SIMPLE [(test_txt)test_txt.FieldSchema(name:c131, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1310 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1310, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1311 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1311, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1312 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1312, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1313 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1313, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1314 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1314, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1315 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1315, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1316 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1316, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1317 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1317, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1318 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1318, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1319 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1319, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c132 SIMPLE [(test_txt)test_txt.FieldSchema(name:c132, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1320 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1320, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1321 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1321, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1322 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1322, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1323 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1323, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1324 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1324, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1325 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1325, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1326 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1326, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1327 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1327, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1328 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1328, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1329 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1329, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c133 SIMPLE [(test_txt)test_txt.FieldSchema(name:c133, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1330 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1330, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1331 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1331, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1332 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1332, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1333 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1333, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1334 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1334, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1335 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1335, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1336 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1336, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1337 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1337, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1338 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1338, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1339 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1339, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c134 SIMPLE [(test_txt)test_txt.FieldSchema(name:c134, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1340 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1340, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1341 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1341, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1342 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1342, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1343 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1343, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1344 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1344, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1345 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1345, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1346 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1346, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1347 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1347, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1348 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1348, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1349 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1349, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c135 SIMPLE [(test_txt)test_txt.FieldSchema(name:c135, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1350 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1350, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1351 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1351, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1352 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1352, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1353 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1353, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1354 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1354, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1355 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1355, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1356 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1356, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1357 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1357, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1358 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1358, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1359 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1359, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c136 SIMPLE [(test_txt)test_txt.FieldSchema(name:c136, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1360 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1360, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1361 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1361, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1362 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1362, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1363 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1363, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1364 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1364, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1365 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1365, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1366 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1366, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1367 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1367, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1368 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1368, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1369 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1369, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c137 SIMPLE [(test_txt)test_txt.FieldSchema(name:c137, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1370 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1370, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1371 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1371, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1372 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1372, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1373 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1373, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1374 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1374, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1375 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1375, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1376 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1376, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1377 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1377, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1378 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1378, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1379 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1379, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c138 SIMPLE [(test_txt)test_txt.FieldSchema(name:c138, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1380 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1380, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1381 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1381, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1382 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1382, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1383 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1383, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1384 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1384, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1385 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1385, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1386 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1386, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1387 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1387, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1388 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1388, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1389 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1389, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c139 SIMPLE [(test_txt)test_txt.FieldSchema(name:c139, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1390 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1390, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1391 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1391, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1392 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1392, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1393 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1393, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1394 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1394, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1395 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1395, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1396 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1396, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1397 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1397, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1398 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1398, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1399 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1399, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c14 SIMPLE [(test_txt)test_txt.FieldSchema(name:c14, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c140 SIMPLE [(test_txt)test_txt.FieldSchema(name:c140, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1400 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1400, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1401 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1401, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1402 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1402, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1403 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1403, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1404 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1404, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1405 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1405, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1406 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1406, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1407 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1407, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1408 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1408, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1409 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1409, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c141 SIMPLE [(test_txt)test_txt.FieldSchema(name:c141, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1410 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1410, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1411 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1411, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1412 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1412, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1413 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1413, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1414 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1414, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1415 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1415, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1416 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1416, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1417 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1417, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1418 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1418, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1419 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1419, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c142 SIMPLE [(test_txt)test_txt.FieldSchema(name:c142, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1420 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1420, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1421 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1421, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1422 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1422, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1423 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1423, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1424 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1424, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1425 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1425, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1426 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1426, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1427 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1427, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1428 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1428, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1429 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1429, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c143 SIMPLE [(test_txt)test_txt.FieldSchema(name:c143, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1430 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1430, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1431 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1431, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1432 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1432, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1433 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1433, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1434 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1434, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1435 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1435, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1436 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1436, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1437 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1437, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1438 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1438, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1439 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1439, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c144 SIMPLE [(test_txt)test_txt.FieldSchema(name:c144, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1440 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1440, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1441 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1441, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1442 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1442, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1443 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1443, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1444 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1444, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1445 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1445, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1446 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1446, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1447 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1447, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1448 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1448, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1449 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1449, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c145 SIMPLE [(test_txt)test_txt.FieldSchema(name:c145, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1450 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1450, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1451 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1451, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1452 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1452, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1453 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1453, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1454 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1454, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1455 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1455, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1456 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1456, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1457 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1457, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1458 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1458, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1459 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1459, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c146 SIMPLE [(test_txt)test_txt.FieldSchema(name:c146, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1460 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1460, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1461 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1461, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1462 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1462, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1463 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1463, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1464 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1464, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1465 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1465, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1466 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1466, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1467 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1467, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1468 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1468, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1469 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1469, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c147 SIMPLE [(test_txt)test_txt.FieldSchema(name:c147, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1470 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1470, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1471 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1471, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1472 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1472, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1473 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1473, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1474 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1474, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1475 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1475, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1476 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1476, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1477 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1477, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1478 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1478, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1479 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1479, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c148 SIMPLE [(test_txt)test_txt.FieldSchema(name:c148, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1480 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1480, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1481 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1481, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1482 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1482, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1483 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1483, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1484 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1484, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1485 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1485, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1486 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1486, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1487 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1487, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1488 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1488, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1489 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1489, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c149 SIMPLE [(test_txt)test_txt.FieldSchema(name:c149, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1490 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1490, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1491 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1491, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1492 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1492, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1493 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1493, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1494 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1494, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1495 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1495, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1496 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1496, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1497 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1497, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1498 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1498, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1499 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1499, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c15 SIMPLE [(test_txt)test_txt.FieldSchema(name:c15, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c150 SIMPLE [(test_txt)test_txt.FieldSchema(name:c150, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1500 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1500, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1501 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1501, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1502 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1502, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1503 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1503, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1504 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1504, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1505 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1505, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1506 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1506, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1507 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1507, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1508 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1508, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1509 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1509, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c151 SIMPLE [(test_txt)test_txt.FieldSchema(name:c151, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1510 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1510, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1511 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1511, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1512 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1512, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1513 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1513, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1514 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1514, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1515 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1515, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1516 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1516, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1517 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1517, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1518 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1518, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1519 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1519, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c152 SIMPLE [(test_txt)test_txt.FieldSchema(name:c152, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1520 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1520, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1521 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1521, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1522 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1522, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1523 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1523, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1524 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1524, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1525 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1525, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1526 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1526, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1527 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1527, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1528 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1528, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1529 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1529, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c153 SIMPLE [(test_txt)test_txt.FieldSchema(name:c153, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1530 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1530, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1531 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1531, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1532 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1532, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1533 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1533, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1534 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1534, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1535 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1535, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1536 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1536, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1537 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1537, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1538 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1538, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1539 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1539, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c154 SIMPLE [(test_txt)test_txt.FieldSchema(name:c154, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1540 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1540, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1541 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1541, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1542 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1542, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1543 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1543, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1544 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1544, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1545 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1545, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1546 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1546, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1547 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1547, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1548 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1548, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1549 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1549, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c155 SIMPLE [(test_txt)test_txt.FieldSchema(name:c155, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1550 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1550, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1551 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1551, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1552 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1552, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1553 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1553, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1554 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1554, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1555 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1555, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1556 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1556,
type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1557 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1557, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1558 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1558, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1559 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1559, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c156 SIMPLE [(test_txt)test_txt.FieldSchema(name:c156, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1560 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1560, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1561 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1561, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1562 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1562, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1563 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1563, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1564 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1564, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1565 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1565, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1566 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1566, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1567 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1567, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1568 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1568, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1569 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1569, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c157 SIMPLE [(test_txt)test_txt.FieldSchema(name:c157, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1570 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1570, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1571 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1571, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1572 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1572, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1573 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1573, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1574 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1574, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1575 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1575, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1576 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1576, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1577 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1577, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1578 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1578, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1579 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1579, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c158 SIMPLE [(test_txt)test_txt.FieldSchema(name:c158, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1580 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1580, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1581 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1581, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1582 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1582, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1583 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c1583, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1584 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1584, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1585 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1585, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1586 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1586, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1587 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1587, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1588 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1588, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1589 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1589, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c159 SIMPLE [(test_txt)test_txt.FieldSchema(name:c159, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1590 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1590, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1591 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1591, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1592 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1592, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1593 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1593, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1594 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1594, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1595 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1595, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1596 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1596, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1597 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1597, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1598 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1598, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1599 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1599, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c16 SIMPLE [(test_txt)test_txt.FieldSchema(name:c16, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c160 SIMPLE [(test_txt)test_txt.FieldSchema(name:c160, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1600 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1600, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1601 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1601, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1602 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1602, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1603 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1603, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1604 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1604, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1605 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1605, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1606 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1606, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1607 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1607, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1608 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1608, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1609 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1609, type:int, comment:null), ] +POSTHOOK: Lineage: 
test_orc_n1.c161 SIMPLE [(test_txt)test_txt.FieldSchema(name:c161, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1610 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1610, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1611 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1611, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1612 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1612, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1613 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1613, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1614 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1614, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1615 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1615, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1616 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1616, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1617 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1617, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1618 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1618, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1619 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1619, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c162 SIMPLE [(test_txt)test_txt.FieldSchema(name:c162, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1620 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1620, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1621 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1621, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1622 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1622, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1623 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1623, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1624 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1624, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1625 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1625, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1626 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1626, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1627 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1627, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1628 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1628, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1629 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1629, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c163 SIMPLE [(test_txt)test_txt.FieldSchema(name:c163, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1630 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1630, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1631 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1631, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1632 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1632, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1633 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1633, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1634 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1634, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1635 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1635, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1636 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1636, 
type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1637 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1637, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1638 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1638, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1639 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1639, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c164 SIMPLE [(test_txt)test_txt.FieldSchema(name:c164, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1640 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1640, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1641 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1641, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1642 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1642, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1643 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1643, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1644 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1644, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1645 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1645, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1646 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1646, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1647 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1647, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1648 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1648, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1649 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1649, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c165 SIMPLE [(test_txt)test_txt.FieldSchema(name:c165, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1650 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1650, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1651 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1651, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1652 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1652, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1653 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1653, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1654 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1654, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1655 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1655, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1656 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1656, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1657 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1657, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1658 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1658, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1659 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1659, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c166 SIMPLE [(test_txt)test_txt.FieldSchema(name:c166, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1660 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1660, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1661 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1661, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1662 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1662, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1663 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c1663, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1664 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1664, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1665 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1665, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1666 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1666, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1667 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1667, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1668 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1668, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1669 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1669, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c167 SIMPLE [(test_txt)test_txt.FieldSchema(name:c167, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1670 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1670, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1671 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1671, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1672 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1672, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1673 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1673, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1674 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1674, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1675 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1675, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1676 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1676, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1677 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1677, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1678 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1678, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1679 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1679, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c168 SIMPLE [(test_txt)test_txt.FieldSchema(name:c168, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1680 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1680, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1681 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1681, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1682 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1682, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1683 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1683, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1684 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1684, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1685 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1685, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1686 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1686, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1687 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1687, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1688 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1688, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1689 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1689, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c169 SIMPLE [(test_txt)test_txt.FieldSchema(name:c169, 
type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1690 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1690, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1691 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1691, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1692 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1692, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1693 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1693, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1694 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1694, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1695 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1695, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1696 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1696, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1697 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1697, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1698 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1698, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1699 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1699, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c17 SIMPLE [(test_txt)test_txt.FieldSchema(name:c17, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c170 SIMPLE [(test_txt)test_txt.FieldSchema(name:c170, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1700 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1700, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1701 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1701, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1702 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1702, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1703 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1703, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1704 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1704, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1705 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1705, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1706 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1706, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1707 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1707, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1708 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1708, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1709 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1709, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c171 SIMPLE [(test_txt)test_txt.FieldSchema(name:c171, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1710 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1710, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1711 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1711, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1712 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1712, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1713 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1713, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1714 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1714, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1715 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1715, type:int, comment:null), ] +POSTHOOK: Lineage: 
test_orc_n1.c1716 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1716, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1717 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1717, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1718 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1718, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1719 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1719, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c172 SIMPLE [(test_txt)test_txt.FieldSchema(name:c172, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1720 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1720, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1721 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1721, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1722 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1722, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1723 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1723, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1724 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1724, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1725 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1725, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1726 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1726, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1727 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1727, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1728 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1728, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1729 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1729, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c173 SIMPLE [(test_txt)test_txt.FieldSchema(name:c173, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1730 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1730, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1731 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1731, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1732 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1732, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1733 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1733, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1734 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1734, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1735 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1735, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1736 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1736, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1737 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1737, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1738 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1738, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1739 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1739, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c174 SIMPLE [(test_txt)test_txt.FieldSchema(name:c174, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1740 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1740, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1741 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1741, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1742 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1742, type:varchar(64), 
comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1743 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1743, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1744 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1744, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1745 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1745, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1746 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1746, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1747 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1747, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1748 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1748, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1749 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1749, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c175 SIMPLE [(test_txt)test_txt.FieldSchema(name:c175, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1750 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1750, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1751 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1751, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1752 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1752, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1753 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1753, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1754 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1754, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1755 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1755, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1756 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1756, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1757 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1757, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1758 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1758, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1759 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1759, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c176 SIMPLE [(test_txt)test_txt.FieldSchema(name:c176, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1760 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1760, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1761 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1761, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1762 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1762, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1763 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1763, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1764 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1764, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1765 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1765, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1766 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1766, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1767 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1767, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1768 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1768, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1769 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1769, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c177 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c177, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1770 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1770, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1771 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1771, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1772 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1772, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1773 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1773, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1774 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1774, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1775 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1775, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1776 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1776, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1777 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1777, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1778 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1778, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1779 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1779, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c178 SIMPLE [(test_txt)test_txt.FieldSchema(name:c178, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1780 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1780, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1781 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1781, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1782 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1782, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1783 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1783, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1784 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1784, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1785 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1785, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1786 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1786, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1787 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1787, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1788 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1788, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1789 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1789, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c179 SIMPLE [(test_txt)test_txt.FieldSchema(name:c179, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1790 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1790, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1791 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1791, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1792 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1792, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1793 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1793, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1794 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1794, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1795 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1795, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1796 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1796, type:int, 
comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1797 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1797, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1798 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1798, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1799 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1799, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c18 SIMPLE [(test_txt)test_txt.FieldSchema(name:c18, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c180 SIMPLE [(test_txt)test_txt.FieldSchema(name:c180, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1800 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1800, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1801 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1801, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1802 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1802, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1803 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1803, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1804 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1804, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1805 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1805, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1806 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1806, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1807 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1807, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1808 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1808, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1809 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1809, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c181 SIMPLE [(test_txt)test_txt.FieldSchema(name:c181, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1810 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1810, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1811 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1811, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1812 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1812, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1813 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1813, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1814 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1814, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1815 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1815, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1816 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1816, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1817 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1817, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1818 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1818, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1819 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1819, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c182 SIMPLE [(test_txt)test_txt.FieldSchema(name:c182, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1820 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1820, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1821 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1821, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1822 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1822, 
type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1823 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1823, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1824 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1824, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1825 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1825, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1826 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1826, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1827 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1827, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1828 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1828, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1829 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1829, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c183 SIMPLE [(test_txt)test_txt.FieldSchema(name:c183, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1830 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1830, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1831 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1831, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1832 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1832, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1833 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1833, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1834 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1834, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1835 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1835, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1836 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1836, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1837 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1837, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1838 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1838, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1839 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1839, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c184 SIMPLE [(test_txt)test_txt.FieldSchema(name:c184, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1840 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1840, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1841 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1841, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1842 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1842, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1843 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1843, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1844 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1844, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1845 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1845, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1846 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1846, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1847 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1847, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1848 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1848, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1849 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1849, type:string, comment:null), ] 
+POSTHOOK: Lineage: test_orc_n1.c185 SIMPLE [(test_txt)test_txt.FieldSchema(name:c185, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1850 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1850, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1851 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1851, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1852 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1852, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1853 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1853, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1854 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1854, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1855 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1855, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1856 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1856, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1857 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1857, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1858 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1858, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1859 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1859, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c186 SIMPLE [(test_txt)test_txt.FieldSchema(name:c186, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1860 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1860, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1861 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1861, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1862 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1862, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1863 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1863, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1864 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1864, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1865 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1865, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1866 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1866, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1867 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1867, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1868 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1868, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1869 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1869, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c187 SIMPLE [(test_txt)test_txt.FieldSchema(name:c187, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1870 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1870, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1871 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1871, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1872 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1872, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1873 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1873, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1874 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1874, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1875 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1875, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1876 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1876, 
type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1877 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1877, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1878 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1878, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1879 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1879, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c188 SIMPLE [(test_txt)test_txt.FieldSchema(name:c188, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1880 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1880, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1881 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1881, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1882 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1882, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1883 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1883, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1884 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1884, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1885 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1885, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1886 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1886, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1887 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1887, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1888 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1888, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1889 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1889, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c189 SIMPLE [(test_txt)test_txt.FieldSchema(name:c189, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1890 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1890, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1891 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1891, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1892 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1892, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1893 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1893, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1894 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1894, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1895 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1895, type:string, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1896 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1896, type:boolean, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1897 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1897, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1898 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1898, type:varchar(64), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1899 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1899, type:char(4), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c19 SIMPLE [(test_txt)test_txt.FieldSchema(name:c19, type:int, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c190 SIMPLE [(test_txt)test_txt.FieldSchema(name:c190, type:decimal(16,10), comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1900 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1900, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1901 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1901, type:float, comment:null), ] +POSTHOOK: Lineage: test_orc_n1.c1902 SIMPLE 
[(test_txt)test_txt.FieldSchema(name:c1902, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1903 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1903, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1904 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1904, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1905 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1905, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1906 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1906, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1907 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1907, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1908 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1908, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1909 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1909, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c191 SIMPLE [(test_txt)test_txt.FieldSchema(name:c191, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1910 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1910, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1911 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1911, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1912 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1912, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1913 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1913, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1914 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1914, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1915 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1915, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1916 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1916, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1917 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1917, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1918 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1918, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1919 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1919, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c192 SIMPLE [(test_txt)test_txt.FieldSchema(name:c192, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1920 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1920, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1921 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1921, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1922 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1922, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1923 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1923, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1924 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1924, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1925 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1925, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1926 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1926, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1927 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1927, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1928 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1928, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1929 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1929, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c193 SIMPLE [(test_txt)test_txt.FieldSchema(name:c193, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1930 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1930, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1931 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1931, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1932 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1932, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1933 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1933, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1934 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1934, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1935 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1935, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1936 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1936, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1937 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1937, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1938 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1938, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1939 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1939, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c194 SIMPLE [(test_txt)test_txt.FieldSchema(name:c194, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1940 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1940, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1941 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1941, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1942 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1942, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1943 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1943, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1944 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1944, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1945 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1945, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1946 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1946, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1947 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1947, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1948 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1948, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1949 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1949, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c195 SIMPLE [(test_txt)test_txt.FieldSchema(name:c195, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1950 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1950, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1951 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1951, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1952 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1952, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1953 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1953, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1954 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1954, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1955 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1955, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1956 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1956, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1957 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1957, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1958 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1958, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1959 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1959, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c196 SIMPLE [(test_txt)test_txt.FieldSchema(name:c196, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1960 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1960, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1961 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1961, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1962 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1962, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1963 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1963, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1964 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1964, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1965 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1965, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1966 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1966, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1967 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1967, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1968 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1968, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1969 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1969, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c197 SIMPLE [(test_txt)test_txt.FieldSchema(name:c197, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1970 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1970, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1971 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1971, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1972 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1972, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1973 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1973, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1974 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1974, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1975 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1975, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1976 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1976, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1977 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1977, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1978 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1978, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1979 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1979, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c198 SIMPLE [(test_txt)test_txt.FieldSchema(name:c198, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1980 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1980, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1981 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1981, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1982 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1982, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1983 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1983, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1984 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1984, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1985 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1985, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1986 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1986, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1987 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1987, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1988 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1988, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1989 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1989, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c199 SIMPLE [(test_txt)test_txt.FieldSchema(name:c199, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1990 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1990, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1991 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1991, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1992 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1992, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1993 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1993, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1994 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1994, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1995 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1995, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1996 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1996, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1997 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1997, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1998 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1998, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c1999 SIMPLE [(test_txt)test_txt.FieldSchema(name:c1999, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c2 SIMPLE [(test_txt)test_txt.FieldSchema(name:c2, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c20 SIMPLE [(test_txt)test_txt.FieldSchema(name:c20, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c200 SIMPLE [(test_txt)test_txt.FieldSchema(name:c200, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c2000 SIMPLE [(test_txt)test_txt.FieldSchema(name:c2000, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c201 SIMPLE [(test_txt)test_txt.FieldSchema(name:c201, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c202 SIMPLE [(test_txt)test_txt.FieldSchema(name:c202, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c203 SIMPLE [(test_txt)test_txt.FieldSchema(name:c203, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c204 SIMPLE [(test_txt)test_txt.FieldSchema(name:c204, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c205 SIMPLE [(test_txt)test_txt.FieldSchema(name:c205, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c206 SIMPLE [(test_txt)test_txt.FieldSchema(name:c206, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c207 SIMPLE [(test_txt)test_txt.FieldSchema(name:c207, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c208 SIMPLE [(test_txt)test_txt.FieldSchema(name:c208, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c209 SIMPLE [(test_txt)test_txt.FieldSchema(name:c209, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c21 SIMPLE [(test_txt)test_txt.FieldSchema(name:c21, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c210 SIMPLE [(test_txt)test_txt.FieldSchema(name:c210, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c211 SIMPLE [(test_txt)test_txt.FieldSchema(name:c211, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c212 SIMPLE [(test_txt)test_txt.FieldSchema(name:c212, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c213 SIMPLE [(test_txt)test_txt.FieldSchema(name:c213, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c214 SIMPLE [(test_txt)test_txt.FieldSchema(name:c214, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c215 SIMPLE [(test_txt)test_txt.FieldSchema(name:c215, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c216 SIMPLE [(test_txt)test_txt.FieldSchema(name:c216, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c217 SIMPLE [(test_txt)test_txt.FieldSchema(name:c217, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c218 SIMPLE [(test_txt)test_txt.FieldSchema(name:c218, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c219 SIMPLE [(test_txt)test_txt.FieldSchema(name:c219, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c22 SIMPLE [(test_txt)test_txt.FieldSchema(name:c22, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c220 SIMPLE [(test_txt)test_txt.FieldSchema(name:c220, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c221 SIMPLE [(test_txt)test_txt.FieldSchema(name:c221, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c222 SIMPLE [(test_txt)test_txt.FieldSchema(name:c222, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c223 SIMPLE [(test_txt)test_txt.FieldSchema(name:c223, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c224 SIMPLE [(test_txt)test_txt.FieldSchema(name:c224, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c225 SIMPLE [(test_txt)test_txt.FieldSchema(name:c225, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c226 SIMPLE [(test_txt)test_txt.FieldSchema(name:c226, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c227 SIMPLE [(test_txt)test_txt.FieldSchema(name:c227, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c228 SIMPLE [(test_txt)test_txt.FieldSchema(name:c228, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c229 SIMPLE [(test_txt)test_txt.FieldSchema(name:c229, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c23 SIMPLE [(test_txt)test_txt.FieldSchema(name:c23, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c230 SIMPLE [(test_txt)test_txt.FieldSchema(name:c230, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c231 SIMPLE [(test_txt)test_txt.FieldSchema(name:c231, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c232 SIMPLE [(test_txt)test_txt.FieldSchema(name:c232, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c233 SIMPLE [(test_txt)test_txt.FieldSchema(name:c233, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c234 SIMPLE [(test_txt)test_txt.FieldSchema(name:c234, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c235 SIMPLE [(test_txt)test_txt.FieldSchema(name:c235, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c236 SIMPLE [(test_txt)test_txt.FieldSchema(name:c236, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c237 SIMPLE [(test_txt)test_txt.FieldSchema(name:c237, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c238 SIMPLE [(test_txt)test_txt.FieldSchema(name:c238, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c239 SIMPLE [(test_txt)test_txt.FieldSchema(name:c239, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c24 SIMPLE [(test_txt)test_txt.FieldSchema(name:c24, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c240 SIMPLE [(test_txt)test_txt.FieldSchema(name:c240, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c241 SIMPLE [(test_txt)test_txt.FieldSchema(name:c241, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c242 SIMPLE [(test_txt)test_txt.FieldSchema(name:c242, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c243 SIMPLE [(test_txt)test_txt.FieldSchema(name:c243, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c244 SIMPLE [(test_txt)test_txt.FieldSchema(name:c244, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c245 SIMPLE [(test_txt)test_txt.FieldSchema(name:c245, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c246 SIMPLE [(test_txt)test_txt.FieldSchema(name:c246, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c247 SIMPLE [(test_txt)test_txt.FieldSchema(name:c247, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c248 SIMPLE [(test_txt)test_txt.FieldSchema(name:c248, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c249 SIMPLE [(test_txt)test_txt.FieldSchema(name:c249, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c25 SIMPLE [(test_txt)test_txt.FieldSchema(name:c25, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c250 SIMPLE [(test_txt)test_txt.FieldSchema(name:c250, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c251 SIMPLE [(test_txt)test_txt.FieldSchema(name:c251, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c252 SIMPLE [(test_txt)test_txt.FieldSchema(name:c252, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c253 SIMPLE [(test_txt)test_txt.FieldSchema(name:c253, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c254 SIMPLE [(test_txt)test_txt.FieldSchema(name:c254, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c255 SIMPLE [(test_txt)test_txt.FieldSchema(name:c255, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c256 SIMPLE [(test_txt)test_txt.FieldSchema(name:c256, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c257 SIMPLE [(test_txt)test_txt.FieldSchema(name:c257, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c258 SIMPLE [(test_txt)test_txt.FieldSchema(name:c258, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c259 SIMPLE [(test_txt)test_txt.FieldSchema(name:c259, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c26 SIMPLE [(test_txt)test_txt.FieldSchema(name:c26, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c260 SIMPLE [(test_txt)test_txt.FieldSchema(name:c260, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c261 SIMPLE [(test_txt)test_txt.FieldSchema(name:c261, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c262 SIMPLE [(test_txt)test_txt.FieldSchema(name:c262, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c263 SIMPLE [(test_txt)test_txt.FieldSchema(name:c263, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c264 SIMPLE [(test_txt)test_txt.FieldSchema(name:c264, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c265 SIMPLE [(test_txt)test_txt.FieldSchema(name:c265, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c266 SIMPLE [(test_txt)test_txt.FieldSchema(name:c266, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c267 SIMPLE [(test_txt)test_txt.FieldSchema(name:c267, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c268 SIMPLE [(test_txt)test_txt.FieldSchema(name:c268, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c269 SIMPLE [(test_txt)test_txt.FieldSchema(name:c269, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c27 SIMPLE [(test_txt)test_txt.FieldSchema(name:c27, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c270 SIMPLE [(test_txt)test_txt.FieldSchema(name:c270, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c271 SIMPLE [(test_txt)test_txt.FieldSchema(name:c271, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c272 SIMPLE [(test_txt)test_txt.FieldSchema(name:c272, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c273 SIMPLE [(test_txt)test_txt.FieldSchema(name:c273, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c274 SIMPLE [(test_txt)test_txt.FieldSchema(name:c274, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c275 SIMPLE [(test_txt)test_txt.FieldSchema(name:c275, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c276 SIMPLE [(test_txt)test_txt.FieldSchema(name:c276, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c277 SIMPLE [(test_txt)test_txt.FieldSchema(name:c277, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c278 SIMPLE [(test_txt)test_txt.FieldSchema(name:c278, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c279 SIMPLE [(test_txt)test_txt.FieldSchema(name:c279, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c28 SIMPLE [(test_txt)test_txt.FieldSchema(name:c28, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c280 SIMPLE [(test_txt)test_txt.FieldSchema(name:c280, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c281 SIMPLE [(test_txt)test_txt.FieldSchema(name:c281, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c282 SIMPLE [(test_txt)test_txt.FieldSchema(name:c282, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c283 SIMPLE [(test_txt)test_txt.FieldSchema(name:c283, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c284 SIMPLE [(test_txt)test_txt.FieldSchema(name:c284, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c285 SIMPLE [(test_txt)test_txt.FieldSchema(name:c285, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c286 SIMPLE [(test_txt)test_txt.FieldSchema(name:c286, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c287 SIMPLE [(test_txt)test_txt.FieldSchema(name:c287, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c288 SIMPLE [(test_txt)test_txt.FieldSchema(name:c288, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c289 SIMPLE [(test_txt)test_txt.FieldSchema(name:c289, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c29 SIMPLE [(test_txt)test_txt.FieldSchema(name:c29, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c290 SIMPLE [(test_txt)test_txt.FieldSchema(name:c290, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c291 SIMPLE [(test_txt)test_txt.FieldSchema(name:c291, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c292 SIMPLE [(test_txt)test_txt.FieldSchema(name:c292, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c293 SIMPLE [(test_txt)test_txt.FieldSchema(name:c293, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c294 SIMPLE [(test_txt)test_txt.FieldSchema(name:c294, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c295 SIMPLE [(test_txt)test_txt.FieldSchema(name:c295, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c296 SIMPLE [(test_txt)test_txt.FieldSchema(name:c296, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c297 SIMPLE [(test_txt)test_txt.FieldSchema(name:c297, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c298 SIMPLE [(test_txt)test_txt.FieldSchema(name:c298, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c299 SIMPLE [(test_txt)test_txt.FieldSchema(name:c299, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c3 SIMPLE [(test_txt)test_txt.FieldSchema(name:c3, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c30 SIMPLE [(test_txt)test_txt.FieldSchema(name:c30, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c300 SIMPLE [(test_txt)test_txt.FieldSchema(name:c300, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c301 SIMPLE [(test_txt)test_txt.FieldSchema(name:c301, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c302 SIMPLE [(test_txt)test_txt.FieldSchema(name:c302, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c303 SIMPLE [(test_txt)test_txt.FieldSchema(name:c303, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c304 SIMPLE [(test_txt)test_txt.FieldSchema(name:c304, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c305 SIMPLE [(test_txt)test_txt.FieldSchema(name:c305, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c306 SIMPLE [(test_txt)test_txt.FieldSchema(name:c306, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c307 SIMPLE [(test_txt)test_txt.FieldSchema(name:c307, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c308 SIMPLE [(test_txt)test_txt.FieldSchema(name:c308, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c309 SIMPLE [(test_txt)test_txt.FieldSchema(name:c309, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c31 SIMPLE [(test_txt)test_txt.FieldSchema(name:c31, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c310 SIMPLE [(test_txt)test_txt.FieldSchema(name:c310, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c311 SIMPLE [(test_txt)test_txt.FieldSchema(name:c311, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c312 SIMPLE [(test_txt)test_txt.FieldSchema(name:c312, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c313 SIMPLE [(test_txt)test_txt.FieldSchema(name:c313, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c314 SIMPLE [(test_txt)test_txt.FieldSchema(name:c314, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c315 SIMPLE [(test_txt)test_txt.FieldSchema(name:c315, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c316 SIMPLE [(test_txt)test_txt.FieldSchema(name:c316, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c317 SIMPLE [(test_txt)test_txt.FieldSchema(name:c317, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c318 SIMPLE [(test_txt)test_txt.FieldSchema(name:c318, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c319 SIMPLE [(test_txt)test_txt.FieldSchema(name:c319, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c32 SIMPLE [(test_txt)test_txt.FieldSchema(name:c32, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c320 SIMPLE [(test_txt)test_txt.FieldSchema(name:c320, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c321 SIMPLE [(test_txt)test_txt.FieldSchema(name:c321, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c322 SIMPLE [(test_txt)test_txt.FieldSchema(name:c322, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c323 SIMPLE [(test_txt)test_txt.FieldSchema(name:c323, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c324 SIMPLE [(test_txt)test_txt.FieldSchema(name:c324, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c325 SIMPLE [(test_txt)test_txt.FieldSchema(name:c325, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c326 SIMPLE [(test_txt)test_txt.FieldSchema(name:c326, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c327 SIMPLE [(test_txt)test_txt.FieldSchema(name:c327, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c328 SIMPLE [(test_txt)test_txt.FieldSchema(name:c328, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c329 SIMPLE [(test_txt)test_txt.FieldSchema(name:c329, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c33 SIMPLE [(test_txt)test_txt.FieldSchema(name:c33, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c330 SIMPLE [(test_txt)test_txt.FieldSchema(name:c330, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c331 SIMPLE [(test_txt)test_txt.FieldSchema(name:c331, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c332 SIMPLE [(test_txt)test_txt.FieldSchema(name:c332, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c333 SIMPLE [(test_txt)test_txt.FieldSchema(name:c333, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c334 SIMPLE [(test_txt)test_txt.FieldSchema(name:c334, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c335 SIMPLE [(test_txt)test_txt.FieldSchema(name:c335, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c336 SIMPLE [(test_txt)test_txt.FieldSchema(name:c336, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c337 SIMPLE [(test_txt)test_txt.FieldSchema(name:c337, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c338 SIMPLE [(test_txt)test_txt.FieldSchema(name:c338, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c339 SIMPLE [(test_txt)test_txt.FieldSchema(name:c339, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c34 SIMPLE [(test_txt)test_txt.FieldSchema(name:c34, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c340 SIMPLE [(test_txt)test_txt.FieldSchema(name:c340, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c341 SIMPLE [(test_txt)test_txt.FieldSchema(name:c341, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c342 SIMPLE [(test_txt)test_txt.FieldSchema(name:c342, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c343 SIMPLE [(test_txt)test_txt.FieldSchema(name:c343, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c344 SIMPLE [(test_txt)test_txt.FieldSchema(name:c344, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c345 SIMPLE [(test_txt)test_txt.FieldSchema(name:c345, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c346 SIMPLE [(test_txt)test_txt.FieldSchema(name:c346, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c347 SIMPLE [(test_txt)test_txt.FieldSchema(name:c347, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c348 SIMPLE [(test_txt)test_txt.FieldSchema(name:c348, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c349 SIMPLE [(test_txt)test_txt.FieldSchema(name:c349, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c35 SIMPLE [(test_txt)test_txt.FieldSchema(name:c35, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c350 SIMPLE [(test_txt)test_txt.FieldSchema(name:c350, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c351 SIMPLE [(test_txt)test_txt.FieldSchema(name:c351, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c352 SIMPLE [(test_txt)test_txt.FieldSchema(name:c352, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c353 SIMPLE [(test_txt)test_txt.FieldSchema(name:c353, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c354 SIMPLE [(test_txt)test_txt.FieldSchema(name:c354, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c355 SIMPLE [(test_txt)test_txt.FieldSchema(name:c355, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c356 SIMPLE [(test_txt)test_txt.FieldSchema(name:c356, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c357 SIMPLE [(test_txt)test_txt.FieldSchema(name:c357, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c358 SIMPLE [(test_txt)test_txt.FieldSchema(name:c358, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c359 SIMPLE [(test_txt)test_txt.FieldSchema(name:c359, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c36 SIMPLE [(test_txt)test_txt.FieldSchema(name:c36, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c360 SIMPLE [(test_txt)test_txt.FieldSchema(name:c360, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c361 SIMPLE [(test_txt)test_txt.FieldSchema(name:c361, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c362 SIMPLE [(test_txt)test_txt.FieldSchema(name:c362, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c363 SIMPLE [(test_txt)test_txt.FieldSchema(name:c363, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c364 SIMPLE [(test_txt)test_txt.FieldSchema(name:c364, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c365 SIMPLE [(test_txt)test_txt.FieldSchema(name:c365, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c366 SIMPLE [(test_txt)test_txt.FieldSchema(name:c366, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c367 SIMPLE [(test_txt)test_txt.FieldSchema(name:c367, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c368 SIMPLE [(test_txt)test_txt.FieldSchema(name:c368, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c369 SIMPLE [(test_txt)test_txt.FieldSchema(name:c369, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c37 SIMPLE [(test_txt)test_txt.FieldSchema(name:c37, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c370 SIMPLE [(test_txt)test_txt.FieldSchema(name:c370, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c371 SIMPLE [(test_txt)test_txt.FieldSchema(name:c371, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c372 SIMPLE [(test_txt)test_txt.FieldSchema(name:c372, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c373 SIMPLE [(test_txt)test_txt.FieldSchema(name:c373, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c374 SIMPLE [(test_txt)test_txt.FieldSchema(name:c374, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c375 SIMPLE [(test_txt)test_txt.FieldSchema(name:c375, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c376 SIMPLE [(test_txt)test_txt.FieldSchema(name:c376, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c377 SIMPLE [(test_txt)test_txt.FieldSchema(name:c377, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c378 SIMPLE [(test_txt)test_txt.FieldSchema(name:c378, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c379 SIMPLE [(test_txt)test_txt.FieldSchema(name:c379, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c38 SIMPLE [(test_txt)test_txt.FieldSchema(name:c38, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c380 SIMPLE [(test_txt)test_txt.FieldSchema(name:c380, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c381 SIMPLE [(test_txt)test_txt.FieldSchema(name:c381, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c382 SIMPLE [(test_txt)test_txt.FieldSchema(name:c382, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c383 SIMPLE [(test_txt)test_txt.FieldSchema(name:c383, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c384 SIMPLE [(test_txt)test_txt.FieldSchema(name:c384, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c385 SIMPLE [(test_txt)test_txt.FieldSchema(name:c385, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c386 SIMPLE [(test_txt)test_txt.FieldSchema(name:c386, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c387 SIMPLE [(test_txt)test_txt.FieldSchema(name:c387, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c388 SIMPLE [(test_txt)test_txt.FieldSchema(name:c388, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c389 SIMPLE [(test_txt)test_txt.FieldSchema(name:c389, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c39 SIMPLE [(test_txt)test_txt.FieldSchema(name:c39, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c390 SIMPLE [(test_txt)test_txt.FieldSchema(name:c390, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c391 SIMPLE [(test_txt)test_txt.FieldSchema(name:c391, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c392 SIMPLE [(test_txt)test_txt.FieldSchema(name:c392, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c393 SIMPLE [(test_txt)test_txt.FieldSchema(name:c393, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c394 SIMPLE [(test_txt)test_txt.FieldSchema(name:c394, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c395 SIMPLE [(test_txt)test_txt.FieldSchema(name:c395, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c396 SIMPLE [(test_txt)test_txt.FieldSchema(name:c396, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c397 SIMPLE [(test_txt)test_txt.FieldSchema(name:c397, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c398 SIMPLE [(test_txt)test_txt.FieldSchema(name:c398, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c399 SIMPLE [(test_txt)test_txt.FieldSchema(name:c399, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c4 SIMPLE [(test_txt)test_txt.FieldSchema(name:c4, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c40 SIMPLE [(test_txt)test_txt.FieldSchema(name:c40, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c400 SIMPLE [(test_txt)test_txt.FieldSchema(name:c400, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c401 SIMPLE [(test_txt)test_txt.FieldSchema(name:c401, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c402 SIMPLE [(test_txt)test_txt.FieldSchema(name:c402, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c403 SIMPLE [(test_txt)test_txt.FieldSchema(name:c403, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c404 SIMPLE [(test_txt)test_txt.FieldSchema(name:c404, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c405 SIMPLE [(test_txt)test_txt.FieldSchema(name:c405, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c406 SIMPLE [(test_txt)test_txt.FieldSchema(name:c406, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c407 SIMPLE [(test_txt)test_txt.FieldSchema(name:c407, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c408 SIMPLE [(test_txt)test_txt.FieldSchema(name:c408, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c409 SIMPLE [(test_txt)test_txt.FieldSchema(name:c409, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c41 SIMPLE [(test_txt)test_txt.FieldSchema(name:c41, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c410 SIMPLE [(test_txt)test_txt.FieldSchema(name:c410, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c411 SIMPLE [(test_txt)test_txt.FieldSchema(name:c411, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c412 SIMPLE [(test_txt)test_txt.FieldSchema(name:c412, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c413 SIMPLE [(test_txt)test_txt.FieldSchema(name:c413, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c414 SIMPLE [(test_txt)test_txt.FieldSchema(name:c414, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c415 SIMPLE [(test_txt)test_txt.FieldSchema(name:c415, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c416 SIMPLE [(test_txt)test_txt.FieldSchema(name:c416, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c417 SIMPLE [(test_txt)test_txt.FieldSchema(name:c417, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c418 SIMPLE [(test_txt)test_txt.FieldSchema(name:c418, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c419 SIMPLE [(test_txt)test_txt.FieldSchema(name:c419, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c42 SIMPLE [(test_txt)test_txt.FieldSchema(name:c42, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c420 SIMPLE [(test_txt)test_txt.FieldSchema(name:c420, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c421 SIMPLE [(test_txt)test_txt.FieldSchema(name:c421, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c422 SIMPLE [(test_txt)test_txt.FieldSchema(name:c422, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c423 SIMPLE [(test_txt)test_txt.FieldSchema(name:c423, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c424 SIMPLE [(test_txt)test_txt.FieldSchema(name:c424, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c425 SIMPLE [(test_txt)test_txt.FieldSchema(name:c425, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c426 SIMPLE [(test_txt)test_txt.FieldSchema(name:c426, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c427 SIMPLE [(test_txt)test_txt.FieldSchema(name:c427, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c428 SIMPLE [(test_txt)test_txt.FieldSchema(name:c428, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c429 SIMPLE [(test_txt)test_txt.FieldSchema(name:c429, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c43 SIMPLE [(test_txt)test_txt.FieldSchema(name:c43, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c430 SIMPLE [(test_txt)test_txt.FieldSchema(name:c430, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c431 SIMPLE [(test_txt)test_txt.FieldSchema(name:c431, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c432 SIMPLE [(test_txt)test_txt.FieldSchema(name:c432, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c433 SIMPLE [(test_txt)test_txt.FieldSchema(name:c433, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c434 SIMPLE [(test_txt)test_txt.FieldSchema(name:c434, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c435 SIMPLE [(test_txt)test_txt.FieldSchema(name:c435, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c436 SIMPLE [(test_txt)test_txt.FieldSchema(name:c436, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c437 SIMPLE [(test_txt)test_txt.FieldSchema(name:c437, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c438 SIMPLE [(test_txt)test_txt.FieldSchema(name:c438, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c439 SIMPLE [(test_txt)test_txt.FieldSchema(name:c439, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c44 SIMPLE [(test_txt)test_txt.FieldSchema(name:c44, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c440 SIMPLE [(test_txt)test_txt.FieldSchema(name:c440, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c441 SIMPLE [(test_txt)test_txt.FieldSchema(name:c441, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c442 SIMPLE [(test_txt)test_txt.FieldSchema(name:c442, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c443 SIMPLE [(test_txt)test_txt.FieldSchema(name:c443, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c444 SIMPLE [(test_txt)test_txt.FieldSchema(name:c444, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c445 SIMPLE [(test_txt)test_txt.FieldSchema(name:c445, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c446 SIMPLE [(test_txt)test_txt.FieldSchema(name:c446, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c447 SIMPLE [(test_txt)test_txt.FieldSchema(name:c447, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c448 SIMPLE [(test_txt)test_txt.FieldSchema(name:c448, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c449 SIMPLE [(test_txt)test_txt.FieldSchema(name:c449, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c45 SIMPLE [(test_txt)test_txt.FieldSchema(name:c45, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c450 SIMPLE [(test_txt)test_txt.FieldSchema(name:c450, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c451 SIMPLE [(test_txt)test_txt.FieldSchema(name:c451, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c452 SIMPLE [(test_txt)test_txt.FieldSchema(name:c452, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c453 SIMPLE [(test_txt)test_txt.FieldSchema(name:c453, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c454 SIMPLE [(test_txt)test_txt.FieldSchema(name:c454, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c455 SIMPLE [(test_txt)test_txt.FieldSchema(name:c455, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c456 SIMPLE [(test_txt)test_txt.FieldSchema(name:c456, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c457 SIMPLE [(test_txt)test_txt.FieldSchema(name:c457, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c458 SIMPLE [(test_txt)test_txt.FieldSchema(name:c458, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c459 SIMPLE [(test_txt)test_txt.FieldSchema(name:c459, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c46 SIMPLE [(test_txt)test_txt.FieldSchema(name:c46, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c460 SIMPLE [(test_txt)test_txt.FieldSchema(name:c460, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c461 SIMPLE [(test_txt)test_txt.FieldSchema(name:c461, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c462 SIMPLE [(test_txt)test_txt.FieldSchema(name:c462, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c463 SIMPLE [(test_txt)test_txt.FieldSchema(name:c463, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c464 SIMPLE [(test_txt)test_txt.FieldSchema(name:c464, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c465 SIMPLE [(test_txt)test_txt.FieldSchema(name:c465, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c466 SIMPLE [(test_txt)test_txt.FieldSchema(name:c466, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c467 SIMPLE [(test_txt)test_txt.FieldSchema(name:c467, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c468 SIMPLE [(test_txt)test_txt.FieldSchema(name:c468, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c469 SIMPLE [(test_txt)test_txt.FieldSchema(name:c469, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c47 SIMPLE [(test_txt)test_txt.FieldSchema(name:c47, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c470 SIMPLE [(test_txt)test_txt.FieldSchema(name:c470, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c471 SIMPLE [(test_txt)test_txt.FieldSchema(name:c471, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c472 SIMPLE [(test_txt)test_txt.FieldSchema(name:c472, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c473 SIMPLE [(test_txt)test_txt.FieldSchema(name:c473, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c474 SIMPLE [(test_txt)test_txt.FieldSchema(name:c474, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c475 SIMPLE [(test_txt)test_txt.FieldSchema(name:c475, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c476 SIMPLE [(test_txt)test_txt.FieldSchema(name:c476, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c477 SIMPLE [(test_txt)test_txt.FieldSchema(name:c477, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c478 SIMPLE [(test_txt)test_txt.FieldSchema(name:c478, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c479 SIMPLE [(test_txt)test_txt.FieldSchema(name:c479, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c48 SIMPLE [(test_txt)test_txt.FieldSchema(name:c48, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c480 SIMPLE [(test_txt)test_txt.FieldSchema(name:c480, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c481 SIMPLE [(test_txt)test_txt.FieldSchema(name:c481, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c482 SIMPLE [(test_txt)test_txt.FieldSchema(name:c482, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c483 SIMPLE [(test_txt)test_txt.FieldSchema(name:c483, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c484 SIMPLE [(test_txt)test_txt.FieldSchema(name:c484, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c485 SIMPLE [(test_txt)test_txt.FieldSchema(name:c485, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c486 SIMPLE [(test_txt)test_txt.FieldSchema(name:c486, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c487 SIMPLE [(test_txt)test_txt.FieldSchema(name:c487, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c488 SIMPLE [(test_txt)test_txt.FieldSchema(name:c488, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c489 SIMPLE [(test_txt)test_txt.FieldSchema(name:c489, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c49 SIMPLE [(test_txt)test_txt.FieldSchema(name:c49, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c490 SIMPLE [(test_txt)test_txt.FieldSchema(name:c490, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c491 SIMPLE [(test_txt)test_txt.FieldSchema(name:c491, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c492 SIMPLE [(test_txt)test_txt.FieldSchema(name:c492, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c493 SIMPLE [(test_txt)test_txt.FieldSchema(name:c493, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c494 SIMPLE [(test_txt)test_txt.FieldSchema(name:c494, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c495 SIMPLE [(test_txt)test_txt.FieldSchema(name:c495, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c496 SIMPLE [(test_txt)test_txt.FieldSchema(name:c496, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c497 SIMPLE [(test_txt)test_txt.FieldSchema(name:c497, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c498 SIMPLE [(test_txt)test_txt.FieldSchema(name:c498, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c499 SIMPLE [(test_txt)test_txt.FieldSchema(name:c499, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c5 SIMPLE [(test_txt)test_txt.FieldSchema(name:c5, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c50 SIMPLE [(test_txt)test_txt.FieldSchema(name:c50, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c500 SIMPLE [(test_txt)test_txt.FieldSchema(name:c500, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c501 SIMPLE [(test_txt)test_txt.FieldSchema(name:c501, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c502 SIMPLE [(test_txt)test_txt.FieldSchema(name:c502, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c503 SIMPLE [(test_txt)test_txt.FieldSchema(name:c503, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c504 SIMPLE [(test_txt)test_txt.FieldSchema(name:c504, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c505 SIMPLE [(test_txt)test_txt.FieldSchema(name:c505, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c506 SIMPLE [(test_txt)test_txt.FieldSchema(name:c506, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c507 SIMPLE [(test_txt)test_txt.FieldSchema(name:c507, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c508 SIMPLE [(test_txt)test_txt.FieldSchema(name:c508, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c509 SIMPLE [(test_txt)test_txt.FieldSchema(name:c509, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c51 SIMPLE [(test_txt)test_txt.FieldSchema(name:c51, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c510 SIMPLE [(test_txt)test_txt.FieldSchema(name:c510, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c511 SIMPLE [(test_txt)test_txt.FieldSchema(name:c511, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c512 SIMPLE [(test_txt)test_txt.FieldSchema(name:c512, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c513 SIMPLE [(test_txt)test_txt.FieldSchema(name:c513, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c514 SIMPLE [(test_txt)test_txt.FieldSchema(name:c514, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c515 SIMPLE [(test_txt)test_txt.FieldSchema(name:c515, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c516 SIMPLE [(test_txt)test_txt.FieldSchema(name:c516, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c517 SIMPLE [(test_txt)test_txt.FieldSchema(name:c517, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c518 SIMPLE [(test_txt)test_txt.FieldSchema(name:c518, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c519 SIMPLE [(test_txt)test_txt.FieldSchema(name:c519, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c52 SIMPLE [(test_txt)test_txt.FieldSchema(name:c52, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c520 SIMPLE [(test_txt)test_txt.FieldSchema(name:c520, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c521 SIMPLE [(test_txt)test_txt.FieldSchema(name:c521, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c522 SIMPLE [(test_txt)test_txt.FieldSchema(name:c522, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c523 SIMPLE [(test_txt)test_txt.FieldSchema(name:c523, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c524 SIMPLE [(test_txt)test_txt.FieldSchema(name:c524, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c525 SIMPLE [(test_txt)test_txt.FieldSchema(name:c525, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c526 SIMPLE [(test_txt)test_txt.FieldSchema(name:c526, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c527 SIMPLE [(test_txt)test_txt.FieldSchema(name:c527, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c528 SIMPLE [(test_txt)test_txt.FieldSchema(name:c528, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c529 SIMPLE [(test_txt)test_txt.FieldSchema(name:c529, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c53 SIMPLE [(test_txt)test_txt.FieldSchema(name:c53, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c530 SIMPLE [(test_txt)test_txt.FieldSchema(name:c530, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c531 SIMPLE [(test_txt)test_txt.FieldSchema(name:c531, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c532 SIMPLE [(test_txt)test_txt.FieldSchema(name:c532, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c533 SIMPLE [(test_txt)test_txt.FieldSchema(name:c533, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c534 SIMPLE [(test_txt)test_txt.FieldSchema(name:c534, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c535 SIMPLE [(test_txt)test_txt.FieldSchema(name:c535, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c536 SIMPLE [(test_txt)test_txt.FieldSchema(name:c536, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c537 SIMPLE [(test_txt)test_txt.FieldSchema(name:c537, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c538 SIMPLE [(test_txt)test_txt.FieldSchema(name:c538, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c539 SIMPLE [(test_txt)test_txt.FieldSchema(name:c539, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c54 SIMPLE [(test_txt)test_txt.FieldSchema(name:c54, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c540 SIMPLE [(test_txt)test_txt.FieldSchema(name:c540, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c541 SIMPLE [(test_txt)test_txt.FieldSchema(name:c541, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c542 SIMPLE [(test_txt)test_txt.FieldSchema(name:c542, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c543 SIMPLE [(test_txt)test_txt.FieldSchema(name:c543, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c544 SIMPLE [(test_txt)test_txt.FieldSchema(name:c544, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c545 SIMPLE [(test_txt)test_txt.FieldSchema(name:c545, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c546 SIMPLE [(test_txt)test_txt.FieldSchema(name:c546, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c547 SIMPLE [(test_txt)test_txt.FieldSchema(name:c547, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c548 SIMPLE [(test_txt)test_txt.FieldSchema(name:c548, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c549 SIMPLE [(test_txt)test_txt.FieldSchema(name:c549, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c55 SIMPLE [(test_txt)test_txt.FieldSchema(name:c55, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c550 SIMPLE [(test_txt)test_txt.FieldSchema(name:c550, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c551 SIMPLE [(test_txt)test_txt.FieldSchema(name:c551, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c552 SIMPLE [(test_txt)test_txt.FieldSchema(name:c552, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c553 SIMPLE [(test_txt)test_txt.FieldSchema(name:c553, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c554 SIMPLE [(test_txt)test_txt.FieldSchema(name:c554, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c555 SIMPLE [(test_txt)test_txt.FieldSchema(name:c555, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c556 SIMPLE [(test_txt)test_txt.FieldSchema(name:c556, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c557 SIMPLE [(test_txt)test_txt.FieldSchema(name:c557, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c558 SIMPLE [(test_txt)test_txt.FieldSchema(name:c558, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c559 SIMPLE [(test_txt)test_txt.FieldSchema(name:c559, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c56 SIMPLE [(test_txt)test_txt.FieldSchema(name:c56, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c560 SIMPLE [(test_txt)test_txt.FieldSchema(name:c560, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c561 SIMPLE [(test_txt)test_txt.FieldSchema(name:c561, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c562 SIMPLE [(test_txt)test_txt.FieldSchema(name:c562, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c563 SIMPLE [(test_txt)test_txt.FieldSchema(name:c563, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c564 SIMPLE [(test_txt)test_txt.FieldSchema(name:c564, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c565 SIMPLE [(test_txt)test_txt.FieldSchema(name:c565, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c566 SIMPLE [(test_txt)test_txt.FieldSchema(name:c566, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c567 SIMPLE [(test_txt)test_txt.FieldSchema(name:c567, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c568 SIMPLE [(test_txt)test_txt.FieldSchema(name:c568, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c569 SIMPLE [(test_txt)test_txt.FieldSchema(name:c569, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c57 SIMPLE [(test_txt)test_txt.FieldSchema(name:c57, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c570 SIMPLE [(test_txt)test_txt.FieldSchema(name:c570, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c571 SIMPLE [(test_txt)test_txt.FieldSchema(name:c571, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c572 SIMPLE [(test_txt)test_txt.FieldSchema(name:c572, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c573 SIMPLE [(test_txt)test_txt.FieldSchema(name:c573, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c574 SIMPLE [(test_txt)test_txt.FieldSchema(name:c574, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c575 SIMPLE [(test_txt)test_txt.FieldSchema(name:c575, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c576 SIMPLE [(test_txt)test_txt.FieldSchema(name:c576, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c577 SIMPLE [(test_txt)test_txt.FieldSchema(name:c577, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c578 SIMPLE [(test_txt)test_txt.FieldSchema(name:c578, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c579 SIMPLE [(test_txt)test_txt.FieldSchema(name:c579, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c58 SIMPLE [(test_txt)test_txt.FieldSchema(name:c58, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c580 SIMPLE [(test_txt)test_txt.FieldSchema(name:c580, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c581 SIMPLE [(test_txt)test_txt.FieldSchema(name:c581, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c582 SIMPLE [(test_txt)test_txt.FieldSchema(name:c582, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c583 SIMPLE [(test_txt)test_txt.FieldSchema(name:c583, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c584 SIMPLE [(test_txt)test_txt.FieldSchema(name:c584, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c585 SIMPLE [(test_txt)test_txt.FieldSchema(name:c585, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c586 SIMPLE [(test_txt)test_txt.FieldSchema(name:c586, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c587 SIMPLE [(test_txt)test_txt.FieldSchema(name:c587, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c588 SIMPLE [(test_txt)test_txt.FieldSchema(name:c588, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c589 SIMPLE [(test_txt)test_txt.FieldSchema(name:c589, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c59 SIMPLE [(test_txt)test_txt.FieldSchema(name:c59, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c590 SIMPLE [(test_txt)test_txt.FieldSchema(name:c590, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c591 SIMPLE [(test_txt)test_txt.FieldSchema(name:c591, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c592 SIMPLE [(test_txt)test_txt.FieldSchema(name:c592, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c593 SIMPLE [(test_txt)test_txt.FieldSchema(name:c593, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c594 SIMPLE [(test_txt)test_txt.FieldSchema(name:c594, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c595 SIMPLE [(test_txt)test_txt.FieldSchema(name:c595, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c596 SIMPLE [(test_txt)test_txt.FieldSchema(name:c596, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c597 SIMPLE [(test_txt)test_txt.FieldSchema(name:c597, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c598 SIMPLE [(test_txt)test_txt.FieldSchema(name:c598, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c599 SIMPLE [(test_txt)test_txt.FieldSchema(name:c599, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c6 SIMPLE [(test_txt)test_txt.FieldSchema(name:c6, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c60 SIMPLE [(test_txt)test_txt.FieldSchema(name:c60, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c600 SIMPLE [(test_txt)test_txt.FieldSchema(name:c600, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c601 SIMPLE [(test_txt)test_txt.FieldSchema(name:c601, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c602 SIMPLE [(test_txt)test_txt.FieldSchema(name:c602, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c603 SIMPLE [(test_txt)test_txt.FieldSchema(name:c603, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c604 SIMPLE [(test_txt)test_txt.FieldSchema(name:c604, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c605 SIMPLE [(test_txt)test_txt.FieldSchema(name:c605, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c606 SIMPLE [(test_txt)test_txt.FieldSchema(name:c606, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c607 SIMPLE [(test_txt)test_txt.FieldSchema(name:c607, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c608 SIMPLE [(test_txt)test_txt.FieldSchema(name:c608, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c609 SIMPLE [(test_txt)test_txt.FieldSchema(name:c609, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c61 SIMPLE [(test_txt)test_txt.FieldSchema(name:c61, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c610 SIMPLE [(test_txt)test_txt.FieldSchema(name:c610, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c611 SIMPLE [(test_txt)test_txt.FieldSchema(name:c611, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c612 SIMPLE [(test_txt)test_txt.FieldSchema(name:c612, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c613 SIMPLE [(test_txt)test_txt.FieldSchema(name:c613, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c614 SIMPLE [(test_txt)test_txt.FieldSchema(name:c614, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c615 SIMPLE [(test_txt)test_txt.FieldSchema(name:c615, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c616 SIMPLE [(test_txt)test_txt.FieldSchema(name:c616, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c617 SIMPLE [(test_txt)test_txt.FieldSchema(name:c617, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c618 SIMPLE [(test_txt)test_txt.FieldSchema(name:c618, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c619 SIMPLE [(test_txt)test_txt.FieldSchema(name:c619, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c62 SIMPLE [(test_txt)test_txt.FieldSchema(name:c62, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c620 SIMPLE [(test_txt)test_txt.FieldSchema(name:c620, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c621 SIMPLE [(test_txt)test_txt.FieldSchema(name:c621, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c622 SIMPLE [(test_txt)test_txt.FieldSchema(name:c622, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c623 SIMPLE [(test_txt)test_txt.FieldSchema(name:c623, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c624 SIMPLE [(test_txt)test_txt.FieldSchema(name:c624, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c625 SIMPLE [(test_txt)test_txt.FieldSchema(name:c625, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c626 SIMPLE [(test_txt)test_txt.FieldSchema(name:c626, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c627 SIMPLE [(test_txt)test_txt.FieldSchema(name:c627, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c628 SIMPLE [(test_txt)test_txt.FieldSchema(name:c628, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c629 SIMPLE [(test_txt)test_txt.FieldSchema(name:c629, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c63 SIMPLE [(test_txt)test_txt.FieldSchema(name:c63, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c630 SIMPLE [(test_txt)test_txt.FieldSchema(name:c630, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c631 SIMPLE [(test_txt)test_txt.FieldSchema(name:c631, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c632 SIMPLE [(test_txt)test_txt.FieldSchema(name:c632, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c633 SIMPLE [(test_txt)test_txt.FieldSchema(name:c633, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c634 SIMPLE [(test_txt)test_txt.FieldSchema(name:c634, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c635 SIMPLE [(test_txt)test_txt.FieldSchema(name:c635, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c636 SIMPLE [(test_txt)test_txt.FieldSchema(name:c636, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c637 SIMPLE [(test_txt)test_txt.FieldSchema(name:c637, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c638 SIMPLE [(test_txt)test_txt.FieldSchema(name:c638, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c639 SIMPLE [(test_txt)test_txt.FieldSchema(name:c639, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c64 SIMPLE [(test_txt)test_txt.FieldSchema(name:c64, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c640 SIMPLE [(test_txt)test_txt.FieldSchema(name:c640, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c641 SIMPLE [(test_txt)test_txt.FieldSchema(name:c641, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c642 SIMPLE [(test_txt)test_txt.FieldSchema(name:c642, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c643 SIMPLE [(test_txt)test_txt.FieldSchema(name:c643, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c644 SIMPLE [(test_txt)test_txt.FieldSchema(name:c644, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c645 SIMPLE [(test_txt)test_txt.FieldSchema(name:c645, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c646 SIMPLE [(test_txt)test_txt.FieldSchema(name:c646, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c647 SIMPLE [(test_txt)test_txt.FieldSchema(name:c647, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c648 SIMPLE [(test_txt)test_txt.FieldSchema(name:c648, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c649 SIMPLE [(test_txt)test_txt.FieldSchema(name:c649, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c65 SIMPLE [(test_txt)test_txt.FieldSchema(name:c65, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c650 SIMPLE [(test_txt)test_txt.FieldSchema(name:c650, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c651 SIMPLE [(test_txt)test_txt.FieldSchema(name:c651, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c652 SIMPLE [(test_txt)test_txt.FieldSchema(name:c652, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c653 SIMPLE [(test_txt)test_txt.FieldSchema(name:c653, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c654 SIMPLE [(test_txt)test_txt.FieldSchema(name:c654, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c655 SIMPLE [(test_txt)test_txt.FieldSchema(name:c655, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c656 SIMPLE [(test_txt)test_txt.FieldSchema(name:c656, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c657 SIMPLE [(test_txt)test_txt.FieldSchema(name:c657, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c658 SIMPLE [(test_txt)test_txt.FieldSchema(name:c658, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c659 SIMPLE [(test_txt)test_txt.FieldSchema(name:c659, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c66 SIMPLE [(test_txt)test_txt.FieldSchema(name:c66, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c660 SIMPLE [(test_txt)test_txt.FieldSchema(name:c660, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c661 SIMPLE [(test_txt)test_txt.FieldSchema(name:c661, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c662 SIMPLE [(test_txt)test_txt.FieldSchema(name:c662, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c663 SIMPLE [(test_txt)test_txt.FieldSchema(name:c663, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c664 SIMPLE [(test_txt)test_txt.FieldSchema(name:c664, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c665 SIMPLE [(test_txt)test_txt.FieldSchema(name:c665, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c666 SIMPLE [(test_txt)test_txt.FieldSchema(name:c666, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c667 SIMPLE [(test_txt)test_txt.FieldSchema(name:c667, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c668 SIMPLE [(test_txt)test_txt.FieldSchema(name:c668, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c669 SIMPLE [(test_txt)test_txt.FieldSchema(name:c669, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c67 SIMPLE [(test_txt)test_txt.FieldSchema(name:c67, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c670 SIMPLE [(test_txt)test_txt.FieldSchema(name:c670, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c671 SIMPLE [(test_txt)test_txt.FieldSchema(name:c671, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c672 SIMPLE [(test_txt)test_txt.FieldSchema(name:c672, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c673 SIMPLE [(test_txt)test_txt.FieldSchema(name:c673, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c674 SIMPLE [(test_txt)test_txt.FieldSchema(name:c674, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c675 SIMPLE [(test_txt)test_txt.FieldSchema(name:c675, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c676 SIMPLE [(test_txt)test_txt.FieldSchema(name:c676, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c677 SIMPLE [(test_txt)test_txt.FieldSchema(name:c677, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c678 SIMPLE [(test_txt)test_txt.FieldSchema(name:c678, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c679 SIMPLE [(test_txt)test_txt.FieldSchema(name:c679, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c68 SIMPLE [(test_txt)test_txt.FieldSchema(name:c68, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c680 SIMPLE [(test_txt)test_txt.FieldSchema(name:c680, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c681 SIMPLE [(test_txt)test_txt.FieldSchema(name:c681, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c682 SIMPLE [(test_txt)test_txt.FieldSchema(name:c682, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c683 SIMPLE [(test_txt)test_txt.FieldSchema(name:c683, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c684 SIMPLE [(test_txt)test_txt.FieldSchema(name:c684, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c685 SIMPLE [(test_txt)test_txt.FieldSchema(name:c685, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c686 SIMPLE [(test_txt)test_txt.FieldSchema(name:c686, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c687 SIMPLE [(test_txt)test_txt.FieldSchema(name:c687, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c688 SIMPLE [(test_txt)test_txt.FieldSchema(name:c688, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c689 SIMPLE [(test_txt)test_txt.FieldSchema(name:c689, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c69 SIMPLE [(test_txt)test_txt.FieldSchema(name:c69, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c690 SIMPLE [(test_txt)test_txt.FieldSchema(name:c690, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c691 SIMPLE [(test_txt)test_txt.FieldSchema(name:c691, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c692 SIMPLE [(test_txt)test_txt.FieldSchema(name:c692, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c693 SIMPLE [(test_txt)test_txt.FieldSchema(name:c693, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c694 SIMPLE [(test_txt)test_txt.FieldSchema(name:c694, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c695 SIMPLE [(test_txt)test_txt.FieldSchema(name:c695, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c696 SIMPLE [(test_txt)test_txt.FieldSchema(name:c696, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c697 SIMPLE [(test_txt)test_txt.FieldSchema(name:c697, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c698 SIMPLE [(test_txt)test_txt.FieldSchema(name:c698, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c699 SIMPLE [(test_txt)test_txt.FieldSchema(name:c699, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c7 SIMPLE [(test_txt)test_txt.FieldSchema(name:c7, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c70 SIMPLE [(test_txt)test_txt.FieldSchema(name:c70, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c700 SIMPLE [(test_txt)test_txt.FieldSchema(name:c700, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c701 SIMPLE [(test_txt)test_txt.FieldSchema(name:c701, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c702 SIMPLE [(test_txt)test_txt.FieldSchema(name:c702, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c703 SIMPLE [(test_txt)test_txt.FieldSchema(name:c703, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c704 SIMPLE [(test_txt)test_txt.FieldSchema(name:c704, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c705 SIMPLE [(test_txt)test_txt.FieldSchema(name:c705, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c706 SIMPLE [(test_txt)test_txt.FieldSchema(name:c706, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c707 SIMPLE [(test_txt)test_txt.FieldSchema(name:c707, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c708 SIMPLE [(test_txt)test_txt.FieldSchema(name:c708, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c709 SIMPLE [(test_txt)test_txt.FieldSchema(name:c709, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c71 SIMPLE [(test_txt)test_txt.FieldSchema(name:c71, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c710 SIMPLE [(test_txt)test_txt.FieldSchema(name:c710, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c711 SIMPLE [(test_txt)test_txt.FieldSchema(name:c711, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c712 SIMPLE [(test_txt)test_txt.FieldSchema(name:c712, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c713 SIMPLE [(test_txt)test_txt.FieldSchema(name:c713, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c714 SIMPLE [(test_txt)test_txt.FieldSchema(name:c714, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c715 SIMPLE [(test_txt)test_txt.FieldSchema(name:c715, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c716 SIMPLE [(test_txt)test_txt.FieldSchema(name:c716, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c717 SIMPLE [(test_txt)test_txt.FieldSchema(name:c717, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c718 SIMPLE [(test_txt)test_txt.FieldSchema(name:c718, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c719 SIMPLE [(test_txt)test_txt.FieldSchema(name:c719, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c72 SIMPLE [(test_txt)test_txt.FieldSchema(name:c72, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c720 SIMPLE [(test_txt)test_txt.FieldSchema(name:c720, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c721 SIMPLE [(test_txt)test_txt.FieldSchema(name:c721, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c722 SIMPLE [(test_txt)test_txt.FieldSchema(name:c722, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c723 SIMPLE [(test_txt)test_txt.FieldSchema(name:c723, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c724 SIMPLE [(test_txt)test_txt.FieldSchema(name:c724, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c725 SIMPLE [(test_txt)test_txt.FieldSchema(name:c725, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c726 SIMPLE [(test_txt)test_txt.FieldSchema(name:c726, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c727 SIMPLE [(test_txt)test_txt.FieldSchema(name:c727, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c728 SIMPLE [(test_txt)test_txt.FieldSchema(name:c728, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c729 SIMPLE [(test_txt)test_txt.FieldSchema(name:c729, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c73 SIMPLE [(test_txt)test_txt.FieldSchema(name:c73, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c730 SIMPLE [(test_txt)test_txt.FieldSchema(name:c730, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c731 SIMPLE [(test_txt)test_txt.FieldSchema(name:c731, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c732 SIMPLE [(test_txt)test_txt.FieldSchema(name:c732, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c733 SIMPLE [(test_txt)test_txt.FieldSchema(name:c733, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c734 SIMPLE [(test_txt)test_txt.FieldSchema(name:c734, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c735 SIMPLE [(test_txt)test_txt.FieldSchema(name:c735, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c736 SIMPLE [(test_txt)test_txt.FieldSchema(name:c736, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c737 SIMPLE [(test_txt)test_txt.FieldSchema(name:c737, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c738 SIMPLE [(test_txt)test_txt.FieldSchema(name:c738, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c739 SIMPLE [(test_txt)test_txt.FieldSchema(name:c739, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c74 SIMPLE [(test_txt)test_txt.FieldSchema(name:c74, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c740 SIMPLE [(test_txt)test_txt.FieldSchema(name:c740, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c741 SIMPLE [(test_txt)test_txt.FieldSchema(name:c741, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c742 SIMPLE [(test_txt)test_txt.FieldSchema(name:c742, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c743 SIMPLE [(test_txt)test_txt.FieldSchema(name:c743, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c744 SIMPLE [(test_txt)test_txt.FieldSchema(name:c744, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c745 SIMPLE [(test_txt)test_txt.FieldSchema(name:c745, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c746 SIMPLE [(test_txt)test_txt.FieldSchema(name:c746, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c747 SIMPLE [(test_txt)test_txt.FieldSchema(name:c747, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c748 SIMPLE [(test_txt)test_txt.FieldSchema(name:c748, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c749 SIMPLE [(test_txt)test_txt.FieldSchema(name:c749, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c75 SIMPLE [(test_txt)test_txt.FieldSchema(name:c75, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c750 SIMPLE [(test_txt)test_txt.FieldSchema(name:c750, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c751 SIMPLE [(test_txt)test_txt.FieldSchema(name:c751, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c752 SIMPLE [(test_txt)test_txt.FieldSchema(name:c752, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c753 SIMPLE [(test_txt)test_txt.FieldSchema(name:c753, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c754 SIMPLE [(test_txt)test_txt.FieldSchema(name:c754, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c755 SIMPLE [(test_txt)test_txt.FieldSchema(name:c755, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c756 SIMPLE [(test_txt)test_txt.FieldSchema(name:c756, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c757 SIMPLE [(test_txt)test_txt.FieldSchema(name:c757, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c758 SIMPLE [(test_txt)test_txt.FieldSchema(name:c758, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c759 SIMPLE [(test_txt)test_txt.FieldSchema(name:c759, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c76 SIMPLE [(test_txt)test_txt.FieldSchema(name:c76, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c760 SIMPLE [(test_txt)test_txt.FieldSchema(name:c760, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c761 SIMPLE [(test_txt)test_txt.FieldSchema(name:c761, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c762 SIMPLE [(test_txt)test_txt.FieldSchema(name:c762, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c763 SIMPLE [(test_txt)test_txt.FieldSchema(name:c763, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c764 SIMPLE [(test_txt)test_txt.FieldSchema(name:c764, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c765 SIMPLE [(test_txt)test_txt.FieldSchema(name:c765, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c766 SIMPLE [(test_txt)test_txt.FieldSchema(name:c766, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c767 SIMPLE [(test_txt)test_txt.FieldSchema(name:c767, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c768 SIMPLE [(test_txt)test_txt.FieldSchema(name:c768, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c769 SIMPLE [(test_txt)test_txt.FieldSchema(name:c769, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c77 SIMPLE [(test_txt)test_txt.FieldSchema(name:c77, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c770 SIMPLE [(test_txt)test_txt.FieldSchema(name:c770, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c771 SIMPLE [(test_txt)test_txt.FieldSchema(name:c771, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c772 SIMPLE [(test_txt)test_txt.FieldSchema(name:c772, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c773 SIMPLE [(test_txt)test_txt.FieldSchema(name:c773, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c774 SIMPLE [(test_txt)test_txt.FieldSchema(name:c774, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c775 SIMPLE [(test_txt)test_txt.FieldSchema(name:c775, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c776 SIMPLE [(test_txt)test_txt.FieldSchema(name:c776, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c777 SIMPLE [(test_txt)test_txt.FieldSchema(name:c777, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c778 SIMPLE [(test_txt)test_txt.FieldSchema(name:c778, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c779 SIMPLE [(test_txt)test_txt.FieldSchema(name:c779, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c78 SIMPLE [(test_txt)test_txt.FieldSchema(name:c78, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c780 SIMPLE [(test_txt)test_txt.FieldSchema(name:c780, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c781 SIMPLE [(test_txt)test_txt.FieldSchema(name:c781, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c782 SIMPLE [(test_txt)test_txt.FieldSchema(name:c782, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c783 SIMPLE [(test_txt)test_txt.FieldSchema(name:c783, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c784 SIMPLE [(test_txt)test_txt.FieldSchema(name:c784, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c785 SIMPLE [(test_txt)test_txt.FieldSchema(name:c785, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c786 SIMPLE [(test_txt)test_txt.FieldSchema(name:c786, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c787 SIMPLE [(test_txt)test_txt.FieldSchema(name:c787, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c788 SIMPLE [(test_txt)test_txt.FieldSchema(name:c788, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c789 SIMPLE [(test_txt)test_txt.FieldSchema(name:c789, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c79 SIMPLE [(test_txt)test_txt.FieldSchema(name:c79, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c790 SIMPLE [(test_txt)test_txt.FieldSchema(name:c790, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c791 SIMPLE [(test_txt)test_txt.FieldSchema(name:c791, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c792 SIMPLE [(test_txt)test_txt.FieldSchema(name:c792, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c793 SIMPLE [(test_txt)test_txt.FieldSchema(name:c793, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c794 SIMPLE [(test_txt)test_txt.FieldSchema(name:c794, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c795 SIMPLE [(test_txt)test_txt.FieldSchema(name:c795, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c796 SIMPLE [(test_txt)test_txt.FieldSchema(name:c796, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c797 SIMPLE [(test_txt)test_txt.FieldSchema(name:c797, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c798 SIMPLE [(test_txt)test_txt.FieldSchema(name:c798, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c799 SIMPLE [(test_txt)test_txt.FieldSchema(name:c799, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c8 SIMPLE [(test_txt)test_txt.FieldSchema(name:c8, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c80 SIMPLE [(test_txt)test_txt.FieldSchema(name:c80, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c800 SIMPLE [(test_txt)test_txt.FieldSchema(name:c800, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c801 SIMPLE [(test_txt)test_txt.FieldSchema(name:c801, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c802 SIMPLE [(test_txt)test_txt.FieldSchema(name:c802, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c803 SIMPLE [(test_txt)test_txt.FieldSchema(name:c803, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c804 SIMPLE [(test_txt)test_txt.FieldSchema(name:c804, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c805 SIMPLE [(test_txt)test_txt.FieldSchema(name:c805, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c806 SIMPLE [(test_txt)test_txt.FieldSchema(name:c806, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c807 SIMPLE [(test_txt)test_txt.FieldSchema(name:c807, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c808 SIMPLE [(test_txt)test_txt.FieldSchema(name:c808, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c809 SIMPLE [(test_txt)test_txt.FieldSchema(name:c809, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c81 SIMPLE [(test_txt)test_txt.FieldSchema(name:c81, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c810 SIMPLE [(test_txt)test_txt.FieldSchema(name:c810, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c811 SIMPLE [(test_txt)test_txt.FieldSchema(name:c811, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c812 SIMPLE [(test_txt)test_txt.FieldSchema(name:c812, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c813 SIMPLE [(test_txt)test_txt.FieldSchema(name:c813, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c814 SIMPLE [(test_txt)test_txt.FieldSchema(name:c814, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c815 SIMPLE [(test_txt)test_txt.FieldSchema(name:c815, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c816 SIMPLE [(test_txt)test_txt.FieldSchema(name:c816, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c817 SIMPLE [(test_txt)test_txt.FieldSchema(name:c817, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c818 SIMPLE [(test_txt)test_txt.FieldSchema(name:c818, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c819 SIMPLE [(test_txt)test_txt.FieldSchema(name:c819, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c82 SIMPLE [(test_txt)test_txt.FieldSchema(name:c82, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c820 SIMPLE [(test_txt)test_txt.FieldSchema(name:c820, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c821 SIMPLE [(test_txt)test_txt.FieldSchema(name:c821, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c822 SIMPLE [(test_txt)test_txt.FieldSchema(name:c822, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c823 SIMPLE [(test_txt)test_txt.FieldSchema(name:c823, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c824 SIMPLE [(test_txt)test_txt.FieldSchema(name:c824, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c825 SIMPLE [(test_txt)test_txt.FieldSchema(name:c825, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c826 SIMPLE [(test_txt)test_txt.FieldSchema(name:c826, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c827 SIMPLE [(test_txt)test_txt.FieldSchema(name:c827, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c828 SIMPLE [(test_txt)test_txt.FieldSchema(name:c828, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c829 SIMPLE [(test_txt)test_txt.FieldSchema(name:c829, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c83 SIMPLE [(test_txt)test_txt.FieldSchema(name:c83, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c830 SIMPLE [(test_txt)test_txt.FieldSchema(name:c830, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c831 SIMPLE [(test_txt)test_txt.FieldSchema(name:c831, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c832 SIMPLE [(test_txt)test_txt.FieldSchema(name:c832, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c833 SIMPLE [(test_txt)test_txt.FieldSchema(name:c833, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c834 SIMPLE [(test_txt)test_txt.FieldSchema(name:c834, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c835 SIMPLE [(test_txt)test_txt.FieldSchema(name:c835, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c836 SIMPLE [(test_txt)test_txt.FieldSchema(name:c836, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c837 SIMPLE [(test_txt)test_txt.FieldSchema(name:c837, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c838 SIMPLE [(test_txt)test_txt.FieldSchema(name:c838, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c839 SIMPLE [(test_txt)test_txt.FieldSchema(name:c839, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c84 SIMPLE [(test_txt)test_txt.FieldSchema(name:c84, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c840 SIMPLE [(test_txt)test_txt.FieldSchema(name:c840, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c841 SIMPLE [(test_txt)test_txt.FieldSchema(name:c841, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c842 SIMPLE [(test_txt)test_txt.FieldSchema(name:c842, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c843 SIMPLE [(test_txt)test_txt.FieldSchema(name:c843, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c844 SIMPLE [(test_txt)test_txt.FieldSchema(name:c844, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c845 SIMPLE [(test_txt)test_txt.FieldSchema(name:c845, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c846 SIMPLE [(test_txt)test_txt.FieldSchema(name:c846, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c847 SIMPLE [(test_txt)test_txt.FieldSchema(name:c847, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c848 SIMPLE [(test_txt)test_txt.FieldSchema(name:c848, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c849 SIMPLE [(test_txt)test_txt.FieldSchema(name:c849, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c85 SIMPLE [(test_txt)test_txt.FieldSchema(name:c85, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c850 SIMPLE [(test_txt)test_txt.FieldSchema(name:c850, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c851 SIMPLE [(test_txt)test_txt.FieldSchema(name:c851, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c852 SIMPLE [(test_txt)test_txt.FieldSchema(name:c852, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c853 SIMPLE [(test_txt)test_txt.FieldSchema(name:c853, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c854 SIMPLE [(test_txt)test_txt.FieldSchema(name:c854, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c855 SIMPLE [(test_txt)test_txt.FieldSchema(name:c855, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c856 SIMPLE [(test_txt)test_txt.FieldSchema(name:c856, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c857 SIMPLE [(test_txt)test_txt.FieldSchema(name:c857, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c858 SIMPLE [(test_txt)test_txt.FieldSchema(name:c858, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c859 SIMPLE [(test_txt)test_txt.FieldSchema(name:c859, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c86 SIMPLE [(test_txt)test_txt.FieldSchema(name:c86, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c860 SIMPLE [(test_txt)test_txt.FieldSchema(name:c860, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c861 SIMPLE [(test_txt)test_txt.FieldSchema(name:c861, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c862 SIMPLE [(test_txt)test_txt.FieldSchema(name:c862, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c863 SIMPLE [(test_txt)test_txt.FieldSchema(name:c863, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c864 SIMPLE [(test_txt)test_txt.FieldSchema(name:c864, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c865 SIMPLE [(test_txt)test_txt.FieldSchema(name:c865, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c866 SIMPLE [(test_txt)test_txt.FieldSchema(name:c866, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c867 SIMPLE [(test_txt)test_txt.FieldSchema(name:c867, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c868 SIMPLE [(test_txt)test_txt.FieldSchema(name:c868, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c869 SIMPLE [(test_txt)test_txt.FieldSchema(name:c869, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c87 SIMPLE [(test_txt)test_txt.FieldSchema(name:c87, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c870 SIMPLE [(test_txt)test_txt.FieldSchema(name:c870, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c871 SIMPLE [(test_txt)test_txt.FieldSchema(name:c871, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c872 SIMPLE [(test_txt)test_txt.FieldSchema(name:c872, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c873 SIMPLE [(test_txt)test_txt.FieldSchema(name:c873, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c874 SIMPLE [(test_txt)test_txt.FieldSchema(name:c874, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c875 SIMPLE [(test_txt)test_txt.FieldSchema(name:c875, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c876 SIMPLE [(test_txt)test_txt.FieldSchema(name:c876, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c877 SIMPLE [(test_txt)test_txt.FieldSchema(name:c877, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c878 SIMPLE [(test_txt)test_txt.FieldSchema(name:c878, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c879 SIMPLE [(test_txt)test_txt.FieldSchema(name:c879, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c88 SIMPLE [(test_txt)test_txt.FieldSchema(name:c88, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c880 SIMPLE [(test_txt)test_txt.FieldSchema(name:c880, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c881 SIMPLE [(test_txt)test_txt.FieldSchema(name:c881, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c882 SIMPLE [(test_txt)test_txt.FieldSchema(name:c882, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c883 SIMPLE [(test_txt)test_txt.FieldSchema(name:c883, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c884 SIMPLE [(test_txt)test_txt.FieldSchema(name:c884, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c885 SIMPLE [(test_txt)test_txt.FieldSchema(name:c885, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c886 SIMPLE [(test_txt)test_txt.FieldSchema(name:c886, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c887 SIMPLE [(test_txt)test_txt.FieldSchema(name:c887, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c888 SIMPLE [(test_txt)test_txt.FieldSchema(name:c888, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c889 SIMPLE [(test_txt)test_txt.FieldSchema(name:c889, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c89 SIMPLE [(test_txt)test_txt.FieldSchema(name:c89, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c890 SIMPLE [(test_txt)test_txt.FieldSchema(name:c890, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c891 SIMPLE [(test_txt)test_txt.FieldSchema(name:c891, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c892 SIMPLE [(test_txt)test_txt.FieldSchema(name:c892, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c893 SIMPLE [(test_txt)test_txt.FieldSchema(name:c893, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c894 SIMPLE [(test_txt)test_txt.FieldSchema(name:c894, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c895 SIMPLE [(test_txt)test_txt.FieldSchema(name:c895, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c896 SIMPLE [(test_txt)test_txt.FieldSchema(name:c896, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c897 SIMPLE [(test_txt)test_txt.FieldSchema(name:c897, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c898 SIMPLE [(test_txt)test_txt.FieldSchema(name:c898, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c899 SIMPLE [(test_txt)test_txt.FieldSchema(name:c899, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c9 SIMPLE [(test_txt)test_txt.FieldSchema(name:c9, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c90 SIMPLE [(test_txt)test_txt.FieldSchema(name:c90, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c900 SIMPLE [(test_txt)test_txt.FieldSchema(name:c900, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c901 SIMPLE [(test_txt)test_txt.FieldSchema(name:c901, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c902 SIMPLE [(test_txt)test_txt.FieldSchema(name:c902, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c903 SIMPLE [(test_txt)test_txt.FieldSchema(name:c903, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c904 SIMPLE [(test_txt)test_txt.FieldSchema(name:c904, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c905 SIMPLE [(test_txt)test_txt.FieldSchema(name:c905, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c906 SIMPLE [(test_txt)test_txt.FieldSchema(name:c906, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c907 SIMPLE [(test_txt)test_txt.FieldSchema(name:c907, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c908 SIMPLE [(test_txt)test_txt.FieldSchema(name:c908, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c909 SIMPLE [(test_txt)test_txt.FieldSchema(name:c909, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c91 SIMPLE [(test_txt)test_txt.FieldSchema(name:c91, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c910 SIMPLE [(test_txt)test_txt.FieldSchema(name:c910, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c911 SIMPLE [(test_txt)test_txt.FieldSchema(name:c911, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c912 SIMPLE [(test_txt)test_txt.FieldSchema(name:c912, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c913 SIMPLE [(test_txt)test_txt.FieldSchema(name:c913, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c914 SIMPLE [(test_txt)test_txt.FieldSchema(name:c914, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c915 SIMPLE [(test_txt)test_txt.FieldSchema(name:c915, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c916 SIMPLE [(test_txt)test_txt.FieldSchema(name:c916, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c917 SIMPLE [(test_txt)test_txt.FieldSchema(name:c917, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c918 SIMPLE [(test_txt)test_txt.FieldSchema(name:c918, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c919 SIMPLE [(test_txt)test_txt.FieldSchema(name:c919, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c92 SIMPLE [(test_txt)test_txt.FieldSchema(name:c92, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c920 SIMPLE [(test_txt)test_txt.FieldSchema(name:c920, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c921 SIMPLE [(test_txt)test_txt.FieldSchema(name:c921, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c922 SIMPLE [(test_txt)test_txt.FieldSchema(name:c922, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c923 SIMPLE [(test_txt)test_txt.FieldSchema(name:c923, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c924 SIMPLE [(test_txt)test_txt.FieldSchema(name:c924, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c925 SIMPLE [(test_txt)test_txt.FieldSchema(name:c925, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c926 SIMPLE [(test_txt)test_txt.FieldSchema(name:c926, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c927 SIMPLE [(test_txt)test_txt.FieldSchema(name:c927, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c928 SIMPLE [(test_txt)test_txt.FieldSchema(name:c928, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c929 SIMPLE [(test_txt)test_txt.FieldSchema(name:c929, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c93 SIMPLE [(test_txt)test_txt.FieldSchema(name:c93, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c930 SIMPLE [(test_txt)test_txt.FieldSchema(name:c930, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c931 SIMPLE [(test_txt)test_txt.FieldSchema(name:c931, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c932 SIMPLE [(test_txt)test_txt.FieldSchema(name:c932, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c933 SIMPLE [(test_txt)test_txt.FieldSchema(name:c933, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c934 SIMPLE [(test_txt)test_txt.FieldSchema(name:c934, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c935 SIMPLE [(test_txt)test_txt.FieldSchema(name:c935, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c936 SIMPLE [(test_txt)test_txt.FieldSchema(name:c936, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c937 SIMPLE [(test_txt)test_txt.FieldSchema(name:c937, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c938 SIMPLE [(test_txt)test_txt.FieldSchema(name:c938, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c939 SIMPLE [(test_txt)test_txt.FieldSchema(name:c939, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c94 SIMPLE [(test_txt)test_txt.FieldSchema(name:c94, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c940 SIMPLE [(test_txt)test_txt.FieldSchema(name:c940, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c941 SIMPLE [(test_txt)test_txt.FieldSchema(name:c941, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c942 SIMPLE [(test_txt)test_txt.FieldSchema(name:c942, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c943 SIMPLE [(test_txt)test_txt.FieldSchema(name:c943, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c944 SIMPLE [(test_txt)test_txt.FieldSchema(name:c944, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c945 SIMPLE [(test_txt)test_txt.FieldSchema(name:c945, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c946 SIMPLE [(test_txt)test_txt.FieldSchema(name:c946, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c947 SIMPLE [(test_txt)test_txt.FieldSchema(name:c947, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c948 SIMPLE [(test_txt)test_txt.FieldSchema(name:c948, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c949 SIMPLE [(test_txt)test_txt.FieldSchema(name:c949, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c95 SIMPLE [(test_txt)test_txt.FieldSchema(name:c95, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c950 SIMPLE [(test_txt)test_txt.FieldSchema(name:c950, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c951 SIMPLE [(test_txt)test_txt.FieldSchema(name:c951, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c952 SIMPLE [(test_txt)test_txt.FieldSchema(name:c952, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c953 SIMPLE [(test_txt)test_txt.FieldSchema(name:c953, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c954 SIMPLE [(test_txt)test_txt.FieldSchema(name:c954, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c955 SIMPLE [(test_txt)test_txt.FieldSchema(name:c955, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c956 SIMPLE [(test_txt)test_txt.FieldSchema(name:c956, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c957 SIMPLE [(test_txt)test_txt.FieldSchema(name:c957, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c958 SIMPLE [(test_txt)test_txt.FieldSchema(name:c958, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c959 SIMPLE [(test_txt)test_txt.FieldSchema(name:c959, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c96 SIMPLE [(test_txt)test_txt.FieldSchema(name:c96, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c960 SIMPLE [(test_txt)test_txt.FieldSchema(name:c960, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c961 SIMPLE [(test_txt)test_txt.FieldSchema(name:c961, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c962 SIMPLE [(test_txt)test_txt.FieldSchema(name:c962, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c963 SIMPLE [(test_txt)test_txt.FieldSchema(name:c963, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c964 SIMPLE [(test_txt)test_txt.FieldSchema(name:c964, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c965 SIMPLE [(test_txt)test_txt.FieldSchema(name:c965, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c966 SIMPLE [(test_txt)test_txt.FieldSchema(name:c966, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c967 SIMPLE [(test_txt)test_txt.FieldSchema(name:c967, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c968 SIMPLE [(test_txt)test_txt.FieldSchema(name:c968, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c969 SIMPLE [(test_txt)test_txt.FieldSchema(name:c969, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c97 SIMPLE [(test_txt)test_txt.FieldSchema(name:c97, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c970 SIMPLE [(test_txt)test_txt.FieldSchema(name:c970, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c971 SIMPLE [(test_txt)test_txt.FieldSchema(name:c971, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c972 SIMPLE [(test_txt)test_txt.FieldSchema(name:c972, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c973 SIMPLE [(test_txt)test_txt.FieldSchema(name:c973, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c974 SIMPLE [(test_txt)test_txt.FieldSchema(name:c974, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c975 SIMPLE [(test_txt)test_txt.FieldSchema(name:c975, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c976 SIMPLE [(test_txt)test_txt.FieldSchema(name:c976, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c977 SIMPLE [(test_txt)test_txt.FieldSchema(name:c977, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c978 SIMPLE [(test_txt)test_txt.FieldSchema(name:c978, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c979 SIMPLE [(test_txt)test_txt.FieldSchema(name:c979, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c98 SIMPLE [(test_txt)test_txt.FieldSchema(name:c98, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c980 SIMPLE [(test_txt)test_txt.FieldSchema(name:c980, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c981 SIMPLE [(test_txt)test_txt.FieldSchema(name:c981, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c982 SIMPLE [(test_txt)test_txt.FieldSchema(name:c982, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c983 SIMPLE [(test_txt)test_txt.FieldSchema(name:c983, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c984 SIMPLE [(test_txt)test_txt.FieldSchema(name:c984, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c985 SIMPLE [(test_txt)test_txt.FieldSchema(name:c985, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c986 SIMPLE [(test_txt)test_txt.FieldSchema(name:c986, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c987 SIMPLE [(test_txt)test_txt.FieldSchema(name:c987, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c988 SIMPLE [(test_txt)test_txt.FieldSchema(name:c988, type:string, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c989 SIMPLE [(test_txt)test_txt.FieldSchema(name:c989, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c99 SIMPLE [(test_txt)test_txt.FieldSchema(name:c99, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c990 SIMPLE [(test_txt)test_txt.FieldSchema(name:c990, type:float, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c991 SIMPLE [(test_txt)test_txt.FieldSchema(name:c991, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c992 SIMPLE [(test_txt)test_txt.FieldSchema(name:c992, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c993 SIMPLE [(test_txt)test_txt.FieldSchema(name:c993, type:char(4), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c994 SIMPLE [(test_txt)test_txt.FieldSchema(name:c994, type:decimal(16,10), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c995 SIMPLE [(test_txt)test_txt.FieldSchema(name:c995, type:boolean, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c996 SIMPLE [(test_txt)test_txt.FieldSchema(name:c996, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c997 SIMPLE [(test_txt)test_txt.FieldSchema(name:c997, type:int, comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c998 SIMPLE [(test_txt)test_txt.FieldSchema(name:c998, type:varchar(64), comment:null), ]
+POSTHOOK: Lineage: test_orc_n1.c999 SIMPLE [(test_txt)test_txt.FieldSchema(name:c999, type:char(4), comment:null), ]
 PREHOOK: query: select c1, c2, c1999 from test_txt order by c1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_txt
@@ -6058,13 +6058,13 @@ POSTHOOK: Input: default@test_txt
 #### A masked pattern was here ####
 CKDHJLAG CCJN MDBNKMLGNJ 88791 96216
 CMELDBCBHGGLFNJHIKGFMCN 28199 66817
-PREHOOK: query: select c1, c2, c1999 from test_orc order by c1
+PREHOOK: query: select c1, c2, c1999 from test_orc_n1 order by c1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_orc
+PREHOOK: Input: default@test_orc_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select c1, c2, c1999 from test_orc order by c1
+POSTHOOK: query: select c1, c2, c1999 from test_orc_n1 order by c1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_orc
+POSTHOOK: Input: default@test_orc_n1
 #### A masked pattern was here ####
 CKDHJLAG CCJN MDBNKMLGNJ 88791 96216
 CMELDBCBHGGLFNJHIKGFMCN 28199 66817
diff --git a/ql/src/test/results/clientpositive/order3.q.out b/ql/src/test/results/clientpositive/order3.q.out
index ff1ab78283..bc4c6eb7bf 100644
--- a/ql/src/test/results/clientpositive/order3.q.out
+++ b/ql/src/test/results/clientpositive/order3.q.out
@@ -1,31 +1,31 @@
-PREHOOK: query: drop table if exists test
+PREHOOK: query: drop table if exists test_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists test
+POSTHOOK: query: drop table if exists test_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table test(key int, value1 int, value2 string)
+PREHOOK: query: create table test_n0(key int, value1 int, value2 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test
-POSTHOOK: query: create table test(key int, value1 int, value2 string)
+PREHOOK: Output: default@test_n0
+POSTHOOK: query: create table test_n0(key int, value1 int, value2 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test
-PREHOOK: query: insert into table test values (1, 1, 'val111'), (1, 2, 'val121'), (1, 3, 'val131'), (2, 1, 'val211'), (2, 2, 'val221'), (2, 2, 'val222'), (2, 3, 'val231'), (2, 4, 'val241'),
+POSTHOOK: Output: default@test_n0
+PREHOOK: query: insert into table test_n0 values (1, 1, 'val111'), (1, 2, 'val121'), (1, 3, 'val131'), (2, 1, 'val211'), (2, 2, 'val221'), (2, 2, 'val222'), (2, 3, 'val231'), (2, 4, 'val241'),
 (3, 1, 'val311'), (3, 2, 'val321'), (3, 2, 'val322'), (3, 3, 'val331'), (3, 3, 'val332'), (3, 3, 'val333'), (4, 1, 'val411')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test
-POSTHOOK: query: insert into table test values (1, 1, 'val111'), (1, 2, 'val121'), (1, 3, 'val131'), (2, 1, 'val211'), (2, 2, 'val221'), (2, 2, 'val222'), (2, 3, 'val231'), (2, 4, 'val241'),
+PREHOOK: Output: default@test_n0
+POSTHOOK: query: insert into table test_n0 values (1, 1, 'val111'), (1, 2, 'val121'), (1, 3, 'val131'), (2, 1, 'val211'), (2, 2, 'val221'), (2, 2, 'val222'), (2, 3, 'val231'), (2, 4, 'val241'),
 (3, 1, 'val311'), (3, 2, 'val321'), (3, 2, 'val322'), (3, 3, 'val331'), (3, 3, 'val332'), (3, 3, 'val333'), (4, 1, 'val411')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test
-POSTHOOK: Lineage: test.key SCRIPT []
-POSTHOOK: Lineage: test.value1 SCRIPT []
-POSTHOOK: Lineage: test.value2 SCRIPT []
-PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+POSTHOOK: Output: default@test_n0
+POSTHOOK: Lineage: test_n0.key SCRIPT []
+POSTHOOK: Lineage: test_n0.value1 SCRIPT []
+POSTHOOK: Lineage: test_n0.value2 SCRIPT []
+PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -104,20 +104,20 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT T1.KEY AS MYKEY FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+PREHOOK: query: SELECT T1.KEY AS MYKEY FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT T1.KEY AS MYKEY FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+POSTHOOK: query: SELECT T1.KEY AS MYKEY FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test
+POSTHOOK: Input: default@test_n0
 #### A masked pattern was here ####
 1
 2
 3
-PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -200,20 +200,20 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+PREHOOK: query: SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+POSTHOOK: query: SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test
+POSTHOOK: Input: default@test_n0
 #### A masked pattern was here ####
 1 3
 2 4
 3 3
-PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3
+PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST_n0 T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3
+POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST_n0 T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -296,20 +296,20 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3
+PREHOOK: query: SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST_n0 T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3
+POSTHOOK: query: SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST_n0 T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test
+POSTHOOK: Input: default@test_n0
 #### A masked pattern was here ####
 1 3 AAA
 2 5 AAA
 3 6 AAA
-PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -384,20 +384,20 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT T1.KEY AS MYKEY FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+PREHOOK: query: SELECT T1.KEY AS MYKEY FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT T1.KEY AS MYKEY FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+POSTHOOK: query: SELECT T1.KEY AS MYKEY FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test
+POSTHOOK: Input: default@test_n0
 #### A masked pattern was here ####
 1
 2
 3
-PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -476,20 +476,20 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
+PREHOOK: query: SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3
T1.KEY AS MYKEY, MAX(T1.VALUE1) AS MYVALUE1 FROM TEST_n0 T1 GROUP BY T1.KEY ORDER BY T1.KEY LIMIT 3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n0 #### A masked pattern was here #### 1 3 2 4 3 3 -PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3 +PREHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST_n0 T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3 +POSTHOOK: query: EXPLAIN SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST_n0 T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -572,13 +572,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3 +PREHOOK: query: SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST_n0 T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3 +POSTHOOK: query: SELECT T1.KEY AS MYKEY, COUNT(T1.VALUE1) AS MYVALUE1, 'AAA' AS C FROM TEST_n0 T1 GROUP BY T1.KEY, 'AAA' ORDER BY T1.KEY, 'AAA' LIMIT 3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n0 #### A masked pattern was here #### 1 3 AAA 2 5 AAA diff --git a/ql/src/test/results/clientpositive/order_by_expr_1.q.out b/ql/src/test/results/clientpositive/order_by_expr_1.q.out index 601cf7b45c..28602a510a 100644 --- a/ql/src/test/results/clientpositive/order_by_expr_1.q.out +++ b/ql/src/test/results/clientpositive/order_by_expr_1.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: create table t(a int, b int) +PREHOOK: query: create table t_n5(a int, b int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t(a int, b int) +PREHOOK: Output: default@t_n5 +POSTHOOK: query: create table t_n5(a int, b int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) +POSTHOOK: Output: default@t_n5 +PREHOOK: query: insert into t_n5 values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) +PREHOOK: Output: default@t_n5 +POSTHOOK: query: insert into t_n5 values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -POSTHOOK: Lineage: t.b SCRIPT [] -PREHOOK: query: select a, count(a) from t group by a order by count(a), a +POSTHOOK: Output: default@t_n5 +POSTHOOK: Lineage: t_n5.a SCRIPT [] +POSTHOOK: Lineage: t_n5.b SCRIPT [] +PREHOOK: query: select a, count(a) from 
t_n5 group by a order by count(a), a PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a, count(a) from t group by a order by count(a), a +POSTHOOK: query: select a, count(a) from t_n5 group by a order by count(a), a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 1 2 1 @@ -35,7 +35,7 @@ PREHOOK: query: explain select interval '2-2' year to month + interval '3-3' year to month, interval '2-2' year to month - interval '3-3' year to month -from t +from t_n5 order by interval '2-2' year to month + interval '3-3' year to month limit 2 PREHOOK: type: QUERY @@ -43,7 +43,7 @@ POSTHOOK: query: explain select interval '2-2' year to month + interval '3-3' year to month, interval '2-2' year to month - interval '3-3' year to month -from t +from t_n5 order by interval '2-2' year to month + interval '3-3' year to month limit 2 POSTHOOK: type: QUERY @@ -56,7 +56,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t + alias: t_n5 Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: INTERVAL'5-5' (type: interval_year_month), INTERVAL'-1-1' (type: interval_year_month) @@ -80,13 +80,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select a,b, count(*) from t group by a, b order by a+b +PREHOOK: query: select a,b, count(*) from t_n5 group by a, b order by a+b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b, count(*) from t group by a, b order by a+b +POSTHOOK: query: select a,b, count(*) from t_n5 group by a, b order by a+b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 1 20 -100 1 @@ -96,13 +96,13 @@ POSTHOOK: Input: default@t 4 5 1 3 7 1 8 9 1 -PREHOOK: query: select a,b, count(*) from t group by a, b order by count(*), b desc +PREHOOK: query: select a,b, count(*) from t_n5 group by a, b order by count(*), b desc PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b, count(*) from t group by a, b order by count(*), b desc +POSTHOOK: query: select a,b, count(*) from t_n5 group by a, b order by count(*), b desc POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 1 8 9 1 @@ -112,13 +112,13 @@ POSTHOOK: Input: default@t 1 3 1 20 -100 1 1 2 2 -PREHOOK: query: select a,b,count(*),a+b from t group by a, b order by a+b +PREHOOK: query: select a,b,count(*),a+b from t_n5 group by a, b order by a+b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b,count(*),a+b from t group by a, b order by a+b +POSTHOOK: query: select a,b,count(*),a+b from t_n5 group by a, b order by a+b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 1 -900 20 -100 1 -80 @@ -128,13 +128,13 @@ POSTHOOK: Input: default@t 4 5 1 9 3 7 1 10 8 9 1 17 -PREHOOK: query: select a,b from t order by a+b +PREHOOK: query: select a,b from t_n5 order by a+b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b from t order by a+b 
+POSTHOOK: query: select a,b from t_n5 order by a+b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 20 -100 @@ -145,13 +145,13 @@ POSTHOOK: Input: default@t 4 5 3 7 8 9 -PREHOOK: query: select a,b,a+b from t order by a+b +PREHOOK: query: select a,b,a+b from t_n5 order by a+b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b,a+b from t order by a+b +POSTHOOK: query: select a,b,a+b from t_n5 order by a+b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 -900 20 -100 -80 @@ -162,13 +162,13 @@ POSTHOOK: Input: default@t 4 5 9 3 7 10 8 9 17 -PREHOOK: query: select a,b,a+b from t order by a+b desc +PREHOOK: query: select a,b,a+b from t_n5 order by a+b desc PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b,a+b from t order by a+b desc +POSTHOOK: query: select a,b,a+b from t_n5 order by a+b desc POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 8 9 17 3 7 10 @@ -179,31 +179,31 @@ POSTHOOK: Input: default@t 1 2 3 20 -100 -80 -1000 100 -900 -PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t limit 1 +PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t_n5 limit 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t limit 1 +POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t_n5 limit 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 1.0000000000000000000 -PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t order by c limit 1 +PREHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t_n5 order by c limit 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t order by c limit 1 +POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from t_n5 order by c limit 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 1.0000000000000000000 -PREHOOK: query: select a from t order by b +PREHOOK: query: select a from t_n5 order by b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a from t order by b +POSTHOOK: query: select a from t_n5 order by b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 20 1 @@ -214,13 +214,13 @@ POSTHOOK: Input: default@t 3 8 -1000 -PREHOOK: query: select a from t order by 0-b +PREHOOK: query: select a from t_n5 order by 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a from t order by 0-b +POSTHOOK: query: select a from t_n5 order by 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 
-1000 8 @@ -231,13 +231,13 @@ POSTHOOK: Input: default@t 1 1 20 -PREHOOK: query: select b from t order by 0-b +PREHOOK: query: select b from t_n5 order by 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select b from t order by 0-b +POSTHOOK: query: select b from t_n5 order by 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 100 9 @@ -248,13 +248,13 @@ POSTHOOK: Input: default@t 2 2 -100 -PREHOOK: query: select b from t order by a, 0-b +PREHOOK: query: select b from t_n5 order by a, 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select b from t order by a, 0-b +POSTHOOK: query: select b from t_n5 order by a, 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 100 3 @@ -265,13 +265,13 @@ POSTHOOK: Input: default@t 5 9 -100 -PREHOOK: query: select b from t order by a+1, 0-b +PREHOOK: query: select b from t_n5 order by a+1, 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select b from t order by a+1, 0-b +POSTHOOK: query: select b from t_n5 order by a+1, 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 100 3 @@ -282,13 +282,13 @@ POSTHOOK: Input: default@t 5 9 -100 -PREHOOK: query: select b from t order by 0-b, a+1 +PREHOOK: query: select b from t_n5 order by 0-b, a+1 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select b from t order by 0-b, a+1 +POSTHOOK: query: select b from t_n5 order by 0-b, a+1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 100 9 @@ -299,9 +299,9 @@ POSTHOOK: Input: default@t 2 2 -100 -PREHOOK: query: explain select b from t order by 0-b, a+1 +PREHOOK: query: explain select b from t_n5 order by 0-b, a+1 PREHOOK: type: QUERY -POSTHOOK: query: explain select b from t order by 0-b, a+1 +POSTHOOK: query: explain select b from t_n5 order by 0-b, a+1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -312,7 +312,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t + alias: t_n5 Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: b (type: int), (0 - b) (type: int), (a + 1) (type: int) @@ -343,13 +343,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select a,b from t order by 0-b +PREHOOK: query: select a,b from t_n5 order by 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b from t order by 0-b +POSTHOOK: query: select a,b from t_n5 order by 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 8 9 @@ -360,13 +360,13 @@ POSTHOOK: Input: default@t 1 2 1 2 20 -100 -PREHOOK: query: select a,b from t order by a, a+1, 0-b +PREHOOK: query: select a,b from t_n5 order by a, a+1, 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b from t order by a, a+1, 0-b +POSTHOOK: query: select a,b from t_n5 
order by a, a+1, 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 1 3 @@ -377,13 +377,13 @@ POSTHOOK: Input: default@t 4 5 8 9 20 -100 -PREHOOK: query: select a,b from t order by 0-b, a+1 +PREHOOK: query: select a,b from t_n5 order by 0-b, a+1 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b from t order by 0-b, a+1 +POSTHOOK: query: select a,b from t_n5 order by 0-b, a+1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 8 9 @@ -394,13 +394,13 @@ POSTHOOK: Input: default@t 1 2 1 2 20 -100 -PREHOOK: query: select a+1,b from t order by a, a+1, 0-b +PREHOOK: query: select a+1,b from t_n5 order by a, a+1, 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a+1,b from t order by a, a+1, 0-b +POSTHOOK: query: select a+1,b from t_n5 order by a, a+1, 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -999 100 2 3 @@ -411,13 +411,13 @@ POSTHOOK: Input: default@t 5 5 9 9 21 -100 -PREHOOK: query: select a+1 as c, b from t order by a, a+1, 0-b +PREHOOK: query: select a+1 as c, b from t_n5 order by a, a+1, 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a+1 as c, b from t order by a, a+1, 0-b +POSTHOOK: query: select a+1 as c, b from t_n5 order by a, a+1, 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -999 100 2 3 @@ -428,13 +428,13 @@ POSTHOOK: Input: default@t 5 5 9 9 21 -100 -PREHOOK: query: select a, a+1 as c, b from t order by a, a+1, 0-b +PREHOOK: query: select a, a+1 as c, b from t_n5 order by a, a+1, 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a, a+1 as c, b from t order by a, a+1, 0-b +POSTHOOK: query: select a, a+1 as c, b from t_n5 order by a, a+1, 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 -999 100 1 2 3 @@ -445,13 +445,13 @@ POSTHOOK: Input: default@t 4 5 5 8 9 9 20 21 -100 -PREHOOK: query: select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b +PREHOOK: query: select a, a+1 as c, b, 2*b from t_n5 order by a, a+1, 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b +POSTHOOK: query: select a, a+1 as c, b, 2*b from t_n5 order by a, a+1, 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 -999 100 200 1 2 3 6 @@ -462,9 +462,9 @@ POSTHOOK: Input: default@t 4 5 5 10 8 9 9 18 20 21 -100 -200 -PREHOOK: query: explain select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b +PREHOOK: query: explain select a, a+1 as c, b, 2*b from t_n5 order by a, a+1, 0-b PREHOOK: type: QUERY -POSTHOOK: query: explain select a, a+1 as c, b, 2*b from t order by a, a+1, 0-b +POSTHOOK: query: explain select a, a+1 as c, b, 2*b from t_n5 order by a, a+1, 0-b POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -475,7 +475,7 @@ STAGE PLANS: 
Map Reduce Map Operator Tree: TableScan - alias: t + alias: t_n5 Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: a (type: int), b (type: int), (2 * b) (type: int), (a + 1) (type: int), (0 - b) (type: int) @@ -506,13 +506,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select a, a+1 as c, b, 2*b from t order by a+1, 0-b +PREHOOK: query: select a, a+1 as c, b, 2*b from t_n5 order by a+1, 0-b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a, a+1 as c, b, 2*b from t order by a+1, 0-b +POSTHOOK: query: select a, a+1 as c, b, 2*b from t_n5 order by a+1, 0-b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 -999 100 200 1 2 3 6 @@ -523,13 +523,13 @@ POSTHOOK: Input: default@t 4 5 5 10 8 9 9 18 20 21 -100 -200 -PREHOOK: query: select a,b, count(*) as c from t group by a, b order by c, a+b desc +PREHOOK: query: select a,b, count(*) as c from t_n5 group by a, b order by c, a+b desc PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a,b, count(*) as c from t group by a, b order by c, a+b desc +POSTHOOK: query: select a,b, count(*) as c from t_n5 group by a, b order by c, a+b desc POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 8 9 1 3 7 1 @@ -539,13 +539,13 @@ POSTHOOK: Input: default@t 20 -100 1 -1000 100 1 1 2 2 -PREHOOK: query: select a, max(b) from t group by a order by count(b), a desc +PREHOOK: query: select a, max(b) from t_n5 group by a order by count(b), a desc PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a, max(b) from t group by a order by count(b), a desc +POSTHOOK: query: select a, max(b) from t_n5 group by a order by count(b), a desc POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### 20 -100 8 9 @@ -554,13 +554,13 @@ POSTHOOK: Input: default@t 2 4 -1000 100 1 3 -PREHOOK: query: select a, max(b) from t group by a order by count(b), a +PREHOOK: query: select a, max(b) from t_n5 group by a order by count(b), a PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n5 #### A masked pattern was here #### -POSTHOOK: query: select a, max(b) from t group by a order by count(b), a +POSTHOOK: query: select a, max(b) from t_n5 group by a order by count(b), a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n5 #### A masked pattern was here #### -1000 100 2 4 diff --git a/ql/src/test/results/clientpositive/order_by_expr_2.q.out b/ql/src/test/results/clientpositive/order_by_expr_2.q.out index 9ab02f451f..66ac0bb7cf 100644 --- a/ql/src/test/results/clientpositive/order_by_expr_2.q.out +++ b/ql/src/test/results/clientpositive/order_by_expr_2.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: create table t(a int, b int) +PREHOOK: query: create table t_n14(a int, b int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t(a int, b int) +PREHOOK: Output: default@t_n14 +POSTHOOK: query: create table t_n14(a int, b int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values 
(1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) +POSTHOOK: Output: default@t_n14 +PREHOOK: query: insert into t_n14 values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) +PREHOOK: Output: default@t_n14 +POSTHOOK: query: insert into t_n14 values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -POSTHOOK: Lineage: t.b SCRIPT [] -PREHOOK: query: select a as b, b as a from t order by a +POSTHOOK: Output: default@t_n14 +POSTHOOK: Lineage: t_n14.a SCRIPT [] +POSTHOOK: Lineage: t_n14.b SCRIPT [] +PREHOOK: query: select a as b, b as a from t_n14 order by a PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n14 #### A masked pattern was here #### -POSTHOOK: query: select a as b, b as a from t order by a +POSTHOOK: query: select a as b, b as a from t_n14 order by a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n14 #### A masked pattern was here #### 20 -100 1 2 @@ -33,13 +33,13 @@ POSTHOOK: Input: default@t 3 7 8 9 -1000 100 -PREHOOK: query: select a as b, b as a from t order by t.a +PREHOOK: query: select a as b, b as a from t_n14 order by t_n14.a PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n14 #### A masked pattern was here #### -POSTHOOK: query: select a as b, b as a from t order by t.a +POSTHOOK: query: select a as b, b as a from t_n14 order by t_n14.a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n14 #### A masked pattern was here #### -1000 100 1 3 @@ -50,13 +50,13 @@ POSTHOOK: Input: default@t 4 5 8 9 20 -100 -PREHOOK: query: select a as b from t order by b +PREHOOK: query: select a as b from t_n14 order by b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n14 #### A masked pattern was here #### -POSTHOOK: query: select a as b from t order by b +POSTHOOK: query: select a as b from t_n14 order by b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n14 #### A masked pattern was here #### -1000 1 @@ -67,13 +67,13 @@ POSTHOOK: Input: default@t 4 8 20 -PREHOOK: query: select a as b from t order by 0-a +PREHOOK: query: select a as b from t_n14 order by 0-a PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n14 #### A masked pattern was here #### -POSTHOOK: query: select a as b from t order by 0-a +POSTHOOK: query: select a as b from t_n14 order by 0-a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n14 #### A masked pattern was here #### 20 8 @@ -84,13 +84,13 @@ POSTHOOK: Input: default@t 1 1 -1000 -PREHOOK: query: select a,b,count(*),a+b from t group by a, b order by a+b +PREHOOK: query: select a,b,count(*),a+b from t_n14 group by a, b order by a+b PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n14 #### A masked pattern was here #### -POSTHOOK: query: select a,b,count(*),a+b from t group by a, b order by a+b +POSTHOOK: query: select a,b,count(*),a+b from t_n14 group by a, b order by a+b POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n14 #### A masked pattern was here #### -1000 100 1 -900 20 -100 1 -80 diff --git 
a/ql/src/test/results/clientpositive/order_by_pos.q.out b/ql/src/test/results/clientpositive/order_by_pos.q.out index c0dbcbf044..e79149c78c 100644 --- a/ql/src/test/results/clientpositive/order_by_pos.q.out +++ b/ql/src/test/results/clientpositive/order_by_pos.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: create table t(a int, b int) +PREHOOK: query: create table t_n3(a int, b int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t(a int, b int) +PREHOOK: Output: default@t_n3 +POSTHOOK: query: create table t_n3(a int, b int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) +POSTHOOK: Output: default@t_n3 +PREHOOK: query: insert into t_n3 values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) +PREHOOK: Output: default@t_n3 +POSTHOOK: query: insert into t_n3 values (1,2),(1,2),(1,3),(2,4),(20,-100),(-1000,100),(4,5),(3,7),(8,9) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -POSTHOOK: Lineage: t.b SCRIPT [] -PREHOOK: query: select * from t order by 2 +POSTHOOK: Output: default@t_n3 +POSTHOOK: Lineage: t_n3.a SCRIPT [] +POSTHOOK: Lineage: t_n3.b SCRIPT [] +PREHOOK: query: select * from t_n3 order by 2 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t order by 2 +POSTHOOK: query: select * from t_n3 order by 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n3 #### A masked pattern was here #### 20 -100 1 2 @@ -33,13 +33,13 @@ POSTHOOK: Input: default@t 3 7 8 9 -1000 100 -PREHOOK: query: select * from t order by 1 +PREHOOK: query: select * from t_n3 order by 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t order by 1 +POSTHOOK: query: select * from t_n3 order by 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n3 #### A masked pattern was here #### -1000 100 1 3 @@ -50,13 +50,13 @@ POSTHOOK: Input: default@t 4 5 8 9 20 -100 -PREHOOK: query: select * from t union select * from t order by 1, 2 +PREHOOK: query: select * from t_n3 union select * from t_n3 order by 1, 2 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t union select * from t order by 1, 2 +POSTHOOK: query: select * from t_n3 union select * from t_n3 order by 1, 2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n3 #### A masked pattern was here #### -1000 100 1 2 @@ -66,13 +66,13 @@ POSTHOOK: Input: default@t 4 5 8 9 20 -100 -PREHOOK: query: select * from t union select * from t order by 2 +PREHOOK: query: select * from t_n3 union select * from t_n3 order by 2 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t union select * from t order by 2 +POSTHOOK: query: select * from t_n3 union select * from t_n3 order by 2 POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@t +POSTHOOK: Input: default@t_n3 #### A masked pattern was here #### 20 -100 1 2 @@ -82,13 +82,13 @@ POSTHOOK: Input: default@t 3 7 8 9 -1000 100 -PREHOOK: query: select * from t union select * from t order by 1 +PREHOOK: query: select * from t_n3 union select * from t_n3 order by 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t union select * from t order by 1 +POSTHOOK: query: select * from t_n3 union select * from t_n3 order by 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n3 #### A masked pattern was here #### -1000 100 1 3 @@ -98,13 +98,13 @@ POSTHOOK: Input: default@t 4 5 8 9 20 -100 -PREHOOK: query: select * from (select a, count(a) from t group by a)subq order by 2, 1 +PREHOOK: query: select * from (select a, count(a) from t_n3 group by a)subq order by 2, 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from (select a, count(a) from t group by a)subq order by 2, 1 +POSTHOOK: query: select * from (select a, count(a) from t_n3 group by a)subq order by 2, 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n3 #### A masked pattern was here #### -1000 1 2 1 @@ -113,13 +113,13 @@ POSTHOOK: Input: default@t 8 1 20 1 1 3 -PREHOOK: query: select * from (select a,b, count(*) from t group by a, b)subq order by 3, 2 desc +PREHOOK: query: select * from (select a,b, count(*) from t_n3 group by a, b)subq order by 3, 2 desc PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from (select a,b, count(*) from t group by a, b)subq order by 3, 2 desc +POSTHOOK: query: select * from (select a,b, count(*) from t_n3 group by a, b)subq order by 3, 2 desc POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n3 #### A masked pattern was here #### -1000 100 1 8 9 1 diff --git a/ql/src/test/results/clientpositive/parallel.q.out b/ql/src/test/results/clientpositive/parallel.q.out index 472df0b92d..24345e4556 100644 --- a/ql/src/test/results/clientpositive/parallel.q.out +++ b/ql/src/test/results/clientpositive/parallel.q.out @@ -6,23 +6,23 @@ POSTHOOK: query: create table if not exists src_a like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_a -PREHOOK: query: create table if not exists src_b like src +PREHOOK: query: create table if not exists src_b_n0 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_b -POSTHOOK: query: create table if not exists src_b like src +PREHOOK: Output: default@src_b_n0 +POSTHOOK: query: create table if not exists src_b_n0 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_b +POSTHOOK: Output: default@src_b_n0 PREHOOK: query: explain from (select key, value from src group by key, value) s insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n0 select s.key, s.value group by s.key, s.value PREHOOK: type: QUERY POSTHOOK: query: explain from (select key, value from src group by key, value) s insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, 
s.value +insert overwrite table src_b_n0 select s.key, s.value group by s.key, s.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -122,7 +122,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_b + name: default.src_b_n0 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -186,7 +186,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_b + Table: default.src_b_n0 Stage: Stage-1 Move Operator @@ -196,7 +196,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_b + name: default.src_b_n0 Stage: Stage-7 Map Reduce @@ -223,22 +223,22 @@ STAGE PLANS: PREHOOK: query: from (select key, value from src group by key, value) s insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n0 select s.key, s.value group by s.key, s.value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@src_a -PREHOOK: Output: default@src_b +PREHOOK: Output: default@src_b_n0 POSTHOOK: query: from (select key, value from src group by key, value) s insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n0 select s.key, s.value group by s.key, s.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@src_a -POSTHOOK: Output: default@src_b +POSTHOOK: Output: default@src_b_n0 POSTHOOK: Lineage: src_a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: src_a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_b_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_b_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: select * from src_a PREHOOK: type: QUERY PREHOOK: Input: default@src_a @@ -556,13 +556,13 @@ POSTHOOK: Input: default@src_a 96 val_96 97 val_97 98 val_98 -PREHOOK: query: select * from src_b +PREHOOK: query: select * from src_b_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_b +PREHOOK: Input: default@src_b_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from src_b +POSTHOOK: query: select * from src_b_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_b +POSTHOOK: Input: default@src_b_n0 #### A masked pattern was here #### 0 val_0 10 val_10 @@ -875,22 +875,22 @@ POSTHOOK: Input: default@src_b 98 val_98 PREHOOK: query: from (select key, value from src group by key, value) s insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n0 select s.key, s.value group by s.key, s.value PREHOOK: type: QUERY PREHOOK: Input: 
default@src PREHOOK: Output: default@src_a -PREHOOK: Output: default@src_b +PREHOOK: Output: default@src_b_n0 POSTHOOK: query: from (select key, value from src group by key, value) s insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n0 select s.key, s.value group by s.key, s.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@src_a -POSTHOOK: Output: default@src_b +POSTHOOK: Output: default@src_b_n0 POSTHOOK: Lineage: src_a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: src_a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_b_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_b_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: select * from src_a PREHOOK: type: QUERY PREHOOK: Input: default@src_a @@ -1208,13 +1208,13 @@ POSTHOOK: Input: default@src_a 96 val_96 97 val_97 98 val_98 -PREHOOK: query: select * from src_b +PREHOOK: query: select * from src_b_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_b +PREHOOK: Input: default@src_b_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from src_b +POSTHOOK: query: select * from src_b_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_b +POSTHOOK: Input: default@src_b_n0 #### A masked pattern was here #### 0 val_0 10 val_10 diff --git a/ql/src/test/results/clientpositive/parallel_colstats.q.out b/ql/src/test/results/clientpositive/parallel_colstats.q.out index 472df0b92d..35beff3694 100644 --- a/ql/src/test/results/clientpositive/parallel_colstats.q.out +++ b/ql/src/test/results/clientpositive/parallel_colstats.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: create table if not exists src_a like src +PREHOOK: query: create table if not exists src_a_n0 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_a -POSTHOOK: query: create table if not exists src_a like src +PREHOOK: Output: default@src_a_n0 +POSTHOOK: query: create table if not exists src_a_n0 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_a -PREHOOK: query: create table if not exists src_b like src +POSTHOOK: Output: default@src_a_n0 +PREHOOK: query: create table if not exists src_b_n1 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_b -POSTHOOK: query: create table if not exists src_b like src +PREHOOK: Output: default@src_b_n1 +POSTHOOK: query: create table if not exists src_b_n1 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_b +POSTHOOK: Output: default@src_b_n1 PREHOOK: query: explain from (select key, value from src group by key, value) s -insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_a_n0 select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n1 select s.key, s.value group by s.key, s.value PREHOOK: type: QUERY POSTHOOK: query: explain from 
(select key, value from src group by key, value) s -insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_a_n0 select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n1 select s.key, s.value group by s.key, s.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -94,7 +94,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_a + name: default.src_a_n0 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -122,7 +122,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_b + name: default.src_b_n1 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -147,7 +147,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_a + name: default.src_a_n0 Stage: Stage-4 Stats Work @@ -155,7 +155,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_a + Table: default.src_a_n0 Stage: Stage-5 Map Reduce @@ -186,7 +186,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.src_b + Table: default.src_b_n1 Stage: Stage-1 Move Operator @@ -196,7 +196,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src_b + name: default.src_b_n1 Stage: Stage-7 Map Reduce @@ -222,30 +222,30 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: from (select key, value from src group by key, value) s -insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_a_n0 select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n1 select s.key, s.value group by s.key, s.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_a -PREHOOK: Output: default@src_b +PREHOOK: Output: default@src_a_n0 +PREHOOK: Output: default@src_b_n1 POSTHOOK: query: from (select key, value from src group by key, value) s -insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_a_n0 select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n1 select s.key, s.value group by s.key, s.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_a -POSTHOOK: Output: default@src_b -POSTHOOK: Lineage: src_a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] -POSTHOOK: Lineage: src_b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_a +POSTHOOK: Output: default@src_a_n0 +POSTHOOK: Output: default@src_b_n1 +POSTHOOK: Lineage: src_a_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_a_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_b_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_b_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_a_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_a +PREHOOK: Input: default@src_a_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from src_a +POSTHOOK: query: select * from src_a_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_a +POSTHOOK: Input: default@src_a_n0 #### A masked pattern was here #### 0 val_0 10 val_10 @@ -556,13 +556,13 @@ POSTHOOK: Input: default@src_a 96 val_96 97 val_97 98 val_98 -PREHOOK: query: select * from src_b +PREHOOK: query: select * from src_b_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@src_b +PREHOOK: Input: default@src_b_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from src_b +POSTHOOK: query: select * from src_b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_b +POSTHOOK: Input: default@src_b_n1 #### A masked pattern was here #### 0 val_0 10 val_10 @@ -874,30 +874,30 @@ POSTHOOK: Input: default@src_b 97 val_97 98 val_98 PREHOOK: query: from (select key, value from src group by key, value) s -insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_a_n0 select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n1 select s.key, s.value group by s.key, s.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src_a -PREHOOK: Output: default@src_b +PREHOOK: Output: default@src_a_n0 +PREHOOK: Output: default@src_b_n1 POSTHOOK: query: from (select key, value from src group by key, value) s -insert overwrite table src_a select s.key, s.value group by s.key, s.value -insert overwrite table src_b select s.key, s.value group by s.key, s.value +insert overwrite table src_a_n0 select s.key, s.value group by s.key, s.value +insert overwrite table src_b_n1 select s.key, s.value group by s.key, s.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src_a -POSTHOOK: Output: default@src_b -POSTHOOK: Lineage: src_a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src_b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src_b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from src_a +POSTHOOK: Output: default@src_a_n0 +POSTHOOK: Output: default@src_b_n1 +POSTHOOK: Lineage: src_a_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_a_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src_b_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: 
src_b_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from src_a_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@src_a +PREHOOK: Input: default@src_a_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from src_a +POSTHOOK: query: select * from src_a_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_a +POSTHOOK: Input: default@src_a_n0 #### A masked pattern was here #### 0 val_0 10 val_10 @@ -1208,13 +1208,13 @@ POSTHOOK: Input: default@src_a 96 val_96 97 val_97 98 val_98 -PREHOOK: query: select * from src_b +PREHOOK: query: select * from src_b_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@src_b +PREHOOK: Input: default@src_b_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from src_b +POSTHOOK: query: select * from src_b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src_b +POSTHOOK: Input: default@src_b_n1 #### A masked pattern was here #### 0 val_0 10 val_10 diff --git a/ql/src/test/results/clientpositive/parallel_join1.q.out b/ql/src/test/results/clientpositive/parallel_join1.q.out index 357d03fc96..d344f69b59 100644 --- a/ql/src/test/results/clientpositive/parallel_join1.q.out +++ b/ql/src/test/results/clientpositive/parallel_join1.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1_n19(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n19 +POSTHOOK: query: CREATE TABLE dest_j1_n19(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 +POSTHOOK: Output: default@dest_j1_n19 PREHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n19 SELECT src1.key, src2.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n19 SELECT src1.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -75,7 +75,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n19 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -100,7 +100,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n19 Stage: Stage-2 Stats Work @@ -108,7 +108,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest_j1 + Table: default.dest_j1_n19 Stage: Stage-3 Map Reduce @@ -134,24 +134,24 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n19 SELECT src1.key, src2.value PREHOOK: type: QUERY PREHOOK: Input: default@src 
-PREHOOK: Output: default@dest_j1 +PREHOOK: Output: default@dest_j1_n19 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n19 SELECT src1.key, src2.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest_j1.* FROM dest_j1 +POSTHOOK: Output: default@dest_j1_n19 +POSTHOOK: Lineage: dest_j1_n19.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n19.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest_j1_n19.* FROM dest_j1_n19 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n19 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest_j1.* FROM dest_j1 +POSTHOOK: query: SELECT dest_j1_n19.* FROM dest_j1_n19 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n19 #### A masked pattern was here #### 0 val_0 0 val_0 diff --git a/ql/src/test/results/clientpositive/parquet_ctas.q.out b/ql/src/test/results/clientpositive/parquet_ctas.q.out index 6eb529210d..58231c546b 100644 --- a/ql/src/test/results/clientpositive/parquet_ctas.q.out +++ b/ql/src/test/results/clientpositive/parquet_ctas.q.out @@ -1,6 +1,6 @@ -PREHOOK: query: drop table staging +PREHOOK: query: drop table staging_n3 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table staging +POSTHOOK: query: drop table staging_n3 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table parquet_ctas PREHOOK: type: DROPTABLE @@ -18,36 +18,36 @@ PREHOOK: query: drop table parquet_ctas_mixed PREHOOK: type: DROPTABLE POSTHOOK: query: drop table parquet_ctas_mixed POSTHOOK: type: DROPTABLE -PREHOOK: query: create table staging (key int, value string) stored as textfile +PREHOOK: query: create table staging_n3 (key int, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@staging -POSTHOOK: query: create table staging (key int, value string) stored as textfile +PREHOOK: Output: default@staging_n3 +POSTHOOK: query: create table staging_n3 (key int, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@staging -PREHOOK: query: insert into table staging select * from src order by key limit 10 +POSTHOOK: Output: default@staging_n3 +PREHOOK: query: insert into table staging_n3 select * from src order by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@staging -POSTHOOK: query: insert into table staging select * from src order by key limit 10 +PREHOOK: Output: default@staging_n3 +POSTHOOK: query: insert into table staging_n3 select * from src order by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@staging -POSTHOOK: Lineage: staging.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: staging.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table parquet_ctas stored as parquet as select * from staging +POSTHOOK: Output: default@staging_n3 +POSTHOOK: Lineage: 
staging_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: staging_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table parquet_ctas stored as parquet as select * from staging_n3 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@staging +PREHOOK: Input: default@staging_n3 PREHOOK: Output: database:default PREHOOK: Output: default@parquet_ctas -POSTHOOK: query: create table parquet_ctas stored as parquet as select * from staging +POSTHOOK: query: create table parquet_ctas stored as parquet as select * from staging_n3 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@staging +POSTHOOK: Input: default@staging_n3 POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_ctas -POSTHOOK: Lineage: parquet_ctas.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: parquet_ctas.value SIMPLE [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_ctas.key SIMPLE [(staging_n3)staging_n3.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_ctas.value SIMPLE [(staging_n3)staging_n3.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: describe parquet_ctas PREHOOK: type: DESCTABLE PREHOOK: Input: default@parquet_ctas @@ -74,18 +74,18 @@ POSTHOOK: Input: default@parquet_ctas 103 val_103 104 val_104 104 val_104 -PREHOOK: query: create table parquet_ctas_advanced stored as parquet as select key+1,concat(value,"value") from staging +PREHOOK: query: create table parquet_ctas_advanced stored as parquet as select key+1,concat(value,"value") from staging_n3 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@staging +PREHOOK: Input: default@staging_n3 PREHOOK: Output: database:default PREHOOK: Output: default@parquet_ctas_advanced -POSTHOOK: query: create table parquet_ctas_advanced stored as parquet as select key+1,concat(value,"value") from staging +POSTHOOK: query: create table parquet_ctas_advanced stored as parquet as select key+1,concat(value,"value") from staging_n3 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@staging +POSTHOOK: Input: default@staging_n3 POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_ctas_advanced -POSTHOOK: Lineage: parquet_ctas_advanced._c0 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: parquet_ctas_advanced._c1 EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_ctas_advanced._c0 EXPRESSION [(staging_n3)staging_n3.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_ctas_advanced._c1 EXPRESSION [(staging_n3)staging_n3.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: describe parquet_ctas_advanced PREHOOK: type: DESCTABLE PREHOOK: Input: default@parquet_ctas_advanced @@ -112,18 +112,18 @@ POSTHOOK: Input: default@parquet_ctas_advanced 104 val_103value 105 val_104value 105 val_104value -PREHOOK: query: create table parquet_ctas_alias stored as parquet as select key+1 as mykey,concat(value,"value") as myvalue from staging +PREHOOK: query: create table parquet_ctas_alias stored as parquet as select key+1 as mykey,concat(value,"value") as myvalue from staging_n3 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@staging +PREHOOK: Input: default@staging_n3 PREHOOK: Output: database:default PREHOOK: 
Output: default@parquet_ctas_alias -POSTHOOK: query: create table parquet_ctas_alias stored as parquet as select key+1 as mykey,concat(value,"value") as myvalue from staging +POSTHOOK: query: create table parquet_ctas_alias stored as parquet as select key+1 as mykey,concat(value,"value") as myvalue from staging_n3 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@staging +POSTHOOK: Input: default@staging_n3 POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_ctas_alias -POSTHOOK: Lineage: parquet_ctas_alias.mykey EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: parquet_ctas_alias.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_ctas_alias.mykey EXPRESSION [(staging_n3)staging_n3.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_ctas_alias.myvalue EXPRESSION [(staging_n3)staging_n3.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: describe parquet_ctas_alias PREHOOK: type: DESCTABLE PREHOOK: Input: default@parquet_ctas_alias @@ -150,19 +150,19 @@ POSTHOOK: Input: default@parquet_ctas_alias 104 val_103value 105 val_104value 105 val_104value -PREHOOK: query: create table parquet_ctas_mixed stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging +PREHOOK: query: create table parquet_ctas_mixed stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging_n3 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@staging +PREHOOK: Input: default@staging_n3 PREHOOK: Output: database:default PREHOOK: Output: default@parquet_ctas_mixed -POSTHOOK: query: create table parquet_ctas_mixed stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging +POSTHOOK: query: create table parquet_ctas_mixed stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging_n3 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@staging +POSTHOOK: Input: default@staging_n3 POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_ctas_mixed -POSTHOOK: Lineage: parquet_ctas_mixed._c1 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: parquet_ctas_mixed.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: parquet_ctas_mixed.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_ctas_mixed._c1 EXPRESSION [(staging_n3)staging_n3.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_ctas_mixed.key SIMPLE [(staging_n3)staging_n3.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_ctas_mixed.myvalue EXPRESSION [(staging_n3)staging_n3.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: describe parquet_ctas_mixed PREHOOK: type: DESCTABLE PREHOOK: Input: default@parquet_ctas_mixed diff --git a/ql/src/test/results/clientpositive/parquet_decimal.q.out b/ql/src/test/results/clientpositive/parquet_decimal.q.out index 88331df38f..d566e1f7c6 100644 --- a/ql/src/test/results/clientpositive/parquet_decimal.q.out +++ b/ql/src/test/results/clientpositive/parquet_decimal.q.out @@ -1,60 +1,60 @@ -PREHOOK: query: DROP TABLE IF EXISTS `dec` +PREHOOK: query: DROP TABLE IF EXISTS `dec_n1` PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS `dec` +POSTHOOK: query: DROP TABLE IF EXISTS `dec_n1` 
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE `dec`(name string, value decimal(8,4))
+PREHOOK: query: CREATE TABLE `dec_n1`(name string, value decimal(8,4))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dec
-POSTHOOK: query: CREATE TABLE `dec`(name string, value decimal(8,4))
+PREHOOK: Output: default@dec_n1
+POSTHOOK: query: CREATE TABLE `dec_n1`(name string, value decimal(8,4))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dec
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.txt' INTO TABLE `dec`
+POSTHOOK: Output: default@dec_n1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.txt' INTO TABLE `dec_n1`
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@dec
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.txt' INTO TABLE `dec`
+PREHOOK: Output: default@dec_n1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.txt' INTO TABLE `dec_n1`
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@dec
-PREHOOK: query: DROP TABLE IF EXISTS parq_dec
+POSTHOOK: Output: default@dec_n1
+PREHOOK: query: DROP TABLE IF EXISTS parq_dec_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parq_dec
+POSTHOOK: query: DROP TABLE IF EXISTS parq_dec_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE parq_dec(name string, value decimal(5,2)) STORED AS PARQUET
+PREHOOK: query: CREATE TABLE parq_dec_n1(name string, value decimal(5,2)) STORED AS PARQUET
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@parq_dec
-POSTHOOK: query: CREATE TABLE parq_dec(name string, value decimal(5,2)) STORED AS PARQUET
+PREHOOK: Output: default@parq_dec_n1
+POSTHOOK: query: CREATE TABLE parq_dec_n1(name string, value decimal(5,2)) STORED AS PARQUET
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parq_dec
-PREHOOK: query: DESC parq_dec
+POSTHOOK: Output: default@parq_dec_n1
+PREHOOK: query: DESC parq_dec_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@parq_dec
-POSTHOOK: query: DESC parq_dec
+PREHOOK: Input: default@parq_dec_n1
+POSTHOOK: query: DESC parq_dec_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@parq_dec
+POSTHOOK: Input: default@parq_dec_n1
 name string
 value decimal(5,2)
-PREHOOK: query: INSERT OVERWRITE TABLE parq_dec SELECT name, value FROM `dec`
+PREHOOK: query: INSERT OVERWRITE TABLE parq_dec_n1 SELECT name, value FROM `dec_n1`
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dec
-PREHOOK: Output: default@parq_dec
-POSTHOOK: query: INSERT OVERWRITE TABLE parq_dec SELECT name, value FROM `dec`
+PREHOOK: Input: default@dec_n1
+PREHOOK: Output: default@parq_dec_n1
+POSTHOOK: query: INSERT OVERWRITE TABLE parq_dec_n1 SELECT name, value FROM `dec_n1`
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dec
-POSTHOOK: Output: default@parq_dec
-POSTHOOK: Lineage: parq_dec.name SIMPLE [(dec)dec.FieldSchema(name:name, type:string, comment:null), ]
-POSTHOOK: Lineage: parq_dec.value EXPRESSION [(dec)dec.FieldSchema(name:value, type:decimal(8,4), comment:null), ]
-PREHOOK: query: SELECT * FROM parq_dec
+POSTHOOK: Input: default@dec_n1
+POSTHOOK: Output: default@parq_dec_n1
+POSTHOOK: Lineage: parq_dec_n1.name SIMPLE [(dec_n1)dec_n1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: parq_dec_n1.value EXPRESSION [(dec_n1)dec_n1.FieldSchema(name:value, type:decimal(8,4), comment:null), ]
+PREHOOK: query: SELECT * FROM parq_dec_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@parq_dec
+PREHOOK: Input: default@parq_dec_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parq_dec
+POSTHOOK: query: SELECT * FROM parq_dec_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parq_dec
+POSTHOOK: Input: default@parq_dec_n1
 #### A masked pattern was here ####
 Tom 234.79
 Beck 77.34
@@ -66,13 +66,13 @@ Mary 33.33
 Tom 19.00
 Beck 0.00
 Beck 79.90
-PREHOOK: query: SELECT value, count(*) FROM parq_dec GROUP BY value ORDER BY value
+PREHOOK: query: SELECT value, count(*) FROM parq_dec_n1 GROUP BY value ORDER BY value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@parq_dec
+PREHOOK: Input: default@parq_dec_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT value, count(*) FROM parq_dec GROUP BY value ORDER BY value
+POSTHOOK: query: SELECT value, count(*) FROM parq_dec_n1 GROUP BY value ORDER BY value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parq_dec
+POSTHOOK: Input: default@parq_dec_n1
 #### A masked pattern was here ####
 -12.25 1
 0.00 1
@@ -84,29 +84,29 @@ POSTHOOK: Input: default@parq_dec
 77.34 1
 79.90 1
 234.79 1
-PREHOOK: query: TRUNCATE TABLE parq_dec
+PREHOOK: query: TRUNCATE TABLE parq_dec_n1
 PREHOOK: type: TRUNCATETABLE
-PREHOOK: Output: default@parq_dec
-POSTHOOK: query: TRUNCATE TABLE parq_dec
+PREHOOK: Output: default@parq_dec_n1
+POSTHOOK: query: TRUNCATE TABLE parq_dec_n1
 POSTHOOK: type: TRUNCATETABLE
-POSTHOOK: Output: default@parq_dec
-PREHOOK: query: INSERT OVERWRITE TABLE parq_dec SELECT name, NULL FROM `dec`
+POSTHOOK: Output: default@parq_dec_n1
+PREHOOK: query: INSERT OVERWRITE TABLE parq_dec_n1 SELECT name, NULL FROM `dec_n1`
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dec
-PREHOOK: Output: default@parq_dec
-POSTHOOK: query: INSERT OVERWRITE TABLE parq_dec SELECT name, NULL FROM `dec`
+PREHOOK: Input: default@dec_n1
+PREHOOK: Output: default@parq_dec_n1
+POSTHOOK: query: INSERT OVERWRITE TABLE parq_dec_n1 SELECT name, NULL FROM `dec_n1`
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dec
-POSTHOOK: Output: default@parq_dec
-POSTHOOK: Lineage: parq_dec.name SIMPLE [(dec)dec.FieldSchema(name:name, type:string, comment:null), ]
-POSTHOOK: Lineage: parq_dec.value EXPRESSION []
-PREHOOK: query: SELECT * FROM parq_dec
+POSTHOOK: Input: default@dec_n1
+POSTHOOK: Output: default@parq_dec_n1
+POSTHOOK: Lineage: parq_dec_n1.name SIMPLE [(dec_n1)dec_n1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: parq_dec_n1.value EXPRESSION []
+PREHOOK: query: SELECT * FROM parq_dec_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@parq_dec
+PREHOOK: Input: default@parq_dec_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parq_dec
+POSTHOOK: query: SELECT * FROM parq_dec_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parq_dec
+POSTHOOK: Input: default@parq_dec_n1
 #### A masked pattern was here ####
 Tom NULL
 Beck NULL
@@ -164,22 +164,22 @@ POSTHOOK: Input: default@parq_dec1
 0.2
 3.2
 8.0
-PREHOOK: query: DROP TABLE `dec`
+PREHOOK: query: DROP TABLE `dec_n1`
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dec
-PREHOOK: Output: default@dec
-POSTHOOK: query: DROP TABLE `dec`
+PREHOOK: Input: default@dec_n1
+PREHOOK: Output: default@dec_n1
+POSTHOOK: query: DROP TABLE `dec_n1`
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dec
-POSTHOOK: Output: default@dec
+POSTHOOK: Input: default@dec_n1
+POSTHOOK: Output: default@dec_n1
-PREHOOK: query: DROP TABLE parq_dec
+PREHOOK: query: DROP TABLE parq_dec_n1
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parq_dec
-PREHOOK: Output: default@parq_dec
-POSTHOOK: query: DROP TABLE parq_dec
+PREHOOK: Input: default@parq_dec_n1
+PREHOOK: Output: default@parq_dec_n1
+POSTHOOK: query: DROP TABLE parq_dec_n1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parq_dec
-POSTHOOK: Output: default@parq_dec
+POSTHOOK: Input: default@parq_dec_n1
+POSTHOOK: Output: default@parq_dec_n1
 PREHOOK: query: DROP TABLE parq_dec1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@parq_dec1
diff --git a/ql/src/test/results/clientpositive/parquet_join.q.out b/ql/src/test/results/clientpositive/parquet_join.q.out
index 482d535ad2..3b6417f221 100644
--- a/ql/src/test/results/clientpositive/parquet_join.q.out
+++ b/ql/src/test/results/clientpositive/parquet_join.q.out
@@ -1,6 +1,6 @@
-PREHOOK: query: drop table if exists staging
+PREHOOK: query: drop table if exists staging_n5
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists staging
+POSTHOOK: query: drop table if exists staging_n5
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: drop table if exists parquet_jointable1
 PREHOOK: type: DROPTABLE
@@ -18,49 +18,49 @@ PREHOOK: query: drop table if exists parquet_jointable2_bucketed_sorted
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table if exists parquet_jointable2_bucketed_sorted
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table staging (key int, value string) stored as textfile
+PREHOOK: query: create table staging_n5 (key int, value string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@staging
-POSTHOOK: query: create table staging (key int, value string) stored as textfile
+PREHOOK: Output: default@staging_n5
+POSTHOOK: query: create table staging_n5 (key int, value string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@staging
-PREHOOK: query: insert into table staging select distinct key, value from src order by key limit 2
+POSTHOOK: Output: default@staging_n5
+PREHOOK: query: insert into table staging_n5 select distinct key, value from src order by key limit 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@staging
-POSTHOOK: query: insert into table staging select distinct key, value from src order by key limit 2
+PREHOOK: Output: default@staging_n5
+POSTHOOK: query: insert into table staging_n5 select distinct key, value from src order by key limit 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@staging
-POSTHOOK: Lineage: staging.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: staging.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table parquet_jointable1 stored as parquet as select * from staging
+POSTHOOK: Output: default@staging_n5
+POSTHOOK: Lineage: staging_n5.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: staging_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table parquet_jointable1 stored as parquet as select * from staging_n5
 PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@staging
+PREHOOK: Input: default@staging_n5
 PREHOOK: Output: database:default
 PREHOOK: Output: default@parquet_jointable1
-POSTHOOK: query: create table parquet_jointable1 stored as parquet as select * from staging
+POSTHOOK: query: create table parquet_jointable1 stored as parquet as select * from staging_n5
 POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@staging
+POSTHOOK: Input: default@staging_n5
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_jointable1
-POSTHOOK: Lineage: parquet_jointable1.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: parquet_jointable1.value SIMPLE [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging
+POSTHOOK: Lineage: parquet_jointable1.key SIMPLE [(staging_n5)staging_n5.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable1.value SIMPLE [(staging_n5)staging_n5.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging_n5
 PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@staging
+PREHOOK: Input: default@staging_n5
 PREHOOK: Output: database:default
 PREHOOK: Output: default@parquet_jointable2
-POSTHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging
+POSTHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging_n5
 POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@staging
+POSTHOOK: Input: default@staging_n5
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_jointable2
-POSTHOOK: Lineage: parquet_jointable2._c1 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: parquet_jointable2.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: parquet_jointable2.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2._c1 EXPRESSION [(staging_n5)staging_n5.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2.key SIMPLE [(staging_n5)staging_n5.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2.myvalue EXPRESSION [(staging_n5)staging_n5.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key
@@ -237,16 +237,16 @@ POSTHOOK: query: create table parquet_jointable1_bucketed_sorted (key int,value
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_jointable1_bucketed_sorted
-PREHOOK: query: insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging cluster by key
+PREHOOK: query: insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging_n5 cluster by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@staging
+PREHOOK: Input: default@staging_n5
 PREHOOK: Output: default@parquet_jointable1_bucketed_sorted
-POSTHOOK: query: insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging cluster by key
+POSTHOOK: query: insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging_n5 cluster by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@staging
+POSTHOOK: Input: default@staging_n5
 POSTHOOK: Output: default@parquet_jointable1_bucketed_sorted
-POSTHOOK: Lineage: parquet_jointable1_bucketed_sorted.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: parquet_jointable1_bucketed_sorted.value EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable1_bucketed_sorted.key SIMPLE [(staging_n5)staging_n5.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable1_bucketed_sorted.value EXPRESSION [(staging_n5)staging_n5.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: create table parquet_jointable2_bucketed_sorted (key int,value1 string, value2 string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -255,17 +255,17 @@ POSTHOOK: query: create table parquet_jointable2_bucketed_sorted (key int,value1
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_jointable2_bucketed_sorted
-PREHOOK: query: insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging cluster by key
+PREHOOK: query: insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging_n5 cluster by key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@staging
+PREHOOK: Input: default@staging_n5
 PREHOOK: Output: default@parquet_jointable2_bucketed_sorted
-POSTHOOK: query: insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging cluster by key
+POSTHOOK: query: insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging_n5 cluster by key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@staging
+POSTHOOK: Input: default@staging_n5
 POSTHOOK: Output: default@parquet_jointable2_bucketed_sorted
-POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.value1 EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.value2 EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.key SIMPLE [(staging_n5)staging_n5.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.value1 EXPRESSION [(staging_n5)staging_n5.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.value2 EXPRESSION [(staging_n5)staging_n5.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key
diff --git a/ql/src/test/results/clientpositive/parquet_join2.q.out b/ql/src/test/results/clientpositive/parquet_join2.q.out
index 5f266112b4..b0caa20379 100644
--- a/ql/src/test/results/clientpositive/parquet_join2.q.out
+++ b/ql/src/test/results/clientpositive/parquet_join2.q.out
@@ -1,62 +1,62 @@
-PREHOOK: query: CREATE TABLE tbl1(id INT) STORED AS PARQUET
+PREHOOK: query: CREATE TABLE tbl1_n6(id INT) STORED AS PARQUET
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: CREATE TABLE tbl1(id INT) STORED AS PARQUET
+PREHOOK: Output: default@tbl1_n6
+POSTHOOK: query: CREATE TABLE tbl1_n6(id INT) STORED AS PARQUET
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl1
-PREHOOK: query: INSERT INTO tbl1 VALUES(1), (2)
+POSTHOOK: Output: default@tbl1_n6
+PREHOOK: query: INSERT INTO tbl1_n6 VALUES(1), (2)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: INSERT INTO tbl1 VALUES(1), (2)
+PREHOOK: Output: default@tbl1_n6
+POSTHOOK: query: INSERT INTO tbl1_n6 VALUES(1), (2)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@tbl1
-POSTHOOK: Lineage: tbl1.id SCRIPT []
-PREHOOK: query: CREATE TABLE tbl2(id INT, value STRING) STORED AS PARQUET
+POSTHOOK: Output: default@tbl1_n6
+POSTHOOK: Lineage: tbl1_n6.id SCRIPT []
+PREHOOK: query: CREATE TABLE tbl2_n5(id INT, value STRING) STORED AS PARQUET
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: CREATE TABLE tbl2(id INT, value STRING) STORED AS PARQUET
+PREHOOK: Output: default@tbl2_n5
+POSTHOOK: query: CREATE TABLE tbl2_n5(id INT, value STRING) STORED AS PARQUET
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl2
-PREHOOK: query: INSERT INTO tbl2 VALUES(1, 'value1')
+POSTHOOK: Output: default@tbl2_n5
+PREHOOK: query: INSERT INTO tbl2_n5 VALUES(1, 'value1')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: INSERT INTO tbl2 VALUES(1, 'value1')
+PREHOOK: Output: default@tbl2_n5
+POSTHOOK: query: INSERT INTO tbl2_n5 VALUES(1, 'value1')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@tbl2
-POSTHOOK: Lineage: tbl2.id SCRIPT []
-POSTHOOK: Lineage: tbl2.value SCRIPT []
-PREHOOK: query: INSERT INTO tbl2 VALUES(1, 'value2')
+POSTHOOK: Output: default@tbl2_n5
+POSTHOOK: Lineage: tbl2_n5.id SCRIPT []
+POSTHOOK: Lineage: tbl2_n5.value SCRIPT []
+PREHOOK: query: INSERT INTO tbl2_n5 VALUES(1, 'value2')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: INSERT INTO tbl2 VALUES(1, 'value2')
+PREHOOK: Output: default@tbl2_n5
+POSTHOOK: query: INSERT INTO tbl2_n5 VALUES(1, 'value2')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@tbl2
-POSTHOOK: Lineage: tbl2.id SCRIPT []
-POSTHOOK: Lineage: tbl2.value SCRIPT []
-PREHOOK: query: select tbl1.id, t1.value, t2.value
-FROM tbl1
-JOIN (SELECT * FROM tbl2 WHERE value='value1') t1 ON tbl1.id=t1.id
-JOIN (SELECT * FROM tbl2 WHERE value='value2') t2 ON tbl1.id=t2.id
+POSTHOOK: Output: default@tbl2_n5
+POSTHOOK: Lineage: tbl2_n5.id SCRIPT []
+POSTHOOK: Lineage: tbl2_n5.value SCRIPT []
+PREHOOK: query: select tbl1_n6.id, t1.value, t2.value
+FROM tbl1_n6
+JOIN (SELECT * FROM tbl2_n5 WHERE value='value1') t1 ON tbl1_n6.id=t1.id
+JOIN (SELECT * FROM tbl2_n5 WHERE value='value2') t2 ON tbl1_n6.id=t2.id
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl1
-PREHOOK: Input: default@tbl2
+PREHOOK: Input: default@tbl1_n6
+PREHOOK: Input: default@tbl2_n5
 #### A masked pattern was here ####
-POSTHOOK: query: select tbl1.id, t1.value, t2.value
-FROM tbl1
-JOIN (SELECT * FROM tbl2 WHERE value='value1') t1 ON tbl1.id=t1.id
-JOIN (SELECT * FROM tbl2 WHERE value='value2') t2 ON tbl1.id=t2.id
+POSTHOOK: query: select tbl1_n6.id, t1.value, t2.value
+FROM tbl1_n6
+JOIN (SELECT * FROM tbl2_n5 WHERE value='value1') t1 ON tbl1_n6.id=t1.id
+JOIN (SELECT * FROM tbl2_n5 WHERE value='value2') t2 ON tbl1_n6.id=t2.id
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl1
-POSTHOOK: Input: default@tbl2
+POSTHOOK: Input: default@tbl1_n6
+POSTHOOK: Input: default@tbl2_n5
 #### A masked pattern was here ####
 1 value1 value2
diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.out b/ql/src/test/results/clientpositive/parquet_map_null.q.out
index f2a4304f74..ce341745ff 100644
--- a/ql/src/test/results/clientpositive/parquet_map_null.q.out
+++ b/ql/src/test/results/clientpositive/parquet_map_null.q.out
@@ -1,38 +1,38 @@
-PREHOOK: query: DROP TABLE IF EXISTS avro_table
+PREHOOK: query: DROP TABLE IF EXISTS avro_table_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS avro_table
+POSTHOOK: query: DROP TABLE IF EXISTS avro_table_n0
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: DROP TABLE IF EXISTS parquet_table
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS parquet_table
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map) STORED AS AVRO
+PREHOOK: query: CREATE TABLE avro_table_n0 (avreau_col_1 map) STORED AS AVRO
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map) STORED AS AVRO
+PREHOOK: Output: default@avro_table_n0
+POSTHOOK: query: CREATE TABLE avro_table_n0 (avreau_col_1 map) STORED AS AVRO
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
+POSTHOOK: Output: default@avro_table_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
+PREHOOK: Output: default@avro_table_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
+POSTHOOK: Output: default@avro_table_n0
+PREHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table_n0
 PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@avro_table
+PREHOOK: Input: default@avro_table_n0
 PREHOOK: Output: database:default
 PREHOOK: Output: default@parquet_table
-POSTHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
+POSTHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table_n0
 POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@avro_table
+POSTHOOK: Input: default@avro_table_n0
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_table
-POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table)avro_table.FieldSchema(name:avreau_col_1, type:map, comment:), ]
+POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table_n0)avro_table_n0.FieldSchema(name:avreau_col_1, type:map, comment:), ]
 PREHOOK: query: SELECT * FROM parquet_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_table
@@ -46,14 +46,14 @@ POSTHOOK: Input: default@parquet_table
 {"key1":null,"key2":"val2"}
 {"key3":"val3","key4":null}
 {"key3":"val3","key4":null}
-PREHOOK: query: DROP TABLE avro_table
+PREHOOK: query: DROP TABLE avro_table_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: DROP TABLE avro_table
+PREHOOK: Input: default@avro_table_n0
+PREHOOK: Output: default@avro_table_n0
+POSTHOOK: query: DROP TABLE avro_table_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: default@avro_table
+POSTHOOK: Input: default@avro_table_n0
+POSTHOOK: Output: default@avro_table_n0
 PREHOOK: query: DROP TABLE parquet_table
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@parquet_table
diff --git a/ql/src/test/results/clientpositive/parquet_nested_complex.q.out b/ql/src/test/results/clientpositive/parquet_nested_complex.q.out
index d1f0edc838..cb5dec5ee2 100644
--- a/ql/src/test/results/clientpositive/parquet_nested_complex.q.out
+++ b/ql/src/test/results/clientpositive/parquet_nested_complex.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table nestedcomplex (
+PREHOOK: query: create table nestedcomplex_n0 (
 simple_int int,
 max_nested_array array>>>>>>>>>>>>>>>>>>>>>>,
 max_nested_map array>>>>>>>>>>>>>>>>>>>>>,
@@ -12,8 +12,8 @@ WITH SERDEPROPERTIES (
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@nestedcomplex
-POSTHOOK: query: create table nestedcomplex (
+PREHOOK: Output: default@nestedcomplex_n0
+POSTHOOK: query: create table nestedcomplex_n0 (
 simple_int int,
 max_nested_array array>>>>>>>>>>>>>>>>>>>>>>,
 max_nested_map array>>>>>>>>>>>>>>>>>>>>>,
@@ -27,24 +27,24 @@ WITH SERDEPROPERTIES (
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nestedcomplex
-PREHOOK: query: describe nestedcomplex
+POSTHOOK: Output: default@nestedcomplex_n0
+PREHOOK: query: describe nestedcomplex_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nestedcomplex
-POSTHOOK: query: describe nestedcomplex
+PREHOOK: Input: default@nestedcomplex_n0
+POSTHOOK: query: describe nestedcomplex_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nestedcomplex
+POSTHOOK: Input: default@nestedcomplex_n0
 simple_int int
 max_nested_array array>>>>>>>>>>>>>>>>>>>>>>
 max_nested_map array>>>>>>>>>>>>>>>>>>>>>
 max_nested_struct array>>>>>>>>>>>>>>>>>>>>>>
 simple_string string
-PREHOOK: query: describe extended nestedcomplex
+PREHOOK: query: describe extended nestedcomplex_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nestedcomplex
-POSTHOOK: query: describe extended nestedcomplex
+PREHOOK: Input: default@nestedcomplex_n0
+POSTHOOK: query: describe extended nestedcomplex_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nestedcomplex
+POSTHOOK: Input: default@nestedcomplex_n0
 simple_int int
 max_nested_array array>>>>>>>>>>>>>>>>>>>>>>
 max_nested_map array>>>>>>>>>>>>>>>>>>>>>
@@ -52,29 +52,29 @@ max_nested_struct array>>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
-POSTHOOK: Lineage: parquet_nested_complex.max_nested_map SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:max_nested_map, type:array>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
-POSTHOOK: Lineage: parquet_nested_complex.max_nested_struct SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:max_nested_struct, type:array>>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
-POSTHOOK: Lineage: parquet_nested_complex.simple_int SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:simple_int, type:int, comment:null), ]
-POSTHOOK: Lineage: parquet_nested_complex.simple_string SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:simple_string, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.max_nested_array SIMPLE [(nestedcomplex_n0)nestedcomplex_n0.FieldSchema(name:max_nested_array, type:array>>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.max_nested_map SIMPLE [(nestedcomplex_n0)nestedcomplex_n0.FieldSchema(name:max_nested_map, type:array>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.max_nested_struct SIMPLE [(nestedcomplex_n0)nestedcomplex_n0.FieldSchema(name:max_nested_struct, type:array>>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.simple_int SIMPLE [(nestedcomplex_n0)nestedcomplex_n0.FieldSchema(name:simple_int, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.simple_string SIMPLE [(nestedcomplex_n0)nestedcomplex_n0.FieldSchema(name:simple_string, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM parquet_nested_complex SORT BY simple_int
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_nested_complex
@@ -85,14 +85,14 @@ POSTHOOK: Input: default@parquet_nested_complex
 #### A masked pattern was here ####
 2 [[[[[[[[[[[[[[[[[[[[[[[0,3,2]]]]]]]]]]]]]]]]]]]]]]] [[[[[[[[[[[[[[[[[[[[[{"k1":"v1","k3":"v3"}]]]]]]]]]]]]]]]]]]]]] [[[[[[[[[[[[[[[[[[[[[[{"s":"b","i":10}]]]]]]]]]]]]]]]]]]]]]] 2
 3 [[[[[[[[[[[[[[[[[[[[[[[0,1,2]]]]]]]]]]]]]]]]]]]]]]] [[[[[[[[[[[[[[[[[[[[[{"k1":"v1","k2":"v2"}]]]]]]]]]]]]]]]]]]]]] [[[[[[[[[[[[[[[[[[[[[[{"s":"a","i":10}]]]]]]]]]]]]]]]]]]]]]] 2
-PREHOOK: query: DROP TABLE nestedcomplex
+PREHOOK: query: DROP TABLE nestedcomplex_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@nestedcomplex
-PREHOOK: Output: default@nestedcomplex
-POSTHOOK: query: DROP TABLE nestedcomplex
+PREHOOK: Input: default@nestedcomplex_n0
+PREHOOK: Output: default@nestedcomplex_n0
+POSTHOOK: query: DROP TABLE nestedcomplex_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@nestedcomplex
-POSTHOOK: Output: default@nestedcomplex
+POSTHOOK: Input: default@nestedcomplex_n0
+POSTHOOK: Output: default@nestedcomplex_n0
 PREHOOK: query: DROP TABLE parquet_nested_complex
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@parquet_nested_complex
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
index 4dba227bf5..79f280d4bd 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out
@@ -1,108 +1,108 @@
-PREHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
+PREHOOK: query: create table newtypestbl_n3(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
+PREHOOK: Output: default@newtypestbl_n3
+POSTHOOK: query: create table newtypestbl_n3(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
+POSTHOOK: Output: default@newtypestbl_n3
+PREHOOK: query: insert overwrite table newtypestbl_n3 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
+PREHOOK: Output: default@newtypestbl_n3
+POSTHOOK: query: insert overwrite table newtypestbl_n3 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@newtypestbl
-POSTHOOK: Lineage: newtypestbl.c EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.da EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
-PREHOOK: query: select * from newtypestbl where c="apple"
+POSTHOOK: Output: default@newtypestbl_n3
+POSTHOOK: Lineage: newtypestbl_n3.c EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n3.d EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n3.da EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n3.v EXPRESSION []
+PREHOOK: query: select * from newtypestbl_n3 where c="apple"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c="apple"
+POSTHOOK: query: select * from newtypestbl_n3 where c="apple"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c="apple"
+PREHOOK: query: select * from newtypestbl_n3 where c="apple"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c="apple"
+POSTHOOK: query: select * from newtypestbl_n3 where c="apple"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c!="apple"
+PREHOOK: query: select * from newtypestbl_n3 where c!="apple"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c!="apple"
+POSTHOOK: query: select * from newtypestbl_n3 where c!="apple"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where c!="apple"
+PREHOOK: query: select * from newtypestbl_n3 where c!="apple"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c!="apple"
+POSTHOOK: query: select * from newtypestbl_n3 where c!="apple"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where c<"hello"
+PREHOOK: query: select * from newtypestbl_n3 where c<"hello"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c<"hello"
+POSTHOOK: query: select * from newtypestbl_n3 where c<"hello"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c<"hello"
+PREHOOK: query: select * from newtypestbl_n3 where c<"hello"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c<"hello"
+POSTHOOK: query: select * from newtypestbl_n3 where c<"hello"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c<="hello" sort by c
+PREHOOK: query: select * from newtypestbl_n3 where c<="hello" sort by c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c<="hello" sort by c
+POSTHOOK: query: select * from newtypestbl_n3 where c<="hello" sort by c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
@@ -114,13 +114,13 @@ hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where c<="hello" sort by c
+PREHOOK: query: select * from newtypestbl_n3 where c<="hello" sort by c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c<="hello" sort by c
+POSTHOOK: query: select * from newtypestbl_n3 where c<="hello" sort by c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
@@ -132,65 +132,65 @@ hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where c="apple "
+PREHOOK: query: select * from newtypestbl_n3 where c="apple "
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c="apple "
+POSTHOOK: query: select * from newtypestbl_n3 where c="apple "
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c="apple "
+PREHOOK: query: select * from newtypestbl_n3 where c="apple "
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c="apple "
+POSTHOOK: query: select * from newtypestbl_n3 where c="apple "
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
+PREHOOK: query: select * from newtypestbl_n3 where c in ("apple", "carrot")
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
+POSTHOOK: query: select * from newtypestbl_n3 where c in ("apple", "carrot")
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
+PREHOOK: query: select * from newtypestbl_n3 where c in ("apple", "carrot")
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c in ("apple", "carrot")
+POSTHOOK: query: select * from newtypestbl_n3 where c in ("apple", "carrot")
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by c
+PREHOOK: query: select * from newtypestbl_n3 where c in ("apple", "hello") sort by c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by c
+POSTHOOK: query: select * from newtypestbl_n3 where c in ("apple", "hello") sort by c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
@@ -202,13 +202,13 @@ hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by c
+PREHOOK: query: select * from newtypestbl_n3 where c in ("apple", "hello") sort by c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c in ("apple", "hello") sort by c
+POSTHOOK: query: select * from newtypestbl_n3 where c in ("apple", "hello") sort by c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
@@ -220,55 +220,55 @@ hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where c in ("carrot")
+PREHOOK: query: select * from newtypestbl_n3 where c in ("carrot")
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c in ("carrot")
+POSTHOOK: query: select * from newtypestbl_n3 where c in ("carrot")
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-PREHOOK: query: select * from newtypestbl where c in ("carrot")
+PREHOOK: query: select * from newtypestbl_n3 where c in ("carrot")
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c in ("carrot")
+POSTHOOK: query: select * from newtypestbl_n3 where c in ("carrot")
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-PREHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
+PREHOOK: query: select * from newtypestbl_n3 where c between "apple" and "carrot"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
+POSTHOOK: query: select * from newtypestbl_n3 where c between "apple" and "carrot"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
+PREHOOK: query: select * from newtypestbl_n3 where c between "apple" and "carrot"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c between "apple" and "carrot"
+POSTHOOK: query: select * from newtypestbl_n3 where c between "apple" and "carrot"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where c between "apple" and "zombie" sort by c
+PREHOOK: query: select * from newtypestbl_n3 where c between "apple" and "zombie" sort by c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c between "apple" and "zombie" sort by c
+POSTHOOK: query: select * from newtypestbl_n3 where c between "apple" and "zombie" sort by c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
@@ -280,13 +280,13 @@ hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where c between "apple" and "zombie" sort by c
+PREHOOK: query: select * from newtypestbl_n3 where c between "apple" and "zombie" sort by c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c between "apple" and "zombie" sort by c
+POSTHOOK: query: select * from newtypestbl_n3 where c between "apple" and "zombie" sort by c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
@@ -298,19 +298,19 @@ hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
+PREHOOK: query: select * from newtypestbl_n3 where c between "carrot" and "carrot1"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
+POSTHOOK: query: select * from newtypestbl_n3 where c between "carrot" and "carrot1"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-PREHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
+PREHOOK: query: select * from newtypestbl_n3 where c between "carrot" and "carrot1"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where c between "carrot" and "carrot1"
+POSTHOOK: query: select * from newtypestbl_n3 where c between "carrot" and "carrot1"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n3
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
index 821b4b2b0a..d9f6846236 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out
@@ -1,173 +1,173 @@
-PREHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
+PREHOOK: query: create table newtypestbl_n2(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
+PREHOOK: Output: default@newtypestbl_n2
+POSTHOOK: query: create table newtypestbl_n2(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
+POSTHOOK: Output: default@newtypestbl_n2
+PREHOOK: query: insert overwrite table newtypestbl_n2 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
+PREHOOK: Output: default@newtypestbl_n2
+POSTHOOK: query: insert overwrite table newtypestbl_n2 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@newtypestbl
-POSTHOOK: Lineage: newtypestbl.c EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.da EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
-PREHOOK: query: select * from newtypestbl where da='1970-02-20'
+POSTHOOK: Output: default@newtypestbl_n2
+POSTHOOK: Lineage: newtypestbl_n2.c EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n2.d EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n2.da EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n2.v EXPRESSION []
+PREHOOK: query: select * from newtypestbl_n2 where da='1970-02-20'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da='1970-02-20'
+POSTHOOK: query: select * from newtypestbl_n2 where da='1970-02-20'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da='1970-02-20'
+PREHOOK: query: select * from newtypestbl_n2 where da='1970-02-20'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da='1970-02-20'
+POSTHOOK: query: select * from newtypestbl_n2 where da='1970-02-20'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da= date '1970-02-20'
+PREHOOK: query: select * from newtypestbl_n2 where da= date '1970-02-20'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da= date '1970-02-20'
+POSTHOOK: query: select * from newtypestbl_n2 where da= date '1970-02-20'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
+PREHOOK: query: select * from newtypestbl_n2 where da=cast('1970-02-20' as date)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
+POSTHOOK: query: select * from newtypestbl_n2 where da=cast('1970-02-20' as date)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
+PREHOOK: query: select * from newtypestbl_n2 where da=cast('1970-02-20' as date)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as date)
+POSTHOOK: query: select * from newtypestbl_n2 where da=cast('1970-02-20' as date)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
+PREHOOK: query: select * from newtypestbl_n2 where da=cast('1970-02-20' as varchar(20))
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
+POSTHOOK: query: select * from newtypestbl_n2 where da=cast('1970-02-20' as varchar(20))
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
+PREHOOK: query: select * from newtypestbl_n2 where da=cast('1970-02-20' as varchar(20))
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da=cast('1970-02-20' as varchar(20))
+POSTHOOK: query: select * from newtypestbl_n2 where da=cast('1970-02-20' as varchar(20))
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da!='1970-02-20'
+PREHOOK: query: select * from newtypestbl_n2 where da!='1970-02-20'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da!='1970-02-20'
+POSTHOOK: query: select * from newtypestbl_n2 where da!='1970-02-20'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where da!='1970-02-20'
+PREHOOK: query: select * from newtypestbl_n2 where da!='1970-02-20'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da!='1970-02-20'
+POSTHOOK: query: select * from newtypestbl_n2 where da!='1970-02-20'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where da<'1970-02-27'
+PREHOOK: query: select * from newtypestbl_n2 where da<'1970-02-27'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da<'1970-02-27'
+POSTHOOK: query: select * from newtypestbl_n2 where da<'1970-02-27'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da<'1970-02-27'
+PREHOOK: query: select * from newtypestbl_n2 where da<'1970-02-27'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da<'1970-02-27'
+POSTHOOK: query: select * from newtypestbl_n2 where da<'1970-02-27'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
+PREHOOK: query: select * from newtypestbl_n2 where da<'1970-02-29' sort by c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
+POSTHOOK: query: select * from newtypestbl_n2 where da<'1970-02-29' sort by c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
@@ -179,13 +179,13 @@ hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
+PREHOOK: query: select * from newtypestbl_n2 where da<'1970-02-29' sort by c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da<'1970-02-29' sort by c
+POSTHOOK: query: select * from newtypestbl_n2 where da<'1970-02-29' sort by c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
@@ -197,55 +197,55 @@ hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
 hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where da<'1970-02-15'
+PREHOOK: query: select * from newtypestbl_n2 where da<'1970-02-15'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da<'1970-02-15'
+POSTHOOK: query: select * from newtypestbl_n2 where da<'1970-02-15'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-PREHOOK: query: select * from newtypestbl where da<'1970-02-15'
+PREHOOK: query: select * from newtypestbl_n2 where da<'1970-02-15'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da<'1970-02-15'
+POSTHOOK: query: select * from newtypestbl_n2 where da<'1970-02-15'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-PREHOOK: query: select * from newtypestbl where da<='1970-02-20'
+PREHOOK: query: select * from newtypestbl_n2 where da<='1970-02-20'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da<='1970-02-20'
+POSTHOOK: query: select * from newtypestbl_n2 where da<='1970-02-20'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
 apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da<='1970-02-20'
+PREHOOK: query: select * from newtypestbl_n2 where da<='1970-02-20'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da<='1970-02-20'
+POSTHOOK: query: select * from newtypestbl_n2 where da<='1970-02-20'
 POSTHOOK: type: QUERY
-POSTHOOK: 
Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c +PREHOOK: query: select * from newtypestbl_n2 where da<='1970-02-27' sort by c PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c +POSTHOOK: query: select * from newtypestbl_n2 where da<='1970-02-27' sort by c POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 @@ -257,13 +257,13 @@ hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c +PREHOOK: query: select * from newtypestbl_n2 where da<='1970-02-27' sort by c PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where da<='1970-02-27' sort by c +POSTHOOK: query: select * from newtypestbl_n2 where da<='1970-02-27' sort by c POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 @@ -275,39 +275,39 @@ hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date)) +PREHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-27' as date)) PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date)) +POSTHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-27' as date)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date)) +PREHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-27' as date)) PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-27' as date)) +POSTHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-27' as date)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n2 #### A masked pattern was here #### hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 
-PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
+PREHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
+POSTHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
@@ -319,13 +319,13 @@ hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
+PREHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
+POSTHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-20' as date), cast('1970-02-27' as date)) sort by c
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
@@ -337,55 +337,55 @@ hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
+PREHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
+POSTHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-PREHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
+PREHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
+POSTHOOK: query: select * from newtypestbl_n2 where da in (cast('1970-02-21' as date), cast('1970-02-22' as date))
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
+PREHOOK: query: select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-22'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
+POSTHOOK: query: select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-22'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
+PREHOOK: query: select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-22'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-22'
+POSTHOOK: query: select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-22'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c
+PREHOOK: query: select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-28' sort by c
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c
+POSTHOOK: query: select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-28' sort by c
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
@@ -397,13 +397,13 @@ hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c
+PREHOOK: query: select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-28' sort by c
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da between '1970-02-19' and '1970-02-28' sort by c
+POSTHOOK: query: select * from newtypestbl_n2 where da between '1970-02-19' and '1970-02-28' sort by c
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
@@ -415,19 +415,19 @@ hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
+PREHOOK: query: select * from newtypestbl_n2 where da between '1970-02-18' and '1970-02-19'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
+POSTHOOK: query: select * from newtypestbl_n2 where da between '1970-02-18' and '1970-02-19'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-PREHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
+PREHOOK: query: select * from newtypestbl_n2 where da between '1970-02-18' and '1970-02-19'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where da between '1970-02-18' and '1970-02-19'
+POSTHOOK: query: select * from newtypestbl_n2 where da between '1970-02-18' and '1970-02-19'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n2
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
index c2611fc8b1..c9a4338dbf 100644
--- a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out
@@ -1,290 +1,290 @@
-PREHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
+PREHOOK: query: create table newtypestbl_n5(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
+PREHOOK: Output: default@newtypestbl_n5
+POSTHOOK: query: create table newtypestbl_n5(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@newtypestbl
-PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
+POSTHOOK: Output: default@newtypestbl_n5
+PREHOOK: query: insert overwrite table newtypestbl_n5 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@newtypestbl
-POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
+PREHOOK: Output: default@newtypestbl_n5
+POSTHOOK: query: insert overwrite table newtypestbl_n5 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@newtypestbl
-POSTHOOK: Lineage: newtypestbl.c EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.d EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.da EXPRESSION []
-POSTHOOK: Lineage: newtypestbl.v EXPRESSION []
-PREHOOK: query: select * from newtypestbl where d=0.22
+POSTHOOK: Output: default@newtypestbl_n5
+POSTHOOK: Lineage: newtypestbl_n5.c EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n5.d EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n5.da EXPRESSION []
+POSTHOOK: Lineage: newtypestbl_n5.v EXPRESSION []
+PREHOOK: query: select * from newtypestbl_n5 where d=0.22
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d=0.22
+POSTHOOK: query: select * from newtypestbl_n5 where d=0.22
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d=0.22
+PREHOOK: query: select * from newtypestbl_n5 where d=0.22
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d=0.22
+POSTHOOK: query: select * from newtypestbl_n5 where d=0.22
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d='0.22'
+PREHOOK: query: select * from newtypestbl_n5 where d='0.22'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d='0.22'
+POSTHOOK: query: select * from newtypestbl_n5 where d='0.22'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d='0.22'
+PREHOOK: query: select * from newtypestbl_n5 where d='0.22'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d='0.22'
+POSTHOOK: query: select * from newtypestbl_n5 where d='0.22'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
+PREHOOK: query: select * from newtypestbl_n5 where d=cast('0.22' as float)
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
+POSTHOOK: query: select * from newtypestbl_n5 where d=cast('0.22' as float)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
+PREHOOK: query: select * from newtypestbl_n5 where d=cast('0.22' as float)
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float)
+POSTHOOK: query: select * from newtypestbl_n5 where d=cast('0.22' as float)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d!=0.22
+PREHOOK: query: select * from newtypestbl_n5 where d!=0.22
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d!=0.22
+POSTHOOK: query: select * from newtypestbl_n5 where d!=0.22
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where d!=0.22
+PREHOOK: query: select * from newtypestbl_n5 where d!=0.22
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d!=0.22
+POSTHOOK: query: select * from newtypestbl_n5 where d!=0.22
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where d!='0.22'
+PREHOOK: query: select * from newtypestbl_n5 where d!='0.22'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d!='0.22'
+POSTHOOK: query: select * from newtypestbl_n5 where d!='0.22'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where d!='0.22'
+PREHOOK: query: select * from newtypestbl_n5 where d!='0.22'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d!='0.22'
+POSTHOOK: query: select * from newtypestbl_n5 where d!='0.22'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
+PREHOOK: query: select * from newtypestbl_n5 where d!=cast('0.22' as float)
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
+POSTHOOK: query: select * from newtypestbl_n5 where d!=cast('0.22' as float)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
+PREHOOK: query: select * from newtypestbl_n5 where d!=cast('0.22' as float)
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float)
+POSTHOOK: query: select * from newtypestbl_n5 where d!=cast('0.22' as float)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
hello world 11.220 1970-02-27
-PREHOOK: query: select * from newtypestbl where d<11.22
+PREHOOK: query: select * from newtypestbl_n5 where d<11.22
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d<11.22
+POSTHOOK: query: select * from newtypestbl_n5 where d<11.22
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d<11.22
+PREHOOK: query: select * from newtypestbl_n5 where d<11.22
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d<11.22
+POSTHOOK: query: select * from newtypestbl_n5 where d<11.22
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d<'11.22'
+PREHOOK: query: select * from newtypestbl_n5 where d<'11.22'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d<'11.22'
+POSTHOOK: query: select * from newtypestbl_n5 where d<'11.22'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d<'11.22'
+PREHOOK: query: select * from newtypestbl_n5 where d<'11.22'
PREHOOK: type: QUERY
-PREHOOK: Input: default@newtypestbl
+PREHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
-POSTHOOK: query: select * from newtypestbl where d<'11.22'
+POSTHOOK: query: select * from newtypestbl_n5 where d<'11.22'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@newtypestbl
+POSTHOOK: Input: default@newtypestbl_n5
#### A masked pattern was here ####
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
apple bee 0.220 1970-02-20
-PREHOOK: query: select * from newtypestbl where d
-PREHOOK: query: CREATE TABLE test (Name string, address struct) STORED AS PARQUET
+PREHOOK: query: CREATE TABLE test_n6 (Name string, address struct) STORED AS PARQUET
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test
-POSTHOOK: query: CREATE TABLE test (Name string, address struct) STORED AS PARQUET
+PREHOOK: Output: default@test_n6
+POSTHOOK: query: CREATE TABLE test_n6 (Name string, address struct) STORED AS PARQUET
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/HiveGroup.parquet' OVERWRITE INTO TABLE test
+POSTHOOK: Output: default@test_n6
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/HiveGroup.parquet' OVERWRITE INTO TABLE test_n6
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@test
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/HiveGroup.parquet' OVERWRITE INTO TABLE test
+PREHOOK: Output: default@test_n6
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/HiveGroup.parquet' OVERWRITE INTO TABLE test_n6
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@test
-PREHOOK: query: SELECT * FROM test
+POSTHOOK: Output: default@test_n6
+PREHOOK: query: SELECT * FROM test_n6
PREHOOK: type: QUERY
-PREHOOK: Input: default@test
+PREHOOK: Input: default@test_n6
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test
+POSTHOOK: query: SELECT * FROM test_n6
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test
+POSTHOOK: Input: default@test_n6
#### A masked pattern was here ####
Roger {"Zip":"87366","Street":"Congress Ave."}
-PREHOOK: query: DROP TABLE test
+PREHOOK: query: DROP TABLE test_n6
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test
-PREHOOK: Output: default@test
-POSTHOOK: query: DROP TABLE test
+PREHOOK: Input: default@test_n6
+PREHOOK: Output: default@test_n6
+POSTHOOK: query: DROP TABLE test_n6
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test
-POSTHOOK: Output: default@test
+POSTHOOK: Input: default@test_n6
+POSTHOOK: Output: default@test_n6
diff --git a/ql/src/test/results/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q.out b/ql/src/test/results/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q.out
index 474182b8a2..e6820ab184 100644
--- a/ql/src/test/results/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q.out
@@ -1,12 +1,12 @@
-PREHOOK: query: DROP TABLE parquet_types_staging
+PREHOOK: query: DROP TABLE parquet_types_staging_n2
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE parquet_types_staging
+POSTHOOK: query: DROP TABLE parquet_types_staging_n2
POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE parquet_types
+PREHOOK: query: DROP TABLE parquet_types_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE parquet_types
+POSTHOOK: query: DROP TABLE parquet_types_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE parquet_types_staging (
+PREHOOK: query: CREATE TABLE parquet_types_staging_n2 (
cint int,
ctinyint tinyint,
csmallint smallint,
@@ -27,8 +27,8 @@ COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':'
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: CREATE TABLE parquet_types_staging (
+PREHOOK: Output: default@parquet_types_staging_n2
+POSTHOOK: query: CREATE TABLE parquet_types_staging_n2 (
cint int,
ctinyint tinyint,
csmallint smallint,
@@ -49,8 +49,8 @@ COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':'
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_types_staging
-PREHOOK: query: CREATE TABLE parquet_types (
+POSTHOOK: Output: default@parquet_types_staging_n2
+PREHOOK: query: CREATE TABLE parquet_types_n1 (
cint int,
ctinyint tinyint,
csmallint smallint,
@@ -68,8 +68,8 @@ PREHOOK: query: CREATE TABLE parquet_types (
) STORED AS PARQUET
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_types
-POSTHOOK: query: CREATE TABLE parquet_types (
+PREHOOK: Output: default@parquet_types_n1
+POSTHOOK: query: CREATE TABLE parquet_types_n1 (
cint int,
ctinyint tinyint,
csmallint smallint,
@@ -87,24 +87,24 @@ POSTHOOK: query: CREATE TABLE parquet_types (
) STORED AS PARQUET
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_types
+POSTHOOK: Output: default@parquet_types_n1
PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_non_dictionary_types.txt' OVERWRITE INTO TABLE
-parquet_types_staging
+parquet_types_staging_n2
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@parquet_types_staging
+PREHOOK: Output: default@parquet_types_staging_n2
POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_non_dictionary_types.txt' OVERWRITE INTO TABLE
-parquet_types_staging
+parquet_types_staging_n2
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@parquet_types_staging
-PREHOOK: query: SELECT * FROM parquet_types_staging
+POSTHOOK: Output: default@parquet_types_staging_n2
+PREHOOK: query: SELECT * FROM parquet_types_staging_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
+PREHOOK: Input: default@parquet_types_staging_n2
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parquet_types_staging
+POSTHOOK: query: SELECT * FROM parquet_types_staging_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types_staging
+POSTHOOK: Input: default@parquet_types_staging_n2
#### A masked pattern was here ####
1000 -128 0 0.0 0.3 1940-01-01 01:01:01.111111111 {"":""} [1000,1001] {"c1":1000,"c2":"b"} 1940-01-01
1001 -127 1 0.3 1.3 b 1941-02-02 01:01:01.111111111 b b b {"b":"b"} [1001,1002] {"c1":1001,"c2":"c"} 1941-02-02
@@ -406,39 +406,39 @@ POSTHOOK: Input: default@parquet_types_staging
1297 -87 297 89.1 297.3 ll 2237-10-01 01:01:01.111111111 ll ll ll {"ll":"ll"} [1297,1298] {"c1":1297,"c2":"l"} 2237-10-01
1298 -86 298 89.4 298.3 lm 2238-11-02 01:01:01.111111111 lm lm lm {"lm":"lm"} [1298,1299] {"c1":1298,"c2":"l"} 2238-11-02
1299 -85 299 89.7 299.3 ln 2239-12-03 01:01:01.111111111 ln ln ln {"ln":"ln"} [1299,1300] {"c1":1299,"c2":"l"} 2239-12-03
-PREHOOK: query: INSERT OVERWRITE TABLE parquet_types
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_types_n1
SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
-unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging
+unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
-PREHOOK: Output: default@parquet_types
-POSTHOOK: query: INSERT OVERWRITE TABLE parquet_types
+PREHOOK: Input: default@parquet_types_staging_n2
+PREHOOK: Output: default@parquet_types_n1
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_types_n1
SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
-unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging
+unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types_staging
-POSTHOOK: Output: default@parquet_types
-POSTHOOK: Lineage: parquet_types.cbinary EXPRESSION [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cbinary, type:string, comment:null), ]
-POSTHOOK: Lineage: parquet_types.cchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cchar, type:char(5), comment:null), ]
-POSTHOOK: Lineage: parquet_types.cdouble SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: parquet_types.cfloat SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: parquet_types.cint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: parquet_types.csmallint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: parquet_types.cstring1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: parquet_types.ctinyint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: parquet_types.cvarchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ]
-POSTHOOK: Lineage: parquet_types.d SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:d, type:date, comment:null), ]
-POSTHOOK: Lineage: parquet_types.l1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:l1, type:array, comment:null), ]
-POSTHOOK: Lineage: parquet_types.m1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:m1, type:map, comment:null), ]
-POSTHOOK: Lineage: parquet_types.st1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:st1, type:struct, comment:null), ]
-POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ]
+POSTHOOK: Input: default@parquet_types_staging_n2
+POSTHOOK: Output: default@parquet_types_n1
+POSTHOOK: Lineage: parquet_types_n1.cbinary EXPRESSION [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:cbinary, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.cchar SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:cchar, type:char(5), comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.cdouble SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.cfloat SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.cint SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.csmallint SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.cstring1 SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.ctinyint SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.cvarchar SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.d SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:d, type:date, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.l1 SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:l1, type:array, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.m1 SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:m1, type:map, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.st1 SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:st1, type:struct, comment:null), ]
+POSTHOOK: Lineage: parquet_types_n1.t SIMPLE [(parquet_types_staging_n2)parquet_types_staging_n2.FieldSchema(name:t, type:timestamp, comment:null), ]
PREHOOK: query: EXPLAIN SELECT ctinyint,
MAX(cint),
MIN(csmallint),
COUNT(cstring1),
ROUND(AVG(cfloat), 5),
ROUND(STDDEV_POP(cdouble),5)
-FROM parquet_types
+FROM parquet_types_n1
GROUP BY ctinyint
ORDER BY ctinyint
PREHOOK: type: QUERY
@@ -448,7 +448,7 @@ POSTHOOK: query: EXPLAIN SELECT ctinyint,
COUNT(cstring1),
ROUND(AVG(cfloat), 5),
ROUND(STDDEV_POP(cdouble),5)
-FROM parquet_types
+FROM parquet_types_n1
GROUP BY ctinyint
ORDER BY ctinyint
POSTHOOK: type: QUERY
@@ -462,7 +462,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: parquet_types
+ alias: parquet_types_n1
Statistics: Num rows: 300 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ctinyint (type: tinyint), cint (type: int), csmallint (type: smallint), cstring1 (type: string), cfloat (type: float), cdouble (type: double), (cdouble * cdouble) (type: double)
@@ -534,11 +534,11 @@ PREHOOK: query: SELECT ctinyint,
COUNT(cstring1),
ROUND(AVG(cfloat), 5),
ROUND(STDDEV_POP(cdouble),5)
-FROM parquet_types
+FROM parquet_types_n1
GROUP BY ctinyint
ORDER BY ctinyint
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types
+PREHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT ctinyint,
MAX(cint),
@@ -546,11 +546,11 @@ POSTHOOK: query: SELECT ctinyint,
COUNT(cstring1),
ROUND(AVG(cfloat), 5),
ROUND(STDDEV_POP(cdouble),5)
-FROM parquet_types
+FROM parquet_types_n1
GROUP BY ctinyint
ORDER BY ctinyint
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types
+POSTHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
-128 1256 0 2 38.4 128.0
-127 1257 1 2 38.7 128.0
@@ -808,9 +808,9 @@ POSTHOOK: Input: default@parquet_types
125 1253 253 1 75.9 0.0
126 1254 254 1 76.2 0.0
127 1255 255 1 76.5 0.0
-PREHOOK: query: EXPLAIN SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat
+PREHOOK: query: EXPLAIN SELECT cfloat, count(*) FROM parquet_types_n1 GROUP BY cfloat ORDER BY cfloat
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat
+POSTHOOK: query: EXPLAIN SELECT cfloat, count(*) FROM parquet_types_n1 GROUP BY cfloat ORDER BY cfloat
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -822,7 +822,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: parquet_types
+ alias: parquet_types_n1
Statistics: Num rows: 300 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cfloat (type: float)
@@ -884,13 +884,13 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat
+PREHOOK: query: SELECT cfloat, count(*) FROM parquet_types_n1 GROUP BY cfloat ORDER BY cfloat
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types
+PREHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat
+POSTHOOK: query: SELECT cfloat, count(*) FROM parquet_types_n1 GROUP BY cfloat ORDER BY cfloat
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types
+POSTHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
0.0 1
0.3 1
@@ -1192,9 +1192,9 @@ POSTHOOK: Input: default@parquet_types
89.1 1
89.4 1
89.7 1
-PREHOOK: query: EXPLAIN SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar
+PREHOOK: query: EXPLAIN SELECT cchar, count(*) FROM parquet_types_n1 GROUP BY cchar ORDER BY cchar
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar
+POSTHOOK: query: EXPLAIN SELECT cchar, count(*) FROM parquet_types_n1 GROUP BY cchar ORDER BY cchar
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -1206,7 +1206,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: parquet_types
+ alias: parquet_types_n1
Statistics: Num rows: 300 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cchar (type: char(5))
@@ -1268,13 +1268,13 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar
+PREHOOK: query: SELECT cchar, count(*) FROM parquet_types_n1 GROUP BY cchar ORDER BY cchar
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types
+PREHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar
+POSTHOOK: query: SELECT cchar, count(*) FROM parquet_types_n1 GROUP BY cchar ORDER BY cchar
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types
+POSTHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
1
b 1
@@ -1576,9 +1576,9 @@ w 1
x 1
y 1
z 1
-PREHOOK: query: EXPLAIN SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar
+PREHOOK: query: EXPLAIN SELECT cvarchar, count(*) FROM parquet_types_n1 GROUP BY cvarchar ORDER BY cvarchar
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar
+POSTHOOK: query: EXPLAIN SELECT cvarchar, count(*) FROM parquet_types_n1 GROUP BY cvarchar ORDER BY cvarchar
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -1590,7 +1590,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: parquet_types
+ alias: parquet_types_n1
Statistics: Num rows: 300 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cvarchar (type: varchar(10))
@@ -1652,13 +1652,13 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar
+PREHOOK: query: SELECT cvarchar, count(*) FROM parquet_types_n1 GROUP BY cvarchar ORDER BY cvarchar
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types
+PREHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar
+POSTHOOK: query: SELECT cvarchar, count(*) FROM parquet_types_n1 GROUP BY cvarchar ORDER BY cvarchar
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types
+POSTHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
1
b 1
@@ -1960,9 +1960,9 @@ w 1
x 1
y 1
z 1
-PREHOOK: query: EXPLAIN SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1
+PREHOOK: query: EXPLAIN SELECT cstring1, count(*) FROM parquet_types_n1 GROUP BY cstring1 ORDER BY cstring1
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1
+POSTHOOK: query: EXPLAIN SELECT cstring1, count(*) FROM parquet_types_n1 GROUP BY cstring1 ORDER BY cstring1
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -1974,7 +1974,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: parquet_types
+ alias: parquet_types_n1
Statistics: Num rows: 300 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cstring1 (type: string)
@@ -2036,13 +2036,13 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1
+PREHOOK: query: SELECT cstring1, count(*) FROM parquet_types_n1 GROUP BY cstring1 ORDER BY cstring1
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types
+PREHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1
+POSTHOOK: query: SELECT cstring1, count(*) FROM parquet_types_n1 GROUP BY cstring1 ORDER BY cstring1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types
+POSTHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
1
b 1
@@ -2344,9 +2344,9 @@ w 1
x 1
y 1
z 1
-PREHOOK: query: EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary
+PREHOOK: query: EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types_n1 GROUP BY cbinary
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary
+POSTHOOK: query: EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types_n1 GROUP BY cbinary
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2357,7 +2357,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: parquet_types
+ alias: parquet_types_n1
Statistics: Num rows: 300 Data size: 4200 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cbinary (type: binary)
@@ -2401,13 +2401,13 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary
+PREHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types_n1 GROUP BY cbinary
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types
+PREHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary
+POSTHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types_n1 GROUP BY cbinary
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_types
+POSTHOOK: Input: default@parquet_types_n1
#### A masked pattern was here ####
NULL 264
1
diff --git a/ql/src/test/results/clientpositive/parquet_types_vectorization.q.out b/ql/src/test/results/clientpositive/parquet_types_vectorization.q.out
index 43321ab915..58de65c50a 100644
--- a/ql/src/test/results/clientpositive/parquet_types_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/parquet_types_vectorization.q.out
@@ -1,12 +1,12 @@
-PREHOOK: query: DROP TABLE parquet_types_staging
+PREHOOK: query: DROP TABLE parquet_types_staging_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE parquet_types_staging
+POSTHOOK: query: DROP TABLE parquet_types_staging_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE parquet_types
+PREHOOK: query: DROP TABLE parquet_types_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE parquet_types
+POSTHOOK: query: DROP TABLE parquet_types_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE parquet_types_staging (
+PREHOOK: query: CREATE TABLE parquet_types_staging_n1 (
cint int,
ctinyint tinyint,
csmallint smallint,
@@ -27,8 +27,8 @@ COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':'
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: CREATE TABLE parquet_types_staging (
+PREHOOK: Output: default@parquet_types_staging_n1
+POSTHOOK: query: CREATE TABLE parquet_types_staging_n1 (
cint int,
ctinyint tinyint,
csmallint smallint,
@@ -49,8 +49,8 @@ COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':'
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_types_staging
-PREHOOK: query: CREATE TABLE parquet_types (
+POSTHOOK: Output: default@parquet_types_staging_n1
+PREHOOK: query: CREATE TABLE parquet_types_n0 (
cint int,
ctinyint tinyint,
csmallint smallint,
@@ -68,8 +68,8 @@ PREHOOK: query: CREATE TABLE parquet_types (
) STORED AS PARQUET
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_types
-POSTHOOK: query: CREATE TABLE parquet_types (
+PREHOOK: Output: default@parquet_types_n0
+POSTHOOK: query: CREATE TABLE parquet_types_n0 (
cint int,
ctinyint tinyint,
csmallint smallint,
@@ -87,22 +87,22 @@ POSTHOOK: query: CREATE TABLE parquet_types (
) STORED AS PARQUET
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_types
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging
+POSTHOOK: Output: default@parquet_types_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging_n1
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@parquet_types_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging
+PREHOOK: Output: default@parquet_types_staging_n1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging_n1
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@parquet_types_staging
-PREHOOK: query: SELECT * FROM parquet_types_staging
+POSTHOOK: Output: default@parquet_types_staging_n1
+PREHOOK: query: SELECT * FROM parquet_types_staging_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_types_staging
+PREHOOK: Input: default@parquet_types_staging_n1
#### A masked pattern was here ####
masked pattern was here #### -POSTHOOK: query: SELECT * FROM parquet_types_staging +POSTHOOK: query: SELECT * FROM parquet_types_staging_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types_staging +POSTHOOK: Input: default@parquet_types_staging_n1 #### A masked pattern was here #### 100 1 1 1.0 0.0 abc 2011-01-01 01:01:01.111111111 a a B4F3CAFDBEDD {"k1":"v1"} [101,200] {"c1":10,"c2":"a"} 2011-01-01 101 2 2 1.1 0.3 def 2012-02-02 02:02:02.222222222 ab ab 68692CCAC0BDE7 {"k2":"v2"} [102,200] {"c1":10,"c2":"d"} 2012-02-02 @@ -126,39 +126,39 @@ POSTHOOK: Input: default@parquet_types_staging 119 2 5 1.4 5.7 fgh 2030-08-08 20:20:20.202020202 vwxyz abcdede 68692CCAC0BDE7 {"k20":"v20"} [120,200] {"c1":10,"c2":"f"} 2030-08-08 120 3 1 1.0 6.0 ijk 2031-09-09 21:21:21.212121212 wxyza abcde B4F3CAFDBEDD {"k21":"v21"} [121,200] {"c1":10,"c2":"i"} 2031-09-09 121 1 2 1.1 6.3 lmn 2032-10-10 22:22:22.222222222 bcdef abcde {"k22":"v22"} [122,200] {"c1":10,"c2":"l"} 2032-10-10 -PREHOOK: query: INSERT OVERWRITE TABLE parquet_types +PREHOOK: query: INSERT OVERWRITE TABLE parquet_types_n0 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, -unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging +unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_types_staging -PREHOOK: Output: default@parquet_types -POSTHOOK: query: INSERT OVERWRITE TABLE parquet_types +PREHOOK: Input: default@parquet_types_staging_n1 +PREHOOK: Output: default@parquet_types_n0 +POSTHOOK: query: INSERT OVERWRITE TABLE parquet_types_n0 SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar, -unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging +unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types_staging -POSTHOOK: Output: default@parquet_types -POSTHOOK: Lineage: parquet_types.cbinary EXPRESSION [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cbinary, type:string, comment:null), ] -POSTHOOK: Lineage: parquet_types.cchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cchar, type:char(5), comment:null), ] -POSTHOOK: Lineage: parquet_types.cdouble SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: parquet_types.cfloat SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: parquet_types.cint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: parquet_types.csmallint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: parquet_types.cstring1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: parquet_types.ctinyint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -POSTHOOK: Lineage: parquet_types.cvarchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ] -POSTHOOK: Lineage: parquet_types.d SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:d, type:date, comment:null), ] -POSTHOOK: Lineage: parquet_types.l1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:l1, type:array, 
comment:null), ] -POSTHOOK: Lineage: parquet_types.m1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:m1, type:map, comment:null), ] -POSTHOOK: Lineage: parquet_types.st1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:st1, type:struct, comment:null), ] -POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ] +POSTHOOK: Input: default@parquet_types_staging_n1 +POSTHOOK: Output: default@parquet_types_n0 +POSTHOOK: Lineage: parquet_types_n0.cbinary EXPRESSION [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:cbinary, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.cchar SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:cchar, type:char(5), comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.cdouble SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.cfloat SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.cint SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.csmallint SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.cstring1 SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.ctinyint SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.cvarchar SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.d SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:d, type:date, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.l1 SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:l1, type:array, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.m1 SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:m1, type:map, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.st1 SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:st1, type:struct, comment:null), ] +POSTHOOK: Lineage: parquet_types_n0.t SIMPLE [(parquet_types_staging_n1)parquet_types_staging_n1.FieldSchema(name:t, type:timestamp, comment:null), ] PREHOOK: query: EXPLAIN SELECT ctinyint, MAX(cint), MIN(csmallint), COUNT(cstring1), ROUND(AVG(cfloat), 5), ROUND(STDDEV_POP(cdouble),5) -FROM parquet_types +FROM parquet_types_n0 GROUP BY ctinyint ORDER BY ctinyint PREHOOK: type: QUERY @@ -168,7 +168,7 @@ POSTHOOK: query: EXPLAIN SELECT ctinyint, COUNT(cstring1), ROUND(AVG(cfloat), 5), ROUND(STDDEV_POP(cdouble),5) -FROM parquet_types +FROM parquet_types_n0 GROUP BY ctinyint ORDER BY ctinyint POSTHOOK: type: QUERY @@ -182,7 +182,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: parquet_types + alias: parquet_types_n0 Statistics: Num rows: 22 Data size: 308 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), csmallint (type: smallint), cstring1 (type: string), cfloat (type: float), cdouble 
(type: double), (cdouble * cdouble) (type: double) @@ -254,11 +254,11 @@ PREHOOK: query: SELECT ctinyint, COUNT(cstring1), ROUND(AVG(cfloat), 5), ROUND(STDDEV_POP(cdouble),5) -FROM parquet_types +FROM parquet_types_n0 GROUP BY ctinyint ORDER BY ctinyint PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_types +PREHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT ctinyint, MAX(cint), @@ -266,18 +266,18 @@ POSTHOOK: query: SELECT ctinyint, COUNT(cstring1), ROUND(AVG(cfloat), 5), ROUND(STDDEV_POP(cdouble),5) -FROM parquet_types +FROM parquet_types_n0 GROUP BY ctinyint ORDER BY ctinyint POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types +POSTHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### 1 121 1 8 1.175 2.06216 2 119 1 7 1.21429 1.8 3 120 1 7 1.17143 1.8 -PREHOOK: query: EXPLAIN SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat +PREHOOK: query: EXPLAIN SELECT cfloat, count(*) FROM parquet_types_n0 GROUP BY cfloat ORDER BY cfloat PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat +POSTHOOK: query: EXPLAIN SELECT cfloat, count(*) FROM parquet_types_n0 GROUP BY cfloat ORDER BY cfloat POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -289,7 +289,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: parquet_types + alias: parquet_types_n0 Statistics: Num rows: 22 Data size: 308 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cfloat (type: float) @@ -351,22 +351,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat +PREHOOK: query: SELECT cfloat, count(*) FROM parquet_types_n0 GROUP BY cfloat ORDER BY cfloat PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_types +PREHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat +POSTHOOK: query: SELECT cfloat, count(*) FROM parquet_types_n0 GROUP BY cfloat ORDER BY cfloat POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types +POSTHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### 1.0 5 1.1 5 1.2 4 1.3 4 1.4 4 -PREHOOK: query: EXPLAIN SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar +PREHOOK: query: EXPLAIN SELECT cchar, count(*) FROM parquet_types_n0 GROUP BY cchar ORDER BY cchar PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar +POSTHOOK: query: EXPLAIN SELECT cchar, count(*) FROM parquet_types_n0 GROUP BY cchar ORDER BY cchar POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -378,7 +378,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: parquet_types + alias: parquet_types_n0 Statistics: Num rows: 22 Data size: 308 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cchar (type: char(5)) @@ -440,13 +440,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar +PREHOOK: query: SELECT cchar, count(*) FROM parquet_types_n0 GROUP BY cchar ORDER BY cchar PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_types +PREHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar 
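In the plan fragments above, the map-side Select Operator projects an extra (cdouble * cdouble) column alongside cdouble. That is how the plan evaluates STDDEV_POP from partial sums in one pass: the aggregation buffers count, sum(x) and sum(x*x), and the final step applies the population formula stddev_pop(x) = sqrt(sum(x*x)/n - (sum(x)/n)^2). A hand-rolled equivalent of the test's aggregate, kept as a hedged sketch:

-- illustrative only: recompute ROUND(STDDEV_POP(cdouble),5) from partial sums
SELECT ctinyint,
       ROUND(SQRT(SUM(cdouble * cdouble) / COUNT(cdouble)
                  - POW(SUM(cdouble) / COUNT(cdouble), 2)), 5) AS stddev_pop_by_hand
FROM parquet_types_n0
GROUP BY ctinyint
ORDER BY ctinyint;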
+POSTHOOK: query: SELECT cchar, count(*) FROM parquet_types_n0 GROUP BY cchar ORDER BY cchar POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types +POSTHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### a 1 ab 1 @@ -466,9 +466,9 @@ tuvwx 1 uvwzy 1 vwxyz 1 wxyza 1 -PREHOOK: query: EXPLAIN SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar +PREHOOK: query: EXPLAIN SELECT cvarchar, count(*) FROM parquet_types_n0 GROUP BY cvarchar ORDER BY cvarchar PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar +POSTHOOK: query: EXPLAIN SELECT cvarchar, count(*) FROM parquet_types_n0 GROUP BY cvarchar ORDER BY cvarchar POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -480,7 +480,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: parquet_types + alias: parquet_types_n0 Statistics: Num rows: 22 Data size: 308 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cvarchar (type: varchar(10)) @@ -542,13 +542,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar +PREHOOK: query: SELECT cvarchar, count(*) FROM parquet_types_n0 GROUP BY cvarchar ORDER BY cvarchar PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_types +PREHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar +POSTHOOK: query: SELECT cvarchar, count(*) FROM parquet_types_n0 GROUP BY cvarchar ORDER BY cvarchar POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types +POSTHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### B4F3CAFDBE 1 a 1 @@ -564,9 +564,9 @@ abcdef 1 abcdefg 1 abcdefgh 1 b 1 -PREHOOK: query: EXPLAIN SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1 +PREHOOK: query: EXPLAIN SELECT cstring1, count(*) FROM parquet_types_n0 GROUP BY cstring1 ORDER BY cstring1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1 +POSTHOOK: query: EXPLAIN SELECT cstring1, count(*) FROM parquet_types_n0 GROUP BY cstring1 ORDER BY cstring1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -578,7 +578,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: parquet_types + alias: parquet_types_n0 Statistics: Num rows: 22 Data size: 308 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cstring1 (type: string) @@ -640,13 +640,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1 +PREHOOK: query: SELECT cstring1, count(*) FROM parquet_types_n0 GROUP BY cstring1 ORDER BY cstring1 PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_types +PREHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1 +POSTHOOK: query: SELECT cstring1, count(*) FROM parquet_types_n0 GROUP BY cstring1 ORDER BY cstring1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types +POSTHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### abc 1 bcd 1 @@ -669,9 +669,9 @@ vwx 1 wxy 1 yza 1 zab 1 -PREHOOK: query: EXPLAIN SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t +PREHOOK: 
query: EXPLAIN SELECT t, count(*) FROM parquet_types_n0 GROUP BY t ORDER BY t PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t +POSTHOOK: query: EXPLAIN SELECT t, count(*) FROM parquet_types_n0 GROUP BY t ORDER BY t POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -683,7 +683,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: parquet_types + alias: parquet_types_n0 Statistics: Num rows: 22 Data size: 308 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: timestamp) @@ -745,13 +745,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t +PREHOOK: query: SELECT t, count(*) FROM parquet_types_n0 GROUP BY t ORDER BY t PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_types +PREHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t +POSTHOOK: query: SELECT t, count(*) FROM parquet_types_n0 GROUP BY t ORDER BY t POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types +POSTHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### 2011-01-01 01:01:01.111111111 1 2012-02-02 02:02:02.222222222 1 @@ -775,9 +775,9 @@ POSTHOOK: Input: default@parquet_types 2030-08-08 20:20:20.202020202 1 2031-09-09 21:21:21.212121212 1 2032-10-10 22:22:22.222222222 1 -PREHOOK: query: EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary +PREHOOK: query: EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types_n0 GROUP BY cbinary PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary +POSTHOOK: query: EXPLAIN SELECT hex(cbinary), count(*) FROM parquet_types_n0 GROUP BY cbinary POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -788,7 +788,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: parquet_types + alias: parquet_types_n0 Statistics: Num rows: 22 Data size: 308 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cbinary (type: binary) @@ -832,13 +832,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary +PREHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types_n0 GROUP BY cbinary PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_types +PREHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary +POSTHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types_n0 GROUP BY cbinary POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_types +POSTHOOK: Input: default@parquet_types_n0 #### A masked pattern was here #### 1 68656C6C6F 1 diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_part_project.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_part_project.q.out index 3d9307bcd4..0786685a3b 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_part_project.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_part_project.q.out @@ -1,54 +1,54 @@ -PREHOOK: query: CREATE TABLE alltypesparquet_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) 
STORED AS PARQUET +PREHOOK: query: CREATE TABLE alltypesparquet_part_n0(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@alltypesparquet_part -POSTHOOK: query: CREATE TABLE alltypesparquet_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS PARQUET +PREHOOK: Output: default@alltypesparquet_part_n0 +POSTHOOK: query: CREATE TABLE alltypesparquet_part_n0(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS PARQUET POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@alltypesparquet_part -PREHOOK: query: insert overwrite table alltypesparquet_part partition (ds='2011') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100 +POSTHOOK: Output: default@alltypesparquet_part_n0 +PREHOOK: query: insert overwrite table alltypesparquet_part_n0 partition (ds='2011') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesparquet -PREHOOK: Output: default@alltypesparquet_part@ds=2011 -POSTHOOK: query: insert overwrite table alltypesparquet_part partition (ds='2011') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100 +PREHOOK: Output: default@alltypesparquet_part_n0@ds=2011 +POSTHOOK: query: insert overwrite table alltypesparquet_part_n0 partition (ds='2011') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesparquet -POSTHOOK: Output: default@alltypesparquet_part@ds=2011 -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).cbigint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).cboolean1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).cboolean2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).cdouble SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).cfloat SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).cint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).csmallint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).cstring1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: 
alltypesparquet_part PARTITION(ds=2011).cstring2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).ctimestamp1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).ctimestamp2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2011).ctinyint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: insert overwrite table alltypesparquet_part partition (ds='2012') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100 +POSTHOOK: Output: default@alltypesparquet_part_n0@ds=2011 +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).cbigint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).cboolean1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cboolean1, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).cboolean2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cboolean2, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).cdouble SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).cfloat SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).cint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).csmallint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).cstring1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).cstring2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cstring2, type:string, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).ctimestamp1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).ctimestamp2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2011).ctinyint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: insert overwrite table alltypesparquet_part_n0 partition (ds='2012') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesparquet -PREHOOK: Output: default@alltypesparquet_part@ds=2012 -POSTHOOK: query: insert overwrite table alltypesparquet_part partition (ds='2012') select * from alltypesparquet order by ctinyint, cint, cbigint limit 100 +PREHOOK: Output: default@alltypesparquet_part_n0@ds=2012 +POSTHOOK: query: insert overwrite table alltypesparquet_part_n0 partition (ds='2012') select * from alltypesparquet order by 
ctinyint, cint, cbigint limit 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesparquet -POSTHOOK: Output: default@alltypesparquet_part@ds=2012 -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).cbigint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).cboolean1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).cboolean2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).cdouble SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).cfloat SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).cint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).csmallint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).cstring1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).cstring2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).ctimestamp1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).ctimestamp2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: alltypesparquet_part PARTITION(ds=2012).ctinyint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: explain vectorization select (cdouble+2) c1 from alltypesparquet_part order by c1 limit 10 +POSTHOOK: Output: default@alltypesparquet_part_n0@ds=2012 +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).cbigint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).cboolean1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cboolean1, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).cboolean2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cboolean2, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).cdouble SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).cfloat SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).cint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).csmallint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: 
alltypesparquet_part_n0 PARTITION(ds=2012).cstring1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).cstring2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:cstring2, type:string, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).ctimestamp1 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).ctimestamp2 SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypesparquet_part_n0 PARTITION(ds=2012).ctinyint SIMPLE [(alltypesparquet)alltypesparquet.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: explain vectorization select (cdouble+2) c1 from alltypesparquet_part_n0 order by c1 limit 10 PREHOOK: type: QUERY -POSTHOOK: query: explain vectorization select (cdouble+2) c1 from alltypesparquet_part order by c1 limit 10 +POSTHOOK: query: explain vectorization select (cdouble+2) c1 from alltypesparquet_part_n0 order by c1 limit 10 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -63,7 +63,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: alltypesparquet_part + alias: alltypesparquet_part_n0 Statistics: Num rows: 200 Data size: 2400 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (cdouble + 2.0D) (type: double) @@ -110,17 +110,17 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select (cdouble+2) c1 from alltypesparquet_part order by c1 limit 10 +PREHOOK: query: select (cdouble+2) c1 from alltypesparquet_part_n0 order by c1 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@alltypesparquet_part -PREHOOK: Input: default@alltypesparquet_part@ds=2011 -PREHOOK: Input: default@alltypesparquet_part@ds=2012 +PREHOOK: Input: default@alltypesparquet_part_n0 +PREHOOK: Input: default@alltypesparquet_part_n0@ds=2011 +PREHOOK: Input: default@alltypesparquet_part_n0@ds=2012 #### A masked pattern was here #### -POSTHOOK: query: select (cdouble+2) c1 from alltypesparquet_part order by c1 limit 10 +POSTHOOK: query: select (cdouble+2) c1 from alltypesparquet_part_n0 order by c1 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alltypesparquet_part -POSTHOOK: Input: default@alltypesparquet_part@ds=2011 -POSTHOOK: Input: default@alltypesparquet_part@ds=2012 +POSTHOOK: Input: default@alltypesparquet_part_n0 +POSTHOOK: Input: default@alltypesparquet_part_n0@ds=2011 +POSTHOOK: Input: default@alltypesparquet_part_n0@ds=2012 #### A masked pattern was here #### NULL NULL diff --git a/ql/src/test/results/clientpositive/partInit.q.out b/ql/src/test/results/clientpositive/partInit.q.out index ab3830b564..46b3658dc9 100644 --- a/ql/src/test/results/clientpositive/partInit.q.out +++ b/ql/src/test/results/clientpositive/partInit.q.out @@ -1,55 +1,55 @@ -PREHOOK: query: CREATE TABLE empty (c INT) PARTITIONED BY (p INT) +PREHOOK: query: CREATE TABLE empty_n1 (c INT) PARTITIONED BY (p INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@empty -POSTHOOK: query: CREATE TABLE empty (c INT) PARTITIONED BY (p INT) +PREHOOK: Output: default@empty_n1 +POSTHOOK: query: CREATE TABLE empty_n1 (c INT) PARTITIONED BY (p INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@empty -PREHOOK: query: SELECT MAX(c) FROM empty +POSTHOOK: Output: 
default@empty_n1 +PREHOOK: query: SELECT MAX(c) FROM empty_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@empty +PREHOOK: Input: default@empty_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT MAX(c) FROM empty +POSTHOOK: query: SELECT MAX(c) FROM empty_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@empty +POSTHOOK: Input: default@empty_n1 #### A masked pattern was here #### NULL -PREHOOK: query: SELECT MAX(p) FROM empty +PREHOOK: query: SELECT MAX(p) FROM empty_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@empty +PREHOOK: Input: default@empty_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT MAX(p) FROM empty +POSTHOOK: query: SELECT MAX(p) FROM empty_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@empty +POSTHOOK: Input: default@empty_n1 #### A masked pattern was here #### NULL -PREHOOK: query: ALTER TABLE empty ADD PARTITION (p=1) +PREHOOK: query: ALTER TABLE empty_n1 ADD PARTITION (p=1) PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@empty -POSTHOOK: query: ALTER TABLE empty ADD PARTITION (p=1) +PREHOOK: Output: default@empty_n1 +POSTHOOK: query: ALTER TABLE empty_n1 ADD PARTITION (p=1) POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@empty -POSTHOOK: Output: default@empty@p=1 -PREHOOK: query: SELECT MAX(p) FROM empty +POSTHOOK: Output: default@empty_n1 +POSTHOOK: Output: default@empty_n1@p=1 +PREHOOK: query: SELECT MAX(p) FROM empty_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@empty -PREHOOK: Input: default@empty@p=1 +PREHOOK: Input: default@empty_n1 +PREHOOK: Input: default@empty_n1@p=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT MAX(p) FROM empty +POSTHOOK: query: SELECT MAX(p) FROM empty_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@empty -POSTHOOK: Input: default@empty@p=1 +POSTHOOK: Input: default@empty_n1 +POSTHOOK: Input: default@empty_n1@p=1 #### A masked pattern was here #### NULL -PREHOOK: query: SELECT MAX(p) FROM empty +PREHOOK: query: SELECT MAX(p) FROM empty_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@empty -PREHOOK: Input: default@empty@p=1 +PREHOOK: Input: default@empty_n1 +PREHOOK: Input: default@empty_n1@p=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT MAX(p) FROM empty +POSTHOOK: query: SELECT MAX(p) FROM empty_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@empty -POSTHOOK: Input: default@empty@p=1 +POSTHOOK: Input: default@empty_n1 +POSTHOOK: Input: default@empty_n1@p=1 #### A masked pattern was here #### NULL diff --git a/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out b/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out index ff55f279c4..1bff5a6f28 100644 --- a/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out +++ b/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') +PREHOOK: query: create table mytbl_n0 (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@mytbl -POSTHOOK: query: create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') +PREHOOK: Output: default@mytbl_n0 +POSTHOOK: query: create table mytbl_n0 (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
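The partInit.q.out hunk above pins down a subtle aggregation rule: SELECT MAX(p) on the empty partitioned table returns NULL both before and after ALTER TABLE ... ADD PARTITION (p=1), because the added partition holds no rows and an aggregate over zero rows is NULL. Answering MAX(p) from partition metadata alone would wrongly report 1, which is exactly what this golden file guards against. A minimal sketch of the scenario (table name is illustrative):

-- hypothetical recreation of the partInit check
CREATE TABLE empty_demo (c INT) PARTITIONED BY (p INT);
SELECT MAX(p) FROM empty_demo;               -- NULL: no partitions exist
ALTER TABLE empty_demo ADD PARTITION (p=1);
SELECT MAX(p) FROM empty_demo;               -- still NULL: the partition has no rows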
-POSTHOOK: Output: default@mytbl -PREHOOK: query: alter table mytbl add partition (c2 = 'v1') +POSTHOOK: Output: default@mytbl_n0 +PREHOOK: query: alter table mytbl_n0 add partition (c2 = 'v1') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@mytbl -POSTHOOK: query: alter table mytbl add partition (c2 = 'v1') +PREHOOK: Output: default@mytbl_n0 +POSTHOOK: query: alter table mytbl_n0 add partition (c2 = 'v1') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@mytbl -POSTHOOK: Output: default@mytbl@c2=v1 -PREHOOK: query: describe formatted mytbl partition (c2='v1') +POSTHOOK: Output: default@mytbl_n0 +POSTHOOK: Output: default@mytbl_n0@c2=v1 +PREHOOK: query: describe formatted mytbl_n0 partition (c2='v1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@mytbl -POSTHOOK: query: describe formatted mytbl partition (c2='v1') +PREHOOK: Input: default@mytbl_n0 +POSTHOOK: query: describe formatted mytbl_n0 partition (c2='v1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@mytbl +POSTHOOK: Input: default@mytbl_n0 # col_name data_type comment c1 tinyint @@ -29,7 +29,7 @@ c2 string # Detailed Partition Information Partition Value: [v1] Database: default -Table: mytbl +Table: mytbl_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\"}} diff --git a/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out b/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out index fae954acf0..20145f61bb 100644 --- a/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out +++ b/ql/src/test/results/clientpositive/part_inherit_tbl_props_empty.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') +PREHOOK: query: create table mytbl_n2 (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@mytbl -POSTHOOK: query: create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') +PREHOOK: Output: default@mytbl_n2 +POSTHOOK: query: create table mytbl_n2 (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@mytbl -PREHOOK: query: alter table mytbl add partition (c2 = 'v1') +POSTHOOK: Output: default@mytbl_n2 +PREHOOK: query: alter table mytbl_n2 add partition (c2 = 'v1') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@mytbl -POSTHOOK: query: alter table mytbl add partition (c2 = 'v1') +PREHOOK: Output: default@mytbl_n2 +POSTHOOK: query: alter table mytbl_n2 add partition (c2 = 'v1') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@mytbl -POSTHOOK: Output: default@mytbl@c2=v1 -PREHOOK: query: describe formatted mytbl partition (c2='v1') +POSTHOOK: Output: default@mytbl_n2 +POSTHOOK: Output: default@mytbl_n2@c2=v1 +PREHOOK: query: describe formatted mytbl_n2 partition (c2='v1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@mytbl -POSTHOOK: query: describe formatted mytbl partition (c2='v1') +PREHOOK: Input: default@mytbl_n2 +POSTHOOK: query: describe formatted mytbl_n2 partition (c2='v1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@mytbl +POSTHOOK: Input: default@mytbl_n2 # col_name data_type comment c1 tinyint @@ -29,7 +29,7 @@ c2 
string # Detailed Partition Information Partition Value: [v1] Database: default -Table: mytbl +Table: mytbl_n2 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\"}} diff --git a/ql/src/test/results/clientpositive/partcols1.q.out b/ql/src/test/results/clientpositive/partcols1.q.out index e92cfef35d..1dc533e789 100644 --- a/ql/src/test/results/clientpositive/partcols1.q.out +++ b/ql/src/test/results/clientpositive/partcols1.q.out @@ -1,24 +1,24 @@ -PREHOOK: query: create table test1(col1 string) partitioned by (partitionId int) +PREHOOK: query: create table test1_n13(col1 string) partitioned by (partitionId int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test1 -POSTHOOK: query: create table test1(col1 string) partitioned by (partitionId int) +PREHOOK: Output: default@test1_n13 +POSTHOOK: query: create table test1_n13(col1 string) partitioned by (partitionId int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test1 -PREHOOK: query: insert overwrite table test1 partition (partitionId=1) +POSTHOOK: Output: default@test1_n13 +PREHOOK: query: insert overwrite table test1_n13 partition (partitionId=1) select key from src tablesample (10 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test1@partitionid=1 -POSTHOOK: query: insert overwrite table test1 partition (partitionId=1) +PREHOOK: Output: default@test1_n13@partitionid=1 +POSTHOOK: query: insert overwrite table test1_n13 partition (partitionId=1) select key from src tablesample (10 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test1@partitionid=1 -POSTHOOK: Lineage: test1 PARTITION(partitionid=1).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Output: default@test1_n13@partitionid=1 +POSTHOOK: Lineage: test1_n13 PARTITION(partitionid=1).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] PREHOOK: query: FROM ( - FROM test1 + FROM test1_n13 SELECT partitionId, 111 as col2, 222 as col3, 333 as col4 WHERE partitionId = 1 DISTRIBUTE BY partitionId @@ -31,11 +31,11 @@ SELECT TRANSFORM( USING 'cat' as (a,b,c,d) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test1@partitionid=1 +PREHOOK: Input: default@test1_n13 +PREHOOK: Input: default@test1_n13@partitionid=1 #### A masked pattern was here #### POSTHOOK: query: FROM ( - FROM test1 + FROM test1_n13 SELECT partitionId, 111 as col2, 222 as col3, 333 as col4 WHERE partitionId = 1 DISTRIBUTE BY partitionId @@ -48,8 +48,8 @@ SELECT TRANSFORM( USING 'cat' as (a,b,c,d) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test1@partitionid=1 +POSTHOOK: Input: default@test1_n13 +POSTHOOK: Input: default@test1_n13@partitionid=1 #### A masked pattern was here #### 1 111 222 333 1 111 222 333 diff --git a/ql/src/test/results/clientpositive/partial_column_stats.q.out b/ql/src/test/results/clientpositive/partial_column_stats.q.out index 1e640438b4..f5264668e5 100644 --- a/ql/src/test/results/clientpositive/partial_column_stats.q.out +++ b/ql/src/test/results/clientpositive/partial_column_stats.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: create table t1 (key int, data struct, value string) +PREHOOK: query: create table t1_n17 (key int, data struct, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 
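The partcols1.q.out hunk above exercises Hive's row-streaming syntax: an inner FROM block projects the partition column plus constant columns, restricts to partitionId = 1, redistributes with DISTRIBUTE BY, and the outer SELECT TRANSFORM pipes the tab-separated rows through the external command cat, which echoes them back as columns a through d. A minimal sketch of the same shape, with illustrative table and alias names:

FROM (
  FROM demo_part
  SELECT partitionId, 111 AS col2, 222 AS col3, 333 AS col4
  WHERE partitionId = 1
  DISTRIBUTE BY partitionId
) t
SELECT TRANSFORM(t.partitionId, t.col2, t.col3, t.col4)
USING 'cat' AS (a, b, c, d);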
-POSTHOOK: query: create table t1 (key int, data struct, value string) +PREHOOK: Output: default@t1_n17 +POSTHOOK: query: create table t1_n17 (key int, data struct, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: explain analyze table t1 compute statistics for columns +POSTHOOK: Output: default@t1_n17 +PREHOOK: query: explain analyze table t1_n17 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -POSTHOOK: query: explain analyze table t1 compute statistics for columns +POSTHOOK: query: explain analyze table t1_n17 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -19,7 +19,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n17 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: int), value (type: string) @@ -54,24 +54,24 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.t1 + Table: default.t1_n17 -PREHOOK: query: analyze table t1 compute statistics for columns +PREHOOK: query: analyze table t1_n17 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 +PREHOOK: Input: default@t1_n17 +PREHOOK: Output: default@t1_n17 #### A masked pattern was here #### -POSTHOOK: query: analyze table t1 compute statistics for columns +POSTHOOK: query: analyze table t1_n17 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n17 +POSTHOOK: Output: default@t1_n17 #### A masked pattern was here #### -PREHOOK: query: desc formatted t1 value +PREHOOK: query: desc formatted t1_n17 value PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: desc formatted t1 value +PREHOOK: Input: default@t1_n17 +POSTHOOK: query: desc formatted t1_n17 value POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n17 col_name value data_type string min diff --git a/ql/src/test/results/clientpositive/partition_condition_remover.q.out b/ql/src/test/results/clientpositive/partition_condition_remover.q.out index 2f8f9983c1..4ab2f07d70 100644 --- a/ql/src/test/results/clientpositive/partition_condition_remover.q.out +++ b/ql/src/test/results/clientpositive/partition_condition_remover.q.out @@ -1,36 +1,36 @@ -PREHOOK: query: drop table foo +PREHOOK: query: drop table foo_n5 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table foo +POSTHOOK: query: drop table foo_n5 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table foo (i int) partitioned by (s string) +PREHOOK: query: create table foo_n5 (i int) partitioned by (s string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@foo -POSTHOOK: query: create table foo (i int) partitioned by (s string) +PREHOOK: Output: default@foo_n5 +POSTHOOK: query: create table foo_n5 (i int) partitioned by (s string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@foo -PREHOOK: query: insert overwrite table foo partition(s='foo') select cint from alltypesorc limit 10 +POSTHOOK: Output: default@foo_n5 +PREHOOK: query: insert overwrite table foo_n5 partition(s='foo_n5') select cint from alltypesorc limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc -PREHOOK: Output: default@foo@s=foo -POSTHOOK: query: insert overwrite table foo 
partition(s='foo') select cint from alltypesorc limit 10 +PREHOOK: Output: default@foo_n5@s=foo_n5 +POSTHOOK: query: insert overwrite table foo_n5 partition(s='foo_n5') select cint from alltypesorc limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: default@foo@s=foo -POSTHOOK: Lineage: foo PARTITION(s=foo).i SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] -PREHOOK: query: insert overwrite table foo partition(s='bar') select cint from alltypesorc limit 10 +POSTHOOK: Output: default@foo_n5@s=foo_n5 +POSTHOOK: Lineage: foo_n5 PARTITION(s=foo_n5).i SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +PREHOOK: query: insert overwrite table foo_n5 partition(s='bar') select cint from alltypesorc limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc -PREHOOK: Output: default@foo@s=bar -POSTHOOK: query: insert overwrite table foo partition(s='bar') select cint from alltypesorc limit 10 +PREHOOK: Output: default@foo_n5@s=bar +POSTHOOK: query: insert overwrite table foo_n5 partition(s='bar') select cint from alltypesorc limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: default@foo@s=bar -POSTHOOK: Lineage: foo PARTITION(s=bar).i SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] -PREHOOK: query: explain select * from foo where s not in ('bar') +POSTHOOK: Output: default@foo_n5@s=bar +POSTHOOK: Lineage: foo_n5 PARTITION(s=bar).i SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +PREHOOK: query: explain select * from foo_n5 where s not in ('bar') PREHOOK: type: QUERY -POSTHOOK: query: explain select * from foo where s not in ('bar') +POSTHOOK: query: explain select * from foo_n5 where s not in ('bar') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -41,7 +41,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: foo + alias: foo_n5 Statistics: Num rows: 10 Data size: 90 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: i (type: int), s (type: string) @@ -49,31 +49,31 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 90 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select * from foo where s not in ('bar') +PREHOOK: query: select * from foo_n5 where s not in ('bar') PREHOOK: type: QUERY -PREHOOK: Input: default@foo -PREHOOK: Input: default@foo@s=foo +PREHOOK: Input: default@foo_n5 +PREHOOK: Input: default@foo_n5@s=foo_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from foo where s not in ('bar') +POSTHOOK: query: select * from foo_n5 where s not in ('bar') POSTHOOK: type: QUERY -POSTHOOK: Input: default@foo -POSTHOOK: Input: default@foo@s=foo +POSTHOOK: Input: default@foo_n5 +POSTHOOK: Input: default@foo_n5@s=foo_n5 #### A masked pattern was here #### -528534767 foo -528534767 foo -528534767 foo -528534767 foo -528534767 foo -528534767 foo -528534767 foo -528534767 foo -528534767 foo -528534767 foo -PREHOOK: query: drop table foo +528534767 foo_n5 +528534767 foo_n5 +528534767 foo_n5 +528534767 foo_n5 +528534767 foo_n5 +528534767 foo_n5 +528534767 foo_n5 +528534767 foo_n5 +528534767 foo_n5 +528534767 foo_n5 +PREHOOK: query: drop table foo_n5 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@foo -PREHOOK: Output: default@foo -POSTHOOK: query: drop table foo +PREHOOK: Input: default@foo_n5 +PREHOOK: Output: default@foo_n5 +POSTHOOK: query: drop table foo_n5 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: 
default@foo -POSTHOOK: Output: default@foo +POSTHOOK: Input: default@foo_n5 +POSTHOOK: Output: default@foo_n5 diff --git a/ql/src/test/results/clientpositive/partition_decode_name.q.out b/ql/src/test/results/clientpositive/partition_decode_name.q.out index ea60750075..224561aaf9 100644 --- a/ql/src/test/results/clientpositive/partition_decode_name.q.out +++ b/ql/src/test/results/clientpositive/partition_decode_name.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table sc as select * +PREHOOK: query: create table sc_n0 as select * from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows) union all select '2011-01-11', '2011-01-11+15:18:26' from src tablesample (1 rows) @@ -7,8 +7,8 @@ from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows) PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@sc -POSTHOOK: query: create table sc as select * +PREHOOK: Output: default@sc_n0 +POSTHOOK: query: create table sc_n0 as select * from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows) union all select '2011-01-11', '2011-01-11+15:18:26' from src tablesample (1 rows) @@ -17,76 +17,76 @@ from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows) POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@sc -POSTHOOK: Lineage: sc._c0 EXPRESSION [] -POSTHOOK: Lineage: sc._c1 EXPRESSION [] -PREHOOK: query: create table sc_part (key string) partitioned by (ts string) stored as rcfile +POSTHOOK: Output: default@sc_n0 +POSTHOOK: Lineage: sc_n0._c0 EXPRESSION [] +POSTHOOK: Lineage: sc_n0._c1 EXPRESSION [] +PREHOOK: query: create table sc_part_n0 (key string) partitioned by (ts string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@sc_part -POSTHOOK: query: create table sc_part (key string) partitioned by (ts string) stored as rcfile +PREHOOK: Output: default@sc_part_n0 +POSTHOOK: query: create table sc_part_n0 (key string) partitioned by (ts string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@sc_part -PREHOOK: query: insert overwrite table sc_part partition(ts) select * from sc +POSTHOOK: Output: default@sc_part_n0 +PREHOOK: query: insert overwrite table sc_part_n0 partition(ts) select * from sc_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@sc -PREHOOK: Output: default@sc_part -POSTHOOK: query: insert overwrite table sc_part partition(ts) select * from sc +PREHOOK: Input: default@sc_n0 +PREHOOK: Output: default@sc_part_n0 +POSTHOOK: query: insert overwrite table sc_part_n0 partition(ts) select * from sc_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@sc -POSTHOOK: Output: default@sc_part@ts=2011-01-11+14%3A18%3A26 -POSTHOOK: Output: default@sc_part@ts=2011-01-11+15%3A18%3A26 -POSTHOOK: Output: default@sc_part@ts=2011-01-11+16%3A18%3A26 -POSTHOOK: Lineage: sc_part PARTITION(ts=2011-01-11+14:18:26).key SIMPLE [(sc)sc.FieldSchema(name:_c0, type:string, comment:null), ] -POSTHOOK: Lineage: sc_part PARTITION(ts=2011-01-11+15:18:26).key SIMPLE [(sc)sc.FieldSchema(name:_c0, type:string, comment:null), ] -POSTHOOK: Lineage: sc_part PARTITION(ts=2011-01-11+16:18:26).key SIMPLE [(sc)sc.FieldSchema(name:_c0, type:string, comment:null), ] -PREHOOK: query: show partitions sc_part +POSTHOOK: Input: default@sc_n0 +POSTHOOK: Output: default@sc_part_n0@ts=2011-01-11+14%3A18%3A26 
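The partition_condition_remover.q.out hunk above shows the optimizer pass this test is named for: since s is a partition column, the predicate s NOT IN ('bar') is fully resolved during partition pruning, so the printed plan has a TableScan feeding a Select Operator with no residual Filter Operator, and only the surviving partition appears among the PREHOOK/POSTHOOK inputs. A minimal sketch of the pattern (names are illustrative):

-- hypothetical: with partitions s='x' and s='bar' loaded, pruning alone
-- answers the predicate, and EXPLAIN shows no Filter Operator
CREATE TABLE pcr_demo (i INT) PARTITIONED BY (s STRING);
EXPLAIN SELECT * FROM pcr_demo WHERE s NOT IN ('bar');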
+POSTHOOK: Output: default@sc_part_n0@ts=2011-01-11+15%3A18%3A26 +POSTHOOK: Output: default@sc_part_n0@ts=2011-01-11+16%3A18%3A26 +POSTHOOK: Lineage: sc_part_n0 PARTITION(ts=2011-01-11+14:18:26).key SIMPLE [(sc_n0)sc_n0.FieldSchema(name:_c0, type:string, comment:null), ] +POSTHOOK: Lineage: sc_part_n0 PARTITION(ts=2011-01-11+15:18:26).key SIMPLE [(sc_n0)sc_n0.FieldSchema(name:_c0, type:string, comment:null), ] +POSTHOOK: Lineage: sc_part_n0 PARTITION(ts=2011-01-11+16:18:26).key SIMPLE [(sc_n0)sc_n0.FieldSchema(name:_c0, type:string, comment:null), ] +PREHOOK: query: show partitions sc_part_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@sc_part -POSTHOOK: query: show partitions sc_part +PREHOOK: Input: default@sc_part_n0 +POSTHOOK: query: show partitions sc_part_n0 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@sc_part +POSTHOOK: Input: default@sc_part_n0 ts=2011-01-11+14%3A18%3A26 ts=2011-01-11+15%3A18%3A26 ts=2011-01-11+16%3A18%3A26 -PREHOOK: query: select count(*) from sc_part where ts is not null +PREHOOK: query: select count(*) from sc_part_n0 where ts is not null PREHOOK: type: QUERY -PREHOOK: Input: default@sc_part +PREHOOK: Input: default@sc_part_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from sc_part where ts is not null +POSTHOOK: query: select count(*) from sc_part_n0 where ts is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@sc_part +POSTHOOK: Input: default@sc_part_n0 #### A masked pattern was here #### 3 -PREHOOK: query: insert overwrite table sc_part partition(ts) select * from sc +PREHOOK: query: insert overwrite table sc_part_n0 partition(ts) select * from sc_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@sc -PREHOOK: Output: default@sc_part -POSTHOOK: query: insert overwrite table sc_part partition(ts) select * from sc +PREHOOK: Input: default@sc_n0 +PREHOOK: Output: default@sc_part_n0 +POSTHOOK: query: insert overwrite table sc_part_n0 partition(ts) select * from sc_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@sc -POSTHOOK: Output: default@sc_part@ts=2011-01-11+14%3A18%3A26 -POSTHOOK: Output: default@sc_part@ts=2011-01-11+15%3A18%3A26 -POSTHOOK: Output: default@sc_part@ts=2011-01-11+16%3A18%3A26 -POSTHOOK: Lineage: sc_part PARTITION(ts=2011-01-11+14:18:26).key SIMPLE [(sc)sc.FieldSchema(name:_c0, type:string, comment:null), ] -POSTHOOK: Lineage: sc_part PARTITION(ts=2011-01-11+15:18:26).key SIMPLE [(sc)sc.FieldSchema(name:_c0, type:string, comment:null), ] -POSTHOOK: Lineage: sc_part PARTITION(ts=2011-01-11+16:18:26).key SIMPLE [(sc)sc.FieldSchema(name:_c0, type:string, comment:null), ] -PREHOOK: query: show partitions sc_part +POSTHOOK: Input: default@sc_n0 +POSTHOOK: Output: default@sc_part_n0@ts=2011-01-11+14%3A18%3A26 +POSTHOOK: Output: default@sc_part_n0@ts=2011-01-11+15%3A18%3A26 +POSTHOOK: Output: default@sc_part_n0@ts=2011-01-11+16%3A18%3A26 +POSTHOOK: Lineage: sc_part_n0 PARTITION(ts=2011-01-11+14:18:26).key SIMPLE [(sc_n0)sc_n0.FieldSchema(name:_c0, type:string, comment:null), ] +POSTHOOK: Lineage: sc_part_n0 PARTITION(ts=2011-01-11+15:18:26).key SIMPLE [(sc_n0)sc_n0.FieldSchema(name:_c0, type:string, comment:null), ] +POSTHOOK: Lineage: sc_part_n0 PARTITION(ts=2011-01-11+16:18:26).key SIMPLE [(sc_n0)sc_n0.FieldSchema(name:_c0, type:string, comment:null), ] +PREHOOK: query: show partitions sc_part_n0 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@sc_part -POSTHOOK: query: show partitions sc_part +PREHOOK: Input: default@sc_part_n0 +POSTHOOK: query: show partitions sc_part_n0 POSTHOOK: 
type: SHOWPARTITIONS -POSTHOOK: Input: default@sc_part +POSTHOOK: Input: default@sc_part_n0 ts=2011-01-11+14:18:26 ts=2011-01-11+15:18:26 ts=2011-01-11+16:18:26 -PREHOOK: query: select count(*) from sc_part where ts is not null +PREHOOK: query: select count(*) from sc_part_n0 where ts is not null PREHOOK: type: QUERY -PREHOOK: Input: default@sc_part +PREHOOK: Input: default@sc_part_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from sc_part where ts is not null +POSTHOOK: query: select count(*) from sc_part_n0 where ts is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@sc_part +POSTHOOK: Input: default@sc_part_n0 #### A masked pattern was here #### 3 diff --git a/ql/src/test/results/clientpositive/partition_type_check.q.out b/ql/src/test/results/clientpositive/partition_type_check.q.out index c28fb0c1b5..0e3e2ee1ab 100644 --- a/ql/src/test/results/clientpositive/partition_type_check.q.out +++ b/ql/src/test/results/clientpositive/partition_type_check.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile +PREHOOK: query: CREATE TABLE tab1_n3 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tab1 -POSTHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile +PREHOOK: Output: default@tab1_n3 +POSTHOOK: query: CREATE TABLE tab1_n3 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day=2) +POSTHOOK: Output: default@tab1_n3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day=2) PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@tab1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day=2) +PREHOOK: Output: default@tab1_n3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day=2) POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@tab1 -POSTHOOK: Output: default@tab1@month=June/day=2 -PREHOOK: query: select * from tab1 +POSTHOOK: Output: default@tab1_n3 +POSTHOOK: Output: default@tab1_n3@month=June/day=2 +PREHOOK: query: select * from tab1_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@tab1 -PREHOOK: Input: default@tab1@month=June/day=2 +PREHOOK: Input: default@tab1_n3 +PREHOOK: Input: default@tab1_n3@month=June/day=2 #### A masked pattern was here #### -POSTHOOK: query: select * from tab1 +POSTHOOK: query: select * from tab1_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab1 -POSTHOOK: Input: default@tab1@month=June/day=2 +POSTHOOK: Input: default@tab1_n3 +POSTHOOK: Input: default@tab1_n3@month=June/day=2 #### A masked pattern was here #### 1 11 June 2 2 12 June 2 @@ -31,40 +31,40 @@ POSTHOOK: Input: default@tab1@month=June/day=2 7 17 June 2 8 18 June 2 8 28 June 2 -PREHOOK: query: drop table tab1 +PREHOOK: query: drop table tab1_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tab1 -PREHOOK: Output: default@tab1 -POSTHOOK: query: drop table tab1 +PREHOOK: Input: default@tab1_n3 +PREHOOK: Output: 
default@tab1_n3
+POSTHOOK: query: drop table tab1_n3
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tab1
-POSTHOOK: Output: default@tab1
-PREHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile
+POSTHOOK: Input: default@tab1_n3
+POSTHOOK: Output: default@tab1_n3
+PREHOOK: query: CREATE TABLE tab1_n3 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@tab1
-POSTHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile
+PREHOOK: Output: default@tab1_n3
+POSTHOOK: query: CREATE TABLE tab1_n3 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tab1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2')
+POSTHOOK: Output: default@tab1_n3
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day='2')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@tab1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2')
+PREHOOK: Output: default@tab1_n3
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day='2')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@tab1
-POSTHOOK: Output: default@tab1@month=June/day=2
-PREHOOK: query: select * from tab1
+POSTHOOK: Output: default@tab1_n3
+POSTHOOK: Output: default@tab1_n3@month=June/day=2
+PREHOOK: query: select * from tab1_n3
PREHOOK: type: QUERY
-PREHOOK: Input: default@tab1
-PREHOOK: Input: default@tab1@month=June/day=2
+PREHOOK: Input: default@tab1_n3
+PREHOOK: Input: default@tab1_n3@month=June/day=2
#### A masked pattern was here ####
-POSTHOOK: query: select * from tab1
+POSTHOOK: query: select * from tab1_n3
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab1
-POSTHOOK: Input: default@tab1@month=June/day=2
+POSTHOOK: Input: default@tab1_n3
+POSTHOOK: Input: default@tab1_n3@month=June/day=2
#### A masked pattern was here ####
1 11 June 2
2 12 June 2
@@ -72,46 +72,46 @@ POSTHOOK: Input: default@tab1@month=June/day=2
7 17 June 2
8 18 June 2
8 28 June 2
-PREHOOK: query: drop table tab1
+PREHOOK: query: drop table tab1_n3
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tab1
-PREHOOK: Output: default@tab1
-POSTHOOK: query: drop table tab1
+PREHOOK: Input: default@tab1_n3
+PREHOOK: Output: default@tab1_n3
+POSTHOOK: query: drop table tab1_n3
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tab1
-POSTHOOK: Output: default@tab1
-PREHOOK: query: create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile
+POSTHOOK: Input: default@tab1_n3
+POSTHOOK: Output: default@tab1_n3
+PREHOOK: query: create table tab1_n3 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@tab1
-POSTHOOK: query: create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile
+PREHOOK: Output: default@tab1_n3
+POSTHOOK: query: create table tab1_n3 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tab1
-PREHOOK: query: alter table tab1 add partition (month='June', day='2008-01-01')
+POSTHOOK: Output: default@tab1_n3
+PREHOOK: query: alter table tab1_n3 add partition (month='June', day='2008-01-01')
PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@tab1
-POSTHOOK: query: alter table tab1 add partition (month='June', day='2008-01-01')
+PREHOOK: Output: default@tab1_n3
+POSTHOOK: query: alter table tab1_n3 add partition (month='June', day='2008-01-01')
POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@tab1
-POSTHOOK: Output: default@tab1@month=June/day=2008-01-01
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2008-01-01')
+POSTHOOK: Output: default@tab1_n3
+POSTHOOK: Output: default@tab1_n3@month=June/day=2008-01-01
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day='2008-01-01')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@tab1@month=June/day=2008-01-01
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2008-01-01')
+PREHOOK: Output: default@tab1_n3@month=June/day=2008-01-01
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1_n3 PARTITION(month='June', day='2008-01-01')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@tab1@month=June/day=2008-01-01
-PREHOOK: query: select id1, id2, day from tab1 where day='2008-01-01'
+POSTHOOK: Output: default@tab1_n3@month=June/day=2008-01-01
+PREHOOK: query: select id1, id2, day from tab1_n3 where day='2008-01-01'
PREHOOK: type: QUERY
-PREHOOK: Input: default@tab1
-PREHOOK: Input: default@tab1@month=June/day=2008-01-01
+PREHOOK: Input: default@tab1_n3
+PREHOOK: Input: default@tab1_n3@month=June/day=2008-01-01
#### A masked pattern was here ####
-POSTHOOK: query: select id1, id2, day from tab1 where day='2008-01-01'
+POSTHOOK: query: select id1, id2, day from tab1_n3 where day='2008-01-01'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab1
-POSTHOOK: Input: default@tab1@month=June/day=2008-01-01
+POSTHOOK: Input: default@tab1_n3
+POSTHOOK: Input: default@tab1_n3@month=June/day=2008-01-01
#### A masked pattern was here ####
1 11 2008-01-01
2 12 2008-01-01
@@ -119,11 +119,11 @@ POSTHOOK: Input: default@tab1@month=June/day=2008-01-01
7 17 2008-01-01
8 18 2008-01-01
8 28 2008-01-01
-PREHOOK: query: drop table tab1
+PREHOOK: query: drop table tab1_n3
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tab1
-PREHOOK: Output: default@tab1
-POSTHOOK: query: drop table tab1
+PREHOOK: Input: default@tab1_n3
+PREHOOK: Output: default@tab1_n3
+POSTHOOK: query: drop table tab1_n3
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tab1
-POSTHOOK: Output: default@tab1
+POSTHOOK: Input: default@tab1_n3
+POSTHOOK: Output: default@tab1_n3
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
index 8c2d1a015a..46117d0dd1 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
@@ -1,26 +1,26 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: query: create table partition_test_partitioned_n1(key string, value string) partitioned by (dt string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: Output: default@partition_test_partitioned_n1
+POSTHOOK: query: create table partition_test_partitioned_n1(key string, value string) partitioned by (dt string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=100) select * from src1
+POSTHOOK: Output: default@partition_test_partitioned_n1
+PREHOOK: query: insert overwrite table partition_test_partitioned_n1 partition(dt=100) select * from src1
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=100
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=100) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n1@dt=100
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n1 partition(dt=100) select * from src1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=100
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: show table extended like partition_test_partitioned
+POSTHOOK: Output: default@partition_test_partitioned_n1@dt=100
+POSTHOOK: Lineage: partition_test_partitioned_n1 PARTITION(dt=100).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n1 PARTITION(dt=100).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like partition_test_partitioned_n1
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned
+POSTHOOK: query: show table extended like partition_test_partitioned_n1
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -33,11 +33,11 @@ maxFileSize:216
minFileSize:216
#### A masked pattern was here ####
-PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
+PREHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=100)
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
+POSTHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=100)
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -50,15 +50,15 @@ maxFileSize:216
minFileSize:216
#### A masked pattern was here ####
-PREHOOK: query: select key from partition_test_partitioned where dt=100
+PREHOOK: query: select key from partition_test_partitioned_n1 where dt=100
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=100
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=100
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned where dt=100
+POSTHOOK: query: select key from partition_test_partitioned_n1 where dt=100
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=100
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=100
#### A masked pattern was here ####
238
@@ -85,15 +85,15 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: query: select key from partition_test_partitioned
+PREHOOK: query: select key from partition_test_partitioned_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=100
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=100
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned
+POSTHOOK: query: select key from partition_test_partitioned_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=100
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=100
#### A masked pattern was here ####
238
@@ -120,29 +120,29 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: query: alter table partition_test_partitioned set fileformat rcfile
+PREHOOK: query: alter table partition_test_partitioned_n1 set fileformat rcfile
PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat rcfile
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Output: default@partition_test_partitioned_n1
+POSTHOOK: query: alter table partition_test_partitioned_n1 set fileformat rcfile
POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Output: default@partition_test_partitioned_n1
+PREHOOK: query: insert overwrite table partition_test_partitioned_n1 partition(dt=101) select * from src1
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=101
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n1@dt=101
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n1 partition(dt=101) select * from src1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=101
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: show table extended like partition_test_partitioned
+POSTHOOK: Output: default@partition_test_partitioned_n1@dt=101
+POSTHOOK: Lineage: partition_test_partitioned_n1 PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n1 PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like partition_test_partitioned_n1
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned
+POSTHOOK: query: show table extended like partition_test_partitioned_n1
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -155,11 +155,11 @@ maxFileSize:275
minFileSize:216
#### A masked pattern was here ####
-PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
+PREHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=100)
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
+POSTHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=100)
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -172,11 +172,11 @@ maxFileSize:216
minFileSize:216
#### A masked pattern was here ####
-PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
+PREHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=101)
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
+POSTHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=101)
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -189,15 +189,15 @@ maxFileSize:275
minFileSize:275
#### A masked pattern was here ####
-PREHOOK: query: select key from partition_test_partitioned where dt=100
+PREHOOK: query: select key from partition_test_partitioned_n1 where dt=100
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=100
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=100
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned where dt=100
+POSTHOOK: query: select key from partition_test_partitioned_n1 where dt=100
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=100
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=100
#### A masked pattern was here ####
238
@@ -224,15 +224,15 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: query: select key from partition_test_partitioned where dt=101
+PREHOOK: query: select key from partition_test_partitioned_n1 where dt=101
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=101
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned where dt=101
+POSTHOOK: query: select key from partition_test_partitioned_n1 where dt=101
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=101
#### A masked pattern was here ####
238
@@ -259,17 +259,17 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: query: select key from partition_test_partitioned
+PREHOOK: query: select key from partition_test_partitioned_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Input: default@partition_test_partitioned@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=100
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=101
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned
+POSTHOOK: query: select key from partition_test_partitioned_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=100
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=101
#### A masked pattern was here ####
238
@@ -321,29 +321,29 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile
+PREHOOK: query: alter table partition_test_partitioned_n1 set fileformat Sequencefile
PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Output: default@partition_test_partitioned_n1
+POSTHOOK: query: alter table partition_test_partitioned_n1 set fileformat Sequencefile
POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=102) select * from src1
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Output: default@partition_test_partitioned_n1
+PREHOOK: query: insert overwrite table partition_test_partitioned_n1 partition(dt=102) select * from src1
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=102
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=102) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n1@dt=102
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n1 partition(dt=102) select * from src1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=102
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: show table extended like partition_test_partitioned
+POSTHOOK: Output: default@partition_test_partitioned_n1@dt=102
+POSTHOOK: Lineage: partition_test_partitioned_n1 PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n1 PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like partition_test_partitioned_n1
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned
+POSTHOOK: query: show table extended like partition_test_partitioned_n1
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -356,11 +356,11 @@ maxFileSize:603
minFileSize:216
#### A masked pattern was here ####
-PREHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
+PREHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=100)
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=100)
+POSTHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=100)
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.TextInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -373,11 +373,11 @@ maxFileSize:216
minFileSize:216
#### A masked pattern was here ####
-PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
+PREHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=101)
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
+POSTHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=101)
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -390,11 +390,11 @@ maxFileSize:275
minFileSize:275
#### A masked pattern was here ####
-PREHOOK: query: show table extended like partition_test_partitioned partition(dt=102)
+PREHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=102)
PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=102)
+POSTHOOK: query: show table extended like partition_test_partitioned_n1 partition(dt=102)
POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n1
#### A masked pattern was here ####
inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -407,15 +407,15 @@ maxFileSize:603
minFileSize:603
#### A masked pattern was here ####
-PREHOOK: query: select key from partition_test_partitioned where dt=100
+PREHOOK: query: select key from partition_test_partitioned_n1 where dt=100
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=100
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=100
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned where dt=100
+POSTHOOK: query: select key from partition_test_partitioned_n1 where dt=100
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=100
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=100
#### A masked pattern was here ####
238
@@ -442,15 +442,15 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: query: select key from partition_test_partitioned where dt=101
+PREHOOK: query: select key from partition_test_partitioned_n1 where dt=101
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=101
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned where dt=101
+POSTHOOK: query: select key from partition_test_partitioned_n1 where dt=101
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=101
#### A masked pattern was here ####
238
@@ -477,15 +477,15 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: query: select key from partition_test_partitioned where dt=102
+PREHOOK: query: select key from partition_test_partitioned_n1 where dt=102
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=102
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=102
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned where dt=102
+POSTHOOK: query: select key from partition_test_partitioned_n1 where dt=102
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=102
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=102
#### A masked pattern was here ####
238
@@ -512,19 +512,19 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: query: select key from partition_test_partitioned
+PREHOOK: query: select key from partition_test_partitioned_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Input: default@partition_test_partitioned@dt=102
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=100
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=102
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned
+POSTHOOK: query: select key from partition_test_partitioned_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Input: default@partition_test_partitioned@dt=102
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=100
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=102
#### A masked pattern was here ####
238
@@ -601,19 +601,19 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: query: select key from partition_test_partitioned where dt >=100 and dt <= 102
+PREHOOK: query: select key from partition_test_partitioned_n1 where dt >=100 and dt <= 102
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=100
-PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Input: default@partition_test_partitioned@dt=102
+PREHOOK: Input: default@partition_test_partitioned_n1
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=100
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n1@dt=102
#### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned where dt >=100 and dt <= 102
+POSTHOOK: query: select key from partition_test_partitioned_n1 where dt >=100 and dt <= 102
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=100
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Input: default@partition_test_partitioned@dt=102
+POSTHOOK: Input: default@partition_test_partitioned_n1
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=100
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n1@dt=102
#### A masked pattern was here ####
238
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out
index d54fd0a528..eef01f28d1 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out
@@ -1,114 +1,114 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: query: create table partition_test_partitioned_n4(key string, value string) partitioned by (dt string) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: Output: default@partition_test_partitioned_n4
+POSTHOOK: query: create table partition_test_partitioned_n4(key string, value string) partitioned by (dt string) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+POSTHOOK: Output: default@partition_test_partitioned_n4
+PREHOOK: query: alter table partition_test_partitioned_n4 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Output: default@partition_test_partitioned_n4
+POSTHOOK: query: alter table partition_test_partitioned_n4 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Output: default@partition_test_partitioned_n4
+PREHOOK: query: insert overwrite table partition_test_partitioned_n4 partition(dt='1') select * from src where key = 238
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238
+PREHOOK: Output: default@partition_test_partitioned_n4@dt=1
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n4 partition(dt='1') select * from src where key = 238
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: Output: default@partition_test_partitioned_n4@dt=1
+POSTHOOK: Lineage: partition_test_partitioned_n4 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n4 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from partition_test_partitioned_n4 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: query: select * from partition_test_partitioned_n4 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
238 val_238 1
238 val_238 1
-PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+PREHOOK: query: select key+key, value from partition_test_partitioned_n4 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: query: select key+key, value from partition_test_partitioned_n4 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
476.0 val_238
476.0 val_238
-PREHOOK: query: alter table partition_test_partitioned change key key int
+PREHOOK: query: alter table partition_test_partitioned_n4 change key key int
PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned change key key int
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Output: default@partition_test_partitioned_n4
+POSTHOOK: query: alter table partition_test_partitioned_n4 change key key int
POSTHOOK: type: ALTERTABLE_RENAMECOL
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Output: default@partition_test_partitioned_n4
+PREHOOK: query: select key+key, value from partition_test_partitioned_n4 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: query: select key+key, value from partition_test_partitioned_n4 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
476 val_238
476 val_238
-PREHOOK: query: select * from partition_test_partitioned where dt is not null
+PREHOOK: query: select * from partition_test_partitioned_n4 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: query: select * from partition_test_partitioned_n4 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
238 val_238 1
238 val_238 1
-PREHOOK: query: alter table partition_test_partitioned add columns (value2 string)
+PREHOOK: query: alter table partition_test_partitioned_n4 add columns (value2 string)
PREHOOK: type: ALTERTABLE_ADDCOLS
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned add columns (value2 string)
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Output: default@partition_test_partitioned_n4
+POSTHOOK: query: alter table partition_test_partitioned_n4 add columns (value2 string)
POSTHOOK: type: ALTERTABLE_ADDCOLS
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Output: default@partition_test_partitioned_n4
+PREHOOK: query: select key+key, value from partition_test_partitioned_n4 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: query: select key+key, value from partition_test_partitioned_n4 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
476 val_238
476 val_238
-PREHOOK: query: select * from partition_test_partitioned where dt is not null
+PREHOOK: query: select * from partition_test_partitioned_n4 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n4
+PREHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: query: select * from partition_test_partitioned_n4 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n4
+POSTHOOK: Input: default@partition_test_partitioned_n4@dt=1
#### A masked pattern was here ####
238 val_238 NULL 1
238 val_238 NULL 1
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out
index 4ec48ad5c7..c91e2b77b6 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out
@@ -1,159 +1,159 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: query: create table partition_test_partitioned_n9(key string, value string) partitioned by (dt string) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: Output: default@partition_test_partitioned_n9
+POSTHOOK: query: create table partition_test_partitioned_n9(key string, value string) partitioned by (dt string) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+POSTHOOK: Output: default@partition_test_partitioned_n9
+PREHOOK: query: alter table partition_test_partitioned_n9 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Output: default@partition_test_partitioned_n9
+POSTHOOK: query: alter table partition_test_partitioned_n9 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Output: default@partition_test_partitioned_n9
+PREHOOK: query: insert overwrite table partition_test_partitioned_n9 partition(dt='1') select * from src where key = 238
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src where key = 238
+PREHOOK: Output: default@partition_test_partitioned_n9@dt=1
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n9 partition(dt='1') select * from src where key = 238
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: Output: default@partition_test_partitioned_n9@dt=1
+POSTHOOK: Lineage: partition_test_partitioned_n9 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n9 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from partition_test_partitioned_n9 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: query: select * from partition_test_partitioned_n9 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=1
#### A masked pattern was here ####
238 val_238 1
238 val_238 1
-PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+PREHOOK: query: select key+key, value from partition_test_partitioned_n9 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: query: select key+key, value from partition_test_partitioned_n9 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=1
#### A masked pattern was here ####
476.0 val_238
476.0 val_238
-PREHOOK: query: alter table partition_test_partitioned change key key int
+PREHOOK: query: alter table partition_test_partitioned_n9 change key key int
PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned change key key int
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Output: default@partition_test_partitioned_n9
+POSTHOOK: query: alter table partition_test_partitioned_n9 change key key int
POSTHOOK: type: ALTERTABLE_RENAMECOL
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Output: default@partition_test_partitioned_n9
+PREHOOK: query: select key+key, value from partition_test_partitioned_n9 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: query: select key+key, value from partition_test_partitioned_n9 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=1
#### A masked pattern was here ####
476 val_238
476 val_238
-PREHOOK: query: select * from partition_test_partitioned where dt is not null
+PREHOOK: query: select * from partition_test_partitioned_n9 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: query: select * from partition_test_partitioned_n9 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=1
#### A masked pattern was here ####
238 val_238 1
238 val_238 1
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97
+PREHOOK: query: insert overwrite table partition_test_partitioned_n9 partition(dt='2') select * from src where key = 97
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=2
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src where key = 97
+PREHOOK: Output: default@partition_test_partitioned_n9@dt=2
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n9 partition(dt='2') select * from src where key = 97
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=2
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table partition_test_partitioned add columns (value2 string)
+POSTHOOK: Output: default@partition_test_partitioned_n9@dt=2
+POSTHOOK: Lineage: partition_test_partitioned_n9 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n9 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table partition_test_partitioned_n9 add columns (value2 string)
PREHOOK: type: ALTERTABLE_ADDCOLS
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned add columns (value2 string)
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Output: default@partition_test_partitioned_n9
+POSTHOOK: query: alter table partition_test_partitioned_n9 add columns (value2 string)
POSTHOOK: type: ALTERTABLE_ADDCOLS
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Output: default@partition_test_partitioned_n9
+PREHOOK: query: select key+key, value from partition_test_partitioned_n9 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
-PREHOOK: Input: default@partition_test_partitioned@dt=2
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=2
#### A masked pattern was here ####
-POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null
+POSTHOOK: query: select key+key, value from partition_test_partitioned_n9 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Input: default@partition_test_partitioned@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=2
#### A masked pattern was here ####
476 val_238
476 val_238
194 val_97
194 val_97
-PREHOOK: query: select * from partition_test_partitioned where dt is not null
+PREHOOK: query: select * from partition_test_partitioned_n9 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
-PREHOOK: Input: default@partition_test_partitioned@dt=2
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=2
#### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: query: select * from partition_test_partitioned_n9 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Input: default@partition_test_partitioned@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=2
#### A masked pattern was here ####
238 val_238 NULL 1
238 val_238 NULL 1
97 val_97 NULL 2
97 val_97 NULL 2
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200
+PREHOOK: query: insert overwrite table partition_test_partitioned_n9 partition(dt='3') select key, value, value from src where key = 200
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=3
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='3') select key, value, value from src where key = 200
+PREHOOK: Output: default@partition_test_partitioned_n9@dt=3
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n9 partition(dt='3') select key, value, value from src where key = 200
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=3
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select key+key, value, value2 from partition_test_partitioned where dt is not null
+POSTHOOK: Output: default@partition_test_partitioned_n9@dt=3
+POSTHOOK: Lineage: partition_test_partitioned_n9 PARTITION(dt=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n9 PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n9 PARTITION(dt=3).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select key+key, value, value2 from partition_test_partitioned_n9 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
-PREHOOK: Input: default@partition_test_partitioned@dt=2
-PREHOOK: Input: default@partition_test_partitioned@dt=3
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=2
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=3
#### A masked pattern was here ####
-POSTHOOK: query: select key+key, value, value2 from partition_test_partitioned where dt is not null
+POSTHOOK: query: select key+key, value, value2 from partition_test_partitioned_n9 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Input: default@partition_test_partitioned@dt=2
-POSTHOOK: Input: default@partition_test_partitioned@dt=3
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=3
#### A masked pattern was here ####
476 val_238 NULL
476 val_238 NULL
@@ -161,19 +161,19 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=3
194 val_97 NULL
400 val_200 val_200
400 val_200 val_200
-PREHOOK: query: select * from partition_test_partitioned where dt is not null
+PREHOOK: query: select * from partition_test_partitioned_n9 where dt is not null
PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
-PREHOOK: Input: default@partition_test_partitioned@dt=2
-PREHOOK: Input: default@partition_test_partitioned@dt=3
+PREHOOK: Input: default@partition_test_partitioned_n9
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=2
+PREHOOK: Input: default@partition_test_partitioned_n9@dt=3
#### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null
+POSTHOOK: query: select * from partition_test_partitioned_n9 where dt is not null
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Input: default@partition_test_partitioned@dt=2
-POSTHOOK: Input: default@partition_test_partitioned@dt=3
+POSTHOOK: Input: default@partition_test_partitioned_n9
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n9@dt=3
#### A masked pattern was here ####
238 val_238 NULL 1
238 val_238 NULL 1
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out
index d7a0fade76..5f7e31f222 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out
@@ -1,104 +1,104 @@
-PREHOOK: query: create table T1(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: query: create table T1_n8(key string, value string) partitioned by (dt string) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: create table T1(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: Output: default@T1_n8
+POSTHOOK: query: create table T1_n8(key string, value string) partitioned by (dt string) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+POSTHOOK: Output: default@T1_n8
+PREHOOK: query: alter table T1_n8 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: alter table T1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: Input: default@t1_n8
+PREHOOK: Output: default@t1_n8
+POSTHOOK: query: alter table T1_n8 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-PREHOOK: query: insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97
+POSTHOOK: Input: default@t1_n8
+POSTHOOK: Output: default@t1_n8
+PREHOOK: query: insert overwrite table T1_n8 partition (dt='1') select * from src where key = 238 or key = 97
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@t1@dt=1
-POSTHOOK: query: insert overwrite table T1 partition (dt='1') select * from src where key = 238 or key = 97
+PREHOOK: Output: default@t1_n8@dt=1
+POSTHOOK: query: insert overwrite table T1_n8 partition (dt='1') select * from src where key = 238 or key = 97
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1@dt=1
-POSTHOOK: Lineage: t1 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table T1 change key key int
+POSTHOOK: Output: default@t1_n8@dt=1
+POSTHOOK: Lineage: t1_n8 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n8 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table T1_n8 change key key int
PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: alter table T1 change key key int
+PREHOOK: Input: default@t1_n8
+PREHOOK: Output: default@t1_n8
+POSTHOOK: query: alter table T1_n8 change key key int
POSTHOOK: type: ALTERTABLE_RENAMECOL
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-PREHOOK: query: insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97
+POSTHOOK: Input: default@t1_n8
+POSTHOOK: Output: default@t1_n8
+PREHOOK: query: insert overwrite table T1_n8 partition (dt='2') select * from src where key = 238 or key = 97
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@t1@dt=2
-POSTHOOK: query: insert overwrite table T1 partition (dt='2') select * from src where key = 238 or key = 97
+PREHOOK: Output: default@t1_n8@dt=2
+POSTHOOK: query: insert overwrite table T1_n8 partition (dt='2') select * from src where key = 238 or key = 97
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1@dt=2
-POSTHOOK: Lineage: t1 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table T1 change key key string
+POSTHOOK: Output: default@t1_n8@dt=2
+POSTHOOK: Lineage: t1_n8 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n8 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table T1_n8 change key key string
PREHOOK: type: ALTERTABLE_RENAMECOL
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: alter table T1 change key key string
+PREHOOK: Input: default@t1_n8
+PREHOOK: Output: default@t1_n8
+POSTHOOK: query: alter table T1_n8 change key key string
POSTHOOK: type: ALTERTABLE_RENAMECOL
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table T2(key string, value string) partitioned by (dt string) stored as rcfile
+POSTHOOK: Input: default@t1_n8
+POSTHOOK: Output: default@t1_n8
+PREHOOK: query: create table T2_n6(key string, value string) partitioned by (dt string) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: create table T2(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: Output: default@T2_n6
+POSTHOOK: query: create table T2_n6(key string, value string) partitioned by (dt string) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97
+POSTHOOK: Output: default@T2_n6
+PREHOOK: query: insert overwrite table T2_n6 partition (dt='1') select * from src where key = 238 or key = 97
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@t2@dt=1
-POSTHOOK: query: insert overwrite table T2 partition (dt='1') select * from src where key = 238 or key = 97
+PREHOOK: Output: default@t2_n6@dt=1
+POSTHOOK: query: insert overwrite table T2_n6 partition (dt='1') select * from src where key = 238 or key = 97
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t2@dt=1
-POSTHOOK: Lineage: t2 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: Output: default@t2_n6@dt=1
+POSTHOOK: Lineage: t2_n6 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2_n6 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM T1_n8 a JOIN T2_n6 b ON a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=1
-PREHOOK: Input: default@t1@dt=2
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=1
+PREHOOK: Input: default@t1_n8
+PREHOOK: Input: default@t1_n8@dt=1
+PREHOOK: Input: default@t1_n8@dt=2
+PREHOOK: Input: default@t2_n6
+PREHOOK: Input: default@t2_n6@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: select /* + MAPJOIN(a) */ count(*) FROM T1_n8 a JOIN T2_n6 b ON a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=1
-POSTHOOK: Input: default@t1@dt=2
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=1
+POSTHOOK: Input: default@t1_n8
+POSTHOOK: Input: default@t1_n8@dt=1
+POSTHOOK: Input: default@t1_n8@dt=2
+POSTHOOK: Input: default@t2_n6
+POSTHOOK: Input: default@t2_n6@dt=1
#### A masked pattern was here ####
16
-PREHOOK: query: select count(*) FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: select count(*) FROM T1_n8 a JOIN T2_n6 b ON a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t1@dt=1
-PREHOOK: Input: default@t1@dt=2
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t2@dt=1
+PREHOOK: Input: default@t1_n8
+PREHOOK: Input: default@t1_n8@dt=1
+PREHOOK: Input: default@t1_n8@dt=2
+PREHOOK: Input: default@t2_n6
+PREHOOK: Input: default@t2_n6@dt=1
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: select count(*) FROM T1_n8 a JOIN T2_n6 b ON a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t1@dt=1
-POSTHOOK: Input: default@t1@dt=2
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t2@dt=1
+POSTHOOK: Input: default@t1_n8
+POSTHOOK: Input: default@t1_n8@dt=1
+POSTHOOK: Input: default@t1_n8@dt=2
+POSTHOOK: Input: default@t2_n6
+POSTHOOK: Input: default@t2_n6@dt=1
#### A masked pattern was here ####
16
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out
index d640e742c3..ccb12f640c 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out
@@ -1,182 +1,182 @@
-PREHOOK: query: CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string)
+PREHOOK: query: CREATE TABLE tbl1_n8(key int, value string) PARTITIONED by (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string)
+PREHOOK: Output: default@tbl1_n8
+POSTHOOK: query: CREATE TABLE tbl1_n8(key int, value string) PARTITIONED by (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl1
-PREHOOK: query: CREATE TABLE tbl2(key int, value string) PARTITIONED by (ds string)
+POSTHOOK: Output: default@tbl1_n8
+PREHOOK: query: CREATE TABLE tbl2_n7(key int, value string) PARTITIONED by (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: CREATE TABLE tbl2(key int, value string) PARTITIONED by (ds string)
+PREHOOK: Output: default@tbl2_n7
+POSTHOOK: query: CREATE TABLE tbl2_n7(key int, value string) PARTITIONED by (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl2
-PREHOOK: query: alter table tbl1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+POSTHOOK: Output: default@tbl2_n7
+PREHOOK: query: alter table tbl1_n8 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@tbl1
-PREHOOK: Output: default@tbl1
-POSTHOOK: query: alter table tbl1 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: Input: default@tbl1_n8
+PREHOOK: Output: default@tbl1_n8
+POSTHOOK: query: alter table tbl1_n8 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@tbl1
-POSTHOOK: Output: default@tbl1
-PREHOOK: query: alter table tbl2 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+POSTHOOK: Input: default@tbl1_n8
+POSTHOOK: Output: default@tbl1_n8
+PREHOOK: query: alter table tbl2_n7 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@tbl2
-PREHOOK: Output: default@tbl2
-POSTHOOK: query: alter table tbl2 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: Input: default@tbl2_n7
+PREHOOK: Output: default@tbl2_n7
+POSTHOOK: query: alter table tbl2_n7 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@tbl2
-POSTHOOK: Output: default@tbl2
-PREHOOK: query: insert overwrite table tbl1 partition (ds='1') select * from src where key < 10
+POSTHOOK: Input: default@tbl2_n7
+POSTHOOK: Output: default@tbl2_n7
+PREHOOK: query: insert overwrite table tbl1_n8 partition (ds='1') select * from src where key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl1@ds=1
-POSTHOOK: query: insert overwrite table tbl1 partition (ds='1') select * from src where key < 10
+PREHOOK: Output: default@tbl1_n8@ds=1
+POSTHOOK: query: insert overwrite table tbl1_n8 partition (ds='1') select * from src where key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl1@ds=1
-POSTHOOK: Lineage: tbl1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tbl1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table tbl2 partition (ds='1') select * from src where key < 10
+POSTHOOK: Output: default@tbl1_n8@ds=1
+POSTHOOK: Lineage: tbl1_n8 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1_n8 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2_n7 partition (ds='1') select * from src where key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@tbl2@ds=1
-POSTHOOK: query: insert overwrite table tbl2 partition (ds='1') select * from src where key < 10
+PREHOOK: Output: default@tbl2_n7@ds=1
+POSTHOOK: query: insert overwrite table tbl2_n7 partition (ds='1') select * from src where key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tbl2@ds=1
-POSTHOOK: Lineage: tbl2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tbl2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table tbl1 change key key int
+POSTHOOK: Output: default@tbl2_n7@ds=1
+POSTHOOK: Lineage: tbl2_n7 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2_n7 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table
tbl1_n8 change key key int PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@tbl1 -PREHOOK: Output: default@tbl1 -POSTHOOK: query: alter table tbl1 change key key int +PREHOOK: Input: default@tbl1_n8 +PREHOOK: Output: default@tbl1_n8 +POSTHOOK: query: alter table tbl1_n8 change key key int POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@tbl1 -POSTHOOK: Output: default@tbl1 -PREHOOK: query: insert overwrite table tbl1 partition (ds='2') select * from src where key < 10 +POSTHOOK: Input: default@tbl1_n8 +POSTHOOK: Output: default@tbl1_n8 +PREHOOK: query: insert overwrite table tbl1_n8 partition (ds='2') select * from src where key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tbl1@ds=2 -POSTHOOK: query: insert overwrite table tbl1 partition (ds='2') select * from src where key < 10 +PREHOOK: Output: default@tbl1_n8@ds=2 +POSTHOOK: query: insert overwrite table tbl1_n8 partition (ds='2') select * from src where key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tbl1@ds=2 -POSTHOOK: Lineage: tbl1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tbl1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: alter table tbl1 change key key string +POSTHOOK: Output: default@tbl1_n8@ds=2 +POSTHOOK: Lineage: tbl1_n8 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1_n8 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table tbl1_n8 change key key string PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@tbl1 -PREHOOK: Output: default@tbl1 -POSTHOOK: query: alter table tbl1 change key key string +PREHOOK: Input: default@tbl1_n8 +PREHOOK: Output: default@tbl1_n8 +POSTHOOK: query: alter table tbl1_n8 change key key string POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@tbl1 -POSTHOOK: Output: default@tbl1 +POSTHOOK: Input: default@tbl1_n8 +POSTHOOK: Output: default@tbl1_n8 PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl1@ds=1 -PREHOOK: Input: default@tbl1@ds=2 -PREHOOK: Input: default@tbl2 -PREHOOK: Input: default@tbl2@ds=1 +PREHOOK: Input: default@tbl1_n8 +PREHOOK: Input: default@tbl1_n8@ds=1 +PREHOOK: Input: default@tbl1_n8@ds=2 +PREHOOK: Input: default@tbl2_n7 +PREHOOK: Input: default@tbl2_n7@ds=1 #### A masked pattern was here #### POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl1@ds=1 -POSTHOOK: Input: default@tbl1@ds=2 -POSTHOOK: Input: default@tbl2 -POSTHOOK: Input: default@tbl2@ds=1 
+POSTHOOK: Input: default@tbl1_n8 +POSTHOOK: Input: default@tbl1_n8@ds=1 +POSTHOOK: Input: default@tbl1_n8@ds=2 +POSTHOOK: Input: default@tbl2_n7 +POSTHOOK: Input: default@tbl2_n7@ds=1 #### A masked pattern was here #### 40 PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl1@ds=1 -PREHOOK: Input: default@tbl1@ds=2 -PREHOOK: Input: default@tbl2 -PREHOOK: Input: default@tbl2@ds=1 +PREHOOK: Input: default@tbl1_n8 +PREHOOK: Input: default@tbl1_n8@ds=1 +PREHOOK: Input: default@tbl1_n8@ds=2 +PREHOOK: Input: default@tbl2_n7 +PREHOOK: Input: default@tbl2_n7@ds=1 #### A masked pattern was here #### POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl1@ds=1 -POSTHOOK: Input: default@tbl1@ds=2 -POSTHOOK: Input: default@tbl2 -POSTHOOK: Input: default@tbl2@ds=1 +POSTHOOK: Input: default@tbl1_n8 +POSTHOOK: Input: default@tbl1_n8@ds=1 +POSTHOOK: Input: default@tbl1_n8@ds=2 +POSTHOOK: Input: default@tbl2_n7 +POSTHOOK: Input: default@tbl2_n7@ds=1 #### A masked pattern was here #### 40 PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl1@ds=1 -PREHOOK: Input: default@tbl1@ds=2 -PREHOOK: Input: default@tbl2 -PREHOOK: Input: default@tbl2@ds=1 +PREHOOK: Input: default@tbl1_n8 +PREHOOK: Input: default@tbl1_n8@ds=1 +PREHOOK: Input: default@tbl1_n8@ds=2 +PREHOOK: Input: default@tbl2_n7 +PREHOOK: Input: default@tbl2_n7@ds=1 #### A masked pattern was here #### POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from - (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + (select a.key as key, a.value as value from tbl1_n8 a where key < 6) subq1 join - (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + (select a.key as key, a.value as value from tbl2_n7 a where key < 6) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl1@ds=1 -POSTHOOK: Input: default@tbl1@ds=2 -POSTHOOK: Input: default@tbl2 -POSTHOOK: Input: default@tbl2@ds=1 +POSTHOOK: Input: default@tbl1_n8 +POSTHOOK: Input: default@tbl1_n8@ds=1 +POSTHOOK: Input: default@tbl1_n8@ds=2 +POSTHOOK: Input: default@tbl2_n7 +POSTHOOK: Input: default@tbl2_n7@ds=1 #### A masked pattern was here #### 40 PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from - (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+ (select a.key+1 as key, concat(a.value, a.value) as value from tbl1_n8 a) subq1 join - (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key+1 as key, concat(a.value, a.value) as value from tbl2_n7 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -PREHOOK: Input: default@tbl1 -PREHOOK: Input: default@tbl1@ds=1 -PREHOOK: Input: default@tbl1@ds=2 -PREHOOK: Input: default@tbl2 -PREHOOK: Input: default@tbl2@ds=1 +PREHOOK: Input: default@tbl1_n8 +PREHOOK: Input: default@tbl1_n8@ds=1 +PREHOOK: Input: default@tbl1_n8@ds=2 +PREHOOK: Input: default@tbl2_n7 +PREHOOK: Input: default@tbl2_n7@ds=1 #### A masked pattern was here #### POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from - (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + (select a.key+1 as key, concat(a.value, a.value) as value from tbl1_n8 a) subq1 join - (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + (select a.key+1 as key, concat(a.value, a.value) as value from tbl2_n7 a) subq2 on subq1.key = subq2.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tbl1 -POSTHOOK: Input: default@tbl1@ds=1 -POSTHOOK: Input: default@tbl1@ds=2 -POSTHOOK: Input: default@tbl2 -POSTHOOK: Input: default@tbl2@ds=1 +POSTHOOK: Input: default@tbl1_n8 +POSTHOOK: Input: default@tbl1_n8@ds=1 +POSTHOOK: Input: default@tbl1_n8@ds=2 +POSTHOOK: Input: default@tbl2_n7 +POSTHOOK: Input: default@tbl2_n7@ds=1 #### A masked pattern was here #### 44 diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat15.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat15.q.out index c992d37a0d..87098d265b 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat15.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat15.q.out @@ -1,152 +1,152 @@ -PREHOOK: query: create table partition_test_partitioned(key string, value string) +PREHOOK: query: create table partition_test_partitioned_n6(key string, value string) partitioned by (dt string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: create table partition_test_partitioned(key string, value string) +PREHOOK: Output: default@partition_test_partitioned_n6 +POSTHOOK: query: create table partition_test_partitioned_n6(key string, value string) partitioned by (dt string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') +POSTHOOK: Output: default@partition_test_partitioned_n6 +PREHOOK: query: insert overwrite table partition_test_partitioned_n6 partition(dt='1') select * from src where key = 238 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@partition_test_partitioned@dt=1 -POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') +PREHOOK: Output: default@partition_test_partitioned_n6@dt=1 +POSTHOOK: query: insert overwrite table partition_test_partitioned_n6 partition(dt='1') select * from src where key = 238 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@partition_test_partitioned@dt=1 -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: Output: default@partition_test_partitioned_n6@dt=1 +POSTHOOK: Lineage: partition_test_partitioned_n6 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n6 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from partition_test_partitioned_n6 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: query: select * from partition_test_partitioned_n6 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### 238 val_238 1 238 val_238 1 -PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: query: select key+key, value from partition_test_partitioned_n6 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: query: select key+key, value from partition_test_partitioned_n6 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### 476.0 val_238 476.0 val_238 -PREHOOK: query: alter table partition_test_partitioned change key key int +PREHOOK: query: alter table partition_test_partitioned_n6 change key key int PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: alter table partition_test_partitioned change key key int +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Output: default@partition_test_partitioned_n6 +POSTHOOK: query: alter table partition_test_partitioned_n6 change key key int POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Output: default@partition_test_partitioned_n6 +PREHOOK: query: select key+key, value from partition_test_partitioned_n6 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Input: 
default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: query: select key+key, value from partition_test_partitioned_n6 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### 476 val_238 476 val_238 -PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: query: select * from partition_test_partitioned_n6 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: query: select * from partition_test_partitioned_n6 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### 238 val_238 1 238 val_238 1 -PREHOOK: query: alter table partition_test_partitioned add columns (value2 string) +PREHOOK: query: alter table partition_test_partitioned_n6 add columns (value2 string) PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: alter table partition_test_partitioned add columns (value2 string) +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Output: default@partition_test_partitioned_n6 +POSTHOOK: query: alter table partition_test_partitioned_n6 add columns (value2 string) POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Output: default@partition_test_partitioned_n6 +PREHOOK: query: select key+key, value from partition_test_partitioned_n6 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: query: select key+key, value from partition_test_partitioned_n6 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### 476 val_238 476 val_238 -PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: query: select * from partition_test_partitioned_n6 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned 
-PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: query: select * from partition_test_partitioned_n6 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=1 #### A masked pattern was here #### 238 val_238 NULL 1 238 val_238 NULL 1 -PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') +PREHOOK: query: insert overwrite table partition_test_partitioned_n6 partition(dt='2') select key, value, value from src where key = 86 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@partition_test_partitioned@dt=2 -POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') +PREHOOK: Output: default@partition_test_partitioned_n6@dt=2 +POSTHOOK: query: insert overwrite table partition_test_partitioned_n6 partition(dt='2') select key, value, value from src where key = 86 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@partition_test_partitioned@dt=2 -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select key+key, value, value2, dt from partition_test_partitioned where dt is not null +POSTHOOK: Output: default@partition_test_partitioned_n6@dt=2 +POSTHOOK: Lineage: partition_test_partitioned_n6 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n6 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n6 PARTITION(dt=2).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value, value2, dt from partition_test_partitioned_n6 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 -PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=2 #### A masked pattern was here #### -POSTHOOK: query: select key+key, value, value2, dt from partition_test_partitioned where dt is not null +POSTHOOK: query: select key+key, value, value2, dt from partition_test_partitioned_n6 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 -POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=2 #### A masked 
pattern was here #### 476 val_238 NULL 1 476 val_238 NULL 1 172 val_86 val_86 2 -PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: query: select * from partition_test_partitioned_n6 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 -PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned_n6 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n6@dt=2 #### A masked pattern was here #### -POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: query: select * from partition_test_partitioned_n6 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 -POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned_n6 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n6@dt=2 #### A masked pattern was here #### 238 val_238 NULL 1 238 val_238 NULL 1 diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat16.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat16.q.out index 906f53cc92..233e22953a 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat16.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat16.q.out @@ -1,152 +1,152 @@ -PREHOOK: query: create table partition_test_partitioned(key string, value string) +PREHOOK: query: create table partition_test_partitioned_n10(key string, value string) partitioned by (dt string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: create table partition_test_partitioned(key string, value string) +PREHOOK: Output: default@partition_test_partitioned_n10 +POSTHOOK: query: create table partition_test_partitioned_n10(key string, value string) partitioned by (dt string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') +POSTHOOK: Output: default@partition_test_partitioned_n10 +PREHOOK: query: insert overwrite table partition_test_partitioned_n10 partition(dt='1') select * from src where key = 238 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@partition_test_partitioned@dt=1 -POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') +PREHOOK: Output: default@partition_test_partitioned_n10@dt=1 +POSTHOOK: query: insert overwrite table partition_test_partitioned_n10 partition(dt='1') select * from src where key = 238 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@partition_test_partitioned@dt=1 -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: Output: default@partition_test_partitioned_n10@dt=1 +POSTHOOK: Lineage: 
partition_test_partitioned_n10 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n10 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from partition_test_partitioned_n10 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: query: select * from partition_test_partitioned_n10 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### 238 val_238 1 238 val_238 1 -PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +PREHOOK: query: select key+key, value from partition_test_partitioned_n10 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: query: select key+key, value from partition_test_partitioned_n10 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### 476.0 val_238 476.0 val_238 -PREHOOK: query: alter table partition_test_partitioned change key key int +PREHOOK: query: alter table partition_test_partitioned_n10 change key key int PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: alter table partition_test_partitioned change key key int +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Output: default@partition_test_partitioned_n10 +POSTHOOK: query: alter table partition_test_partitioned_n10 change key key int POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Output: default@partition_test_partitioned_n10 +PREHOOK: query: select key+key, value from partition_test_partitioned_n10 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: query: select key+key, value from 
partition_test_partitioned_n10 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### 476 val_238 476 val_238 -PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: query: select * from partition_test_partitioned_n10 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: query: select * from partition_test_partitioned_n10 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### 238 val_238 1 238 val_238 1 -PREHOOK: query: alter table partition_test_partitioned add columns (value2 string) +PREHOOK: query: alter table partition_test_partitioned_n10 add columns (value2 string) PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: alter table partition_test_partitioned add columns (value2 string) +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Output: default@partition_test_partitioned_n10 +POSTHOOK: query: alter table partition_test_partitioned_n10 add columns (value2 string) POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Output: default@partition_test_partitioned_n10 +PREHOOK: query: select key+key, value from partition_test_partitioned_n10 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### -POSTHOOK: query: select key+key, value from partition_test_partitioned where dt is not null +POSTHOOK: query: select key+key, value from partition_test_partitioned_n10 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### 476 val_238 476 val_238 -PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: query: select * from partition_test_partitioned_n10 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was 
here #### -POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: query: select * from partition_test_partitioned_n10 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=1 #### A masked pattern was here #### 238 val_238 NULL 1 238 val_238 NULL 1 -PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') +PREHOOK: query: insert overwrite table partition_test_partitioned_n10 partition(dt='2') select key, value, value from src where key = 86 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@partition_test_partitioned@dt=2 -POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') +PREHOOK: Output: default@partition_test_partitioned_n10@dt=2 +POSTHOOK: query: insert overwrite table partition_test_partitioned_n10 partition(dt='2') select key, value, value from src where key = 86 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@partition_test_partitioned@dt=2 -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select key+key, value, value2, dt from partition_test_partitioned where dt is not null +POSTHOOK: Output: default@partition_test_partitioned_n10@dt=2 +POSTHOOK: Lineage: partition_test_partitioned_n10 PARTITION(dt=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n10 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n10 PARTITION(dt=2).value2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key+key, value, value2, dt from partition_test_partitioned_n10 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 -PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=2 #### A masked pattern was here #### -POSTHOOK: query: select key+key, value, value2, dt from partition_test_partitioned where dt is not null +POSTHOOK: query: select key+key, value, value2, dt from partition_test_partitioned_n10 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 -POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=2 #### A masked pattern was here #### 476 val_238 NULL 1 476 val_238 NULL 1 172 val_86 val_86 2 -PREHOOK: query: select * from partition_test_partitioned where dt is not null +PREHOOK: query: 
select * from partition_test_partitioned_n10 where dt is not null PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=1 -PREHOOK: Input: default@partition_test_partitioned@dt=2 +PREHOOK: Input: default@partition_test_partitioned_n10 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=1 +PREHOOK: Input: default@partition_test_partitioned_n10@dt=2 #### A masked pattern was here #### -POSTHOOK: query: select * from partition_test_partitioned where dt is not null +POSTHOOK: query: select * from partition_test_partitioned_n10 where dt is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=1 -POSTHOOK: Input: default@partition_test_partitioned@dt=2 +POSTHOOK: Input: default@partition_test_partitioned_n10 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=1 +POSTHOOK: Input: default@partition_test_partitioned_n10@dt=2 #### A masked pattern was here #### 238 val_238 NULL 1 238 val_238 NULL 1 diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out index 976fc06496..5c41fa034b 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat3.q.out @@ -1,34 +1,34 @@ -PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) +PREHOOK: query: create table partition_test_partitioned_n8(key string, value string) partitioned by (dt string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) +PREHOOK: Output: default@partition_test_partitioned_n8 +POSTHOOK: query: create table partition_test_partitioned_n8(key string, value string) partitioned by (dt string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: alter table partition_test_partitioned set fileformat rcfile +POSTHOOK: Output: default@partition_test_partitioned_n8 +PREHOOK: query: alter table partition_test_partitioned_n8 set fileformat rcfile PREHOOK: type: ALTERTABLE_FILEFORMAT -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: alter table partition_test_partitioned set fileformat rcfile +PREHOOK: Input: default@partition_test_partitioned_n8 +PREHOOK: Output: default@partition_test_partitioned_n8 +POSTHOOK: query: alter table partition_test_partitioned_n8 set fileformat rcfile POSTHOOK: type: ALTERTABLE_FILEFORMAT -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1 +POSTHOOK: Input: default@partition_test_partitioned_n8 +POSTHOOK: Output: default@partition_test_partitioned_n8 +PREHOOK: query: insert overwrite table partition_test_partitioned_n8 partition(dt=101) select * from src1 PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Output: default@partition_test_partitioned@dt=101 -POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1 +PREHOOK: Output: default@partition_test_partitioned_n8@dt=101 +POSTHOOK: 
query: insert overwrite table partition_test_partitioned_n8 partition(dt=101) select * from src1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@partition_test_partitioned@dt=101 -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101) +POSTHOOK: Output: default@partition_test_partitioned_n8@dt=101 +POSTHOOK: Lineage: partition_test_partitioned_n8 PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n8 PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show table extended like partition_test_partitioned_n8 partition(dt=101) PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=101) +POSTHOOK: query: show table extended like partition_test_partitioned_n8 partition(dt=101) POSTHOOK: type: SHOW_TABLESTATUS -tableName:partition_test_partitioned +tableName:partition_test_partitioned_n8 #### A masked pattern was here #### inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat @@ -41,29 +41,29 @@ maxFileSize:275 minFileSize:275 #### A masked pattern was here #### -PREHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile +PREHOOK: query: alter table partition_test_partitioned_n8 set fileformat Sequencefile PREHOOK: type: ALTERTABLE_FILEFORMAT -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile +PREHOOK: Input: default@partition_test_partitioned_n8 +PREHOOK: Output: default@partition_test_partitioned_n8 +POSTHOOK: query: alter table partition_test_partitioned_n8 set fileformat Sequencefile POSTHOOK: type: ALTERTABLE_FILEFORMAT -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Output: default@partition_test_partitioned -PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=102) select * from src1 +POSTHOOK: Input: default@partition_test_partitioned_n8 +POSTHOOK: Output: default@partition_test_partitioned_n8 +PREHOOK: query: insert overwrite table partition_test_partitioned_n8 partition(dt=102) select * from src1 PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Output: default@partition_test_partitioned@dt=102 -POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=102) select * from src1 +PREHOOK: Output: default@partition_test_partitioned_n8@dt=102 +POSTHOOK: query: insert overwrite table partition_test_partitioned_n8 partition(dt=102) select * from src1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@partition_test_partitioned@dt=102 -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show table extended like partition_test_partitioned partition(dt=102) 
+POSTHOOK: Output: default@partition_test_partitioned_n8@dt=102 +POSTHOOK: Lineage: partition_test_partitioned_n8 PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n8 PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show table extended like partition_test_partitioned_n8 partition(dt=102) PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=102) +POSTHOOK: query: show table extended like partition_test_partitioned_n8 partition(dt=102) POSTHOOK: type: SHOW_TABLESTATUS -tableName:partition_test_partitioned +tableName:partition_test_partitioned_n8 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -76,15 +76,15 @@ maxFileSize:603 minFileSize:603 #### A masked pattern was here #### -PREHOOK: query: select key from partition_test_partitioned where dt=102 +PREHOOK: query: select key from partition_test_partitioned_n8 where dt=102 PREHOOK: type: QUERY -PREHOOK: Input: default@partition_test_partitioned -PREHOOK: Input: default@partition_test_partitioned@dt=102 +PREHOOK: Input: default@partition_test_partitioned_n8 +PREHOOK: Input: default@partition_test_partitioned_n8@dt=102 #### A masked pattern was here #### -POSTHOOK: query: select key from partition_test_partitioned where dt=102 +POSTHOOK: query: select key from partition_test_partitioned_n8 where dt=102 POSTHOOK: type: QUERY -POSTHOOK: Input: default@partition_test_partitioned -POSTHOOK: Input: default@partition_test_partitioned@dt=102 +POSTHOOK: Input: default@partition_test_partitioned_n8 +POSTHOOK: Input: default@partition_test_partitioned_n8@dt=102 #### A masked pattern was here #### 238 @@ -111,21 +111,21 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=102 -PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1 +PREHOOK: query: insert overwrite table partition_test_partitioned_n8 partition(dt=101) select * from src1 PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Output: default@partition_test_partitioned@dt=101 -POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1 +PREHOOK: Output: default@partition_test_partitioned_n8@dt=101 +POSTHOOK: query: insert overwrite table partition_test_partitioned_n8 partition(dt=101) select * from src1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@partition_test_partitioned@dt=101 -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show table extended like partition_test_partitioned partition(dt=101) +POSTHOOK: Output: default@partition_test_partitioned_n8@dt=101 +POSTHOOK: Lineage: partition_test_partitioned_n8 PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_partitioned_n8 PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show table extended like partition_test_partitioned_n8 partition(dt=101) PREHOOK: type: 
SHOW_TABLESTATUS
-POSTHOOK: query: show table extended like partition_test_partitioned partition(dt=101)
+POSTHOOK: query: show table extended like partition_test_partitioned_n8 partition(dt=101)
 POSTHOOK: type: SHOW_TABLESTATUS
-tableName:partition_test_partitioned
+tableName:partition_test_partitioned_n8
 #### A masked pattern was here ####
 inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
 outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -138,15 +138,15 @@ maxFileSize:603
 minFileSize:603
 #### A masked pattern was here ####
-PREHOOK: query: select key from partition_test_partitioned where dt=101
+PREHOOK: query: select key from partition_test_partitioned_n8 where dt=101
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n8
+PREHOOK: Input: default@partition_test_partitioned_n8@dt=101
 #### A masked pattern was here ####
-POSTHOOK: query: select key from partition_test_partitioned where dt=101
+POSTHOOK: query: select key from partition_test_partitioned_n8 where dt=101
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n8
+POSTHOOK: Input: default@partition_test_partitioned_n8@dt=101
 #### A masked pattern was here ####
 238
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat4.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat4.q.out
index e8c3a1779e..c1f9ca3d3f 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat4.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat4.q.out
@@ -1,50 +1,50 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: query: create table partition_test_partitioned_n5(key string, value string) partitioned by (dt string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: Output: default@partition_test_partitioned_n5
+POSTHOOK: query: create table partition_test_partitioned_n5(key string, value string) partitioned by (dt string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: alter table partition_test_partitioned set fileformat sequencefile
+POSTHOOK: Output: default@partition_test_partitioned_n5
+PREHOOK: query: alter table partition_test_partitioned_n5 set fileformat sequencefile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat sequencefile
+PREHOOK: Input: default@partition_test_partitioned_n5
+PREHOOK: Output: default@partition_test_partitioned_n5
+POSTHOOK: query: alter table partition_test_partitioned_n5 set fileformat sequencefile
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src1
+POSTHOOK: Input: default@partition_test_partitioned_n5
+POSTHOOK: Output: default@partition_test_partitioned_n5
+PREHOOK: query: insert overwrite table partition_test_partitioned_n5 partition(dt='1') select * from src1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n5@dt=1
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n5 partition(dt='1') select * from src1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table partition_test_partitioned partition (dt='1') set fileformat sequencefile
+POSTHOOK: Output: default@partition_test_partitioned_n5@dt=1
+POSTHOOK: Lineage: partition_test_partitioned_n5 PARTITION(dt=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n5 PARTITION(dt=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table partition_test_partitioned_n5 partition (dt='1') set fileformat sequencefile
 PREHOOK: type: ALTERPARTITION_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: query: alter table partition_test_partitioned partition (dt='1') set fileformat sequencefile
+PREHOOK: Input: default@partition_test_partitioned_n5
+PREHOOK: Output: default@partition_test_partitioned_n5@dt=1
+POSTHOOK: query: alter table partition_test_partitioned_n5 partition (dt='1') set fileformat sequencefile
 POSTHOOK: type: ALTERPARTITION_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Output: default@partition_test_partitioned@dt=1
-PREHOOK: query: alter table partition_test_partitioned add partition (dt='2')
+POSTHOOK: Input: default@partition_test_partitioned_n5
+POSTHOOK: Input: default@partition_test_partitioned_n5@dt=1
+POSTHOOK: Output: default@partition_test_partitioned_n5@dt=1
+PREHOOK: query: alter table partition_test_partitioned_n5 add partition (dt='2')
 PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned add partition (dt='2')
+PREHOOK: Output: default@partition_test_partitioned_n5
+POSTHOOK: query: alter table partition_test_partitioned_n5 add partition (dt='2')
 POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned@dt=2
-PREHOOK: query: alter table partition_test_partitioned drop partition (dt='2')
+POSTHOOK: Output: default@partition_test_partitioned_n5
+POSTHOOK: Output: default@partition_test_partitioned_n5@dt=2
+PREHOOK: query: alter table partition_test_partitioned_n5 drop partition (dt='2')
 PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned@dt=2
-POSTHOOK: query: alter table partition_test_partitioned drop partition (dt='2')
+PREHOOK: Input: default@partition_test_partitioned_n5
+PREHOOK: Output: default@partition_test_partitioned_n5@dt=2
+POSTHOOK: query: alter table partition_test_partitioned_n5 drop partition (dt='2')
 POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n5
+POSTHOOK: Output: default@partition_test_partitioned_n5@dt=2
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat5.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat5.q.out
index 97e48b1ff0..5f542862fd 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat5.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat5.q.out
@@ -1,84 +1,84 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: query: create table partition_test_partitioned_n3(key string, value string) partitioned by (dt string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: Output: default@partition_test_partitioned_n3
+POSTHOOK: query: create table partition_test_partitioned_n3(key string, value string) partitioned by (dt string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: alter table partition_test_partitioned set fileformat rcfile
+POSTHOOK: Output: default@partition_test_partitioned_n3
+PREHOOK: query: alter table partition_test_partitioned_n3 set fileformat rcfile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat rcfile
+PREHOOK: Input: default@partition_test_partitioned_n3
+PREHOOK: Output: default@partition_test_partitioned_n3
+POSTHOOK: query: alter table partition_test_partitioned_n3 set fileformat rcfile
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1
+POSTHOOK: Input: default@partition_test_partitioned_n3
+POSTHOOK: Output: default@partition_test_partitioned_n3
+PREHOOK: query: insert overwrite table partition_test_partitioned_n3 partition(dt=101) select * from src1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=101
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n3@dt=101
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n3 partition(dt=101) select * from src1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=101
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile
+POSTHOOK: Output: default@partition_test_partitioned_n3@dt=101
+POSTHOOK: Lineage: partition_test_partitioned_n3 PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n3 PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table partition_test_partitioned_n3 set fileformat Sequencefile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile
+PREHOOK: Input: default@partition_test_partitioned_n3
+PREHOOK: Output: default@partition_test_partitioned_n3
+POSTHOOK: query: alter table partition_test_partitioned_n3 set fileformat Sequencefile
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=102) select * from src1
+POSTHOOK: Input: default@partition_test_partitioned_n3
+POSTHOOK: Output: default@partition_test_partitioned_n3
+PREHOOK: query: insert overwrite table partition_test_partitioned_n3 partition(dt=102) select * from src1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=102
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=102) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n3@dt=102
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n3 partition(dt=102) select * from src1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=102
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select dt, count(1) from partition_test_partitioned where dt is not null group by dt
+POSTHOOK: Output: default@partition_test_partitioned_n3@dt=102
+POSTHOOK: Lineage: partition_test_partitioned_n3 PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n3 PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select dt, count(1) from partition_test_partitioned_n3 where dt is not null group by dt
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Input: default@partition_test_partitioned@dt=102
+PREHOOK: Input: default@partition_test_partitioned_n3
+PREHOOK: Input: default@partition_test_partitioned_n3@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n3@dt=102
 #### A masked pattern was here ####
-POSTHOOK: query: select dt, count(1) from partition_test_partitioned where dt is not null group by dt
+POSTHOOK: query: select dt, count(1) from partition_test_partitioned_n3 where dt is not null group by dt
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Input: default@partition_test_partitioned@dt=102
+POSTHOOK: Input: default@partition_test_partitioned_n3
+POSTHOOK: Input: default@partition_test_partitioned_n3@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n3@dt=102
 #### A masked pattern was here ####
 101	25
 102	25
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=103) select * from src1
+PREHOOK: query: insert overwrite table partition_test_partitioned_n3 partition(dt=103) select * from src1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=103
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=103) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n3@dt=103
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n3 partition(dt=103) select * from src1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=103
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=103).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=103).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select dt, count(1) from partition_test_partitioned where dt is not null group by dt
+POSTHOOK: Output: default@partition_test_partitioned_n3@dt=103
+POSTHOOK: Lineage: partition_test_partitioned_n3 PARTITION(dt=103).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n3 PARTITION(dt=103).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select dt, count(1) from partition_test_partitioned_n3 where dt is not null group by dt
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Input: default@partition_test_partitioned@dt=102
-PREHOOK: Input: default@partition_test_partitioned@dt=103
+PREHOOK: Input: default@partition_test_partitioned_n3
+PREHOOK: Input: default@partition_test_partitioned_n3@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n3@dt=102
+PREHOOK: Input: default@partition_test_partitioned_n3@dt=103
 #### A masked pattern was here ####
-POSTHOOK: query: select dt, count(1) from partition_test_partitioned where dt is not null group by dt
+POSTHOOK: query: select dt, count(1) from partition_test_partitioned_n3 where dt is not null group by dt
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Input: default@partition_test_partitioned@dt=102
-POSTHOOK: Input: default@partition_test_partitioned@dt=103
+POSTHOOK: Input: default@partition_test_partitioned_n3
+POSTHOOK: Input: default@partition_test_partitioned_n3@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n3@dt=102
+POSTHOOK: Input: default@partition_test_partitioned_n3@dt=103
 #### A masked pattern was here ####
 101	25
 102	25
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat6.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat6.q.out
index 1ae15e0592..e1b55c76f9 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat6.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat6.q.out
@@ -1,80 +1,80 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: query: create table partition_test_partitioned_n2(key string, value string) partitioned by (dt string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: Output: default@partition_test_partitioned_n2
+POSTHOOK: query: create table partition_test_partitioned_n2(key string, value string) partitioned by (dt string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: alter table partition_test_partitioned set fileformat rcfile
+POSTHOOK: Output: default@partition_test_partitioned_n2
+PREHOOK: query: alter table partition_test_partitioned_n2 set fileformat rcfile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat rcfile
+PREHOOK: Input: default@partition_test_partitioned_n2
+PREHOOK: Output: default@partition_test_partitioned_n2
+POSTHOOK: query: alter table partition_test_partitioned_n2 set fileformat rcfile
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1
+POSTHOOK: Input: default@partition_test_partitioned_n2
+POSTHOOK: Output: default@partition_test_partitioned_n2
+PREHOOK: query: insert overwrite table partition_test_partitioned_n2 partition(dt=101) select * from src1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=101
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n2@dt=101
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n2 partition(dt=101) select * from src1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=101
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile
+POSTHOOK: Output: default@partition_test_partitioned_n2@dt=101
+POSTHOOK: Lineage: partition_test_partitioned_n2 PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n2 PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table partition_test_partitioned_n2 set fileformat Sequencefile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat Sequencefile
+PREHOOK: Input: default@partition_test_partitioned_n2
+PREHOOK: Output: default@partition_test_partitioned_n2
+POSTHOOK: query: alter table partition_test_partitioned_n2 set fileformat Sequencefile
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=102) select * from src1
+POSTHOOK: Input: default@partition_test_partitioned_n2
+POSTHOOK: Output: default@partition_test_partitioned_n2
+PREHOOK: query: insert overwrite table partition_test_partitioned_n2 partition(dt=102) select * from src1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=102
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=102) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n2@dt=102
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n2 partition(dt=102) select * from src1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=102
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@partition_test_partitioned_n2@dt=102
+POSTHOOK: Lineage: partition_test_partitioned_n2 PARTITION(dt=102).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n2 PARTITION(dt=102).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select count(1) from
-(select key, value from partition_test_partitioned where dt=101 and key < 100
+(select key, value from partition_test_partitioned_n2 where dt=101 and key < 100
 union all
-select key, value from partition_test_partitioned where dt=101 and key < 20)s
+select key, value from partition_test_partitioned_n2 where dt=101 and key < 20)s
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n2
+PREHOOK: Input: default@partition_test_partitioned_n2@dt=101
 #### A masked pattern was here ####
 POSTHOOK: query: select count(1) from
-(select key, value from partition_test_partitioned where dt=101 and key < 100
+(select key, value from partition_test_partitioned_n2 where dt=101 and key < 100
 union all
-select key, value from partition_test_partitioned where dt=101 and key < 20)s
+select key, value from partition_test_partitioned_n2 where dt=101 and key < 20)s
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n2
+POSTHOOK: Input: default@partition_test_partitioned_n2@dt=101
 #### A masked pattern was here ####
 2
 PREHOOK: query: select count(1) from
-(select key, value from partition_test_partitioned where dt=101 and key < 100
+(select key, value from partition_test_partitioned_n2 where dt=101 and key < 100
 union all
-select key, value from partition_test_partitioned where dt=102 and key < 20)s
+select key, value from partition_test_partitioned_n2 where dt=102 and key < 20)s
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
-PREHOOK: Input: default@partition_test_partitioned@dt=102
+PREHOOK: Input: default@partition_test_partitioned_n2
+PREHOOK: Input: default@partition_test_partitioned_n2@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n2@dt=102
 #### A masked pattern was here ####
 POSTHOOK: query: select count(1) from
-(select key, value from partition_test_partitioned where dt=101 and key < 100
+(select key, value from partition_test_partitioned_n2 where dt=101 and key < 100
 union all
-select key, value from partition_test_partitioned where dt=102 and key < 20)s
+select key, value from partition_test_partitioned_n2 where dt=102 and key < 20)s
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
-POSTHOOK: Input: default@partition_test_partitioned@dt=102
+POSTHOOK: Input: default@partition_test_partitioned_n2
+POSTHOOK: Input: default@partition_test_partitioned_n2@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n2@dt=102
 #### A masked pattern was here ####
 2
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat7.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat7.q.out
index 5c2c7855aa..de4a822a9e 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat7.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat7.q.out
@@ -1,52 +1,52 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: query: create table partition_test_partitioned_n11(key string, value string) partitioned by (dt string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string)
+PREHOOK: Output: default@partition_test_partitioned_n11
+POSTHOOK: query: create table partition_test_partitioned_n11(key string, value string) partitioned by (dt string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: alter table partition_test_partitioned set fileformat rcfile
+POSTHOOK: Output: default@partition_test_partitioned_n11
+PREHOOK: query: alter table partition_test_partitioned_n11 set fileformat rcfile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat rcfile
+PREHOOK: Input: default@partition_test_partitioned_n11
+PREHOOK: Output: default@partition_test_partitioned_n11
+POSTHOOK: query: alter table partition_test_partitioned_n11 set fileformat rcfile
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1
+POSTHOOK: Input: default@partition_test_partitioned_n11
+POSTHOOK: Output: default@partition_test_partitioned_n11
+PREHOOK: query: insert overwrite table partition_test_partitioned_n11 partition(dt=101) select * from src1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@partition_test_partitioned@dt=101
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt=101) select * from src1
+PREHOOK: Output: default@partition_test_partitioned_n11@dt=101
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n11 partition(dt=101) select * from src1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@partition_test_partitioned@dt=101
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select count(1) from partition_test_partitioned a join partition_test_partitioned b on a.key = b.key
+POSTHOOK: Output: default@partition_test_partitioned_n11@dt=101
+POSTHOOK: Lineage: partition_test_partitioned_n11 PARTITION(dt=101).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n11 PARTITION(dt=101).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select count(1) from partition_test_partitioned_n11 a join partition_test_partitioned_n11 b on a.key = b.key
 where a.dt = '101' and b.dt = '101'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n11
+PREHOOK: Input: default@partition_test_partitioned_n11@dt=101
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from partition_test_partitioned a join partition_test_partitioned b on a.key = b.key
+POSTHOOK: query: select count(1) from partition_test_partitioned_n11 a join partition_test_partitioned_n11 b on a.key = b.key
 where a.dt = '101' and b.dt = '101'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n11
+POSTHOOK: Input: default@partition_test_partitioned_n11@dt=101
 #### A masked pattern was here ####
 115
-PREHOOK: query: select count(1) from partition_test_partitioned a join partition_test_partitioned b on a.key = b.key
+PREHOOK: query: select count(1) from partition_test_partitioned_n11 a join partition_test_partitioned_n11 b on a.key = b.key
 where a.dt = '101' and b.dt = '101' and a.key < 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=101
+PREHOOK: Input: default@partition_test_partitioned_n11
+PREHOOK: Input: default@partition_test_partitioned_n11@dt=101
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from partition_test_partitioned a join partition_test_partitioned b on a.key = b.key
+POSTHOOK: query: select count(1) from partition_test_partitioned_n11 a join partition_test_partitioned_n11 b on a.key = b.key
 where a.dt = '101' and b.dt = '101' and a.key < 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=101
+POSTHOOK: Input: default@partition_test_partitioned_n11
+POSTHOOK: Input: default@partition_test_partitioned_n11@dt=101
 #### A masked pattern was here ####
 2
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out
index d64649bd62..9de7451508 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out
@@ -1,70 +1,70 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: query: create table partition_test_partitioned_n0(key string, value string) partitioned by (dt string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: Output: default@partition_test_partitioned_n0
+POSTHOOK: query: create table partition_test_partitioned_n0(key string, value string) partitioned by (dt string) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src
+POSTHOOK: Output: default@partition_test_partitioned_n0
+PREHOOK: query: insert overwrite table partition_test_partitioned_n0 partition(dt='1') select * from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src
+PREHOOK: Output: default@partition_test_partitioned_n0@dt=1
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n0 partition(dt='1') select * from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table partition_test_partitioned set fileformat sequencefile
+POSTHOOK: Output: default@partition_test_partitioned_n0@dt=1
+POSTHOOK: Lineage: partition_test_partitioned_n0 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n0 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table partition_test_partitioned_n0 set fileformat sequencefile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set fileformat sequencefile
+PREHOOK: Input: default@partition_test_partitioned_n0
+PREHOOK: Output: default@partition_test_partitioned_n0
+POSTHOOK: query: alter table partition_test_partitioned_n0 set fileformat sequencefile
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src
+POSTHOOK: Input: default@partition_test_partitioned_n0
+POSTHOOK: Output: default@partition_test_partitioned_n0
+PREHOOK: query: insert overwrite table partition_test_partitioned_n0 partition(dt='2') select * from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=2
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src
+PREHOOK: Output: default@partition_test_partitioned_n0@dt=2
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n0 partition(dt='2') select * from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=2
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+POSTHOOK: Output: default@partition_test_partitioned_n0@dt=2
+POSTHOOK: Lineage: partition_test_partitioned_n0 PARTITION(dt=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n0 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table partition_test_partitioned_n0 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+PREHOOK: Input: default@partition_test_partitioned_n0
+PREHOOK: Output: default@partition_test_partitioned_n0
+POSTHOOK: query: alter table partition_test_partitioned_n0 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='3') select * from src
+POSTHOOK: Input: default@partition_test_partitioned_n0
+POSTHOOK: Output: default@partition_test_partitioned_n0
+PREHOOK: query: insert overwrite table partition_test_partitioned_n0 partition(dt='3') select * from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=3
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='3') select * from src
+PREHOOK: Output: default@partition_test_partitioned_n0@dt=3
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n0 partition(dt='3') select * from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=3
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+POSTHOOK: Output: default@partition_test_partitioned_n0@dt=3
+POSTHOOK: Lineage: partition_test_partitioned_n0 PARTITION(dt=3).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n0 PARTITION(dt=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from partition_test_partitioned_n0 where dt is not null order by key, value, dt limit 20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
-PREHOOK: Input: default@partition_test_partitioned@dt=2
-PREHOOK: Input: default@partition_test_partitioned@dt=3
+PREHOOK: Input: default@partition_test_partitioned_n0
+PREHOOK: Input: default@partition_test_partitioned_n0@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n0@dt=2
+PREHOOK: Input: default@partition_test_partitioned_n0@dt=3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+POSTHOOK: query: select * from partition_test_partitioned_n0 where dt is not null order by key, value, dt limit 20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Input: default@partition_test_partitioned@dt=2
-POSTHOOK: Input: default@partition_test_partitioned@dt=3
+POSTHOOK: Input: default@partition_test_partitioned_n0
+POSTHOOK: Input: default@partition_test_partitioned_n0@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n0@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n0@dt=3
 #### A masked pattern was here ####
 0	val_0	1
 0	val_0	1
@@ -86,19 +86,19 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=3
 100	val_100	3
 103	val_103	1
 103	val_103	1
-PREHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+PREHOOK: query: select key+key as key, value, dt from partition_test_partitioned_n0 where dt is not null order by key, value, dt limit 20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
-PREHOOK: Input: default@partition_test_partitioned@dt=2
-PREHOOK: Input: default@partition_test_partitioned@dt=3
+PREHOOK: Input: default@partition_test_partitioned_n0
+PREHOOK: Input: default@partition_test_partitioned_n0@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n0@dt=2
+PREHOOK: Input: default@partition_test_partitioned_n0@dt=3
 #### A masked pattern was here ####
-POSTHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+POSTHOOK: query: select key+key as key, value, dt from partition_test_partitioned_n0 where dt is not null order by key, value, dt limit 20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Input: default@partition_test_partitioned@dt=2
-POSTHOOK: Input: default@partition_test_partitioned@dt=3
+POSTHOOK: Input: default@partition_test_partitioned_n0
+POSTHOOK: Input: default@partition_test_partitioned_n0@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n0@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n0@dt=3
 #### A masked pattern was here ####
 0.0	val_0	1
 0.0	val_0	1
diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out
index d2f0c78ea5..ebb539ea34 100644
--- a/ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out
+++ b/ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out
@@ -1,50 +1,50 @@
-PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: query: create table partition_test_partitioned_n7(key string, value string) partitioned by (dt string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile
+PREHOOK: Output: default@partition_test_partitioned_n7
+POSTHOOK: query: create table partition_test_partitioned_n7(key string, value string) partitioned by (dt string) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src
+POSTHOOK: Output: default@partition_test_partitioned_n7
+PREHOOK: query: insert overwrite table partition_test_partitioned_n7 partition(dt='1') select * from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='1') select * from src
+PREHOOK: Output: default@partition_test_partitioned_n7@dt=1
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n7 partition(dt='1') select * from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=1
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+POSTHOOK: Output: default@partition_test_partitioned_n7@dt=1
+POSTHOOK: Lineage: partition_test_partitioned_n7 PARTITION(dt=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n7 PARTITION(dt=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: alter table partition_test_partitioned_n7 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
 PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Output: default@partition_test_partitioned
-POSTHOOK: query: alter table partition_test_partitioned set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: Input: default@partition_test_partitioned_n7
+PREHOOK: Output: default@partition_test_partitioned_n7
+POSTHOOK: query: alter table partition_test_partitioned_n7 set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
 POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Output: default@partition_test_partitioned
-PREHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src
+POSTHOOK: Input: default@partition_test_partitioned_n7
+POSTHOOK: Output: default@partition_test_partitioned_n7
+PREHOOK: query: insert overwrite table partition_test_partitioned_n7 partition(dt='2') select * from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@partition_test_partitioned@dt=2
-POSTHOOK: query: insert overwrite table partition_test_partitioned partition(dt='2') select * from src
+PREHOOK: Output: default@partition_test_partitioned_n7@dt=2
+POSTHOOK: query: insert overwrite table partition_test_partitioned_n7 partition(dt='2') select * from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@partition_test_partitioned@dt=2
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: partition_test_partitioned PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+POSTHOOK: Output: default@partition_test_partitioned_n7@dt=2
+POSTHOOK: Lineage: partition_test_partitioned_n7 PARTITION(dt=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partition_test_partitioned_n7 PARTITION(dt=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from partition_test_partitioned_n7 where dt is not null order by key, value, dt limit 20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
-PREHOOK: Input: default@partition_test_partitioned@dt=2
+PREHOOK: Input: default@partition_test_partitioned_n7
+PREHOOK: Input: default@partition_test_partitioned_n7@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n7@dt=2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+POSTHOOK: query: select * from partition_test_partitioned_n7 where dt is not null order by key, value, dt limit 20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Input: default@partition_test_partitioned@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n7
+POSTHOOK: Input: default@partition_test_partitioned_n7@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n7@dt=2
 #### A masked pattern was here ####
 0	val_0	1
 0	val_0	1
@@ -66,17 +66,17 @@ POSTHOOK: Input: default@partition_test_partitioned@dt=2
 104	val_104	1
 104	val_104	2
 104	val_104	2
-PREHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+PREHOOK: query: select key+key as key, value, dt from partition_test_partitioned_n7 where dt is not null order by key, value, dt limit 20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@partition_test_partitioned
-PREHOOK: Input: default@partition_test_partitioned@dt=1
-PREHOOK: Input: default@partition_test_partitioned@dt=2
+PREHOOK: Input: default@partition_test_partitioned_n7
+PREHOOK: Input: default@partition_test_partitioned_n7@dt=1
+PREHOOK: Input: default@partition_test_partitioned_n7@dt=2
 #### A masked pattern was here ####
-POSTHOOK: query: select key+key as key, value, dt from partition_test_partitioned where dt is not null order by key, value, dt limit 20
+POSTHOOK: query: select key+key as key, value, dt from partition_test_partitioned_n7 where dt is not null order by key, value, dt limit 20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@partition_test_partitioned
-POSTHOOK: Input: default@partition_test_partitioned@dt=1
-POSTHOOK: Input: default@partition_test_partitioned@dt=2
+POSTHOOK: Input: default@partition_test_partitioned_n7
+POSTHOOK: Input: default@partition_test_partitioned_n7@dt=1
+POSTHOOK: Input: default@partition_test_partitioned_n7@dt=2
 #### A masked pattern was here ####
 0.0	val_0	1
 0.0	val_0	1
diff --git a/ql/src/test/results/clientpositive/partitions_json.q.out b/ql/src/test/results/clientpositive/partitions_json.q.out
index 531ea56713..ca63ebbd81 100644
--- a/ql/src/test/results/clientpositive/partitions_json.q.out
+++ b/ql/src/test/results/clientpositive/partitions_json.q.out
@@ -1,77 +1,77 @@
-PREHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE add_part_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@add_part_test
-POSTHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@add_part_test_n0
+POSTHOOK: query: CREATE TABLE add_part_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@add_part_test
-PREHOOK: query: SHOW PARTITIONS add_part_test
+POSTHOOK: Output: default@add_part_test_n0
+PREHOOK: query: SHOW PARTITIONS add_part_test_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@add_part_test
-POSTHOOK: query: SHOW PARTITIONS add_part_test
+PREHOOK: Input: default@add_part_test_n0
+POSTHOOK: query: SHOW PARTITIONS add_part_test_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@add_part_test
+POSTHOOK: Input: default@add_part_test_n0
 {"partitions":[]}
-PREHOOK: query: ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01')
+PREHOOK: query: ALTER TABLE add_part_test_n0 ADD PARTITION (ds='2010-01-01')
 PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@add_part_test
-POSTHOOK: query: ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01')
+PREHOOK: Output: default@add_part_test_n0
+POSTHOOK: query: ALTER TABLE add_part_test_n0 ADD PARTITION (ds='2010-01-01')
 POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@add_part_test
-POSTHOOK: Output: default@add_part_test@ds=2010-01-01
-PREHOOK: query: SHOW PARTITIONS add_part_test
+POSTHOOK: Output: default@add_part_test_n0
+POSTHOOK: Output: default@add_part_test_n0@ds=2010-01-01
+PREHOOK: query: SHOW PARTITIONS add_part_test_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@add_part_test
-POSTHOOK: query: SHOW PARTITIONS add_part_test
+PREHOOK: Input: default@add_part_test_n0
+POSTHOOK: query: SHOW PARTITIONS add_part_test_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@add_part_test
+POSTHOOK: Input: default@add_part_test_n0
 {"partitions":[{"name":"ds='2010-01-01'","values":[{"columnName":"ds","columnValue":"2010-01-01"}]}]}
-PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01')
+PREHOOK: query: ALTER TABLE add_part_test_n0 ADD IF NOT EXISTS PARTITION (ds='2010-01-01')
 PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@add_part_test
-POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01')
+PREHOOK: Output: default@add_part_test_n0
+POSTHOOK: query: ALTER TABLE add_part_test_n0 ADD IF NOT EXISTS PARTITION (ds='2010-01-01')
 POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@add_part_test
-PREHOOK: query: SHOW PARTITIONS add_part_test
+POSTHOOK: Output: default@add_part_test_n0
+PREHOOK: query: SHOW PARTITIONS add_part_test_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@add_part_test
-POSTHOOK: query: SHOW PARTITIONS add_part_test
+PREHOOK: Input: default@add_part_test_n0
+POSTHOOK: query: SHOW PARTITIONS add_part_test_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@add_part_test
+POSTHOOK: Input: default@add_part_test_n0
 {"partitions":[{"name":"ds='2010-01-01'","values":[{"columnName":"ds","columnValue":"2010-01-01"}]}]}
-PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02')
+PREHOOK: query: ALTER TABLE add_part_test_n0 ADD IF NOT EXISTS PARTITION (ds='2010-01-02')
 PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@add_part_test
-POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02')
+PREHOOK: Output: default@add_part_test_n0
+POSTHOOK: query: ALTER TABLE add_part_test_n0 ADD IF NOT EXISTS PARTITION (ds='2010-01-02')
 POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@add_part_test
-POSTHOOK: Output: default@add_part_test@ds=2010-01-02
-PREHOOK: query: SHOW PARTITIONS add_part_test
+POSTHOOK: Output: default@add_part_test_n0
+POSTHOOK: Output: default@add_part_test_n0@ds=2010-01-02
+PREHOOK: query: SHOW PARTITIONS add_part_test_n0
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@add_part_test
-POSTHOOK: query: SHOW PARTITIONS add_part_test
+PREHOOK: Input: default@add_part_test_n0
+POSTHOOK: query: SHOW PARTITIONS add_part_test_n0
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@add_part_test
+POSTHOOK: Input: default@add_part_test_n0
 {"partitions":[{"name":"ds='2010-01-01'","values":[{"columnName":"ds","columnValue":"2010-01-01"}]},{"name":"ds='2010-01-02'","values":[{"columnName":"ds","columnValue":"2010-01-02"}]}]}
-PREHOOK: query: SHOW TABLE EXTENDED LIKE add_part_test PARTITION (ds='2010-01-02')
+PREHOOK: query: SHOW TABLE EXTENDED LIKE add_part_test_n0 PARTITION (ds='2010-01-02')
 PREHOOK: type: SHOW_TABLESTATUS
-POSTHOOK: query: SHOW TABLE EXTENDED LIKE add_part_test PARTITION (ds='2010-01-02')
+POSTHOOK: query: SHOW TABLE EXTENDED LIKE add_part_test_n0 PARTITION (ds='2010-01-02')
 POSTHOOK: type: SHOW_TABLESTATUS
 #### A masked pattern was here ####
-PREHOOK: query: ALTER TABLE add_part_test DROP PARTITION (ds='2010-01-02')
+PREHOOK: query: ALTER TABLE add_part_test_n0 DROP PARTITION (ds='2010-01-02')
 PREHOOK: type: ALTERTABLE_DROPPARTS
-PREHOOK: Input: default@add_part_test
-PREHOOK: Output: default@add_part_test@ds=2010-01-02
-POSTHOOK: query: ALTER TABLE add_part_test DROP PARTITION (ds='2010-01-02')
+PREHOOK: Input: default@add_part_test_n0
+PREHOOK: Output: default@add_part_test_n0@ds=2010-01-02
+POSTHOOK: query: ALTER TABLE add_part_test_n0 DROP PARTITION (ds='2010-01-02')
 POSTHOOK: type: ALTERTABLE_DROPPARTS
-POSTHOOK: Input: default@add_part_test
-POSTHOOK: Output: default@add_part_test@ds=2010-01-02
-PREHOOK: query: DROP TABLE add_part_test
+POSTHOOK: Input: default@add_part_test_n0
+POSTHOOK: Output: default@add_part_test_n0@ds=2010-01-02
+PREHOOK: query: DROP TABLE add_part_test_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@add_part_test
-PREHOOK: Output: default@add_part_test
-POSTHOOK: query: DROP TABLE add_part_test
+PREHOOK: Input: default@add_part_test_n0
+PREHOOK: Output: default@add_part_test_n0
+POSTHOOK: query: DROP TABLE add_part_test_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@add_part_test
-POSTHOOK: Output: default@add_part_test
+POSTHOOK: Input: default@add_part_test_n0
+POSTHOOK: Output: default@add_part_test_n0
diff --git a/ql/src/test/results/clientpositive/pointlookup2.q.out b/ql/src/test/results/clientpositive/pointlookup2.q.out
index 5a4ba5acb1..1776b48f0d 100644
--- a/ql/src/test/results/clientpositive/pointlookup2.q.out
+++ b/ql/src/test/results/clientpositive/pointlookup2.q.out
@@ -1,100 +1,100 @@
-PREHOOK: query: drop table pcr_t1
+PREHOOK: query: drop table pcr_t1_n2
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table pcr_t1
+POSTHOOK: query: drop table pcr_t1_n2
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table pcr_t2
+PREHOOK: query: drop table pcr_t2_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table pcr_t2
+POSTHOOK: query: drop table pcr_t2_n0
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: drop table pcr_t3
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table pcr_t3
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds string)
+PREHOOK: query: create table pcr_t1_n2 (key int, value string) partitioned by (ds string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@pcr_t1
-POSTHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds string)
+PREHOOK: Output: default@pcr_t1_n2
+POSTHOOK: query: create table pcr_t1_n2 (key int, value string) partitioned by (ds string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@pcr_t1
-PREHOOK: query: insert overwrite table pcr_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key
+POSTHOOK: Output: default@pcr_t1_n2
+PREHOOK: query: insert overwrite table pcr_t1_n2 partition (ds='2000-04-08') select * from src where key < 20 order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@pcr_t1@ds=2000-04-08
-POSTHOOK: query: insert overwrite table pcr_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key
+PREHOOK: Output: default@pcr_t1_n2@ds=2000-04-08
+POSTHOOK: query: insert overwrite table pcr_t1_n2 partition (ds='2000-04-08') select * from src where key < 20 order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@pcr_t1@ds=2000-04-08
-POSTHOOK: Lineage: pcr_t1 PARTITION(ds=2000-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: pcr_t1 PARTITION(ds=2000-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table pcr_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key
+POSTHOOK: Output: default@pcr_t1_n2@ds=2000-04-08
+POSTHOOK: Lineage: pcr_t1_n2 PARTITION(ds=2000-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1_n2 PARTITION(ds=2000-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcr_t1_n2 partition (ds='2000-04-09') select * from src where key < 20 order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@pcr_t1@ds=2000-04-09
-POSTHOOK: query: insert overwrite table pcr_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key
+PREHOOK: Output: default@pcr_t1_n2@ds=2000-04-09
+POSTHOOK: query: insert overwrite table pcr_t1_n2 partition (ds='2000-04-09') select * from src where key < 20 order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@pcr_t1@ds=2000-04-09
-POSTHOOK: Lineage: pcr_t1 PARTITION(ds=2000-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: pcr_t1 PARTITION(ds=2000-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table pcr_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key
+POSTHOOK: Output: default@pcr_t1_n2@ds=2000-04-09
+POSTHOOK: Lineage: pcr_t1_n2 PARTITION(ds=2000-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1_n2 PARTITION(ds=2000-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcr_t1_n2 partition (ds='2000-04-10') select * from src where key < 20 order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@pcr_t1@ds=2000-04-10
-POSTHOOK: query: insert overwrite table pcr_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key
+PREHOOK: Output: default@pcr_t1_n2@ds=2000-04-10
+POSTHOOK: query: insert overwrite table pcr_t1_n2 partition (ds='2000-04-10') select * from src where key < 20 order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@pcr_t1@ds=2000-04-10
-POSTHOOK: Lineage: pcr_t1 PARTITION(ds=2000-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: pcr_t1 PARTITION(ds=2000-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table pcr_t2 (ds string, key int, value string)
+POSTHOOK: Output: default@pcr_t1_n2@ds=2000-04-10
+POSTHOOK: Lineage: pcr_t1_n2 PARTITION(ds=2000-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcr_t1_n2 PARTITION(ds=2000-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table pcr_t2_n0 (ds string, key int, value string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@pcr_t2
-POSTHOOK: query: create table pcr_t2 (ds string, key int, value string)
+PREHOOK: Output: default@pcr_t2_n0
+POSTHOOK: query: create table pcr_t2_n0 (ds string, key int, value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@pcr_t2
-PREHOOK: query: from pcr_t1
-insert overwrite table pcr_t2 select ds, key, value where ds='2000-04-08'
+POSTHOOK: Output: default@pcr_t2_n0
+PREHOOK: query: from pcr_t1_n2
+insert overwrite table pcr_t2_n0 select ds, key, value where ds='2000-04-08'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@pcr_t1
-PREHOOK: Input: default@pcr_t1@ds=2000-04-08
-PREHOOK: Output: default@pcr_t2
-POSTHOOK: query: from pcr_t1
-insert overwrite table pcr_t2 select ds, key, value where ds='2000-04-08'
+PREHOOK: Input: default@pcr_t1_n2
+PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08
+PREHOOK: Output: default@pcr_t2_n0
+POSTHOOK: query: from pcr_t1_n2
+insert overwrite table pcr_t2_n0 select ds, key, value where ds='2000-04-08'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@pcr_t1
-POSTHOOK: Input: default@pcr_t1@ds=2000-04-08
-POSTHOOK: Output: default@pcr_t2
-POSTHOOK: Lineage: pcr_t2.ds SIMPLE []
-POSTHOOK: Lineage: pcr_t2.key SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: pcr_t2.value SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: from pcr_t1
-insert overwrite table pcr_t2 select ds, key, value where ds='2000-04-08' and key=2
+POSTHOOK: Input: default@pcr_t1_n2
+POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08
+POSTHOOK: Output: default@pcr_t2_n0
+POSTHOOK: Lineage: pcr_t2_n0.ds SIMPLE []
+POSTHOOK: Lineage: pcr_t2_n0.key SIMPLE [(pcr_t1_n2)pcr_t1_n2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: pcr_t2_n0.value SIMPLE [(pcr_t1_n2)pcr_t1_n2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: from pcr_t1_n2
+insert overwrite table pcr_t2_n0 select ds, key, value where ds='2000-04-08' and key=2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@pcr_t1
-PREHOOK: Input: default@pcr_t1@ds=2000-04-08
-PREHOOK: Output: default@pcr_t2
-POSTHOOK: query: from pcr_t1
-insert overwrite table pcr_t2 select ds, key, value where ds='2000-04-08' and key=2
+PREHOOK: Input: default@pcr_t1_n2
+PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08
+PREHOOK: Output: default@pcr_t2_n0
+POSTHOOK: query: from pcr_t1_n2
+insert overwrite table pcr_t2_n0 select ds, key, value where ds='2000-04-08' and key=2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@pcr_t1
-POSTHOOK: Input: default@pcr_t1@ds=2000-04-08
-POSTHOOK: Output: default@pcr_t2
-POSTHOOK: Lineage: pcr_t2.ds SIMPLE []
-POSTHOOK: Lineage: pcr_t2.key SIMPLE []
-POSTHOOK: Lineage: pcr_t2.value SIMPLE [(pcr_t1)pcr_t1.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Input: default@pcr_t1_n2
+POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08
+POSTHOOK: Output: default@pcr_t2_n0
+POSTHOOK: Lineage: pcr_t2_n0.ds SIMPLE []
+POSTHOOK: Lineage: pcr_t2_n0.key SIMPLE []
+POSTHOOK: Lineage: pcr_t2_n0.value SIMPLE [(pcr_t1_n2)pcr_t1_n2.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain extended
 select key, value, ds
-from pcr_t1
+from pcr_t1_n2
 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2)
 order by key, value, ds
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 select key, value, ds
-from pcr_t1
+from pcr_t1_n2
 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2)
 order by key, value, ds
 POSTHOOK: type: QUERY
@@ -107,7 +107,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: pcr_t1
+            alias: pcr_t1_n2
             Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
@@ -144,13 +144,13 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 numFiles 1
                 numRows 20
                 partition_columns ds
                 partition_columns.types string
                 rawDataSize 160
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 180
@@ -167,16 +167,16 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.pcr_t1
-            name: default.pcr_t1
+              name: default.pcr_t1_n2
+            name: default.pcr_t1_n2
 #### A masked pattern was here ####
             Partition
               base file name: ds=2000-04-09
@@ -192,13 +192,13 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 numFiles 1
                 numRows 20
                 partition_columns ds
                 partition_columns.types string
                 rawDataSize 160
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 180
@@ -215,19 +215,19 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.pcr_t1
-            name: default.pcr_t1
+              name: default.pcr_t1_n2
+            name: default.pcr_t1_n2
       Truncated Path -> Alias:
-        /pcr_t1/ds=2000-04-08 [pcr_t1]
-        /pcr_t1/ds=2000-04-09 [pcr_t1]
+        /pcr_t1_n2/ds=2000-04-08 [pcr_t1_n2]
+        /pcr_t1_n2/ds=2000-04-09 [pcr_t1_n2]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -265,13 +265,13 @@ STAGE PLANS:
 PREHOOK: query: explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08'
 order by t1.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08'
 order by t1.key
 POSTHOOK: type: QUERY
@@ -344,13 +344,13 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 numFiles 1
                 numRows 20
                 partition_columns ds
                 partition_columns.types string
                 rawDataSize 160
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 180
@@ -367,18 +367,18 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.pcr_t1
-            name: default.pcr_t1
+              name: default.pcr_t1_n2
+            name: default.pcr_t1_n2
       Truncated Path -> Alias:
-        /pcr_t1/ds=2000-04-08 [$hdt$_0:$hdt$_0:t1, $hdt$_0:$hdt$_1:t2]
+        /pcr_t1_n2/ds=2000-04-08 [$hdt$_0:$hdt$_0:t1, $hdt$_0:$hdt$_1:t2]
       Needs Tagging: true
       Reduce Operator Tree:
        Join Operator
@@ -490,13 +490,13 @@ STAGE PLANS:
 PREHOOK: query: explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-09'
 order by t1.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 select *
-from pcr_t1 t1 join pcr_t1 t2
+from pcr_t1_n2 t1 join pcr_t1_n2 t2
 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-09'
 order by t1.key
 POSTHOOK: type: QUERY
@@ -569,13 +569,13 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 numFiles 1
                 numRows 20
                 partition_columns ds
                 partition_columns.types string
                 rawDataSize 160
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 180
@@ -592,16 +592,16 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.pcr_t1
-            name: default.pcr_t1
+              name: default.pcr_t1_n2
+            name: default.pcr_t1_n2
 #### A masked pattern was here ####
             Partition
               base file name: ds=2000-04-09
@@ -617,13 +617,13 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 numFiles 1
                 numRows 20
                 partition_columns ds
                 partition_columns.types string
                 rawDataSize 160
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 180
@@ -640,19 +640,19 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.pcr_t1
-            name: default.pcr_t1
+              name: default.pcr_t1_n2
+            name: default.pcr_t1_n2
       Truncated Path -> Alias:
-        /pcr_t1/ds=2000-04-08 [$hdt$_0:$hdt$_0:t1]
-        /pcr_t1/ds=2000-04-09 [$hdt$_0:$hdt$_1:t2]
+        /pcr_t1_n2/ds=2000-04-08 [$hdt$_0:$hdt$_0:t1]
+        /pcr_t1_n2/ds=2000-04-09 [$hdt$_0:$hdt$_1:t2]
       Needs Tagging: true
      Reduce Operator Tree:
        Join Operator
@@ -765,13 +765,13 @@ STAGE PLANS:
 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain extended
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 select *
-from pcr_t1 t1 join pcr_t2 t2
+from pcr_t1_n2 t1 join pcr_t2_n0 t2
 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2)
 order by t2.key, t2.value, t1.ds
 POSTHOOK: type: QUERY
@@ -836,13 +836,13 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 numFiles 1
                 numRows 20
                 partition_columns ds
                 partition_columns.types string
                 rawDataSize 160
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 totalSize 180
@@ -859,16 +859,16 @@ STAGE PLANS:
                 columns.comments 
                 columns.types int:string
 #### A masked pattern was here ####
-                name default.pcr_t1
+                name default.pcr_t1_n2
                 partition_columns ds
                 partition_columns.types string
-                serialization.ddl struct pcr_t1 { i32 key, string value}
+                serialization.ddl struct pcr_t1_n2 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.pcr_t1
-            name: default.pcr_t1
+              name: default.pcr_t1_n2
+            name: default.pcr_t1_n2
 #### A masked pattern was here ####
             Partition
              base file
name: ds=2000-04-09 @@ -884,13 +884,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -907,19 +907,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition - base file name: pcr_t2 + base file name: pcr_t2_n0 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -931,11 +931,11 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.pcr_t2 + name default.pcr_t2_n0 numFiles 1 numRows 1 rawDataSize 18 - serialization.ddl struct pcr_t2 { string ds, i32 key, string value} + serialization.ddl struct pcr_t2_n0 { string ds, i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 19 @@ -953,22 +953,22 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.pcr_t2 + name default.pcr_t2_n0 numFiles 1 numRows 1 rawDataSize 18 - serialization.ddl struct pcr_t2 { string ds, i32 key, string value} + serialization.ddl struct pcr_t2_n0 { string ds, i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 19 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t2 - name: default.pcr_t2 + name: default.pcr_t2_n0 + name: default.pcr_t2_n0 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [$hdt$_0:t1] - /pcr_t1/ds=2000-04-09 [$hdt$_0:t1] - /pcr_t2 [$hdt$_1:t2] + /pcr_t1_n2/ds=2000-04-08 [$hdt$_0:t1] + /pcr_t1_n2/ds=2000-04-09 [$hdt$_0:t1] + /pcr_t2_n0 [$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -1081,13 +1081,13 @@ STAGE PLANS: Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2) order by t1.key, t1.value, t2.ds PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2) order by t1.key, t1.value, t2.ds POSTHOOK: type: QUERY @@ -1156,13 +1156,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 
partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1179,16 +1179,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition base file name: ds=2000-04-09 @@ -1204,13 +1204,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1227,16 +1227,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition base file name: ds=2000-04-10 @@ -1252,13 +1252,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1275,19 +1275,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition - base file name: pcr_t2 + base file name: pcr_t2_n0 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -1299,11 +1299,11 @@ STAGE PLANS: 
columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.pcr_t2 + name default.pcr_t2_n0 numFiles 1 numRows 1 rawDataSize 18 - serialization.ddl struct pcr_t2 { string ds, i32 key, string value} + serialization.ddl struct pcr_t2_n0 { string ds, i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 19 @@ -1321,23 +1321,23 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.pcr_t2 + name default.pcr_t2_n0 numFiles 1 numRows 1 rawDataSize 18 - serialization.ddl struct pcr_t2 { string ds, i32 key, string value} + serialization.ddl struct pcr_t2_n0 { string ds, i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 19 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t2 - name: default.pcr_t2 + name: default.pcr_t2_n0 + name: default.pcr_t2_n0 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [$hdt$_0:t1] - /pcr_t1/ds=2000-04-09 [$hdt$_0:t1] - /pcr_t1/ds=2000-04-10 [$hdt$_0:t1] - /pcr_t2 [$hdt$_1:t2] + /pcr_t1_n2/ds=2000-04-08 [$hdt$_0:t1] + /pcr_t1_n2/ds=2000-04-09 [$hdt$_0:t1] + /pcr_t1_n2/ds=2000-04-10 [$hdt$_0:t1] + /pcr_t2_n0 [$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -1448,39 +1448,39 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value, ds -from pcr_t1 +from pcr_t1_n2 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 -PREHOOK: Input: default@pcr_t1@ds=2000-04-09 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 #### A masked pattern was here #### POSTHOOK: query: select key, value, ds -from pcr_t1 +from pcr_t1_n2 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-09 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 #### A masked pattern was here #### 2 val_2 2000-04-09 PREHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n2 t1 join pcr_t1_n2 t2 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08' order by t1.key PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n2 t1 join pcr_t1_n2 t2 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08' order by t1.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 #### A masked pattern was here #### 0 val_0 2000-04-08 0 val_0 2000-04-08 0 val_0 2000-04-08 0 val_0 2000-04-08 @@ -1522,24 +1522,24 @@ POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 19 val_19 2000-04-08 19 val_19 2000-04-08 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * -from pcr_t1 
t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 -PREHOOK: Input: default@pcr_t1@ds=2000-04-09 -PREHOOK: Input: default@pcr_t2 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +PREHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-09 -POSTHOOK: Input: default@pcr_t2 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +POSTHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### 0 val_0 2000-04-09 2000-04-08 2 val_2 0 val_0 2000-04-09 2000-04-08 2 val_2 @@ -1563,24 +1563,24 @@ POSTHOOK: Input: default@pcr_t2 9 val_9 2000-04-09 2000-04-08 2 val_2 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 -PREHOOK: Input: default@pcr_t1@ds=2000-04-09 -PREHOOK: Input: default@pcr_t2 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +PREHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-09 -POSTHOOK: Input: default@pcr_t2 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +POSTHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### 0 val_0 2000-04-09 2000-04-08 2 val_2 0 val_0 2000-04-09 2000-04-08 2 val_2 @@ -1604,36 +1604,36 @@ POSTHOOK: Input: default@pcr_t2 9 val_9 2000-04-09 2000-04-08 2 val_2 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2) order by t1.key, t1.value, t2.ds PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 -PREHOOK: Input: default@pcr_t1@ds=2000-04-09 -PREHOOK: Input: default@pcr_t1@ds=2000-04-10 -PREHOOK: Input: default@pcr_t2 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-10 +PREHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### POSTHOOK: 
query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2) order by t1.key, t1.value, t2.ds POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-09 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-10 -POSTHOOK: Input: default@pcr_t2 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-10 +POSTHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### PREHOOK: query: explain extended select key, value, ds -from pcr_t1 +from pcr_t1_n2 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds PREHOOK: type: QUERY POSTHOOK: query: explain extended select key, value, ds -from pcr_t1 +from pcr_t1_n2 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds POSTHOOK: type: QUERY @@ -1646,7 +1646,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: pcr_t1 + alias: pcr_t1_n2 Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -1683,13 +1683,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1706,16 +1706,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition base file name: ds=2000-04-09 @@ -1731,13 +1731,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1754,19 +1754,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: 
default.pcr_t1_n2 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [pcr_t1] - /pcr_t1/ds=2000-04-09 [pcr_t1] + /pcr_t1_n2/ds=2000-04-08 [pcr_t1_n2] + /pcr_t1_n2/ds=2000-04-09 [pcr_t1_n2] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -1804,13 +1804,13 @@ STAGE PLANS: PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n2 t1 join pcr_t1_n2 t2 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08' order by t1.key PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n2 t1 join pcr_t1_n2 t2 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08' order by t1.key POSTHOOK: type: QUERY @@ -1883,13 +1883,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1906,18 +1906,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [$hdt$_0:$hdt$_0:t1, $hdt$_0:$hdt$_1:t2] + /pcr_t1_n2/ds=2000-04-08 [$hdt$_0:$hdt$_0:t1, $hdt$_0:$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -2029,13 +2029,13 @@ STAGE PLANS: PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n2 t1 join pcr_t1_n2 t2 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-09' order by t1.key PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n2 t1 join pcr_t1_n2 t2 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-09' order by t1.key POSTHOOK: type: QUERY @@ -2108,13 +2108,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2131,16 +2131,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + 
name: default.pcr_t1_n2 #### A masked pattern was here #### Partition base file name: ds=2000-04-09 @@ -2156,13 +2156,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2179,19 +2179,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [$hdt$_0:$hdt$_0:t1] - /pcr_t1/ds=2000-04-09 [$hdt$_0:$hdt$_1:t2] + /pcr_t1_n2/ds=2000-04-08 [$hdt$_0:$hdt$_0:t1] + /pcr_t1_n2/ds=2000-04-09 [$hdt$_0:$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -2304,13 +2304,13 @@ STAGE PLANS: Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds POSTHOOK: type: QUERY @@ -2375,13 +2375,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2398,16 +2398,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition base file name: ds=2000-04-09 @@ -2423,13 +2423,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { 
i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2446,19 +2446,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition - base file name: pcr_t2 + base file name: pcr_t2_n0 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -2470,11 +2470,11 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.pcr_t2 + name default.pcr_t2_n0 numFiles 1 numRows 1 rawDataSize 18 - serialization.ddl struct pcr_t2 { string ds, i32 key, string value} + serialization.ddl struct pcr_t2_n0 { string ds, i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 19 @@ -2492,22 +2492,22 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.pcr_t2 + name default.pcr_t2_n0 numFiles 1 numRows 1 rawDataSize 18 - serialization.ddl struct pcr_t2 { string ds, i32 key, string value} + serialization.ddl struct pcr_t2_n0 { string ds, i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 19 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t2 - name: default.pcr_t2 + name: default.pcr_t2_n0 + name: default.pcr_t2_n0 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [$hdt$_0:t1] - /pcr_t1/ds=2000-04-09 [$hdt$_0:t1] - /pcr_t2 [$hdt$_1:t2] + /pcr_t1_n2/ds=2000-04-08 [$hdt$_0:t1] + /pcr_t1_n2/ds=2000-04-09 [$hdt$_0:t1] + /pcr_t2_n0 [$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -2620,13 +2620,13 @@ STAGE PLANS: Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2) order by t1.key, t1.value, t2.ds PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2) order by t1.key, t1.value, t2.ds POSTHOOK: type: QUERY @@ -2695,13 +2695,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2718,16 
+2718,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition base file name: ds=2000-04-09 @@ -2743,13 +2743,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2766,16 +2766,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition base file name: ds=2000-04-10 @@ -2791,13 +2791,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 numFiles 1 numRows 20 partition_columns ds partition_columns.types string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2814,19 +2814,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n2 partition_columns ds partition_columns.types string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n2 + name: default.pcr_t1_n2 #### A masked pattern was here #### Partition - base file name: pcr_t2 + base file name: pcr_t2_n0 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -2838,11 +2838,11 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.pcr_t2 + name default.pcr_t2_n0 numFiles 1 numRows 1 rawDataSize 18 - serialization.ddl struct pcr_t2 { string ds, i32 key, string value} + serialization.ddl struct pcr_t2_n0 { string ds, i32 key, string value} 
serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 19 @@ -2860,23 +2860,23 @@ STAGE PLANS: columns.comments columns.types string:int:string #### A masked pattern was here #### - name default.pcr_t2 + name default.pcr_t2_n0 numFiles 1 numRows 1 rawDataSize 18 - serialization.ddl struct pcr_t2 { string ds, i32 key, string value} + serialization.ddl struct pcr_t2_n0 { string ds, i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 19 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t2 - name: default.pcr_t2 + name: default.pcr_t2_n0 + name: default.pcr_t2_n0 Truncated Path -> Alias: - /pcr_t1/ds=2000-04-08 [$hdt$_0:t1] - /pcr_t1/ds=2000-04-09 [$hdt$_0:t1] - /pcr_t1/ds=2000-04-10 [$hdt$_0:t1] - /pcr_t2 [$hdt$_1:t2] + /pcr_t1_n2/ds=2000-04-08 [$hdt$_0:t1] + /pcr_t1_n2/ds=2000-04-09 [$hdt$_0:t1] + /pcr_t1_n2/ds=2000-04-10 [$hdt$_0:t1] + /pcr_t2_n0 [$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -2987,39 +2987,39 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value, ds -from pcr_t1 +from pcr_t1_n2 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 -PREHOOK: Input: default@pcr_t1@ds=2000-04-09 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 #### A masked pattern was here #### POSTHOOK: query: select key, value, ds -from pcr_t1 +from pcr_t1_n2 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-09 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 #### A masked pattern was here #### 2 val_2 2000-04-09 PREHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n2 t1 join pcr_t1_n2 t2 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08' order by t1.key PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n2 t1 join pcr_t1_n2 t2 on t1.key=t2.key and t1.ds='2000-04-08' and t2.ds='2000-04-08' order by t1.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 #### A masked pattern was here #### 0 val_0 2000-04-08 0 val_0 2000-04-08 0 val_0 2000-04-08 0 val_0 2000-04-08 @@ -3061,24 +3061,24 @@ POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 19 val_19 2000-04-08 19 val_19 2000-04-08 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 -PREHOOK: Input: default@pcr_t1@ds=2000-04-09 
-PREHOOK: Input: default@pcr_t2 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +PREHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-09 -POSTHOOK: Input: default@pcr_t2 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +POSTHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### 0 val_0 2000-04-09 2000-04-08 2 val_2 0 val_0 2000-04-09 2000-04-08 2 val_2 @@ -3102,24 +3102,24 @@ POSTHOOK: Input: default@pcr_t2 9 val_9 2000-04-09 2000-04-08 2 val_2 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 -PREHOOK: Input: default@pcr_t1@ds=2000-04-09 -PREHOOK: Input: default@pcr_t2 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +PREHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t1.ds='2000-04-08' and t2.key=1) or (t1.ds='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-09 -POSTHOOK: Input: default@pcr_t2 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +POSTHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### 0 val_0 2000-04-09 2000-04-08 2 val_2 0 val_0 2000-04-09 2000-04-08 2 val_2 @@ -3143,43 +3143,43 @@ POSTHOOK: Input: default@pcr_t2 9 val_9 2000-04-09 2000-04-08 2 val_2 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2) order by t1.key, t1.value, t2.ds PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds=2000-04-08 -PREHOOK: Input: default@pcr_t1@ds=2000-04-09 -PREHOOK: Input: default@pcr_t1@ds=2000-04-10 -PREHOOK: Input: default@pcr_t2 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +PREHOOK: Input: default@pcr_t1_n2@ds=2000-04-10 +PREHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t2 t2 +from pcr_t1_n2 t1 join pcr_t2_n0 t2 where (t2.ds='2000-04-08' and t1.key=1) or (t2.ds='2000-04-09' and t1.key=2) order by t1.key, t1.value, t2.ds POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-08 -POSTHOOK: 
Input: default@pcr_t1@ds=2000-04-09 -POSTHOOK: Input: default@pcr_t1@ds=2000-04-10 -POSTHOOK: Input: default@pcr_t2 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-08 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-09 +POSTHOOK: Input: default@pcr_t1_n2@ds=2000-04-10 +POSTHOOK: Input: default@pcr_t2_n0 #### A masked pattern was here #### -PREHOOK: query: drop table pcr_t1 +PREHOOK: query: drop table pcr_t1_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@pcr_t1 -PREHOOK: Output: default@pcr_t1 -POSTHOOK: query: drop table pcr_t1 +PREHOOK: Input: default@pcr_t1_n2 +PREHOOK: Output: default@pcr_t1_n2 +POSTHOOK: query: drop table pcr_t1_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Output: default@pcr_t1 -PREHOOK: query: drop table pcr_t2 +POSTHOOK: Input: default@pcr_t1_n2 +POSTHOOK: Output: default@pcr_t1_n2 +PREHOOK: query: drop table pcr_t2_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@pcr_t2 -PREHOOK: Output: default@pcr_t2 -POSTHOOK: query: drop table pcr_t2 +PREHOOK: Input: default@pcr_t2_n0 +PREHOOK: Output: default@pcr_t2_n0 +POSTHOOK: query: drop table pcr_t2_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@pcr_t2 -POSTHOOK: Output: default@pcr_t2 +POSTHOOK: Input: default@pcr_t2_n0 +POSTHOOK: Output: default@pcr_t2_n0 PREHOOK: query: drop table pcr_t3 PREHOOK: type: DROPTABLE POSTHOOK: query: drop table pcr_t3 diff --git a/ql/src/test/results/clientpositive/pointlookup3.q.out b/ql/src/test/results/clientpositive/pointlookup3.q.out index 4ebb6e4516..975774faf9 100644 --- a/ql/src/test/results/clientpositive/pointlookup3.q.out +++ b/ql/src/test/results/clientpositive/pointlookup3.q.out @@ -1,54 +1,54 @@ -PREHOOK: query: drop table pcr_t1 +PREHOOK: query: drop table pcr_t1_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table pcr_t1 +POSTHOOK: query: drop table pcr_t1_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string) +PREHOOK: query: create table pcr_t1_n1 (key int, value string) partitioned by (ds1 string, ds2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@pcr_t1 -POSTHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string) +PREHOOK: Output: default@pcr_t1_n1 +POSTHOOK: query: create table pcr_t1_n1 (key int, value string) partitioned by (ds1 string, ds2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@pcr_t1 -PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key +POSTHOOK: Output: default@pcr_t1_n1 +PREHOOK: query: insert overwrite table pcr_t1_n1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key +PREHOOK: Output: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: query: insert overwrite table pcr_t1_n1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key +POSTHOOK: Output: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Lineage: pcr_t1_n1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: pcr_t1_n1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table pcr_t1_n1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 -POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key +PREHOOK: Output: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: query: insert overwrite table pcr_t1_n1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key +POSTHOOK: Output: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Lineage: pcr_t1_n1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: pcr_t1_n1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table pcr_t1_n1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 -POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key +PREHOOK: Output: default@pcr_t1_n1@ds1=2000-04-10/ds2=2001-04-10 +POSTHOOK: query: insert overwrite table pcr_t1_n1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@pcr_t1_n1@ds1=2000-04-10/ds2=2001-04-10 +POSTHOOK: Lineage: pcr_t1_n1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: pcr_t1_n1 
PARTITION(ds1=2000-04-10,ds2=2001-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY POSTHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY @@ -61,7 +61,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: pcr_t1 + alias: pcr_t1_n1 Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -99,13 +99,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -122,16 +122,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 #### A masked pattern was here #### Partition base file name: ds2=2001-04-09 @@ -148,13 +148,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -171,19 +171,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1_n1] + /pcr_t1_n1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1_n1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -221,13 +221,13 @@ STAGE PLANS: PREHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or 
(ds1='2000-04-09' and ds2='2001-04-08' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY POSTHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY @@ -240,7 +240,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: pcr_t1 + alias: pcr_t1_n1 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -278,13 +278,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -301,18 +301,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:pcr_t1] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:pcr_t1_n1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -350,13 +350,13 @@ STAGE PLANS: PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08' order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08' order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY @@ -430,13 +430,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -453,18 +453,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - 
/pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:$hdt$_0:t1, $hdt$_0:$hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:$hdt$_0:t1, $hdt$_0:$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -576,13 +576,13 @@ STAGE PLANS: PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09' order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09' order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY @@ -656,13 +656,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -679,16 +679,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 #### A masked pattern was here #### Partition base file name: ds2=2001-04-09 @@ -705,13 +705,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -728,19 +728,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:$hdt$_0:t1] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:$hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:$hdt$_0:t1] + /pcr_t1_n1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -853,13 +853,13 @@ STAGE PLANS: Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain extended select * -from pcr_t1 
t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY @@ -925,13 +925,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -948,16 +948,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 #### A masked pattern was here #### Partition base file name: ds2=2001-04-09 @@ -974,13 +974,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -997,16 +997,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 #### A masked pattern was here #### Partition base file name: ds2=2001-04-10 @@ -1023,13 +1023,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1046,20 +1046,20 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - 
serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:t1, $hdt$_1:t2] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:t1, $hdt$_1:t2] - /pcr_t1/ds1=2000-04-10/ds2=2001-04-10 [$hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:t1, $hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:t1, $hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-10/ds2=2001-04-10 [$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -1170,55 +1170,55 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### POSTHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### 2 val_2 2000-04-09 2001-04-09 PREHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 #### A masked pattern was here #### POSTHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 #### A masked pattern was here #### PREHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08' order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and 
t2.ds2='2001-04-08' order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 #### A masked pattern was here #### 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-08 2001-04-08 @@ -1259,22 +1259,22 @@ POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 18 val_18 2000-04-08 2001-04-08 18 val_18 2000-04-08 2001-04-08 19 val_19 2000-04-08 2001-04-08 19 val_19 2000-04-08 2001-04-08 PREHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09' order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09' order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-09 2001-04-09 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-09 2001-04-09 @@ -1316,24 +1316,24 @@ POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 19 val_19 2000-04-08 2001-04-08 19 val_19 2000-04-09 2001-04-09 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-10/ds2=2001-04-10 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Input: 
default@pcr_t1_n1@ds1=2000-04-10/ds2=2001-04-10 #### A masked pattern was here #### 0 val_0 2000-04-09 2001-04-09 2 val_2 2000-04-10 2001-04-10 0 val_0 2000-04-09 2001-04-09 2 val_2 2000-04-08 2001-04-08 @@ -1397,13 +1397,13 @@ POSTHOOK: Input: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 9 val_9 2000-04-09 2001-04-09 2 val_2 2000-04-09 2001-04-09 PREHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY POSTHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY @@ -1416,7 +1416,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: pcr_t1 + alias: pcr_t1_n1 Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -1454,13 +1454,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1477,16 +1477,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 #### A masked pattern was here #### Partition base file name: ds2=2001-04-09 @@ -1503,13 +1503,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1526,19 +1526,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1_n1] + /pcr_t1_n1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1_n1] Needs 
Tagging: false Reduce Operator Tree: Select Operator @@ -1576,13 +1576,13 @@ STAGE PLANS: PREHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY POSTHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY @@ -1595,7 +1595,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: pcr_t1 + alias: pcr_t1_n1 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -1633,13 +1633,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1656,18 +1656,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:pcr_t1] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:pcr_t1_n1] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -1705,13 +1705,13 @@ STAGE PLANS: PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08' order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08' order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY @@ -1785,13 +1785,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -1808,18 +1808,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:$hdt$_0:t1, $hdt$_0:$hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:$hdt$_0:t1, $hdt$_0:$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -1931,13 +1931,13 @@ STAGE PLANS: PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09' order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09' order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY @@ -2011,13 +2011,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2034,16 +2034,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 #### A masked pattern was here #### Partition base file name: ds2=2001-04-09 @@ -2060,13 +2060,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2083,19 +2083,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:$hdt$_0:t1] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:$hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:$hdt$_0:t1] + 
/pcr_t1_n1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -2208,13 +2208,13 @@ STAGE PLANS: Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY POSTHOOK: query: explain extended select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY @@ -2280,13 +2280,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2303,16 +2303,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 #### A masked pattern was here #### Partition base file name: ds2=2001-04-09 @@ -2329,13 +2329,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2352,16 +2352,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 #### A masked pattern was here #### Partition base file name: ds2=2001-04-10 @@ -2378,13 +2378,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -2401,20 +2401,20 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n1 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n1 + name: default.pcr_t1_n1 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:t1, $hdt$_1:t2] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:t1, $hdt$_1:t2] - /pcr_t1/ds1=2000-04-10/ds2=2001-04-10 [$hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-08/ds2=2001-04-08 [$hdt$_0:t1, $hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-09/ds2=2001-04-09 [$hdt$_0:t1, $hdt$_1:t2] + /pcr_t1_n1/ds1=2000-04-10/ds2=2001-04-10 [$hdt$_1:t2] Needs Tagging: true Reduce Operator Tree: Join Operator @@ -2525,55 +2525,55 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### POSTHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and key=1) or (ds1='2000-04-09' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### 2 val_2 2000-04-09 2001-04-09 PREHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 #### A masked pattern was here #### POSTHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n1 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-08' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 #### A masked pattern was here #### PREHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08' order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: 
default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds2='2001-04-08' order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 #### A masked pattern was here #### 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-08 2001-04-08 @@ -2614,22 +2614,22 @@ POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 18 val_18 2000-04-08 2001-04-08 18 val_18 2000-04-08 2001-04-08 19 val_19 2000-04-08 2001-04-08 19 val_19 2000-04-08 2001-04-08 PREHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09' order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 on t1.key=t2.key and t1.ds1='2000-04-08' and t2.ds1='2000-04-09' order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-09 2001-04-09 0 val_0 2000-04-08 2001-04-08 0 val_0 2000-04-09 2001-04-09 @@ -2671,24 +2671,24 @@ POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 19 val_19 2000-04-08 2001-04-08 19 val_19 2000-04-09 2001-04-09 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds1 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 +PREHOOK: Input: default@pcr_t1_n1@ds1=2000-04-10/ds2=2001-04-10 #### A masked pattern was here #### POSTHOOK: query: select * -from pcr_t1 t1 join pcr_t1 t2 +from pcr_t1_n1 t1 join pcr_t1_n1 t2 where (t1.ds1='2000-04-08' and t2.key=1) or (t1.ds1='2000-04-09' and t2.key=2) order by t2.key, t2.value, t1.ds1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Input: 
default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Input: default@pcr_t1_n1@ds1=2000-04-10/ds2=2001-04-10 #### A masked pattern was here #### 0 val_0 2000-04-09 2001-04-09 2 val_2 2000-04-10 2001-04-10 0 val_0 2000-04-09 2001-04-09 2 val_2 2000-04-08 2001-04-08 @@ -2750,11 +2750,11 @@ POSTHOOK: Input: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 9 val_9 2000-04-09 2001-04-09 2 val_2 2000-04-10 2001-04-10 9 val_9 2000-04-09 2001-04-09 2 val_2 2000-04-08 2001-04-08 9 val_9 2000-04-09 2001-04-09 2 val_2 2000-04-09 2001-04-09 -PREHOOK: query: drop table pcr_t1 +PREHOOK: query: drop table pcr_t1_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@pcr_t1 -PREHOOK: Output: default@pcr_t1 -POSTHOOK: query: drop table pcr_t1 +PREHOOK: Input: default@pcr_t1_n1 +PREHOOK: Output: default@pcr_t1_n1 +POSTHOOK: query: drop table pcr_t1_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Output: default@pcr_t1 +POSTHOOK: Input: default@pcr_t1_n1 +POSTHOOK: Output: default@pcr_t1_n1 diff --git a/ql/src/test/results/clientpositive/pointlookup4.q.out b/ql/src/test/results/clientpositive/pointlookup4.q.out index a914db0fe8..adf9d4075f 100644 --- a/ql/src/test/results/clientpositive/pointlookup4.q.out +++ b/ql/src/test/results/clientpositive/pointlookup4.q.out @@ -1,54 +1,54 @@ -PREHOOK: query: drop table pcr_t1 +PREHOOK: query: drop table pcr_t1_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table pcr_t1 +POSTHOOK: query: drop table pcr_t1_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string) +PREHOOK: query: create table pcr_t1_n0 (key int, value string) partitioned by (ds1 string, ds2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@pcr_t1 -POSTHOOK: query: create table pcr_t1 (key int, value string) partitioned by (ds1 string, ds2 string) +PREHOOK: Output: default@pcr_t1_n0 +POSTHOOK: query: create table pcr_t1_n0 (key int, value string) partitioned by (ds1 string, ds2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@pcr_t1 -PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key +POSTHOOK: Output: default@pcr_t1_n0 +PREHOOK: query: insert overwrite table pcr_t1_n0 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key +PREHOOK: Output: default@pcr_t1_n0@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: query: insert overwrite table pcr_t1_n0 partition (ds1='2000-04-08', ds2='2001-04-08') select * from src where key < 20 order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-08,ds2=2001-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] -PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key +POSTHOOK: Output: default@pcr_t1_n0@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Lineage: pcr_t1_n0 PARTITION(ds1=2000-04-08,ds2=2001-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: pcr_t1_n0 PARTITION(ds1=2000-04-08,ds2=2001-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table pcr_t1_n0 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 -POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key +PREHOOK: Output: default@pcr_t1_n0@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: query: insert overwrite table pcr_t1_n0 partition (ds1='2000-04-09', ds2='2001-04-09') select * from src where key < 20 order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-09,ds2=2001-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key +POSTHOOK: Output: default@pcr_t1_n0@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Lineage: pcr_t1_n0 PARTITION(ds1=2000-04-09,ds2=2001-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: pcr_t1_n0 PARTITION(ds1=2000-04-09,ds2=2001-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table pcr_t1_n0 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 -POSTHOOK: query: insert overwrite table pcr_t1 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key +PREHOOK: Output: default@pcr_t1_n0@ds1=2000-04-10/ds2=2001-04-10 +POSTHOOK: query: insert overwrite table pcr_t1_n0 partition (ds1='2000-04-10', ds2='2001-04-10') select * from src where key < 20 order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@pcr_t1@ds1=2000-04-10/ds2=2001-04-10 -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: pcr_t1 PARTITION(ds1=2000-04-10,ds2=2001-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@pcr_t1_n0@ds1=2000-04-10/ds2=2001-04-10 +POSTHOOK: Lineage: pcr_t1_n0 PARTITION(ds1=2000-04-10,ds2=2001-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: pcr_t1_n0 PARTITION(ds1=2000-04-10,ds2=2001-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n0 where (ds1='2000-04-08' 
and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY POSTHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n0 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY @@ -61,7 +61,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: pcr_t1 + alias: pcr_t1_n0 Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -99,13 +99,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n0 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -122,16 +122,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n0 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n0 + name: default.pcr_t1_n0 #### A masked pattern was here #### Partition base file name: ds2=2001-04-09 @@ -148,13 +148,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n0 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -171,19 +171,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n0 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n0 + name: default.pcr_t1_n0 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1] + /pcr_t1_n0/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1_n0] + /pcr_t1_n0/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1_n0] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -220,33 +220,33 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n0 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: 
default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +PREHOOK: Input: default@pcr_t1_n0 +PREHOOK: Input: default@pcr_t1_n0@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n0@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### POSTHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n0 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Input: default@pcr_t1_n0 +POSTHOOK: Input: default@pcr_t1_n0@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n0@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### 2 val_2 2000-04-09 2001-04-09 PREHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n0 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY POSTHOOK: query: explain extended select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n0 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY @@ -259,7 +259,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: pcr_t1 + alias: pcr_t1_n0 Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -297,13 +297,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n0 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -320,16 +320,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n0 partition_columns ds1/ds2 partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n0 + name: default.pcr_t1_n0 #### A masked pattern was here #### Partition base file name: ds2=2001-04-09 @@ -346,13 +346,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n0 numFiles 1 numRows 20 partition_columns ds1/ds2 partition_columns.types string:string rawDataSize 160 - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 180 @@ -369,19 +369,19 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.pcr_t1 + name default.pcr_t1_n0 partition_columns ds1/ds2 
partition_columns.types string:string - serialization.ddl struct pcr_t1 { i32 key, string value} + serialization.ddl struct pcr_t1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.pcr_t1 - name: default.pcr_t1 + name: default.pcr_t1_n0 + name: default.pcr_t1_n0 Truncated Path -> Alias: - /pcr_t1/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1] - /pcr_t1/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1] + /pcr_t1_n0/ds1=2000-04-08/ds2=2001-04-08 [pcr_t1_n0] + /pcr_t1_n0/ds1=2000-04-09/ds2=2001-04-09 [pcr_t1_n0] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -418,29 +418,29 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n0 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2) order by key, value, ds1, ds2 PREHOOK: type: QUERY -PREHOOK: Input: default@pcr_t1 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -PREHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +PREHOOK: Input: default@pcr_t1_n0 +PREHOOK: Input: default@pcr_t1_n0@ds1=2000-04-08/ds2=2001-04-08 +PREHOOK: Input: default@pcr_t1_n0@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### POSTHOOK: query: select key, value, ds1, ds2 -from pcr_t1 +from pcr_t1_n0 where (ds1='2000-04-08' and ds2='2001-04-08' and key=1) or (ds1='2000-04-09' and ds2='2001-04-09' and key=2) order by key, value, ds1, ds2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-08/ds2=2001-04-08 -POSTHOOK: Input: default@pcr_t1@ds1=2000-04-09/ds2=2001-04-09 +POSTHOOK: Input: default@pcr_t1_n0 +POSTHOOK: Input: default@pcr_t1_n0@ds1=2000-04-08/ds2=2001-04-08 +POSTHOOK: Input: default@pcr_t1_n0@ds1=2000-04-09/ds2=2001-04-09 #### A masked pattern was here #### 2 val_2 2000-04-09 2001-04-09 -PREHOOK: query: drop table pcr_t1 +PREHOOK: query: drop table pcr_t1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@pcr_t1 -PREHOOK: Output: default@pcr_t1 -POSTHOOK: query: drop table pcr_t1 +PREHOOK: Input: default@pcr_t1_n0 +PREHOOK: Output: default@pcr_t1_n0 +POSTHOOK: query: drop table pcr_t1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@pcr_t1 -POSTHOOK: Output: default@pcr_t1 +POSTHOOK: Input: default@pcr_t1_n0 +POSTHOOK: Output: default@pcr_t1_n0 diff --git a/ql/src/test/results/clientpositive/ppd_join5.q.out b/ql/src/test/results/clientpositive/ppd_join5.q.out index 045a170a3c..4607f4ed6a 100644 --- a/ql/src/test/results/clientpositive/ppd_join5.q.out +++ b/ql/src/test/results/clientpositive/ppd_join5.q.out @@ -1,47 +1,47 @@ -PREHOOK: query: create table t1 (id1 string, id2 string) +PREHOOK: query: create table t1_n30 (id1 string, id2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (id1 string, id2 string) +PREHOOK: Output: default@t1_n30 +POSTHOOK: query: create table t1_n30 (id1 string, id2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2 (id string, d int) +POSTHOOK: Output: default@t1_n30 +PREHOOK: query: create table t2_n17 (id string, d int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2 (id string, d int) +PREHOOK: Output: default@t2_n17 +POSTHOOK: query: 
create table t2_n17 (id string, d int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2_n17 PREHOOK: query: from src tablesample (1 rows) - insert into table t1 select 'a','a' - insert into table t2 select 'a',2 + insert into table t1_n30 select 'a','a' + insert into table t2_n17 select 'a',2 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t2 +PREHOOK: Output: default@t1_n30 +PREHOOK: Output: default@t2_n17 POSTHOOK: query: from src tablesample (1 rows) - insert into table t1 select 'a','a' - insert into table t2 select 'a',2 + insert into table t1_n30 select 'a','a' + insert into table t2_n17 select 'a',2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t1.id1 SIMPLE [] -POSTHOOK: Lineage: t1.id2 SIMPLE [] -POSTHOOK: Lineage: t2.d SIMPLE [] -POSTHOOK: Lineage: t2.id SIMPLE [] +POSTHOOK: Output: default@t1_n30 +POSTHOOK: Output: default@t2_n17 +POSTHOOK: Lineage: t1_n30.id1 SIMPLE [] +POSTHOOK: Lineage: t1_n30.id2 SIMPLE [] +POSTHOOK: Lineage: t2_n17.d SIMPLE [] +POSTHOOK: Lineage: t2_n17.id SIMPLE [] Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain select a.*,b.d d1,c.d d2 from - t1 a join t2 b on (a.id1 = b.id) - join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 + t1_n30 a join t2_n17 b on (a.id1 = b.id) + join t2_n17 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 PREHOOK: type: QUERY POSTHOOK: query: explain select a.*,b.d d1,c.d d2 from - t1 a join t2 b on (a.id1 = b.id) - join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 + t1_n30 a join t2_n17 b on (a.id1 = b.id) + join t2_n17 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -153,15 +153,15 @@ Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MA PREHOOK: query: explain select * from ( select a.*,b.d d1,c.d d2 from - t1 a join t2 b on (a.id1 = b.id) - join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 + t1_n30 a join t2_n17 b on (a.id1 = b.id) + join t2_n17 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 ) z where d1 > 1 or d2 > 1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from ( select a.*,b.d d1,c.d d2 from - t1 a join t2 b on (a.id1 = b.id) - join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 + t1_n30 a join t2_n17 b on (a.id1 = b.id) + join t2_n17 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 ) z where d1 > 1 or d2 > 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -273,19 +273,19 @@ STAGE PLANS: Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from ( select a.*,b.d d1,c.d d2 from - t1 a join t2 b on (a.id1 = b.id) - join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 + t1_n30 a join t2_n17 b on (a.id1 = b.id) + join t2_n17 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 ) z where d1 > 1 or d2 > 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n30 +PREHOOK: Input: default@t2_n17 #### A masked pattern was here #### POSTHOOK: query: select * from ( select a.*,b.d d1,c.d d2 from - t1 a join t2 b on (a.id1 = b.id) - join t2 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 + t1_n30 a join t2_n17 b on (a.id1 = b.id) + join t2_n17 c on (a.id2 = b.id) where b.d <= 1 and c.d <= 1 ) z 
where d1 > 1 or d2 > 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n30 +POSTHOOK: Input: default@t2_n17 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/ppd_outer_join5.q.out b/ql/src/test/results/clientpositive/ppd_outer_join5.q.out index ba3baedc3a..bce6f9fe6a 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join5.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join5.q.out @@ -1,40 +1,40 @@ -PREHOOK: query: create table t1 (id int, key string, value string) +PREHOOK: query: create table t1_n34 (id int, key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (id int, key string, value string) +PREHOOK: Output: default@t1_n34 +POSTHOOK: query: create table t1_n34 (id int, key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2 (id int, key string, value string) +POSTHOOK: Output: default@t1_n34 +PREHOOK: query: create table t2_n21 (id int, key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2 (id int, key string, value string) +PREHOOK: Output: default@t2_n21 +POSTHOOK: query: create table t2_n21 (id int, key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: create table t3 (id int, key string, value string) +POSTHOOK: Output: default@t2_n21 +PREHOOK: query: create table t3_n6 (id int, key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t3 -POSTHOOK: query: create table t3 (id int, key string, value string) +PREHOOK: Output: default@t3_n6 +POSTHOOK: query: create table t3_n6 (id int, key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t3 -PREHOOK: query: create table t4 (id int, key string, value string) +POSTHOOK: Output: default@t3_n6 +PREHOOK: query: create table t4_n7 (id int, key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t4 -POSTHOOK: query: create table t4 (id int, key string, value string) +PREHOOK: Output: default@t4_n7 +POSTHOOK: query: create table t4_n7 (id int, key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t4 +POSTHOOK: Output: default@t4_n7 Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: explain select * from t1 full outer join t2 on t1.id=t2.id join t3 on t2.id=t3.id where t3.id=20 +PREHOOK: query: explain select * from t1_n34 full outer join t2_n21 on t1_n34.id=t2_n21.id join t3_n6 on t2_n21.id=t3_n6.id where t3_n6.id=20 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 full outer join t2 on t1.id=t2.id join t3 on t2.id=t3.id where t3.id=20 +POSTHOOK: query: explain select * from t1_n34 full outer join t2_n21 on t1_n34.id=t2_n21.id join t3_n6 on t2_n21.id=t3_n6.id where t3_n6.id=20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -46,7 +46,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n34 
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -60,7 +60,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string) TableScan - alias: t2 + alias: t2_n21 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -101,7 +101,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col4 (type: string), _col5 (type: string) TableScan - alias: t3 + alias: t3_n6 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -143,9 +143,9 @@ STAGE PLANS: Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t2.id=t3.id) where t2.id=20 +PREHOOK: query: explain select * from t1_n34 join t2_n21 on (t1_n34.id=t2_n21.id) left outer join t3_n6 on (t2_n21.id=t3_n6.id) where t2_n21.id=20 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t2.id=t3.id) where t2.id=20 +POSTHOOK: query: explain select * from t1_n34 join t2_n21 on (t1_n34.id=t2_n21.id) left outer join t3_n6 on (t2_n21.id=t3_n6.id) where t2_n21.id=20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -157,7 +157,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n34 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -171,7 +171,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: string), _col2 (type: string) TableScan - alias: t3 + alias: t3_n6 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -209,7 +209,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: int), _col4 (type: string), _col5 (type: string) TableScan - alias: t2 + alias: t2_n21 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -251,9 +251,9 @@ STAGE PLANS: Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t1.id=t3.id) where t2.id=20 +PREHOOK: query: explain select * from t1_n34 join t2_n21 on (t1_n34.id=t2_n21.id) left outer join t3_n6 on (t1_n34.id=t3_n6.id) where t2_n21.id=20 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t1.id=t3.id) where t2.id=20 +POSTHOOK: query: explain select * from t1_n34 join t2_n21 on (t1_n34.id=t2_n21.id) left outer join t3_n6 on (t1_n34.id=t3_n6.id) where t2_n21.id=20 POSTHOOK: 
type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -265,7 +265,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n34 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -279,7 +279,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: string), _col2 (type: string) TableScan - alias: t3 + alias: t3_n6 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -317,7 +317,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: int), _col4 (type: string), _col5 (type: string) TableScan - alias: t2 + alias: t2_n21 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (id = 20) (type: boolean) @@ -357,35 +357,35 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n34 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n34 +PREHOOK: Output: default@t1_n34 +POSTHOOK: query: drop table t1_n34 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: drop table t2 +POSTHOOK: Input: default@t1_n34 +POSTHOOK: Output: default@t1_n34 +PREHOOK: query: drop table t2_n21 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: drop table t2 +PREHOOK: Input: default@t2_n21 +PREHOOK: Output: default@t2_n21 +POSTHOOK: query: drop table t2_n21 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: drop table t3 +POSTHOOK: Input: default@t2_n21 +POSTHOOK: Output: default@t2_n21 +PREHOOK: query: drop table t3_n6 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t3 -PREHOOK: Output: default@t3 -POSTHOOK: query: drop table t3 +PREHOOK: Input: default@t3_n6 +PREHOOK: Output: default@t3_n6 +POSTHOOK: query: drop table t3_n6 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t3 -POSTHOOK: Output: default@t3 -PREHOOK: query: drop table t4 +POSTHOOK: Input: default@t3_n6 +POSTHOOK: Output: default@t3_n6 +PREHOOK: query: drop table t4_n7 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t4 -PREHOOK: Output: default@t4 -POSTHOOK: query: drop table t4 +PREHOOK: Input: default@t4_n7 +PREHOOK: Output: default@t4_n7 +POSTHOOK: query: drop table t4_n7 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t4 -POSTHOOK: Output: default@t4 +POSTHOOK: Input: default@t4_n7 +POSTHOOK: Output: default@t4_n7 diff --git a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out index 7eb621c257..b725e9f9cf 100644 --- a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out +++ b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out @@ -1,36 +1,36 @@ -PREHOOK: query: drop table pokes +PREHOOK: query: drop table pokes_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table pokes +POSTHOOK: query: drop table pokes_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table pokes2 +PREHOOK: query: drop table pokes2_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table pokes2 +POSTHOOK: query: drop table pokes2_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table 
pokes (foo int, bar int, blah int) +PREHOOK: query: create table pokes_n0 (foo int, bar int, blah int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@pokes -POSTHOOK: query: create table pokes (foo int, bar int, blah int) +PREHOOK: Output: default@pokes_n0 +POSTHOOK: query: create table pokes_n0 (foo int, bar int, blah int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@pokes -PREHOOK: query: create table pokes2 (foo int, bar int, blah int) +POSTHOOK: Output: default@pokes_n0 +PREHOOK: query: create table pokes2_n0 (foo int, bar int, blah int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@pokes2 -POSTHOOK: query: create table pokes2 (foo int, bar int, blah int) +PREHOOK: Output: default@pokes2_n0 +POSTHOOK: query: create table pokes2_n0 (foo int, bar int, blah int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@pokes2 +POSTHOOK: Output: default@pokes2_n0 PREHOOK: query: explain SELECT a.foo as foo1, b.foo as foo2, b.bar -FROM pokes a LEFT OUTER JOIN pokes2 b +FROM pokes_n0 a LEFT OUTER JOIN pokes2_n0 b ON a.foo=b.foo WHERE b.bar=3 PREHOOK: type: QUERY POSTHOOK: query: explain SELECT a.foo as foo1, b.foo as foo2, b.bar -FROM pokes a LEFT OUTER JOIN pokes2 b +FROM pokes_n0 a LEFT OUTER JOIN pokes2_n0 b ON a.foo=b.foo WHERE b.bar=3 POSTHOOK: type: QUERY @@ -102,14 +102,14 @@ STAGE PLANS: PREHOOK: query: explain SELECT * FROM (SELECT a.foo as foo1, b.foo as foo2, b.bar - FROM pokes a LEFT OUTER JOIN pokes2 b + FROM pokes_n0 a LEFT OUTER JOIN pokes2_n0 b ON a.foo=b.foo) a WHERE a.bar=3 PREHOOK: type: QUERY POSTHOOK: query: explain SELECT * FROM (SELECT a.foo as foo1, b.foo as foo2, b.bar - FROM pokes a LEFT OUTER JOIN pokes2 b + FROM pokes_n0 a LEFT OUTER JOIN pokes2_n0 b ON a.foo=b.foo) a WHERE a.bar=3 POSTHOOK: type: QUERY @@ -181,14 +181,14 @@ STAGE PLANS: PREHOOK: query: explain SELECT * FROM (SELECT a.foo as foo1, b.foo as foo2, a.bar - FROM pokes a JOIN pokes2 b + FROM pokes_n0 a JOIN pokes2_n0 b ON a.foo=b.foo) a WHERE a.bar=3 PREHOOK: type: QUERY POSTHOOK: query: explain SELECT * FROM (SELECT a.foo as foo1, b.foo as foo2, a.bar - FROM pokes a JOIN pokes2 b + FROM pokes_n0 a JOIN pokes2_n0 b ON a.foo=b.foo) a WHERE a.bar=3 POSTHOOK: type: QUERY @@ -258,9 +258,9 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes c left outer join pokes b on c.foo=b.foo) c left outer join pokes d where d.foo=1 and c.bar=2 +PREHOOK: query: explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes_n0 c left outer join pokes_n0 b on c.foo=b.foo) c left outer join pokes_n0 d where d.foo=1 and c.bar=2 PREHOOK: type: QUERY -POSTHOOK: query: explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes c left outer join pokes b on c.foo=b.foo) c left outer join pokes d where d.foo=1 and c.bar=2 +POSTHOOK: query: explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes_n0 c left outer join pokes_n0 b on c.foo=b.foo) c left outer join pokes_n0 d where d.foo=1 and c.bar=2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -370,19 +370,19 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: drop table pokes +PREHOOK: query: drop table pokes_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@pokes -PREHOOK: Output: 
default@pokes -POSTHOOK: query: drop table pokes +PREHOOK: Input: default@pokes_n0 +PREHOOK: Output: default@pokes_n0 +POSTHOOK: query: drop table pokes_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@pokes -POSTHOOK: Output: default@pokes -PREHOOK: query: drop table pokes2 +POSTHOOK: Input: default@pokes_n0 +POSTHOOK: Output: default@pokes_n0 +PREHOOK: query: drop table pokes2_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@pokes2 -PREHOOK: Output: default@pokes2 -POSTHOOK: query: drop table pokes2 +PREHOOK: Input: default@pokes2_n0 +PREHOOK: Output: default@pokes2_n0 +POSTHOOK: query: drop table pokes2_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@pokes2 -POSTHOOK: Output: default@pokes2 +POSTHOOK: Input: default@pokes2_n0 +POSTHOOK: Output: default@pokes2_n0 diff --git a/ql/src/test/results/clientpositive/primitive_types.q.out b/ql/src/test/results/clientpositive/primitive_types.q.out index 3f717e12f2..9e4726c91e 100644 --- a/ql/src/test/results/clientpositive/primitive_types.q.out +++ b/ql/src/test/results/clientpositive/primitive_types.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table t ( +PREHOOK: query: create table t_n9 ( dp double precision, d double, f float, @@ -11,8 +11,8 @@ PREHOOK: query: create table t ( ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t ( +PREHOOK: Output: default@t_n9 +POSTHOOK: query: create table t_n9 ( dp double precision, d double, f float, @@ -25,13 +25,13 @@ POSTHOOK: query: create table t ( ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: describe t +POSTHOOK: Output: default@t_n9 +PREHOOK: query: describe t_n9 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t -POSTHOOK: query: describe t +PREHOOK: Input: default@t_n9 +POSTHOOK: query: describe t_n9 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n9 dp double d double f float diff --git a/ql/src/test/results/clientpositive/ptf_matchpath.q.out b/ql/src/test/results/clientpositive/ptf_matchpath.q.out index 2b8fb22a76..c59711434f 100644 --- a/ql/src/test/results/clientpositive/ptf_matchpath.q.out +++ b/ql/src/test/results/clientpositive/ptf_matchpath.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: DROP TABLE flights_tiny +PREHOOK: query: DROP TABLE flights_tiny_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE flights_tiny +POSTHOOK: query: DROP TABLE flights_tiny_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table flights_tiny ( +PREHOOK: query: create table flights_tiny_n0 ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, YEAR int, @@ -13,8 +13,8 @@ FL_NUM string ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@flights_tiny -POSTHOOK: query: create table flights_tiny ( +PREHOOK: Output: default@flights_tiny_n0 +POSTHOOK: query: create table flights_tiny_n0 ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, YEAR int, @@ -25,19 +25,19 @@ FL_NUM string ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@flights_tiny -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny +POSTHOOK: Output: default@flights_tiny_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@flights_tiny -POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny +PREHOOK: Output: default@flights_tiny_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@flights_tiny +POSTHOOK: Output: default@flights_tiny_n0 PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 distribute by fl_num sort by year, month, day_of_month arg1('LATE.LATE+'), @@ -48,7 +48,7 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 distribute by fl_num sort by year, month, day_of_month arg1('LATE.LATE+'), @@ -65,7 +65,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: flights_tiny + alias: flights_tiny_n0 Statistics: Num rows: 1 Data size: 53790 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: fl_num (type: string), year (type: int), month (type: int), day_of_month (type: int) @@ -81,7 +81,7 @@ STAGE PLANS: PTF Operator Function definitions: Input definition - input alias: flights_tiny + input alias: flights_tiny_n0 output shape: type: TABLE Partition table definition @@ -114,7 +114,7 @@ STAGE PLANS: PREHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 distribute by fl_num sort by year, month, day_of_month arg1('LATE.LATE+'), @@ -122,11 +122,11 @@ from matchpath(on arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) PREHOOK: type: QUERY -PREHOOK: Input: default@flights_tiny +PREHOOK: Input: default@flights_tiny_n0 #### A masked pattern was here #### POSTHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 distribute by fl_num sort by year, month, day_of_month arg1('LATE.LATE+'), @@ -134,7 +134,7 @@ from matchpath(on arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) POSTHOOK: type: QUERY -POSTHOOK: Input: default@flights_tiny +POSTHOOK: Input: default@flights_tiny_n0 #### A masked pattern was here #### Baltimore 1142 2010 10 20 6 20 Baltimore 1142 2010 10 21 5 21 @@ -155,7 +155,7 @@ Washington 7291 2010 10 27 2 27 PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -166,7 +166,7 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -183,7 +183,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: flights_tiny + alias: flights_tiny_n0 Statistics: Num rows: 1 Data size: 53790 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: 0 (type: int), fl_num (type: string), year (type: int), month (type: int), day_of_month (type: int) @@ -199,7 +199,7 @@ STAGE PLANS: PTF Operator Function definitions: Input definition - input alias: flights_tiny + input alias: flights_tiny_n0 output shape: type: 
TABLE Partition table definition @@ -235,7 +235,7 @@ STAGE PLANS: PREHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -243,11 +243,11 @@ from matchpath(on ) where fl_num = 1142 PREHOOK: type: QUERY -PREHOOK: Input: default@flights_tiny +PREHOOK: Input: default@flights_tiny_n0 #### A masked pattern was here #### POSTHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - flights_tiny + flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -255,7 +255,7 @@ from matchpath(on ) where fl_num = 1142 POSTHOOK: type: QUERY -POSTHOOK: Input: default@flights_tiny +POSTHOOK: Input: default@flights_tiny_n0 #### A masked pattern was here #### Baltimore 1142 2010 10 20 6 20 Baltimore 1142 2010 10 21 5 21 @@ -265,7 +265,7 @@ Baltimore 1142 2010 10 26 2 26 PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - (select * from flights_tiny where fl_num = -1142) flights_tiny + (select * from flights_tiny_n0 where fl_num = -1142) flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -275,7 +275,7 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - (select * from flights_tiny where fl_num = -1142) flights_tiny + (select * from flights_tiny_n0 where fl_num = -1142) flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), @@ -291,7 +291,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: flights_tiny + alias: flights_tiny_n0 Statistics: Num rows: 1 Data size: 53790 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (fl_num = -1142) (type: boolean) @@ -314,7 +314,7 @@ STAGE PLANS: PTF Operator Function definitions: Input definition - input alias: flights_tiny + input alias: flights_tiny_n0 output shape: type: SUBQUERY Partition table definition @@ -347,23 +347,23 @@ STAGE PLANS: PREHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - (select * from flights_tiny where fl_num = -1142) flights_tiny + (select * from flights_tiny_n0 where fl_num = -1142) flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) PREHOOK: type: QUERY -PREHOOK: Input: default@flights_tiny +PREHOOK: Input: default@flights_tiny_n0 #### A masked pattern was here #### POSTHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on - (select * from flights_tiny where fl_num = -1142) flights_tiny + (select * from flights_tiny_n0 where fl_num = -1142) flights_tiny_n0 sort by fl_num, year, month, day_of_month arg1('LATE.LATE+'), arg2('LATE'), arg3(arr_delay > 15), arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) POSTHOOK: type: QUERY -POSTHOOK: Input: default@flights_tiny +POSTHOOK: Input: default@flights_tiny_n0 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/quote1.q.out 
b/ql/src/test/results/clientpositive/quote1.q.out index 26cfd49e3d..3ff5984b7c 100644 --- a/ql/src/test/results/clientpositive/quote1.q.out +++ b/ql/src/test/results/clientpositive/quote1.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(`location` INT, `type` STRING) PARTITIONED BY(`table` STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n86(`location` INT, `type` STRING) PARTITIONED BY(`table` STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(`location` INT, `type` STRING) PARTITIONED BY(`table` STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n86 +POSTHOOK: query: CREATE TABLE dest1_n86(`location` INT, `type` STRING) PARTITIONED BY(`table` STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n86 PREHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 +INSERT OVERWRITE TABLE dest1_n86 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src -INSERT OVERWRITE TABLE dest1 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 +INSERT OVERWRITE TABLE dest1_n86 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -45,7 +45,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n86 Select Operator expressions: _col0 (type: int), _col1 (type: string), '2008-04-08' (type: string) outputColumnNames: location, type, table @@ -100,7 +100,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n86 Stage: Stage-2 Stats Work @@ -108,7 +108,7 @@ STAGE PLANS: Column Stats Desc: Columns: location, type Column Types: int, string - Table: default.dest1 + Table: default.dest1_n86 Stage: Stage-3 Map Reduce @@ -120,7 +120,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n86 Stage: Stage-5 Map Reduce @@ -132,7 +132,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n86 Stage: Stage-6 Move Operator @@ -141,10 +141,10 @@ STAGE PLANS: #### A masked pattern was here #### PREHOOK: query: EXPLAIN -SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1 `int` WHERE `int`.`table` = '2008-04-08' +SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1_n86 `int` WHERE `int`.`table` = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT 
`int`.`location`, `int`.`type`, `int`.`table` FROM dest1 `int` WHERE `int`.`table` = '2008-04-08' +SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1_n86 `int` WHERE `int`.`table` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -167,26 +167,26 @@ STAGE PLANS: ListSink PREHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 +INSERT OVERWRITE TABLE dest1_n86 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1@table=2008-04-08 +PREHOOK: Output: default@dest1_n86@table=2008-04-08 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE dest1 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 +INSERT OVERWRITE TABLE dest1_n86 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1@table=2008-04-08 -POSTHOOK: Lineage: dest1 PARTITION(table=2008-04-08).location EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1 PARTITION(table=2008-04-08).type SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1 `int` WHERE `int`.`table` = '2008-04-08' +POSTHOOK: Output: default@dest1_n86@table=2008-04-08 +POSTHOOK: Lineage: dest1_n86 PARTITION(table=2008-04-08).location EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n86 PARTITION(table=2008-04-08).type SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1_n86 `int` WHERE `int`.`table` = '2008-04-08' PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 -PREHOOK: Input: default@dest1@table=2008-04-08 +PREHOOK: Input: default@dest1_n86 +PREHOOK: Input: default@dest1_n86@table=2008-04-08 #### A masked pattern was here #### -POSTHOOK: query: SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1 `int` WHERE `int`.`table` = '2008-04-08' +POSTHOOK: query: SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1_n86 `int` WHERE `int`.`table` = '2008-04-08' POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 -POSTHOOK: Input: default@dest1@table=2008-04-08 +POSTHOOK: Input: default@dest1_n86 +POSTHOOK: Input: default@dest1_n86@table=2008-04-08 #### A masked pattern was here #### 238 val_238 2008-04-08 255 val_255 2008-04-08 diff --git a/ql/src/test/results/clientpositive/quotedid_basic.q.out b/ql/src/test/results/clientpositive/quotedid_basic.q.out index ae938867fa..0b63992ac4 100644 --- a/ql/src/test/results/clientpositive/quotedid_basic.q.out +++ b/ql/src/test/results/clientpositive/quotedid_basic.q.out @@ -1,31 +1,31 @@ -PREHOOK: query: create table t1(`x+1` string, `y&y` string, `!@#$%^&*()_q` string) +PREHOOK: query: create table t1_n2(`x+1` string, `y&y` string, `!@#$%^&*()_q` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1(`x+1` string, `y&y` string, `!@#$%^&*()_q` string) +PREHOOK: Output: default@t1_n2 +POSTHOOK: query: create table 
t1_n2(`x+1` string, `y&y` string, `!@#$%^&*()_q` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: describe t1 +POSTHOOK: Output: default@t1_n2 +PREHOOK: query: describe t1_n2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe t1 +PREHOOK: Input: default@t1_n2 +POSTHOOK: query: describe t1_n2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n2 x+1 string y&y string !@#$%^&*()_q string -PREHOOK: query: select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 +PREHOOK: query: select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n2 #### A masked pattern was here #### -POSTHOOK: query: select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 +POSTHOOK: query: select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n2 #### A masked pattern was here #### -PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 +PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n2 PREHOOK: type: QUERY -POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 +POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -36,7 +36,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: t1 + alias: t1_n2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: x+1 (type: string), y&y (type: string), !@#$%^&*()_q (type: string) @@ -44,9 +44,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1' +PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n2 where `!@#$%^&*()_q` = '1' PREHOOK: type: QUERY -POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1' +POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n2 where `!@#$%^&*()_q` = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -57,7 +57,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (!@#$%^&*()_q = '1') (type: boolean) @@ -81,9 +81,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1' +PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n2 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1' PREHOOK: type: QUERY -POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1' +POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1_n2 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -94,7 +94,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (!@#$%^&*()_q = '1') (type: boolean) @@ -139,10 +139,10 @@ STAGE PLANS: 
ListSink PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) -from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1' +from t1_n2 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1' PREHOOK: type: QUERY POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) -from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1' +from t1_n2 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -154,7 +154,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (!@#$%^&*()_q = '1') (type: boolean) @@ -243,10 +243,10 @@ STAGE PLANS: ListSink PREHOOK: query: explain select `X+1`, `Y&y`, `!@#$%^&*()_Q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) -from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, `!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1' +from t1_n2 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, `!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1' PREHOOK: type: QUERY POSTHOOK: query: explain select `X+1`, `Y&y`, `!@#$%^&*()_Q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) -from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, `!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1' +from t1_n2 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, `!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -258,7 +258,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (!@#$%^&*()_q = '1') (type: boolean) diff --git a/ql/src/test/results/clientpositive/quotedid_skew.q.out b/ql/src/test/results/clientpositive/quotedid_skew.q.out index 1203306168..6e987b0e81 100644 --- a/ql/src/test/results/clientpositive/quotedid_skew.q.out +++ b/ql/src/test/results/clientpositive/quotedid_skew.q.out @@ -1,44 +1,44 @@ -PREHOOK: query: CREATE TABLE T1(`!@#$%^&*()_q` string, `y&y` string) +PREHOOK: query: CREATE TABLE T1_n29(`!@#$%^&*()_q` string, `y&y` string) SKEWED BY (`!@#$%^&*()_q`) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(`!@#$%^&*()_q` string, `y&y` string) +PREHOOK: Output: default@T1_n29 +POSTHOOK: query: CREATE TABLE T1_n29(`!@#$%^&*()_q` string, `y&y` string) SKEWED BY (`!@#$%^&*()_q`) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n29 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n29 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n29 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n29 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(`!@#$%^&*()_q` string, `y&y` 
string) +POSTHOOK: Output: default@t1_n29 +PREHOOK: query: CREATE TABLE T2_n18(`!@#$%^&*()_q` string, `y&y` string) SKEWED BY (`!@#$%^&*()_q`) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(`!@#$%^&*()_q` string, `y&y` string) +PREHOOK: Output: default@T2_n18 +POSTHOOK: query: CREATE TABLE T2_n18(`!@#$%^&*()_q` string, `y&y` string) SKEWED BY (`!@#$%^&*()_q`) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n18 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T2_n18 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n18 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T2_n18 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2_n18 PREHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a. `!@#$%^&*()_q` = b. `!@#$%^&*()_q` +SELECT a.*, b.* FROM T1_n29 a JOIN T2_n18 b ON a. `!@#$%^&*()_q` = b. `!@#$%^&*()_q` PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a. `!@#$%^&*()_q` = b. `!@#$%^&*()_q` +SELECT a.*, b.* FROM T1_n29 a JOIN T2_n18 b ON a. `!@#$%^&*()_q` = b. `!@#$%^&*()_q` POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/quotedid_stats.q.out b/ql/src/test/results/clientpositive/quotedid_stats.q.out index 4804a61b70..35811b6d39 100644 --- a/ql/src/test/results/clientpositive/quotedid_stats.q.out +++ b/ql/src/test/results/clientpositive/quotedid_stats.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: create table t4(`x+1``` string, `y&y` string) +PREHOOK: query: create table t4_n5(`x+1``` string, `y&y` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t4 -POSTHOOK: query: create table t4(`x+1``` string, `y&y` string) +PREHOOK: Output: default@t4_n5 +POSTHOOK: query: create table t4_n5(`x+1``` string, `y&y` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t4 -PREHOOK: query: describe formatted t4 +POSTHOOK: Output: default@t4_n5 +PREHOOK: query: describe formatted t4_n5 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t4 -POSTHOOK: query: describe formatted t4 +PREHOOK: Input: default@t4_n5 +POSTHOOK: query: describe formatted t4_n5 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4_n5 # col_name data_type comment x+1` string y&y string @@ -41,22 +41,22 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: analyze table t4 compute statistics for columns +PREHOOK: query: analyze table t4_n5 compute statistics for columns PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@t4 -PREHOOK: Output: default@t4 +PREHOOK: Input: default@t4_n5 +PREHOOK: Output: default@t4_n5 #### A masked pattern was here #### -POSTHOOK: query: analyze table t4 compute statistics for columns +POSTHOOK: query: analyze table t4_n5 compute statistics for columns POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@t4 -POSTHOOK: Output: default@t4 +POSTHOOK: Input: default@t4_n5 +POSTHOOK: Output: 
default@t4_n5 #### A masked pattern was here #### -PREHOOK: query: describe formatted t4 +PREHOOK: query: describe formatted t4_n5 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t4 -POSTHOOK: query: describe formatted t4 +PREHOOK: Input: default@t4_n5 +POSTHOOK: query: describe formatted t4_n5 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4_n5 # col_name data_type comment x+1` string y&y string diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out index 7a925a3433..356be3828c 100644 --- a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out +++ b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: create table tmptable(key string, value string, hr string, ds string) +PREHOOK: query: create table tmptable_n1(key string, value string, hr string, ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string, value string, hr string, ds string) +PREHOOK: Output: default@tmptable_n1 +POSTHOOK: query: create table tmptable_n1(key string, value string, hr string, ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable +POSTHOOK: Output: default@tmptable_n1 PREHOOK: query: explain extended -insert overwrite table tmptable +insert overwrite table tmptable_n1 select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: explain extended -insert overwrite table tmptable +insert overwrite table tmptable_n1 select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -59,17 +59,17 @@ STAGE PLANS: columns.comments columns.types string:string:string:string #### A masked pattern was here #### - name default.tmptable + name default.tmptable_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct tmptable { string key, string value, string hr, string ds} + serialization.ddl struct tmptable_n1 { string key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n1 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -249,17 +249,17 @@ STAGE PLANS: columns.comments columns.types string:string:string:string #### A masked pattern was here #### - name default.tmptable + name default.tmptable_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct tmptable { string key, string value, string hr, string ds} + serialization.ddl struct tmptable_n1 { string key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n1 Stage: Stage-2 Stats Work @@ -268,7 +268,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value, hr, ds Column Types: string, string, string, string - Table: default.tmptable + Table: default.tmptable_n1 Is Table Level Stats: true Stage: Stage-3 @@ -293,17 +293,17 @@ STAGE PLANS: columns.comments columns.types string:string:string:string #### A masked pattern was here #### - name default.tmptable + name 
default.tmptable_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct tmptable { string key, string value, string hr, string ds} + serialization.ddl struct tmptable_n1 { string key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -324,11 +324,11 @@ STAGE PLANS: columns.comments columns.types string:string:string:string #### A masked pattern was here #### - name default.tmptable + name default.tmptable_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct tmptable { string key, string value, string hr, string ds} + serialization.ddl struct tmptable_n1 { string key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -346,18 +346,18 @@ STAGE PLANS: columns.comments columns.types string:string:string:string #### A masked pattern was here #### - name default.tmptable + name default.tmptable_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct tmptable { string key, string value, string hr, string ds} + serialization.ddl struct tmptable_n1 { string key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable - name: default.tmptable + name: default.tmptable_n1 + name: default.tmptable_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -383,17 +383,17 @@ STAGE PLANS: columns.comments columns.types string:string:string:string #### A masked pattern was here #### - name default.tmptable + name default.tmptable_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct tmptable { string key, string value, string hr, string ds} + serialization.ddl struct tmptable_n1 { string key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n1 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -414,11 +414,11 @@ STAGE PLANS: columns.comments columns.types string:string:string:string #### A masked pattern was here #### - name default.tmptable + name default.tmptable_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct tmptable { string key, string value, string hr, string ds} + serialization.ddl struct tmptable_n1 { string key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -436,18 +436,18 @@ STAGE PLANS: columns.comments columns.types string:string:string:string #### A masked pattern was here #### - name default.tmptable + name default.tmptable_n1 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct tmptable { string key, string value, string hr, string ds} + serialization.ddl struct tmptable_n1 { string key, string value, string hr, string ds} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable - name: default.tmptable + name: default.tmptable_n1 + name: default.tmptable_n1 Truncated Path -> Alias: #### A masked pattern was here #### @@ -457,31 +457,31 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table tmptable +PREHOOK: query: insert overwrite table tmptable_n1 select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tmptable -POSTHOOK: query: insert overwrite table tmptable +PREHOOK: Output: default@tmptable_n1 +POSTHOOK: query: insert overwrite table tmptable_n1 select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.ds SIMPLE [(srcpart)a.FieldSchema(name:hr, type:string, comment:null), ] -POSTHOOK: Lineage: tmptable.hr SIMPLE [] -POSTHOOK: Lineage: tmptable.key SIMPLE [(srcpart)a.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmptable.value SIMPLE [(srcpart)a.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from tmptable x sort by x.key,x.value,x.ds,x.hr +POSTHOOK: Output: default@tmptable_n1 +POSTHOOK: Lineage: tmptable_n1.ds SIMPLE [(srcpart)a.FieldSchema(name:hr, type:string, comment:null), ] +POSTHOOK: Lineage: tmptable_n1.hr SIMPLE [] +POSTHOOK: Lineage: tmptable_n1.key SIMPLE [(srcpart)a.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable_n1.value SIMPLE [(srcpart)a.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from tmptable_n1 x sort by x.key,x.value,x.ds,x.hr PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from tmptable x sort by x.key,x.value,x.ds,x.hr +POSTHOOK: query: select * from tmptable_n1 x sort by x.key,x.value,x.ds,x.hr POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n1 #### A masked pattern was here #### 103 val_103 2008-04-08 11 118 val_118 2008-04-08 12 diff --git a/ql/src/test/results/clientpositive/rcfile_toleratecorruptions.q.out b/ql/src/test/results/clientpositive/rcfile_toleratecorruptions.q.out index 7b81717e3e..13910d0021 100644 --- a/ql/src/test/results/clientpositive/rcfile_toleratecorruptions.q.out +++ b/ql/src/test/results/clientpositive/rcfile_toleratecorruptions.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE test_src(key int, value string) stored as RCFILE +PREHOOK: query: CREATE TABLE test_src_n3(key int, value string) stored as RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_src -POSTHOOK: query: CREATE TABLE test_src(key int, value string) stored as RCFILE +PREHOOK: Output: default@test_src_n3 +POSTHOOK: query: CREATE TABLE test_src_n3(key int, value string) stored as RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_src -PREHOOK: query: INSERT OVERWRITE table test_src SELECT * FROM src +POSTHOOK: Output: default@test_src_n3 +PREHOOK: query: INSERT OVERWRITE table test_src_n3 SELECT * FROM 
src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_src -POSTHOOK: query: INSERT OVERWRITE table test_src SELECT * FROM src +PREHOOK: Output: default@test_src_n3 +POSTHOOK: query: INSERT OVERWRITE table test_src_n3 SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_src -POSTHOOK: Lineage: test_src.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT key, value FROM test_src order by key +POSTHOOK: Output: default@test_src_n3 +POSTHOOK: Lineage: test_src_n3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT key, value FROM test_src_n3 order by key PREHOOK: type: QUERY -PREHOOK: Input: default@test_src +PREHOOK: Input: default@test_src_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM test_src order by key +POSTHOOK: query: SELECT key, value FROM test_src_n3 order by key POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_src +POSTHOOK: Input: default@test_src_n3 #### A masked pattern was here #### 0 val_0 0 val_0 diff --git a/ql/src/test/results/clientpositive/read_uint_parquet_vectorized.q.out b/ql/src/test/results/clientpositive/read_uint_parquet_vectorized.q.out index 3d5b4f2e8c..715e16c880 100644 --- a/ql/src/test/results/clientpositive/read_uint_parquet_vectorized.q.out +++ b/ql/src/test/results/clientpositive/read_uint_parquet_vectorized.q.out @@ -1,71 +1,71 @@ -PREHOOK: query: create table testbasicint (uint_32_col int) stored as parquet +PREHOOK: query: create table testbasicint_n0 (uint_32_col int) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testbasicint -POSTHOOK: query: create table testbasicint (uint_32_col int) stored as parquet +PREHOOK: Output: default@testbasicint_n0 +POSTHOOK: query: create table testbasicint_n0 (uint_32_col int) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testbasicint -PREHOOK: query: load data local inpath '../../data/files/test_uint.parquet' into table testbasicint +POSTHOOK: Output: default@testbasicint_n0 +PREHOOK: query: load data local inpath '../../data/files/test_uint.parquet' into table testbasicint_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testbasicint -POSTHOOK: query: load data local inpath '../../data/files/test_uint.parquet' into table testbasicint +PREHOOK: Output: default@testbasicint_n0 +POSTHOOK: query: load data local inpath '../../data/files/test_uint.parquet' into table testbasicint_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testbasicint -PREHOOK: query: select * from testbasicint +POSTHOOK: Output: default@testbasicint_n0 +PREHOOK: query: select * from testbasicint_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testbasicint +PREHOOK: Input: default@testbasicint_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testbasicint +POSTHOOK: query: select * from testbasicint_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testbasicint +POSTHOOK: Input: default@testbasicint_n0 #### A masked pattern was here #### 0 1 2147483647 NULL NULL -PREHOOK: query: drop table testbasicint 
+PREHOOK: query: drop table testbasicint_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testbasicint -PREHOOK: Output: default@testbasicint -POSTHOOK: query: drop table testbasicint +PREHOOK: Input: default@testbasicint_n0 +PREHOOK: Output: default@testbasicint_n0 +POSTHOOK: query: drop table testbasicint_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testbasicint -POSTHOOK: Output: default@testbasicint -PREHOOK: query: create table testbigintinv +POSTHOOK: Input: default@testbasicint_n0 +POSTHOOK: Output: default@testbasicint_n0 +PREHOOK: query: create table testbigintinv_n0 (col_INT32_UINT_8 bigint, col_INT32_UINT_16 bigint, col_INT32_UINT_32 bigint, col_INT64_UINT_64 bigint) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testbigintinv -POSTHOOK: query: create table testbigintinv +PREHOOK: Output: default@testbigintinv_n0 +POSTHOOK: query: create table testbigintinv_n0 (col_INT32_UINT_8 bigint, col_INT32_UINT_16 bigint, col_INT32_UINT_32 bigint, col_INT64_UINT_64 bigint) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testbigintinv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testbigintinv +POSTHOOK: Output: default@testbigintinv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testbigintinv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testbigintinv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testbigintinv +PREHOOK: Output: default@testbigintinv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testbigintinv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testbigintinv -PREHOOK: query: select * from testbigintinv +POSTHOOK: Output: default@testbigintinv_n0 +PREHOOK: query: select * from testbigintinv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testbigintinv +PREHOOK: Input: default@testbigintinv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testbigintinv +POSTHOOK: query: select * from testbigintinv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testbigintinv +POSTHOOK: Input: default@testbigintinv_n0 #### A masked pattern was here #### 0 0 0 0 127 127 127 127 @@ -76,45 +76,45 @@ POSTHOOK: Input: default@testbigintinv NULL NULL NULL 4294967295 NULL NULL NULL 9223372036854775807 NULL NULL NULL NULL -PREHOOK: query: drop table testbigintinv +PREHOOK: query: drop table testbigintinv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testbigintinv -PREHOOK: Output: default@testbigintinv -POSTHOOK: query: drop table testbigintinv +PREHOOK: Input: default@testbigintinv_n0 +PREHOOK: Output: default@testbigintinv_n0 +POSTHOOK: query: drop table testbigintinv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testbigintinv -POSTHOOK: Output: default@testbigintinv -PREHOOK: query: create table testintinv +POSTHOOK: Input: default@testbigintinv_n0 +POSTHOOK: Output: default@testbigintinv_n0 +PREHOOK: query: create table testintinv_n0 (col_INT32_UINT_8 int, col_INT32_UINT_16 int, col_INT32_UINT_32 int, col_INT64_UINT_64 int) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testintinv -POSTHOOK: query: create table testintinv +PREHOOK: Output: 
default@testintinv_n0 +POSTHOOK: query: create table testintinv_n0 (col_INT32_UINT_8 int, col_INT32_UINT_16 int, col_INT32_UINT_32 int, col_INT64_UINT_64 int) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testintinv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testintinv +POSTHOOK: Output: default@testintinv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testintinv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testintinv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testintinv +PREHOOK: Output: default@testintinv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testintinv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testintinv -PREHOOK: query: select * from testintinv +POSTHOOK: Output: default@testintinv_n0 +PREHOOK: query: select * from testintinv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testintinv +PREHOOK: Input: default@testintinv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testintinv +POSTHOOK: query: select * from testintinv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testintinv +POSTHOOK: Input: default@testintinv_n0 #### A masked pattern was here #### 0 0 0 0 127 127 127 127 @@ -125,45 +125,45 @@ POSTHOOK: Input: default@testintinv NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testintinv +PREHOOK: query: drop table testintinv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testintinv -PREHOOK: Output: default@testintinv -POSTHOOK: query: drop table testintinv +PREHOOK: Input: default@testintinv_n0 +PREHOOK: Output: default@testintinv_n0 +POSTHOOK: query: drop table testintinv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testintinv -POSTHOOK: Output: default@testintinv -PREHOOK: query: create table testsmallintinv +POSTHOOK: Input: default@testintinv_n0 +POSTHOOK: Output: default@testintinv_n0 +PREHOOK: query: create table testsmallintinv_n0 (col_INT32_UINT_8 smallint, col_INT32_UINT_16 smallint, col_INT32_UINT_32 smallint, col_INT64_UINT_64 smallint) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testsmallintinv -POSTHOOK: query: create table testsmallintinv +PREHOOK: Output: default@testsmallintinv_n0 +POSTHOOK: query: create table testsmallintinv_n0 (col_INT32_UINT_8 smallint, col_INT32_UINT_16 smallint, col_INT32_UINT_32 smallint, col_INT64_UINT_64 smallint) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testsmallintinv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testsmallintinv +POSTHOOK: Output: default@testsmallintinv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testsmallintinv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testsmallintinv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testsmallintinv +PREHOOK: Output: default@testsmallintinv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into 
table testsmallintinv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testsmallintinv -PREHOOK: query: select * from testsmallintinv +POSTHOOK: Output: default@testsmallintinv_n0 +PREHOOK: query: select * from testsmallintinv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testsmallintinv +PREHOOK: Input: default@testsmallintinv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testsmallintinv +POSTHOOK: query: select * from testsmallintinv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testsmallintinv +POSTHOOK: Input: default@testsmallintinv_n0 #### A masked pattern was here #### 0 0 0 0 127 127 127 127 @@ -174,45 +174,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testsmallintinv +PREHOOK: query: drop table testsmallintinv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testsmallintinv -PREHOOK: Output: default@testsmallintinv -POSTHOOK: query: drop table testsmallintinv +PREHOOK: Input: default@testsmallintinv_n0 +PREHOOK: Output: default@testsmallintinv_n0 +POSTHOOK: query: drop table testsmallintinv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testsmallintinv -POSTHOOK: Output: default@testsmallintinv -PREHOOK: query: create table testtinyintinv +POSTHOOK: Input: default@testsmallintinv_n0 +POSTHOOK: Output: default@testsmallintinv_n0 +PREHOOK: query: create table testtinyintinv_n0 (col_INT32_UINT_8 tinyint, col_INT32_UINT_16 tinyint, col_INT32_UINT_32 tinyint, col_INT64_UINT_64 tinyint) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testtinyintinv -POSTHOOK: query: create table testtinyintinv +PREHOOK: Output: default@testtinyintinv_n0 +POSTHOOK: query: create table testtinyintinv_n0 (col_INT32_UINT_8 tinyint, col_INT32_UINT_16 tinyint, col_INT32_UINT_32 tinyint, col_INT64_UINT_64 tinyint) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testtinyintinv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testtinyintinv +POSTHOOK: Output: default@testtinyintinv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testtinyintinv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testtinyintinv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testtinyintinv +PREHOOK: Output: default@testtinyintinv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testtinyintinv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testtinyintinv -PREHOOK: query: select * from testtinyintinv +POSTHOOK: Output: default@testtinyintinv_n0 +PREHOOK: query: select * from testtinyintinv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testtinyintinv +PREHOOK: Input: default@testtinyintinv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testtinyintinv +POSTHOOK: query: select * from testtinyintinv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testtinyintinv +POSTHOOK: Input: default@testtinyintinv_n0 #### A masked pattern was here #### 0 0 0 0 127 127 127 127 @@ -223,45 +223,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testtinyintinv +PREHOOK: query: drop 
table testtinyintinv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testtinyintinv -PREHOOK: Output: default@testtinyintinv -POSTHOOK: query: drop table testtinyintinv +PREHOOK: Input: default@testtinyintinv_n0 +PREHOOK: Output: default@testtinyintinv_n0 +POSTHOOK: query: drop table testtinyintinv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testtinyintinv -POSTHOOK: Output: default@testtinyintinv -PREHOOK: query: create table testfloatinv +POSTHOOK: Input: default@testtinyintinv_n0 +POSTHOOK: Output: default@testtinyintinv_n0 +PREHOOK: query: create table testfloatinv_n0 (col_INT32_UINT_8 float, col_INT32_UINT_16 float, col_INT32_UINT_32 float, col_INT64_UINT_64 float) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testfloatinv -POSTHOOK: query: create table testfloatinv +PREHOOK: Output: default@testfloatinv_n0 +POSTHOOK: query: create table testfloatinv_n0 (col_INT32_UINT_8 float, col_INT32_UINT_16 float, col_INT32_UINT_32 float, col_INT64_UINT_64 float) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testfloatinv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testfloatinv +POSTHOOK: Output: default@testfloatinv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testfloatinv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testfloatinv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testfloatinv +PREHOOK: Output: default@testfloatinv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testfloatinv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testfloatinv -PREHOOK: query: select * from testfloatinv +POSTHOOK: Output: default@testfloatinv_n0 +PREHOOK: query: select * from testfloatinv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testfloatinv +PREHOOK: Input: default@testfloatinv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testfloatinv +POSTHOOK: query: select * from testfloatinv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testfloatinv +POSTHOOK: Input: default@testfloatinv_n0 #### A masked pattern was here #### 0.0 0.0 0.0 0.0 127.0 127.0 127.0 127.0 @@ -272,45 +272,45 @@ POSTHOOK: Input: default@testfloatinv NULL NULL NULL 4.2949673E9 NULL NULL NULL 9.223372E18 NULL NULL NULL NULL -PREHOOK: query: drop table testfloatinv +PREHOOK: query: drop table testfloatinv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testfloatinv -PREHOOK: Output: default@testfloatinv -POSTHOOK: query: drop table testfloatinv +PREHOOK: Input: default@testfloatinv_n0 +PREHOOK: Output: default@testfloatinv_n0 +POSTHOOK: query: drop table testfloatinv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testfloatinv -POSTHOOK: Output: default@testfloatinv -PREHOOK: query: create table testdoubleinv +POSTHOOK: Input: default@testfloatinv_n0 +POSTHOOK: Output: default@testfloatinv_n0 +PREHOOK: query: create table testdoubleinv_n0 (col_INT32_UINT_8 double, col_INT32_UINT_16 double, col_INT32_UINT_32 double, col_INT64_UINT_64 double) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdoubleinv -POSTHOOK: query: create table testdoubleinv +PREHOOK: Output: 
default@testdoubleinv_n0 +POSTHOOK: query: create table testdoubleinv_n0 (col_INT32_UINT_8 double, col_INT32_UINT_16 double, col_INT32_UINT_32 double, col_INT64_UINT_64 double) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdoubleinv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdoubleinv +POSTHOOK: Output: default@testdoubleinv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdoubleinv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdoubleinv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdoubleinv +PREHOOK: Output: default@testdoubleinv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdoubleinv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdoubleinv -PREHOOK: query: select * from testdoubleinv +POSTHOOK: Output: default@testdoubleinv_n0 +PREHOOK: query: select * from testdoubleinv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdoubleinv +PREHOOK: Input: default@testdoubleinv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdoubleinv +POSTHOOK: query: select * from testdoubleinv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdoubleinv +POSTHOOK: Input: default@testdoubleinv_n0 #### A masked pattern was here #### 0.0 0.0 0.0 0.0 127.0 127.0 127.0 127.0 @@ -321,45 +321,45 @@ POSTHOOK: Input: default@testdoubleinv NULL NULL NULL 4.294967295E9 NULL NULL NULL 9.223372036854776E18 NULL NULL NULL NULL -PREHOOK: query: drop table testdoubleinv +PREHOOK: query: drop table testdoubleinv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdoubleinv -PREHOOK: Output: default@testdoubleinv -POSTHOOK: query: drop table testdoubleinv +PREHOOK: Input: default@testdoubleinv_n0 +PREHOOK: Output: default@testdoubleinv_n0 +POSTHOOK: query: drop table testdoubleinv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdoubleinv -POSTHOOK: Output: default@testdoubleinv -PREHOOK: query: create table testdecimal22_2inv +POSTHOOK: Input: default@testdoubleinv_n0 +POSTHOOK: Output: default@testdoubleinv_n0 +PREHOOK: query: create table testdecimal22_2inv_n0 (col_INT32_UINT_8 decimal(22,2), col_INT32_UINT_16 decimal(22,2), col_INT32_UINT_32 decimal(22,2), col_INT64_UINT_64 decimal(22,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal22_2inv -POSTHOOK: query: create table testdecimal22_2inv +PREHOOK: Output: default@testdecimal22_2inv_n0 +POSTHOOK: query: create table testdecimal22_2inv_n0 (col_INT32_UINT_8 decimal(22,2), col_INT32_UINT_16 decimal(22,2), col_INT32_UINT_32 decimal(22,2), col_INT64_UINT_64 decimal(22,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal22_2inv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal22_2inv +POSTHOOK: Output: default@testdecimal22_2inv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal22_2inv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal22_2inv -POSTHOOK: query: load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table testdecimal22_2inv +PREHOOK: Output: default@testdecimal22_2inv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal22_2inv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal22_2inv -PREHOOK: query: select * from testdecimal22_2inv +POSTHOOK: Output: default@testdecimal22_2inv_n0 +PREHOOK: query: select * from testdecimal22_2inv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal22_2inv +PREHOOK: Input: default@testdecimal22_2inv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal22_2inv +POSTHOOK: query: select * from testdecimal22_2inv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal22_2inv +POSTHOOK: Input: default@testdecimal22_2inv_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 127.00 127.00 127.00 127.00 @@ -370,45 +370,45 @@ POSTHOOK: Input: default@testdecimal22_2inv NULL NULL NULL 4294967295.00 NULL NULL NULL 9223372036854775807.00 NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal22_2inv +PREHOOK: query: drop table testdecimal22_2inv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal22_2inv -PREHOOK: Output: default@testdecimal22_2inv -POSTHOOK: query: drop table testdecimal22_2inv +PREHOOK: Input: default@testdecimal22_2inv_n0 +PREHOOK: Output: default@testdecimal22_2inv_n0 +POSTHOOK: query: drop table testdecimal22_2inv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal22_2inv -POSTHOOK: Output: default@testdecimal22_2inv -PREHOOK: query: create table testdecimal13_2inv +POSTHOOK: Input: default@testdecimal22_2inv_n0 +POSTHOOK: Output: default@testdecimal22_2inv_n0 +PREHOOK: query: create table testdecimal13_2inv_n0 (col_INT32_UINT_8 decimal(13,2), col_INT32_UINT_16 decimal(13,2), col_INT32_UINT_32 decimal(13,2), col_INT64_UINT_64 decimal(13,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal13_2inv -POSTHOOK: query: create table testdecimal13_2inv +PREHOOK: Output: default@testdecimal13_2inv_n0 +POSTHOOK: query: create table testdecimal13_2inv_n0 (col_INT32_UINT_8 decimal(13,2), col_INT32_UINT_16 decimal(13,2), col_INT32_UINT_32 decimal(13,2), col_INT64_UINT_64 decimal(13,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal13_2inv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal13_2inv +POSTHOOK: Output: default@testdecimal13_2inv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal13_2inv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal13_2inv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal13_2inv +PREHOOK: Output: default@testdecimal13_2inv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal13_2inv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal13_2inv -PREHOOK: query: select * from testdecimal13_2inv +POSTHOOK: Output: default@testdecimal13_2inv_n0 +PREHOOK: query: select * from testdecimal13_2inv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal13_2inv 
+PREHOOK: Input: default@testdecimal13_2inv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal13_2inv +POSTHOOK: query: select * from testdecimal13_2inv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal13_2inv +POSTHOOK: Input: default@testdecimal13_2inv_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 127.00 127.00 127.00 127.00 @@ -419,45 +419,45 @@ POSTHOOK: Input: default@testdecimal13_2inv NULL NULL NULL 4294967295.00 NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal13_2inv +PREHOOK: query: drop table testdecimal13_2inv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal13_2inv -PREHOOK: Output: default@testdecimal13_2inv -POSTHOOK: query: drop table testdecimal13_2inv +PREHOOK: Input: default@testdecimal13_2inv_n0 +PREHOOK: Output: default@testdecimal13_2inv_n0 +POSTHOOK: query: drop table testdecimal13_2inv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal13_2inv -POSTHOOK: Output: default@testdecimal13_2inv -PREHOOK: query: create table testdecimal8_2inv +POSTHOOK: Input: default@testdecimal13_2inv_n0 +POSTHOOK: Output: default@testdecimal13_2inv_n0 +PREHOOK: query: create table testdecimal8_2inv_n0 (col_INT32_UINT_8 decimal(8,2), col_INT32_UINT_16 decimal(8,2), col_INT32_UINT_32 decimal(8,2), col_INT64_UINT_64 decimal(8,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal8_2inv -POSTHOOK: query: create table testdecimal8_2inv +PREHOOK: Output: default@testdecimal8_2inv_n0 +POSTHOOK: query: create table testdecimal8_2inv_n0 (col_INT32_UINT_8 decimal(8,2), col_INT32_UINT_16 decimal(8,2), col_INT32_UINT_32 decimal(8,2), col_INT64_UINT_64 decimal(8,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal8_2inv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal8_2inv +POSTHOOK: Output: default@testdecimal8_2inv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal8_2inv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal8_2inv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal8_2inv +PREHOOK: Output: default@testdecimal8_2inv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal8_2inv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal8_2inv -PREHOOK: query: select * from testdecimal8_2inv +POSTHOOK: Output: default@testdecimal8_2inv_n0 +PREHOOK: query: select * from testdecimal8_2inv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal8_2inv +PREHOOK: Input: default@testdecimal8_2inv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal8_2inv +POSTHOOK: query: select * from testdecimal8_2inv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal8_2inv +POSTHOOK: Input: default@testdecimal8_2inv_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 127.00 127.00 127.00 127.00 @@ -468,45 +468,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal8_2inv +PREHOOK: query: drop table testdecimal8_2inv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: 
default@testdecimal8_2inv -PREHOOK: Output: default@testdecimal8_2inv -POSTHOOK: query: drop table testdecimal8_2inv +PREHOOK: Input: default@testdecimal8_2inv_n0 +PREHOOK: Output: default@testdecimal8_2inv_n0 +POSTHOOK: query: drop table testdecimal8_2inv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal8_2inv -POSTHOOK: Output: default@testdecimal8_2inv -PREHOOK: query: create table testdecimal6_2inv +POSTHOOK: Input: default@testdecimal8_2inv_n0 +POSTHOOK: Output: default@testdecimal8_2inv_n0 +PREHOOK: query: create table testdecimal6_2inv_n0 (col_INT32_UINT_8 decimal(6,2), col_INT32_UINT_16 decimal(6,2), col_INT32_UINT_32 decimal(6,2), col_INT64_UINT_64 decimal(6,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal6_2inv -POSTHOOK: query: create table testdecimal6_2inv +PREHOOK: Output: default@testdecimal6_2inv_n0 +POSTHOOK: query: create table testdecimal6_2inv_n0 (col_INT32_UINT_8 decimal(6,2), col_INT32_UINT_16 decimal(6,2), col_INT32_UINT_32 decimal(6,2), col_INT64_UINT_64 decimal(6,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal6_2inv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal6_2inv +POSTHOOK: Output: default@testdecimal6_2inv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal6_2inv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal6_2inv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal6_2inv +PREHOOK: Output: default@testdecimal6_2inv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal6_2inv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal6_2inv -PREHOOK: query: select * from testdecimal6_2inv +POSTHOOK: Output: default@testdecimal6_2inv_n0 +PREHOOK: query: select * from testdecimal6_2inv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal6_2inv +PREHOOK: Input: default@testdecimal6_2inv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal6_2inv +POSTHOOK: query: select * from testdecimal6_2inv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal6_2inv +POSTHOOK: Input: default@testdecimal6_2inv_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 127.00 127.00 127.00 127.00 @@ -517,45 +517,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal6_2inv +PREHOOK: query: drop table testdecimal6_2inv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal6_2inv -PREHOOK: Output: default@testdecimal6_2inv -POSTHOOK: query: drop table testdecimal6_2inv +PREHOOK: Input: default@testdecimal6_2inv_n0 +PREHOOK: Output: default@testdecimal6_2inv_n0 +POSTHOOK: query: drop table testdecimal6_2inv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal6_2inv -POSTHOOK: Output: default@testdecimal6_2inv -PREHOOK: query: create table testdecimal3_2inv +POSTHOOK: Input: default@testdecimal6_2inv_n0 +POSTHOOK: Output: default@testdecimal6_2inv_n0 +PREHOOK: query: create table testdecimal3_2inv_n0 (col_INT32_UINT_8 decimal(3,2), col_INT32_UINT_16 decimal(3,2), col_INT32_UINT_32 decimal(3,2), col_INT64_UINT_64 
decimal(3,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal3_2inv -POSTHOOK: query: create table testdecimal3_2inv +PREHOOK: Output: default@testdecimal3_2inv_n0 +POSTHOOK: query: create table testdecimal3_2inv_n0 (col_INT32_UINT_8 decimal(3,2), col_INT32_UINT_16 decimal(3,2), col_INT32_UINT_32 decimal(3,2), col_INT64_UINT_64 decimal(3,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal3_2inv -PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal3_2inv +POSTHOOK: Output: default@testdecimal3_2inv_n0 +PREHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal3_2inv_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal3_2inv -POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal3_2inv +PREHOOK: Output: default@testdecimal3_2inv_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal3_2inv_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal3_2inv -PREHOOK: query: select * from testdecimal3_2inv +POSTHOOK: Output: default@testdecimal3_2inv_n0 +PREHOOK: query: select * from testdecimal3_2inv_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal3_2inv +PREHOOK: Input: default@testdecimal3_2inv_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal3_2inv +POSTHOOK: query: select * from testdecimal3_2inv_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal3_2inv +POSTHOOK: Input: default@testdecimal3_2inv_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 NULL NULL NULL NULL @@ -566,45 +566,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal3_2inv +PREHOOK: query: drop table testdecimal3_2inv_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal3_2inv -PREHOOK: Output: default@testdecimal3_2inv -POSTHOOK: query: drop table testdecimal3_2inv +PREHOOK: Input: default@testdecimal3_2inv_n0 +PREHOOK: Output: default@testdecimal3_2inv_n0 +POSTHOOK: query: drop table testdecimal3_2inv_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal3_2inv -POSTHOOK: Output: default@testdecimal3_2inv -PREHOOK: query: create table testbigintvalid +POSTHOOK: Input: default@testdecimal3_2inv_n0 +POSTHOOK: Output: default@testdecimal3_2inv_n0 +PREHOOK: query: create table testbigintvalid_n0 (col_INT32_UINT_8 bigint, col_INT32_UINT_16 bigint, col_INT32_UINT_32 bigint, col_INT64_UINT_64 bigint) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testbigintvalid -POSTHOOK: query: create table testbigintvalid +PREHOOK: Output: default@testbigintvalid_n0 +POSTHOOK: query: create table testbigintvalid_n0 (col_INT32_UINT_8 bigint, col_INT32_UINT_16 bigint, col_INT32_UINT_32 bigint, col_INT64_UINT_64 bigint) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testbigintvalid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testbigintvalid +POSTHOOK: Output: default@testbigintvalid_n0 +PREHOOK: query: load data local inpath 
'../../data/files/data_with_valid_values.parquet' into table testbigintvalid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testbigintvalid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testbigintvalid +PREHOOK: Output: default@testbigintvalid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testbigintvalid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testbigintvalid -PREHOOK: query: select * from testbigintvalid +POSTHOOK: Output: default@testbigintvalid_n0 +PREHOOK: query: select * from testbigintvalid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testbigintvalid +PREHOOK: Input: default@testbigintvalid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testbigintvalid +POSTHOOK: query: select * from testbigintvalid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testbigintvalid +POSTHOOK: Input: default@testbigintvalid_n0 #### A masked pattern was here #### 0 0 0 0 127 127 127 127 @@ -615,45 +615,45 @@ NULL NULL 2147483647 2147483647 NULL NULL NULL 4294967295 NULL NULL NULL 9223372036854775807 NULL NULL NULL NULL -PREHOOK: query: drop table testbigintvalid +PREHOOK: query: drop table testbigintvalid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testbigintvalid -PREHOOK: Output: default@testbigintvalid -POSTHOOK: query: drop table testbigintvalid +PREHOOK: Input: default@testbigintvalid_n0 +PREHOOK: Output: default@testbigintvalid_n0 +POSTHOOK: query: drop table testbigintvalid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testbigintvalid -POSTHOOK: Output: default@testbigintvalid -PREHOOK: query: create table testintvalid +POSTHOOK: Input: default@testbigintvalid_n0 +POSTHOOK: Output: default@testbigintvalid_n0 +PREHOOK: query: create table testintvalid_n0 (col_INT32_UINT_8 int, col_INT32_UINT_16 int, col_INT32_UINT_32 int, col_INT64_UINT_64 int) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testintvalid -POSTHOOK: query: create table testintvalid +PREHOOK: Output: default@testintvalid_n0 +POSTHOOK: query: create table testintvalid_n0 (col_INT32_UINT_8 int, col_INT32_UINT_16 int, col_INT32_UINT_32 int, col_INT64_UINT_64 int) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testintvalid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testintvalid +POSTHOOK: Output: default@testintvalid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testintvalid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testintvalid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testintvalid +PREHOOK: Output: default@testintvalid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testintvalid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testintvalid -PREHOOK: query: select * from testintvalid +POSTHOOK: Output: default@testintvalid_n0 +PREHOOK: query: select * from testintvalid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testintvalid +PREHOOK: Input: default@testintvalid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testintvalid +POSTHOOK: query: 
select * from testintvalid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testintvalid +POSTHOOK: Input: default@testintvalid_n0 #### A masked pattern was here #### 0 0 0 0 127 127 127 127 @@ -664,45 +664,45 @@ NULL NULL 2147483647 2147483647 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testintvalid +PREHOOK: query: drop table testintvalid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testintvalid -PREHOOK: Output: default@testintvalid -POSTHOOK: query: drop table testintvalid +PREHOOK: Input: default@testintvalid_n0 +PREHOOK: Output: default@testintvalid_n0 +POSTHOOK: query: drop table testintvalid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testintvalid -POSTHOOK: Output: default@testintvalid -PREHOOK: query: create table testsmallintvalid +POSTHOOK: Input: default@testintvalid_n0 +POSTHOOK: Output: default@testintvalid_n0 +PREHOOK: query: create table testsmallintvalid_n0 (col_INT32_UINT_8 smallint, col_INT32_UINT_16 smallint, col_INT32_UINT_32 smallint, col_INT64_UINT_64 smallint) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testsmallintvalid -POSTHOOK: query: create table testsmallintvalid +PREHOOK: Output: default@testsmallintvalid_n0 +POSTHOOK: query: create table testsmallintvalid_n0 (col_INT32_UINT_8 smallint, col_INT32_UINT_16 smallint, col_INT32_UINT_32 smallint, col_INT64_UINT_64 smallint) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testsmallintvalid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testsmallintvalid +POSTHOOK: Output: default@testsmallintvalid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testsmallintvalid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testsmallintvalid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testsmallintvalid +PREHOOK: Output: default@testsmallintvalid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testsmallintvalid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testsmallintvalid -PREHOOK: query: select * from testsmallintvalid +POSTHOOK: Output: default@testsmallintvalid_n0 +PREHOOK: query: select * from testsmallintvalid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testsmallintvalid +PREHOOK: Input: default@testsmallintvalid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testsmallintvalid +POSTHOOK: query: select * from testsmallintvalid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testsmallintvalid +POSTHOOK: Input: default@testsmallintvalid_n0 #### A masked pattern was here #### 0 0 0 0 127 127 127 127 @@ -713,45 +713,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testsmallintvalid +PREHOOK: query: drop table testsmallintvalid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testsmallintvalid -PREHOOK: Output: default@testsmallintvalid -POSTHOOK: query: drop table testsmallintvalid +PREHOOK: Input: default@testsmallintvalid_n0 +PREHOOK: Output: default@testsmallintvalid_n0 +POSTHOOK: query: drop table testsmallintvalid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testsmallintvalid -POSTHOOK: Output: default@testsmallintvalid -PREHOOK: 
query: create table testtinyintvalid +POSTHOOK: Input: default@testsmallintvalid_n0 +POSTHOOK: Output: default@testsmallintvalid_n0 +PREHOOK: query: create table testtinyintvalid_n0 (col_INT32_UINT_8 tinyint, col_INT32_UINT_16 tinyint, col_INT32_UINT_32 tinyint, col_INT64_UINT_64 tinyint) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testtinyintvalid -POSTHOOK: query: create table testtinyintvalid +PREHOOK: Output: default@testtinyintvalid_n0 +POSTHOOK: query: create table testtinyintvalid_n0 (col_INT32_UINT_8 tinyint, col_INT32_UINT_16 tinyint, col_INT32_UINT_32 tinyint, col_INT64_UINT_64 tinyint) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testtinyintvalid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testtinyintvalid +POSTHOOK: Output: default@testtinyintvalid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testtinyintvalid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testtinyintvalid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testtinyintvalid +PREHOOK: Output: default@testtinyintvalid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testtinyintvalid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testtinyintvalid -PREHOOK: query: select * from testtinyintvalid +POSTHOOK: Output: default@testtinyintvalid_n0 +PREHOOK: query: select * from testtinyintvalid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testtinyintvalid +PREHOOK: Input: default@testtinyintvalid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testtinyintvalid +POSTHOOK: query: select * from testtinyintvalid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testtinyintvalid +POSTHOOK: Input: default@testtinyintvalid_n0 #### A masked pattern was here #### 0 0 0 0 127 127 127 127 @@ -762,45 +762,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testtinyintvalid +PREHOOK: query: drop table testtinyintvalid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testtinyintvalid -PREHOOK: Output: default@testtinyintvalid -POSTHOOK: query: drop table testtinyintvalid +PREHOOK: Input: default@testtinyintvalid_n0 +PREHOOK: Output: default@testtinyintvalid_n0 +POSTHOOK: query: drop table testtinyintvalid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testtinyintvalid -POSTHOOK: Output: default@testtinyintvalid -PREHOOK: query: create table testfloatvalid +POSTHOOK: Input: default@testtinyintvalid_n0 +POSTHOOK: Output: default@testtinyintvalid_n0 +PREHOOK: query: create table testfloatvalid_n0 (col_INT32_UINT_8 float, col_INT32_UINT_16 float, col_INT32_UINT_32 float, col_INT64_UINT_64 float) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testfloatvalid -POSTHOOK: query: create table testfloatvalid +PREHOOK: Output: default@testfloatvalid_n0 +POSTHOOK: query: create table testfloatvalid_n0 (col_INT32_UINT_8 float, col_INT32_UINT_16 float, col_INT32_UINT_32 float, col_INT64_UINT_64 float) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testfloatvalid -PREHOOK: query: load data local inpath 
'../../data/files/data_with_valid_values.parquet' into table testfloatvalid +POSTHOOK: Output: default@testfloatvalid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testfloatvalid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testfloatvalid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testfloatvalid +PREHOOK: Output: default@testfloatvalid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testfloatvalid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testfloatvalid -PREHOOK: query: select * from testfloatvalid +POSTHOOK: Output: default@testfloatvalid_n0 +PREHOOK: query: select * from testfloatvalid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testfloatvalid +PREHOOK: Input: default@testfloatvalid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testfloatvalid +POSTHOOK: query: select * from testfloatvalid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testfloatvalid +POSTHOOK: Input: default@testfloatvalid_n0 #### A masked pattern was here #### 0.0 0.0 0.0 0.0 127.0 127.0 127.0 127.0 @@ -811,45 +811,45 @@ NULL NULL 2.14748365E9 2.14748365E9 NULL NULL NULL 4.2949673E9 NULL NULL NULL 9.223372E18 NULL NULL NULL NULL -PREHOOK: query: drop table testfloatvalid +PREHOOK: query: drop table testfloatvalid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testfloatvalid -PREHOOK: Output: default@testfloatvalid -POSTHOOK: query: drop table testfloatvalid +PREHOOK: Input: default@testfloatvalid_n0 +PREHOOK: Output: default@testfloatvalid_n0 +POSTHOOK: query: drop table testfloatvalid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testfloatvalid -POSTHOOK: Output: default@testfloatvalid -PREHOOK: query: create table testdoublevalid +POSTHOOK: Input: default@testfloatvalid_n0 +POSTHOOK: Output: default@testfloatvalid_n0 +PREHOOK: query: create table testdoublevalid_n0 (col_INT32_UINT_8 double, col_INT32_UINT_16 double, col_INT32_UINT_32 double, col_INT64_UINT_64 double) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdoublevalid -POSTHOOK: query: create table testdoublevalid +PREHOOK: Output: default@testdoublevalid_n0 +POSTHOOK: query: create table testdoublevalid_n0 (col_INT32_UINT_8 double, col_INT32_UINT_16 double, col_INT32_UINT_32 double, col_INT64_UINT_64 double) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdoublevalid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdoublevalid +POSTHOOK: Output: default@testdoublevalid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdoublevalid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdoublevalid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdoublevalid +PREHOOK: Output: default@testdoublevalid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdoublevalid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdoublevalid -PREHOOK: query: select * from testdoublevalid +POSTHOOK: Output: default@testdoublevalid_n0 +PREHOOK: query: select * 
from testdoublevalid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdoublevalid +PREHOOK: Input: default@testdoublevalid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdoublevalid +POSTHOOK: query: select * from testdoublevalid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdoublevalid +POSTHOOK: Input: default@testdoublevalid_n0 #### A masked pattern was here #### 0.0 0.0 0.0 0.0 127.0 127.0 127.0 127.0 @@ -860,45 +860,45 @@ NULL NULL 2.147483647E9 2.147483647E9 NULL NULL NULL 4.294967295E9 NULL NULL NULL 9.223372036854776E18 NULL NULL NULL NULL -PREHOOK: query: drop table testdoublevalid +PREHOOK: query: drop table testdoublevalid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdoublevalid -PREHOOK: Output: default@testdoublevalid -POSTHOOK: query: drop table testdoublevalid +PREHOOK: Input: default@testdoublevalid_n0 +PREHOOK: Output: default@testdoublevalid_n0 +POSTHOOK: query: drop table testdoublevalid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdoublevalid -POSTHOOK: Output: default@testdoublevalid -PREHOOK: query: create table testdecimal22_2valid +POSTHOOK: Input: default@testdoublevalid_n0 +POSTHOOK: Output: default@testdoublevalid_n0 +PREHOOK: query: create table testdecimal22_2valid_n0 (col_INT32_UINT_8 decimal(22,2), col_INT32_UINT_16 decimal(22,2), col_INT32_UINT_32 decimal(22,2), col_INT64_UINT_64 decimal(22,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal22_2valid -POSTHOOK: query: create table testdecimal22_2valid +PREHOOK: Output: default@testdecimal22_2valid_n0 +POSTHOOK: query: create table testdecimal22_2valid_n0 (col_INT32_UINT_8 decimal(22,2), col_INT32_UINT_16 decimal(22,2), col_INT32_UINT_32 decimal(22,2), col_INT64_UINT_64 decimal(22,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal22_2valid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal22_2valid +POSTHOOK: Output: default@testdecimal22_2valid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal22_2valid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal22_2valid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal22_2valid +PREHOOK: Output: default@testdecimal22_2valid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal22_2valid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal22_2valid -PREHOOK: query: select * from testdecimal22_2valid +POSTHOOK: Output: default@testdecimal22_2valid_n0 +PREHOOK: query: select * from testdecimal22_2valid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal22_2valid +PREHOOK: Input: default@testdecimal22_2valid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal22_2valid +POSTHOOK: query: select * from testdecimal22_2valid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal22_2valid +POSTHOOK: Input: default@testdecimal22_2valid_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 127.00 127.00 127.00 127.00 @@ -909,45 +909,45 @@ NULL NULL 2147483647.00 2147483647.00 NULL NULL NULL 4294967295.00 NULL NULL NULL 9223372036854775807.00 NULL NULL NULL NULL -PREHOOK: query: drop 
table testdecimal22_2valid +PREHOOK: query: drop table testdecimal22_2valid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal22_2valid -PREHOOK: Output: default@testdecimal22_2valid -POSTHOOK: query: drop table testdecimal22_2valid +PREHOOK: Input: default@testdecimal22_2valid_n0 +PREHOOK: Output: default@testdecimal22_2valid_n0 +POSTHOOK: query: drop table testdecimal22_2valid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal22_2valid -POSTHOOK: Output: default@testdecimal22_2valid -PREHOOK: query: create table testdecimal13_2valid +POSTHOOK: Input: default@testdecimal22_2valid_n0 +POSTHOOK: Output: default@testdecimal22_2valid_n0 +PREHOOK: query: create table testdecimal13_2valid_n0 (col_INT32_UINT_8 decimal(13,2), col_INT32_UINT_16 decimal(13,2), col_INT32_UINT_32 decimal(13,2), col_INT64_UINT_64 decimal(13,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal13_2valid -POSTHOOK: query: create table testdecimal13_2valid +PREHOOK: Output: default@testdecimal13_2valid_n0 +POSTHOOK: query: create table testdecimal13_2valid_n0 (col_INT32_UINT_8 decimal(13,2), col_INT32_UINT_16 decimal(13,2), col_INT32_UINT_32 decimal(13,2), col_INT64_UINT_64 decimal(13,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal13_2valid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal13_2valid +POSTHOOK: Output: default@testdecimal13_2valid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal13_2valid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal13_2valid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal13_2valid +PREHOOK: Output: default@testdecimal13_2valid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal13_2valid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal13_2valid -PREHOOK: query: select * from testdecimal13_2valid +POSTHOOK: Output: default@testdecimal13_2valid_n0 +PREHOOK: query: select * from testdecimal13_2valid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal13_2valid +PREHOOK: Input: default@testdecimal13_2valid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal13_2valid +POSTHOOK: query: select * from testdecimal13_2valid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal13_2valid +POSTHOOK: Input: default@testdecimal13_2valid_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 127.00 127.00 127.00 127.00 @@ -958,45 +958,45 @@ NULL NULL 2147483647.00 2147483647.00 NULL NULL NULL 4294967295.00 NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal13_2valid +PREHOOK: query: drop table testdecimal13_2valid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal13_2valid -PREHOOK: Output: default@testdecimal13_2valid -POSTHOOK: query: drop table testdecimal13_2valid +PREHOOK: Input: default@testdecimal13_2valid_n0 +PREHOOK: Output: default@testdecimal13_2valid_n0 +POSTHOOK: query: drop table testdecimal13_2valid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal13_2valid -POSTHOOK: Output: default@testdecimal13_2valid -PREHOOK: query: create table testdecimal8_2valid 
+POSTHOOK: Input: default@testdecimal13_2valid_n0 +POSTHOOK: Output: default@testdecimal13_2valid_n0 +PREHOOK: query: create table testdecimal8_2valid_n0 (col_INT32_UINT_8 decimal(8,2), col_INT32_UINT_16 decimal(8,2), col_INT32_UINT_32 decimal(8,2), col_INT64_UINT_64 decimal(8,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal8_2valid -POSTHOOK: query: create table testdecimal8_2valid +PREHOOK: Output: default@testdecimal8_2valid_n0 +POSTHOOK: query: create table testdecimal8_2valid_n0 (col_INT32_UINT_8 decimal(8,2), col_INT32_UINT_16 decimal(8,2), col_INT32_UINT_32 decimal(8,2), col_INT64_UINT_64 decimal(8,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal8_2valid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal8_2valid +POSTHOOK: Output: default@testdecimal8_2valid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal8_2valid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal8_2valid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal8_2valid +PREHOOK: Output: default@testdecimal8_2valid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal8_2valid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal8_2valid -PREHOOK: query: select * from testdecimal8_2valid +POSTHOOK: Output: default@testdecimal8_2valid_n0 +PREHOOK: query: select * from testdecimal8_2valid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal8_2valid +PREHOOK: Input: default@testdecimal8_2valid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal8_2valid +POSTHOOK: query: select * from testdecimal8_2valid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal8_2valid +POSTHOOK: Input: default@testdecimal8_2valid_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 127.00 127.00 127.00 127.00 @@ -1007,45 +1007,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal8_2valid +PREHOOK: query: drop table testdecimal8_2valid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal8_2valid -PREHOOK: Output: default@testdecimal8_2valid -POSTHOOK: query: drop table testdecimal8_2valid +PREHOOK: Input: default@testdecimal8_2valid_n0 +PREHOOK: Output: default@testdecimal8_2valid_n0 +POSTHOOK: query: drop table testdecimal8_2valid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal8_2valid -POSTHOOK: Output: default@testdecimal8_2valid -PREHOOK: query: create table testdecimal6_2valid +POSTHOOK: Input: default@testdecimal8_2valid_n0 +POSTHOOK: Output: default@testdecimal8_2valid_n0 +PREHOOK: query: create table testdecimal6_2valid_n0 (col_INT32_UINT_8 decimal(6,2), col_INT32_UINT_16 decimal(6,2), col_INT32_UINT_32 decimal(6,2), col_INT64_UINT_64 decimal(6,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal6_2valid -POSTHOOK: query: create table testdecimal6_2valid +PREHOOK: Output: default@testdecimal6_2valid_n0 +POSTHOOK: query: create table testdecimal6_2valid_n0 (col_INT32_UINT_8 decimal(6,2), col_INT32_UINT_16 decimal(6,2), col_INT32_UINT_32 
decimal(6,2), col_INT64_UINT_64 decimal(6,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal6_2valid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal6_2valid +POSTHOOK: Output: default@testdecimal6_2valid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal6_2valid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal6_2valid -POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal6_2valid +PREHOOK: Output: default@testdecimal6_2valid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal6_2valid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal6_2valid -PREHOOK: query: select * from testdecimal6_2valid +POSTHOOK: Output: default@testdecimal6_2valid_n0 +PREHOOK: query: select * from testdecimal6_2valid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal6_2valid +PREHOOK: Input: default@testdecimal6_2valid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal6_2valid +POSTHOOK: query: select * from testdecimal6_2valid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal6_2valid +POSTHOOK: Input: default@testdecimal6_2valid_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 127.00 127.00 127.00 127.00 @@ -1056,45 +1056,45 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal6_2valid +PREHOOK: query: drop table testdecimal6_2valid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal6_2valid -PREHOOK: Output: default@testdecimal6_2valid -POSTHOOK: query: drop table testdecimal6_2valid +PREHOOK: Input: default@testdecimal6_2valid_n0 +PREHOOK: Output: default@testdecimal6_2valid_n0 +POSTHOOK: query: drop table testdecimal6_2valid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal6_2valid -POSTHOOK: Output: default@testdecimal6_2valid -PREHOOK: query: create table testdecimal3_2valid +POSTHOOK: Input: default@testdecimal6_2valid_n0 +POSTHOOK: Output: default@testdecimal6_2valid_n0 +PREHOOK: query: create table testdecimal3_2valid_n0 (col_INT32_UINT_8 decimal(3,2), col_INT32_UINT_16 decimal(3,2), col_INT32_UINT_32 decimal(3,2), col_INT64_UINT_64 decimal(3,2)) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testdecimal3_2valid -POSTHOOK: query: create table testdecimal3_2valid +PREHOOK: Output: default@testdecimal3_2valid_n0 +POSTHOOK: query: create table testdecimal3_2valid_n0 (col_INT32_UINT_8 decimal(3,2), col_INT32_UINT_16 decimal(3,2), col_INT32_UINT_32 decimal(3,2), col_INT64_UINT_64 decimal(3,2)) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testdecimal3_2valid -PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal3_2valid +POSTHOOK: Output: default@testdecimal3_2valid_n0 +PREHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal3_2valid_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@testdecimal3_2valid -POSTHOOK: query: load data local inpath 
'../../data/files/data_with_valid_values.parquet' into table testdecimal3_2valid +PREHOOK: Output: default@testdecimal3_2valid_n0 +POSTHOOK: query: load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal3_2valid_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@testdecimal3_2valid -PREHOOK: query: select * from testdecimal3_2valid +POSTHOOK: Output: default@testdecimal3_2valid_n0 +PREHOOK: query: select * from testdecimal3_2valid_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@testdecimal3_2valid +PREHOOK: Input: default@testdecimal3_2valid_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from testdecimal3_2valid +POSTHOOK: query: select * from testdecimal3_2valid_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@testdecimal3_2valid +POSTHOOK: Input: default@testdecimal3_2valid_n0 #### A masked pattern was here #### 0.00 0.00 0.00 0.00 NULL NULL NULL NULL @@ -1105,11 +1105,11 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: drop table testdecimal3_2valid +PREHOOK: query: drop table testdecimal3_2valid_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testdecimal3_2valid -PREHOOK: Output: default@testdecimal3_2valid -POSTHOOK: query: drop table testdecimal3_2valid +PREHOOK: Input: default@testdecimal3_2valid_n0 +PREHOOK: Output: default@testdecimal3_2valid_n0 +POSTHOOK: query: drop table testdecimal3_2valid_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testdecimal3_2valid -POSTHOOK: Output: default@testdecimal3_2valid +POSTHOOK: Input: default@testdecimal3_2valid_n0 +POSTHOOK: Output: default@testdecimal3_2valid_n0 diff --git a/ql/src/test/results/clientpositive/recursive_dir.q.out b/ql/src/test/results/clientpositive/recursive_dir.q.out index 070f55b6bd..cec135b38b 100644 --- a/ql/src/test/results/clientpositive/recursive_dir.q.out +++ b/ql/src/test/results/clientpositive/recursive_dir.q.out @@ -1,72 +1,72 @@ -PREHOOK: query: CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE fact_daily_n1(x int) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@fact_daily -POSTHOOK: query: CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) +PREHOOK: Output: default@fact_daily_n1 +POSTHOOK: query: CREATE TABLE fact_daily_n1(x int) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@fact_daily -PREHOOK: query: CREATE TABLE fact_tz(x int) PARTITIONED BY (ds STRING, hr STRING) +POSTHOOK: Output: default@fact_daily_n1 +PREHOOK: query: CREATE TABLE fact_tz_n0(x int) PARTITIONED BY (ds STRING, hr STRING) #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default -PREHOOK: Output: default@fact_tz -POSTHOOK: query: CREATE TABLE fact_tz(x int) PARTITIONED BY (ds STRING, hr STRING) +PREHOOK: Output: default@fact_tz_n0 +POSTHOOK: query: CREATE TABLE fact_tz_n0(x int) PARTITIONED BY (ds STRING, hr STRING) #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default -POSTHOOK: Output: default@fact_tz -PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +POSTHOOK: Output: default@fact_tz_n0 +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz_n0 PARTITION (ds='1', hr='1') SELECT key+11 FROM src WHERE key=484 PREHOOK: type: QUERY 
 PREHOOK: Input: default@src
-PREHOOK: Output: default@fact_tz@ds=1/hr=1
-POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1')
+PREHOOK: Output: default@fact_tz_n0@ds=1/hr=1
+POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz_n0 PARTITION (ds='1', hr='1')
 SELECT key+11 FROM src WHERE key=484
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@fact_tz@ds=1/hr=1
-POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).x EXPRESSION []
-PREHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE')
+POSTHOOK: Output: default@fact_tz_n0@ds=1/hr=1
+POSTHOOK: Lineage: fact_tz_n0 PARTITION(ds=1,hr=1).x EXPRESSION []
+PREHOOK: query: ALTER TABLE fact_daily_n1 SET TBLPROPERTIES('EXTERNAL'='TRUE')
 PREHOOK: type: ALTERTABLE_PROPERTIES
-PREHOOK: Input: default@fact_daily
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE')
+PREHOOK: Input: default@fact_daily_n1
+PREHOOK: Output: default@fact_daily_n1
+POSTHOOK: query: ALTER TABLE fact_daily_n1 SET TBLPROPERTIES('EXTERNAL'='TRUE')
 POSTHOOK: type: ALTERTABLE_PROPERTIES
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Output: default@fact_daily
-PREHOOK: query: ALTER TABLE fact_daily ADD PARTITION (ds='1')
+POSTHOOK: Input: default@fact_daily_n1
+POSTHOOK: Output: default@fact_daily_n1
+PREHOOK: query: ALTER TABLE fact_daily_n1 ADD PARTITION (ds='1')
 #### A masked pattern was here ####
 PREHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
-PREHOOK: Output: default@fact_daily
-POSTHOOK: query: ALTER TABLE fact_daily ADD PARTITION (ds='1')
+PREHOOK: Output: default@fact_daily_n1
+POSTHOOK: query: ALTER TABLE fact_daily_n1 ADD PARTITION (ds='1')
 #### A masked pattern was here ####
 POSTHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
-POSTHOOK: Output: default@fact_daily
-POSTHOOK: Output: default@fact_daily@ds=1
-PREHOOK: query: SELECT * FROM fact_daily WHERE ds='1'
+POSTHOOK: Output: default@fact_daily_n1
+POSTHOOK: Output: default@fact_daily_n1@ds=1
+PREHOOK: query: SELECT * FROM fact_daily_n1 WHERE ds='1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n1
+PREHOOK: Input: default@fact_daily_n1@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM fact_daily WHERE ds='1'
+POSTHOOK: query: SELECT * FROM fact_daily_n1 WHERE ds='1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n1
+POSTHOOK: Input: default@fact_daily_n1@ds=1
 #### A masked pattern was here ####
 495	1
-PREHOOK: query: SELECT count(1) FROM fact_daily WHERE ds='1'
+PREHOOK: query: SELECT count(1) FROM fact_daily_n1 WHERE ds='1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@fact_daily
-PREHOOK: Input: default@fact_daily@ds=1
+PREHOOK: Input: default@fact_daily_n1
+PREHOOK: Input: default@fact_daily_n1@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(1) FROM fact_daily WHERE ds='1'
+POSTHOOK: query: SELECT count(1) FROM fact_daily_n1 WHERE ds='1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@fact_daily
-POSTHOOK: Input: default@fact_daily@ds=1
+POSTHOOK: Input: default@fact_daily_n1
+POSTHOOK: Input: default@fact_daily_n1@ds=1
 #### A masked pattern was here ####
 1
diff --git a/ql/src/test/results/clientpositive/reduce_deduplicate_exclude_gby.q.out b/ql/src/test/results/clientpositive/reduce_deduplicate_exclude_gby.q.out
index f48482e8b1..ef556b2784 100644
--- a/ql/src/test/results/clientpositive/reduce_deduplicate_exclude_gby.q.out
+++ b/ql/src/test/results/clientpositive/reduce_deduplicate_exclude_gby.q.out
@@ -1,24 +1,24 @@
-PREHOOK: query: create table t1( key_int1 int, key_int2 int, key_string1 string, key_string2 string)
+PREHOOK: query: create table t1_n12( key_int1_n12 int, key_int2 int, key_string1 string, key_string2 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1( key_int1 int, key_int2 int, key_string1 string, key_string2 string)
+PREHOOK: Output: default@t1_n12
+POSTHOOK: query: create table t1_n12( key_int1_n12 int, key_int2 int, key_string1 string, key_string2 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: select Q1.key_int1, sum(Q1.key_int1) from (select * from t1 cluster by key_int1) Q1 group by Q1.key_int1
+POSTHOOK: Output: default@t1_n12
+PREHOOK: query: select Q1.key_int1_n12, sum(Q1.key_int1_n12) from (select * from t1_n12 cluster by key_int1_n12) Q1 group by Q1.key_int1_n12
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n12
 #### A masked pattern was here ####
-POSTHOOK: query: select Q1.key_int1, sum(Q1.key_int1) from (select * from t1 cluster by key_int1) Q1 group by Q1.key_int1
+POSTHOOK: query: select Q1.key_int1_n12, sum(Q1.key_int1_n12) from (select * from t1_n12 cluster by key_int1_n12) Q1 group by Q1.key_int1_n12
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n12
 #### A masked pattern was here ####
-PREHOOK: query: drop table t1
+PREHOOK: query: drop table t1_n12
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: drop table t1
+PREHOOK: Input: default@t1_n12
+PREHOOK: Output: default@t1_n12
+POSTHOOK: query: drop table t1_n12
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
+POSTHOOK: Input: default@t1_n12
+POSTHOOK: Output: default@t1_n12
diff --git a/ql/src/test/results/clientpositive/remove_exprs_stats.q.out b/ql/src/test/results/clientpositive/remove_exprs_stats.q.out
index dc663bb721..4aeea45e8c 100644
--- a/ql/src/test/results/clientpositive/remove_exprs_stats.q.out
+++ b/ql/src/test/results/clientpositive/remove_exprs_stats.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table if not exists loc_staging (
+PREHOOK: query: create table if not exists loc_staging_n0 (
   state string,
   locid int,
   zip bigint,
@@ -6,8 +6,8 @@ PREHOOK: query: create table if not exists loc_staging (
 ) row format delimited fields terminated by '|' stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: create table if not exists loc_staging (
+PREHOOK: Output: default@loc_staging_n0
+POSTHOOK: query: create table if not exists loc_staging_n0 (
   state string,
   locid int,
   zip bigint,
@@ -15,56 +15,56 @@ POSTHOOK: query: create table if not exists loc_staging (
 ) row format delimited fields terminated by '|' stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: create table loc_orc like loc_staging
+POSTHOOK: Output: default@loc_staging_n0
+PREHOOK: query: create table loc_orc_n0 like loc_staging_n0
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: create table loc_orc like loc_staging
+PREHOOK: Output: default@loc_orc_n0
+POSTHOOK: query: create table loc_orc_n0 like loc_staging_n0
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: alter table loc_orc set fileformat orc
+POSTHOOK: Output: default@loc_orc_n0
+PREHOOK: query: alter table loc_orc_n0 set fileformat orc
 PREHOOK: type: ALTERTABLE_FILEFORMAT
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: alter table loc_orc set fileformat orc
+PREHOOK: Input: default@loc_orc_n0
+PREHOOK: Output: default@loc_orc_n0
+POSTHOOK: query: alter table loc_orc_n0 set fileformat orc
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
-PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+POSTHOOK: Input: default@loc_orc_n0
+POSTHOOK: Output: default@loc_orc_n0
+PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@loc_staging
-POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging
+PREHOOK: Output: default@loc_staging_n0
+POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@loc_staging
-PREHOOK: query: insert overwrite table loc_orc select * from loc_staging
+POSTHOOK: Output: default@loc_staging_n0
+PREHOOK: query: insert overwrite table loc_orc_n0 select * from loc_staging_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@loc_staging
-PREHOOK: Output: default@loc_orc
-POSTHOOK: query: insert overwrite table loc_orc select * from loc_staging
+PREHOOK: Input: default@loc_staging_n0
+PREHOOK: Output: default@loc_orc_n0
+POSTHOOK: query: insert overwrite table loc_orc_n0 select * from loc_staging_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@loc_staging
-POSTHOOK: Output: default@loc_orc
-POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ]
-POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ]
-POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ]
-PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
+POSTHOOK: Input: default@loc_staging_n0
+POSTHOOK: Output: default@loc_orc_n0
+POSTHOOK: Lineage: loc_orc_n0.locid SIMPLE [(loc_staging_n0)loc_staging_n0.FieldSchema(name:locid, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n0.state SIMPLE [(loc_staging_n0)loc_staging_n0.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n0.year SIMPLE [(loc_staging_n0)loc_staging_n0.FieldSchema(name:year, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_n0.zip SIMPLE [(loc_staging_n0)loc_staging_n0.FieldSchema(name:zip, type:bigint, comment:null), ]
+PREHOOK: query: analyze table loc_orc_n0 compute statistics for columns state,locid,zip,year
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@loc_orc
-PREHOOK: Output: default@loc_orc
+PREHOOK: Input: default@loc_orc_n0
+PREHOOK: Output: default@loc_orc_n0
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
+POSTHOOK: query: analyze table loc_orc_n0 compute statistics for columns state,locid,zip,year
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@loc_orc
-POSTHOOK: Output: default@loc_orc
+POSTHOOK: Input: default@loc_orc_n0
+POSTHOOK: Output: default@loc_orc_n0
 #### A masked pattern was here ####
-PREHOOK: query: explain select * from loc_orc where locid < 30
+PREHOOK: query: explain select * from loc_orc_n0 where locid < 30
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid < 30
+POSTHOOK: query: explain select * from loc_orc_n0 where locid < 30
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -75,7 +75,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: loc_orc
+          alias: loc_orc_n0
           Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -83,9 +83,9 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
-PREHOOK: query: explain select * from loc_orc where locid > 30
+PREHOOK: query: explain select * from loc_orc_n0 where locid > 30
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid > 30
+POSTHOOK: query: explain select * from loc_orc_n0 where locid > 30
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -96,7 +96,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
             Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
              predicate: false (type: boolean)
@@ -120,9 +120,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid <= 30
+PREHOOK: query: explain select * from loc_orc_n0 where locid <= 30
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid <= 30
+POSTHOOK: query: explain select * from loc_orc_n0 where locid <= 30
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -133,7 +133,7 @@ STAGE PLANS:
       limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n0
          Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -141,9 +141,9 @@ STAGE PLANS:
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            ListSink
-PREHOOK: query: explain select * from loc_orc where locid >= 30
+PREHOOK: query: explain select * from loc_orc_n0 where locid >= 30
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid >= 30
+POSTHOOK: query: explain select * from loc_orc_n0 where locid >= 30
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -154,7 +154,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: false (type: boolean)
@@ -178,9 +178,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid < 6
+PREHOOK: query: explain select * from loc_orc_n0 where locid < 6
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid < 6
+POSTHOOK: query: explain select * from loc_orc_n0 where locid < 6
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -191,7 +191,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: (locid < 6) (type: boolean)
@@ -215,9 +215,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid > 6
+PREHOOK: query: explain select * from loc_orc_n0 where locid > 6
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid > 6
+POSTHOOK: query: explain select * from loc_orc_n0 where locid > 6
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -228,7 +228,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: false (type: boolean)
@@ -252,9 +252,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid <= 6
+PREHOOK: query: explain select * from loc_orc_n0 where locid <= 6
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid <= 6
+POSTHOOK: query: explain select * from loc_orc_n0 where locid <= 6
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -265,7 +265,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n0
          Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -273,9 +273,9 @@ STAGE PLANS:
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            ListSink
-PREHOOK: query: explain select * from loc_orc where locid >= 6
+PREHOOK: query: explain select * from loc_orc_n0 where locid >= 6
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid >= 6
+POSTHOOK: query: explain select * from loc_orc_n0 where locid >= 6
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -286,7 +286,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: (locid >= 6) (type: boolean)
@@ -310,9 +310,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid < 1
+PREHOOK: query: explain select * from loc_orc_n0 where locid < 1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid < 1
+POSTHOOK: query: explain select * from loc_orc_n0 where locid < 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -323,7 +323,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: false (type: boolean)
@@ -347,9 +347,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid > 1
+PREHOOK: query: explain select * from loc_orc_n0 where locid > 1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid > 1
+POSTHOOK: query: explain select * from loc_orc_n0 where locid > 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -360,7 +360,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: (locid > 1) (type: boolean)
@@ -384,9 +384,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid <= 1
+PREHOOK: query: explain select * from loc_orc_n0 where locid <= 1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid <= 1
+POSTHOOK: query: explain select * from loc_orc_n0 where locid <= 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -397,7 +397,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: (locid <= 1) (type: boolean)
@@ -421,9 +421,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid >= 1
+PREHOOK: query: explain select * from loc_orc_n0 where locid >= 1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid >= 1
+POSTHOOK: query: explain select * from loc_orc_n0 where locid >= 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -434,7 +434,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n0
          Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -442,9 +442,9 @@ STAGE PLANS:
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            ListSink
-PREHOOK: query: explain select * from loc_orc where locid IN (-4,5,30,40)
+PREHOOK: query: explain select * from loc_orc_n0 where locid IN (-4,5,30,40)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid IN (-4,5,30,40)
+POSTHOOK: query: explain select * from loc_orc_n0 where locid IN (-4,5,30,40)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -455,7 +455,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: (locid) IN (5) (type: boolean)
@@ -479,9 +479,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid IN (5,2,3)
+PREHOOK: query: explain select * from loc_orc_n0 where locid IN (5,2,3)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid IN (5,2,3)
+POSTHOOK: query: explain select * from loc_orc_n0 where locid IN (5,2,3)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -492,7 +492,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: (locid) IN (5, 2, 3) (type: boolean)
@@ -516,9 +516,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid IN (1,6,9)
+PREHOOK: query: explain select * from loc_orc_n0 where locid IN (1,6,9)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid IN (1,6,9)
+POSTHOOK: query: explain select * from loc_orc_n0 where locid IN (1,6,9)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -529,7 +529,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: (locid) IN (1, 6) (type: boolean)
@@ -553,9 +553,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid IN (40,30)
+PREHOOK: query: explain select * from loc_orc_n0 where locid IN (40,30)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid IN (40,30)
+POSTHOOK: query: explain select * from loc_orc_n0 where locid IN (40,30)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -566,7 +566,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: false (type: boolean)
@@ -590,36 +590,36 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: create table t ( s string)
+PREHOOK: query: create table t_n7 ( s string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t
-POSTHOOK: query: create table t ( s string)
+PREHOOK: Output: default@t_n7
+POSTHOOK: query: create table t_n7 ( s string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t
-PREHOOK: query: insert into t values (null),(null)
+POSTHOOK: Output: default@t_n7
+PREHOOK: query: insert into t_n7 values (null),(null)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values (null),(null)
+PREHOOK: Output: default@t_n7
+POSTHOOK: query: insert into t_n7 values (null),(null)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s EXPRESSION []
-PREHOOK: query: analyze table t compute statistics for columns s
+POSTHOOK: Output: default@t_n7
+POSTHOOK: Lineage: t_n7.s EXPRESSION []
+PREHOOK: query: analyze table t_n7 compute statistics for columns s
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@t
-PREHOOK: Output: default@t
+PREHOOK: Input: default@t_n7
+PREHOOK: Output: default@t_n7
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table t compute statistics for columns s
+POSTHOOK: query: analyze table t_n7 compute statistics for columns s
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@t
-POSTHOOK: Output: default@t
+POSTHOOK: Input: default@t_n7
+POSTHOOK: Output: default@t_n7
 #### A masked pattern was here ####
-PREHOOK: query: explain select * from t where s is null
+PREHOOK: query: explain select * from t_n7 where s is null
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from t where s is null
+POSTHOOK: query: explain select * from t_n7 where s is null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -630,7 +630,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: t
+          alias: t_n7
          Statistics: Num rows: 2 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
          Select Operator
            expressions: s (type: string)
@@ -638,9 +638,9 @@ STAGE PLANS:
            Statistics: Num rows: 2 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
            ListSink
-PREHOOK: query: explain select * from loc_orc where locid is not null
+PREHOOK: query: explain select * from loc_orc_n0 where locid is not null
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid is not null
+POSTHOOK: query: explain select * from loc_orc_n0 where locid is not null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -651,7 +651,7 @@ STAGE PLANS:
      limit: -1
      Processor Tree:
        TableScan
-          alias: loc_orc
+          alias: loc_orc_n0
          Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
          Select Operator
            expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
@@ -659,9 +659,9 @@ STAGE PLANS:
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            ListSink
-PREHOOK: query: explain select * from t where s is not null
+PREHOOK: query: explain select * from t_n7 where s is not null
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from t where s is not null
+POSTHOOK: query: explain select * from t_n7 where s is not null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -672,7 +672,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t
+            alias: t_n7
            Statistics: Num rows: 2 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: false (type: boolean)
@@ -696,9 +696,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from loc_orc where locid is null
+PREHOOK: query: explain select * from loc_orc_n0 where locid is null
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from loc_orc where locid is null
+POSTHOOK: query: explain select * from loc_orc_n0 where locid is null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -709,7 +709,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: loc_orc
+            alias: loc_orc_n0
            Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: false (type: boolean)
@@ -733,28 +733,28 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: insert into t values ('val1')
+PREHOOK: query: insert into t_n7 values ('val1')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t
-POSTHOOK: query: insert into t values ('val1')
+PREHOOK: Output: default@t_n7
+POSTHOOK: query: insert into t_n7 values ('val1')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.s SCRIPT []
-PREHOOK: query: analyze table t compute statistics for columns s
+POSTHOOK: Output: default@t_n7
+POSTHOOK: Lineage: t_n7.s SCRIPT []
+PREHOOK: query: analyze table t_n7 compute statistics for columns s
 PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@t
-PREHOOK: Output: default@t
+PREHOOK: Input: default@t_n7
+PREHOOK: Output: default@t_n7
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table t compute statistics for columns s
+POSTHOOK: query: analyze table t_n7 compute statistics for columns s
 POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@t
-POSTHOOK: Output: default@t
+POSTHOOK: Input: default@t_n7
+POSTHOOK: Output: default@t_n7
 #### A masked pattern was here ####
-PREHOOK: query: explain select * from t where s is not null
+PREHOOK: query: explain select * from t_n7 where s is not null
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from t where s is not null
+POSTHOOK: query: explain select * from t_n7 where s is not null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -765,7 +765,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t
+            alias: t_n7
            Statistics: Num rows: 3 Data size: 170 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: s is not null (type: boolean)
@@ -789,9 +789,9 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: explain select * from t where s is null
+PREHOOK: query: explain select * from t_n7 where s is null
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from t where s is null
+POSTHOOK: query: explain select * from t_n7 where s is null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -802,7 +802,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t
+            alias: t_n7
            Statistics: Num rows: 3 Data size: 170 Basic stats: COMPLETE Column stats: COMPLETE
            Filter Operator
              predicate: s is null (type: boolean)
diff --git a/ql/src/test/results/clientpositive/rename_partition_location.q.out b/ql/src/test/results/clientpositive/rename_partition_location.q.out
index 4fbc5de122..c7730efef2 100644
--- a/ql/src/test/results/clientpositive/rename_partition_location.q.out
+++ b/ql/src/test/results/clientpositive/rename_partition_location.q.out
@@ -1,49 +1,49 @@
-PREHOOK: query: CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: query: CREATE TABLE rename_partition_table_n0 (key STRING, value STRING) PARTITIONED BY (part STRING)
 STORED AS RCFILE
 #### A masked pattern was here ####
 PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
-PREHOOK: Output: default@rename_partition_table
-POSTHOOK: query: CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@rename_partition_table_n0
+POSTHOOK: query: CREATE TABLE rename_partition_table_n0 (key STRING, value STRING) PARTITIONED BY (part STRING)
 STORED AS RCFILE
 #### A masked pattern was here ####
 POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@rename_partition_table
-PREHOOK: query: INSERT OVERWRITE TABLE rename_partition_table PARTITION (part = '1') SELECT * FROM src
+POSTHOOK: Output: default@rename_partition_table_n0
+PREHOOK: query: INSERT OVERWRITE TABLE rename_partition_table_n0 PARTITION (part = '1') SELECT * FROM src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@rename_partition_table@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE rename_partition_table PARTITION (part = '1') SELECT * FROM src
+PREHOOK: Output: default@rename_partition_table_n0@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE rename_partition_table_n0 PARTITION (part = '1') SELECT * FROM src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@rename_partition_table@part=1
-POSTHOOK: Lineage: rename_partition_table PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: rename_partition_table PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@rename_partition_table_n0@part=1
+POSTHOOK: Lineage: rename_partition_table_n0 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: rename_partition_table_n0 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 #### A masked pattern was here ####
 PREHOOK: type: ALTERTABLE_LOCATION
-PREHOOK: Input: default@rename_partition_table
-PREHOOK: Output: default@rename_partition_table
+PREHOOK: Input: default@rename_partition_table_n0
+PREHOOK: Output: default@rename_partition_table_n0
 #### A masked pattern was here ####
 POSTHOOK: type: ALTERTABLE_LOCATION
-POSTHOOK: Input: default@rename_partition_table
-POSTHOOK: Output: default@rename_partition_table
+POSTHOOK: Input: default@rename_partition_table_n0
+POSTHOOK: Output: default@rename_partition_table_n0
 #### A masked pattern was here ####
-PREHOOK: query: ALTER TABLE rename_partition_table PARTITION (part = '1') RENAME TO PARTITION (part = '2')
+PREHOOK: query: ALTER TABLE rename_partition_table_n0 PARTITION (part = '1') RENAME TO PARTITION (part = '2')
 PREHOOK: type: ALTERTABLE_RENAMEPART
-PREHOOK: Input: default@rename_partition_table
-PREHOOK: Output: default@rename_partition_table@part=1
-POSTHOOK: query: ALTER TABLE rename_partition_table PARTITION (part = '1') RENAME TO PARTITION (part = '2')
+PREHOOK: Input: default@rename_partition_table_n0
+PREHOOK: Output: default@rename_partition_table_n0@part=1
+POSTHOOK: query: ALTER TABLE rename_partition_table_n0 PARTITION (part = '1') RENAME TO PARTITION (part = '2')
 POSTHOOK: type: ALTERTABLE_RENAMEPART
-POSTHOOK: Input: default@rename_partition_table
-POSTHOOK: Input: default@rename_partition_table@part=1
-POSTHOOK: Output: default@rename_partition_table@part=1
-POSTHOOK: Output: default@rename_partition_table@part=2
-PREHOOK: query: SELECT count(*) FROM rename_partition_table where part = '2'
+POSTHOOK: Input: default@rename_partition_table_n0
+POSTHOOK: Input: default@rename_partition_table_n0@part=1
+POSTHOOK: Output: default@rename_partition_table_n0@part=1
+POSTHOOK: Output: default@rename_partition_table_n0@part=2
+PREHOOK: query: SELECT count(*) FROM rename_partition_table_n0 where part = '2'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@rename_partition_table
+PREHOOK: Input: default@rename_partition_table_n0
 #### A masked pattern was here ####
 500
 PREHOOK: query: CREATE TABLE rename_partition_table_2 (key STRING, value STRING) PARTITIONED BY (part STRING)
@@ -65,10 +65,10 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@rename_partition_table_2
 #### A masked pattern was here ####
 500
-PREHOOK: query: DROP TABLE rename_partition_table
+PREHOOK: query: DROP TABLE rename_partition_table_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@rename_partition_table
-PREHOOK: Output: default@rename_partition_table
+PREHOOK: Input: default@rename_partition_table_n0
+PREHOOK: Output: default@rename_partition_table_n0
 PREHOOK: query: DROP TABLE rename_partition_table_2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@rename_partition_table_2
diff --git a/ql/src/test/results/clientpositive/repair.q.out b/ql/src/test/results/clientpositive/repair.q.out
index 581afe8000..a8dbda92fe 100644
--- a/ql/src/test/results/clientpositive/repair.q.out
+++ b/ql/src/test/results/clientpositive/repair.q.out
@@ -1,47 +1,44 @@
-PREHOOK: query: DROP TABLE IF EXISTS repairtable
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n4
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS repairtable
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n4
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: query: CREATE TABLE repairtable_n4(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+PREHOOK: Output: default@repairtable_n4
+POSTHOOK: query: CREATE TABLE repairtable_n4(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Output: default@repairtable_n4
+PREHOOK: query: MSCK TABLE repairtable_n4
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n4
+POSTHOOK: query: MSCK TABLE repairtable_n4
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: MSCK TABLE default.repairtable
+POSTHOOK: Output: default@repairtable_n4
+PREHOOK: query: MSCK TABLE default.repairtable_n4
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n4
+POSTHOOK: query: MSCK TABLE default.repairtable_n4
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore:	repairtable:p1=a/p2=a	repairtable:p1=b/p2=a
-PREHOOK: query: MSCK REPAIR TABLE default.repairtable
+POSTHOOK: Output: default@repairtable_n4
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n4
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK REPAIR TABLE default.repairtable
+PREHOOK: Output: default@repairtable_n4
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n4
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-Partitions not in metastore:	repairtable:p1=a/p2=a	repairtable:p1=b/p2=a
-#### A masked pattern was here ####
-PREHOOK: query: MSCK TABLE repairtable
+POSTHOOK: Output: default@repairtable_n4
+PREHOOK: query: MSCK TABLE repairtable_n4
 PREHOOK: type: MSCK
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: MSCK TABLE repairtable
+PREHOOK: Output: default@repairtable_n4
+POSTHOOK: query: MSCK TABLE repairtable_n4
 POSTHOOK: type: MSCK
-POSTHOOK: Output: default@repairtable
-PREHOOK: query: DROP TABLE default.repairtable
+POSTHOOK: Output: default@repairtable_n4
+PREHOOK: query: DROP TABLE default.repairtable_n4
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@repairtable
-PREHOOK: Output: default@repairtable
-POSTHOOK: query: DROP TABLE default.repairtable
+PREHOOK: Input: default@repairtable_n4
+PREHOOK: Output: default@repairtable_n4
+POSTHOOK: query: DROP TABLE default.repairtable_n4
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@repairtable
-POSTHOOK: Output: default@repairtable
+POSTHOOK: Input: default@repairtable_n4
+POSTHOOK: Output: default@repairtable_n4
diff --git a/ql/src/test/results/clientpositive/runtime_skewjoin_mapjoin_spark.q.out b/ql/src/test/results/clientpositive/runtime_skewjoin_mapjoin_spark.q.out
index 04cdffcaf2..9555ae35c2 100644
--- a/ql/src/test/results/clientpositive/runtime_skewjoin_mapjoin_spark.q.out
+++ b/ql/src/test/results/clientpositive/runtime_skewjoin_mapjoin_spark.q.out
@@ -1,31 +1,31 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n61(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n61
+POSTHOOK: query: CREATE TABLE T1_n61(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n61
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n61
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n61
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n61
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
+POSTHOOK: Output: default@t1_n61
 PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM
  (SELECT src1.key,src1.value FROM src src1 JOIN src src2 ON src1.key=src2.key) a
  JOIN
- (SELECT src.key,src.value FROM src JOIN T1 ON src.key=T1.key) b
+ (SELECT src.key,src.value FROM src JOIN T1_n61 ON src.key=T1_n61.key) b
  ON a.key=b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM
  (SELECT src1.key,src1.value FROM src src1 JOIN src src2 ON src1.key=src2.key) a
  JOIN
- (SELECT src.key,src.value FROM src JOIN T1 ON src.key=T1.key) b
+ (SELECT src.key,src.value FROM src JOIN T1_n61 ON src.key=T1_n61.key) b
  ON a.key=b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -617,13 +617,13 @@ STAGE PLANS:
   Stage: Stage-37
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_1:$hdt$_2:t1
+        $hdt$_1:$hdt$_2:t1_n61
           Fetch Operator
            limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_1:$hdt$_2:t1
+        $hdt$_1:$hdt$_2:t1_n61
          TableScan
-            alias: t1
+            alias: t1_n61
            Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -714,18 +714,18 @@ STAGE PLANS:
 PREHOOK: query: SELECT COUNT(*) FROM
  (SELECT src1.key,src1.value FROM src src1 JOIN src src2 ON src1.key=src2.key) a
  JOIN
- (SELECT src.key,src.value FROM src JOIN T1 ON src.key=T1.key) b
+ (SELECT src.key,src.value FROM src JOIN T1_n61 ON src.key=T1_n61.key) b
  ON a.key=b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n61
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT COUNT(*) FROM
  (SELECT src1.key,src1.value FROM src src1 JOIN src src2 ON src1.key=src2.key) a
  JOIN
- (SELECT src.key,src.value FROM src JOIN T1 ON src.key=T1.key) b
+ (SELECT src.key,src.value FROM src JOIN T1_n61 ON src.key=T1_n61.key) b
  ON a.key=b.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n61
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/sample1.q.out b/ql/src/test/results/clientpositive/sample1.q.out
index 09b9ac74f3..adaf3cf387 100644
--- a/ql/src/test/results/clientpositive/sample1.q.out
+++ b/ql/src/test/results/clientpositive/sample1.q.out
@@ -1,18 +1,18 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n74(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n74
+POSTHOOK: query: CREATE TABLE dest1_n74(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n74
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n74 SELECT s.*
 FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
 WHERE s.ds='2008-04-08' and s.hr='11'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n74 SELECT s.*
 FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
 WHERE s.ds='2008-04-08' and s.hr='11'
 POSTHOOK: type: QUERY
@@ -61,17 +61,17 @@ STAGE PLANS:
              columns.comments
              columns.types int:string:string:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n74
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+              serialization.ddl struct dest1_n74 { i32 key, string value, string dt, string hr}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n74
          TotalFiles: 1
          GatherStats: true
          MultiFileSpray: false
@@ -201,17 +201,17 @@ STAGE PLANS:
              columns.comments
              columns.types int:string:string:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n74
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+              serialization.ddl struct dest1_n74 { i32 key, string value, string dt, string hr}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n74

  Stage: Stage-2
    Stats Work
@@ -220,7 +220,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value, dt, hr
          Column Types: int, string, string, string
-          Table: default.dest1
+          Table: default.dest1_n74
          Is Table Level Stats: true

  Stage: Stage-3
@@ -245,17 +245,17 @@ STAGE PLANS:
              columns.comments
              columns.types int:string:string:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n74
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+              serialization.ddl struct dest1_n74 { i32 key, string value, string dt, string hr}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n74
          TotalFiles: 1
          GatherStats: false
          MultiFileSpray: false
@@ -276,11 +276,11 @@ STAGE PLANS:
              columns.comments
              columns.types int:string:string:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n74
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+              serialization.ddl struct dest1_n74 { i32 key, string value, string dt, string hr}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -298,18 +298,18 @@ STAGE PLANS:
              columns.comments
              columns.types int:string:string:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n74
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+              serialization.ddl struct dest1_n74 { i32 key, string value, string dt, string hr}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
-          name: default.dest1
+            name: default.dest1_n74
+          name: default.dest1_n74
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -335,17 +335,17 @@ STAGE PLANS:
              columns.comments
              columns.types int:string:string:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n74
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+              serialization.ddl struct dest1_n74 { i32 key, string value, string dt, string hr}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n74
          TotalFiles: 1
          GatherStats: false
          MultiFileSpray: false
@@ -366,11 +366,11 @@ STAGE PLANS:
              columns.comments
              columns.types int:string:string:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n74
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+              serialization.ddl struct dest1_n74 { i32 key, string value, string dt, string hr}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -388,18 +388,18 @@ STAGE PLANS:
              columns.comments
              columns.types int:string:string:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n74
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+              serialization.ddl struct dest1_n74 { i32 key, string value, string dt, string hr}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
-          name: default.dest1
+            name: default.dest1_n74
+          name: default.dest1_n74
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -409,31 +409,31 @@ STAGE PLANS:
          hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*
+PREHOOK: query: INSERT OVERWRITE TABLE dest1_n74 SELECT s.*
 FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
 WHERE s.ds='2008-04-08' and s.hr='11'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*
+PREHOOK: Output: default@dest1_n74
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n74 SELECT s.*
 FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
 WHERE s.ds='2008-04-08' and s.hr='11'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.dt SIMPLE [(srcpart)s.FieldSchema(name:ds, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.hr SIMPLE [(srcpart)s.FieldSchema(name:hr, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)s.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(srcpart)s.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n74
+POSTHOOK: Lineage: dest1_n74.dt SIMPLE [(srcpart)s.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n74.hr SIMPLE [(srcpart)s.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n74.key EXPRESSION [(srcpart)s.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1_n74.value SIMPLE [(srcpart)s.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n74.* FROM dest1_n74
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n74
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n74.* FROM dest1_n74
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n74
 #### A masked pattern was here ####
 238	val_238	2008-04-08	11
 86	val_86	2008-04-08	11
diff --git a/ql/src/test/results/clientpositive/sample2.q.out b/ql/src/test/results/clientpositive/sample2.q.out
index 53c3df202e..9d7ef752ac 100644
--- a/ql/src/test/results/clientpositive/sample2.q.out
+++ b/ql/src/test/results/clientpositive/sample2.q.out
@@ -1,17 +1,17 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n26(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n26
+POSTHOOK: query: CREATE TABLE dest1_n26(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n26
 PREHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n26 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n26 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -60,17 +60,17 @@ STAGE PLANS:
              columns.comments
              columns.types int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n26
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n26 { i32 key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n26
          TotalFiles: 1
          GatherStats: true
          MultiFileSpray: false
@@ -201,17 +201,17 @@ STAGE PLANS:
              columns.comments
              columns.types int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n26
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n26 { i32 key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n26

  Stage: Stage-2
    Stats Work
@@ -220,7 +220,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.dest1
+          Table: default.dest1_n26
          Is Table Level Stats: true

  Stage: Stage-3
@@ -245,17 +245,17 @@ STAGE PLANS:
              columns.comments
              columns.types int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n26
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n26 { i32 key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n26
          TotalFiles: 1
          GatherStats: false
          MultiFileSpray: false
@@ -276,11 +276,11 @@ STAGE PLANS:
              columns.comments
              columns.types int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n26
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n26 { i32 key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -298,18 +298,18 @@ STAGE PLANS:
              columns.comments
              columns.types int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n26
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n26 { i32 key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
-          name: default.dest1
+            name: default.dest1_n26
+          name: default.dest1_n26
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -335,17 +335,17 @@ STAGE PLANS:
              columns.comments
              columns.types int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n26
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n26 { i32 key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
+            name: default.dest1_n26
          TotalFiles: 1
          GatherStats: false
          MultiFileSpray: false
@@ -366,11 +366,11 @@ STAGE PLANS:
              columns.comments
              columns.types int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n26
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n26 { i32 key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
@@ -388,18 +388,18 @@ STAGE PLANS:
              columns.comments
              columns.types int:string
 #### A masked pattern was here ####
-              name default.dest1
+              name default.dest1_n26
              numFiles 0
              numRows 0
              rawDataSize 0
-              serialization.ddl struct dest1 { i32 key, string value}
+              serialization.ddl struct dest1_n26 { i32 key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 0
 #### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.dest1
-          name: default.dest1
+            name: default.dest1_n26
+          name: default.dest1_n26
      Truncated Path -> Alias:
 #### A masked pattern was here ####
@@ -409,209 +409,99 @@ STAGE PLANS:
          hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*
+PREHOOK: query: INSERT OVERWRITE TABLE dest1_n26 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcbucket
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*
+PREHOOK: Output: default@dest1_n26
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n26 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcbucket
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: dest1.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n26
+POSTHOOK: Lineage: dest1_n26.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1_n26.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SELECT dest1_n26.* FROM dest1_n26 order by key, value
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n26
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n26.* FROM dest1_n26 order by key, value
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n26
 #### A masked pattern was here ####
 2	val_2
 2	val_3
-3	val_4
 6	val_7
 6	val_7
 10	val_10
 10	val_11
-17	val_17
-19	val_19
-19	val_20
 20	val_20
 20	val_21
 20	val_21
-21	val_22
-21	val_22
-21	val_22
-21	val_22
-27	val_27
-29	val_30
-29	val_30
 30	val_30
 30	val_31
-31	val_32
 40	val_41
 40	val_41
-41	val_41
-43	val_43
 46	val_47
 48	val_49
 48	val_49
-51	val_51
-51	val_51
-51	val_52
 54	val_54
-57	val_57
 58	val_58
 58	val_58
 58	val_59
 58	val_59
-59	val_60
 60	val_61
-61	val_62
 64	val_64
-65	val_65
-65	val_66
-65	val_66
-67	val_67
-67	val_67
 68	val_69
-69	val_69
-69	val_70
 70	val_70
 70	val_70
 70	val_70
 70	val_71
-77	val_77
-77	val_78
-77	val_78
 80	val_80
 80	val_81
-83	val_83
-83	val_83
 84	val_84
 84	val_84
-85	val_85
-85	val_86
 86	val_86
 86	val_87
-89	val_90
-89	val_90
-89	val_90
 90	val_90
 90	val_90
 90	val_90
-91	val_92
-93	val_94
-93	val_94
-93	val_94
 96	val_96
-97	val_97
-97	val_97
-97	val_98
-97	val_98
 98	val_98
 98	val_98
-99	val_100
-101	val_102
-105	val_105
-105	val_106
-105	val_106
 106	val_107
 110	val_111
-113	val_113
-113	val_113
 116	val_116
 116	val_117
-117	val_118
-117	val_118
-119	val_119
-119	val_119
-119	val_119
-119	val_120
-119	val_120
-119	val_120
-121	val_122
-121	val_122
-123	val_124
-123	val_124
 126	val_126
 126	val_127
 126	val_127
 132	val_133
 132	val_133
-133	val_133
-133	val_134
 134	val_134
 134	val_134
 134	val_135
-135	val_136
-135	val_136
-135	val_136
-137	val_137
-137	val_137
-137	val_138
 140	val_141
 146	val_146
 146	val_146
-149	val_149
-149	val_149
-149	val_150
-153	val_153
-153	val_154
-153	val_154
 156	val_156
 156	val_157
 156	val_157
-157	val_157
-157	val_158
-157	val_158
 158	val_158
 162	val_162
 162	val_163
-163	val_163
 164	val_164
 164	val_164
 164	val_165
 164	val_165
-165	val_165
-165	val_165
-165	val_166
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-177	val_177
-177	val_178
-177	val_178
 178	val_178
 178	val_179
 178	val_179
-181	val_181
 182	val_183
 184	val_185
-185	val_186
-187	val_187
-187	val_187
-187	val_187
 190	val_190
-191	val_191
-191	val_191
-191	val_192
-195	val_195
-195	val_195
-197	val_197
-197	val_197
-197	val_198
-199	val_199
-199	val_199
-199	val_199
-199	val_200
-201	val_201
 202	val_202
 206	val_207
 206	val_207
@@ -621,57 +511,34 @@ POSTHOOK: Input: default@dest1
 208	val_208
 212	val_213
 214	val_214
-215	val_216
 216	val_216
 216	val_216
 216	val_217
-221	val_221
-221	val_221
 226	val_226
 226	val_227
 226	val_227
 226	val_227
 226	val_227
-229	val_229
-229	val_229
-231	val_232
-233	val_233
-233	val_233
-237	val_237
-237	val_237
 238	val_238
 238	val_238
 238	val_239
-239	val_239
-239	val_239
-239	val_240
-239	val_240
 240	val_241
-243	val_244
-243	val_244
 244	val_244
 244	val_245
 244	val_245
 244	val_245
 248	val_248
 248	val_249
-249	val_249
-249	val_250
-249	val_250
 252	val_252
 252	val_253
 254	val_255
-255	val_255
-255	val_255
 256	val_256
 256	val_256
 256	val_257
 260	val_260
 260	val_261
 260	val_261
-261	val_262
 266	val_266
-271	val_272
 272	val_272
 272	val_272
 272	val_273
@@ -681,20 +548,10 @@ POSTHOOK: Input: default@dest1
 284	val_285
 286	val_286
 286	val_287
-287	val_287
-287	val_288
-287	val_288
-289	val_289
-289	val_290
-291	val_291
-291	val_292
-291	val_292
 292	val_292
 292	val_293
 292	val_293
 304	val_305
-307	val_307
-307	val_307
 308	val_308
 308	val_309
 308	val_309
@@ -702,81 +559,37 @@ POSTHOOK: Input: default@dest1
 310	val_311
 310	val_311
 310	val_311
-311	val_311
-311	val_311
-311	val_311
-313	val_314
-315	val_315
 316	val_316
 316	val_316
 316	val_316
-317	val_317
-317	val_317
-317	val_318
 324	val_325
-325	val_325
-325	val_325
 326	val_327
-327	val_327
-327	val_327
-327	val_327
 332	val_332
 334	val_335
 336	val_336
 336	val_337
-337	val_338
 338	val_338
 338	val_339
-339	val_339
-341	val_341
-341	val_342
-341	val_342
-341	val_342
 342	val_342
 342	val_342
 342	val_343
-343	val_344
 344	val_344
 344	val_344
 344	val_345
-347	val_348
-347	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_349
-349	val_350
-349	val_350
-349	val_350
-349	val_350
-351	val_351
-351	val_352
-351	val_352
 352	val_353
 352	val_353
-353	val_353
-353	val_353
-353	val_354
-355	val_356
-355	val_356
 360	val_360
 360	val_361
 362	val_362
 364	val_364
 364	val_365
-365	val_365
 368	val_368
-369	val_369
-369	val_369
-369	val_369
-369	val_370
-371	val_372
-371	val_372
-371	val_372
-371	val_372
-377	val_377
 378	val_378
 378	val_379
 384	val_384
@@ -789,24 +602,13 @@ POSTHOOK: Input: default@dest1
 386	val_387
 386	val_387
 388	val_389
-391	val_392
-391	val_392
 392	val_392
 392	val_393
 392	val_393
-393	val_393
-393	val_394
-393	val_394
 394	val_394
 396	val_396
 396	val_396
 396	val_396
-397	val_397
-397	val_397
-399	val_399
-399	val_399
-399	val_400
-399	val_400
 402	val_402
 402	val_403
 402	val_403
@@ -816,36 +618,13 @@ POSTHOOK: Input: default@dest1
 404	val_405
 404	val_405
 404	val_405
-407	val_407
-407	val_408
-407	val_408
-407	val_408
 408	val_409
 408	val_409
-409	val_409
-409	val_409
-409	val_409
-409	val_410
-409	val_410
 410	val_411
-411	val_411
-411	val_412
 414	val_414
 414	val_414
 414	val_415
-417	val_417
-417	val_417
-417	val_417
-419	val_419
-421	val_421
-421	val_422
-421	val_422
-423	val_424
-425	val_426
 426	val_427
-427	val_427
-427	val_428
-427	val_428
 428	val_429
 430	val_430
 430	val_430
@@ -853,24 +632,13 @@ POSTHOOK: Input: default@dest1
 430	val_431
 432	val_432
 432	val_433
-435	val_435
-435	val_436
-437	val_437
-437	val_438
 440	val_441
 440	val_441
-443	val_443
-443	val_444
-443	val_444
-443	val_444
 444	val_444
 446	val_446
 446	val_447
 446	val_447
-449	val_449
 452	val_452
-453	val_453
-453	val_454
 454	val_454
 454	val_454
 454	val_454
@@ -878,19 +646,10 @@ POSTHOOK: Input: default@dest1
 454	val_455
 458	val_458
 458	val_458
-459	val_459
-459	val_459
-459	val_460
-463	val_463
-463	val_463
-463	val_464
 466	val_466
 466	val_466
 466	val_466
-467	val_467
-467	val_468
 472	val_472
-473	val_474
 474	val_475
 474	val_475
 476	val_477
@@ -899,7 +658,6 @@ POSTHOOK: Input: default@dest1
 478	val_478
 478	val_479
 478	val_479
-479	val_479
 480	val_480
 480	val_480
 480	val_480
@@ -907,23 +665,11 @@ POSTHOOK: Input: default@dest1
 480	val_481
 482	val_482
 482	val_483
-483	val_483
 484	val_484
 484	val_485
-485	val_485
-485	val_486
-485	val_486
 488	val_489
 490	val_490
 490	val_491
-491	val_491
-491	val_492
-491 val_492 -495 val_495 -495 val_496 -497 val_497 -497 val_498 -497 val_498 498 val_498 498 val_498 498 val_498 diff --git a/ql/src/test/results/clientpositive/sample4.q.out b/ql/src/test/results/clientpositive/sample4.q.out index 46ec309452..7786184c08 100644 --- a/ql/src/test/results/clientpositive/sample4.q.out +++ b/ql/src/test/results/clientpositive/sample4.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n98(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n98 +POSTHOOK: query: CREATE TABLE dest1_n98(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n98 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 SELECT s.* +INSERT OVERWRITE TABLE dest1_n98 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 SELECT s.* +INSERT OVERWRITE TABLE dest1_n98 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -60,17 +60,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n98 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n98 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n98 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -201,17 +201,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n98 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n98 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n98 Stage: Stage-2 Stats Work @@ -220,7 +220,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n98 Is Table Level Stats: true Stage: Stage-3 @@ -245,17 +245,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n98 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n98 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n98 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -276,11 +276,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n98 
numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n98 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -298,18 +298,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n98 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n98 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n98 + name: default.dest1_n98 Truncated Path -> Alias: #### A masked pattern was here #### @@ -335,17 +335,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n98 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n98 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n98 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -366,11 +366,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n98 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n98 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -388,18 +388,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n98 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n98 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n98 + name: default.dest1_n98 Truncated Path -> Alias: #### A masked pattern was here #### @@ -409,209 +409,99 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n98 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +PREHOOK: Output: default@dest1_n98 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n98 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: 
default@dest1_n98 +POSTHOOK: Lineage: dest1_n98.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest1_n98.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT dest1_n98.* FROM dest1_n98 order by key, value PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n98 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n98.* FROM dest1_n98 order by key, value POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n98 #### A masked pattern was here #### 2 val_2 2 val_3 -3 val_4 6 val_7 6 val_7 10 val_10 10 val_11 -17 val_17 -19 val_19 -19 val_20 20 val_20 20 val_21 20 val_21 -21 val_22 -21 val_22 -21 val_22 -21 val_22 -27 val_27 -29 val_30 -29 val_30 30 val_30 30 val_31 -31 val_32 40 val_41 40 val_41 -41 val_41 -43 val_43 46 val_47 48 val_49 48 val_49 -51 val_51 -51 val_51 -51 val_52 54 val_54 -57 val_57 58 val_58 58 val_58 58 val_59 58 val_59 -59 val_60 60 val_61 -61 val_62 64 val_64 -65 val_65 -65 val_66 -65 val_66 -67 val_67 -67 val_67 68 val_69 -69 val_69 -69 val_70 70 val_70 70 val_70 70 val_70 70 val_71 -77 val_77 -77 val_78 -77 val_78 80 val_80 80 val_81 -83 val_83 -83 val_83 84 val_84 84 val_84 -85 val_85 -85 val_86 86 val_86 86 val_87 -89 val_90 -89 val_90 -89 val_90 90 val_90 90 val_90 90 val_90 -91 val_92 -93 val_94 -93 val_94 -93 val_94 96 val_96 -97 val_97 -97 val_97 -97 val_98 -97 val_98 98 val_98 98 val_98 -99 val_100 -101 val_102 -105 val_105 -105 val_106 -105 val_106 106 val_107 110 val_111 -113 val_113 -113 val_113 116 val_116 116 val_117 -117 val_118 -117 val_118 -119 val_119 -119 val_119 -119 val_119 -119 val_120 -119 val_120 -119 val_120 -121 val_122 -121 val_122 -123 val_124 -123 val_124 126 val_126 126 val_127 126 val_127 132 val_133 132 val_133 -133 val_133 -133 val_134 134 val_134 134 val_134 134 val_135 -135 val_136 -135 val_136 -135 val_136 -137 val_137 -137 val_137 -137 val_138 140 val_141 146 val_146 146 val_146 -149 val_149 -149 val_149 -149 val_150 -153 val_153 -153 val_154 -153 val_154 156 val_156 156 val_157 156 val_157 -157 val_157 -157 val_158 -157 val_158 158 val_158 162 val_162 162 val_163 -163 val_163 164 val_164 164 val_164 164 val_165 164 val_165 -165 val_165 -165 val_165 -165 val_166 -169 val_169 -169 val_169 -169 val_169 -169 val_169 -177 val_177 -177 val_178 -177 val_178 178 val_178 178 val_179 178 val_179 -181 val_181 182 val_183 184 val_185 -185 val_186 -187 val_187 -187 val_187 -187 val_187 190 val_190 -191 val_191 -191 val_191 -191 val_192 -195 val_195 -195 val_195 -197 val_197 -197 val_197 -197 val_198 -199 val_199 -199 val_199 -199 val_199 -199 val_200 -201 val_201 202 val_202 206 val_207 206 val_207 @@ -621,57 +511,34 @@ POSTHOOK: Input: default@dest1 208 val_208 212 val_213 214 val_214 -215 val_216 216 val_216 216 val_216 216 val_217 -221 val_221 -221 val_221 226 val_226 226 val_227 226 val_227 226 val_227 226 val_227 -229 val_229 -229 val_229 -231 val_232 -233 val_233 -233 val_233 -237 val_237 -237 val_237 238 val_238 238 val_238 238 val_239 -239 val_239 -239 val_239 -239 val_240 -239 val_240 240 val_241 -243 val_244 -243 val_244 244 val_244 244 val_245 244 val_245 244 val_245 248 val_248 248 val_249 -249 val_249 -249 val_250 -249 val_250 252 val_252 252 val_253 254 val_255 -255 val_255 -255 val_255 256 val_256 256 val_256 256 val_257 260 val_260 260 val_261 260 val_261 -261 val_262 266 val_266 -271 val_272 272 val_272 272 val_272 
272 val_273 @@ -681,20 +548,10 @@ POSTHOOK: Input: default@dest1 284 val_285 286 val_286 286 val_287 -287 val_287 -287 val_288 -287 val_288 -289 val_289 -289 val_290 -291 val_291 -291 val_292 -291 val_292 292 val_292 292 val_293 292 val_293 304 val_305 -307 val_307 -307 val_307 308 val_308 308 val_309 308 val_309 @@ -702,81 +559,37 @@ POSTHOOK: Input: default@dest1 310 val_311 310 val_311 310 val_311 -311 val_311 -311 val_311 -311 val_311 -313 val_314 -315 val_315 316 val_316 316 val_316 316 val_316 -317 val_317 -317 val_317 -317 val_318 324 val_325 -325 val_325 -325 val_325 326 val_327 -327 val_327 -327 val_327 -327 val_327 332 val_332 334 val_335 336 val_336 336 val_337 -337 val_338 338 val_338 338 val_339 -339 val_339 -341 val_341 -341 val_342 -341 val_342 -341 val_342 342 val_342 342 val_342 342 val_343 -343 val_344 344 val_344 344 val_344 344 val_345 -347 val_348 -347 val_348 348 val_348 348 val_348 348 val_348 348 val_348 348 val_348 348 val_349 -349 val_350 -349 val_350 -349 val_350 -349 val_350 -351 val_351 -351 val_352 -351 val_352 352 val_353 352 val_353 -353 val_353 -353 val_353 -353 val_354 -355 val_356 -355 val_356 360 val_360 360 val_361 362 val_362 364 val_364 364 val_365 -365 val_365 368 val_368 -369 val_369 -369 val_369 -369 val_369 -369 val_370 -371 val_372 -371 val_372 -371 val_372 -371 val_372 -377 val_377 378 val_378 378 val_379 384 val_384 @@ -789,24 +602,13 @@ POSTHOOK: Input: default@dest1 386 val_387 386 val_387 388 val_389 -391 val_392 -391 val_392 392 val_392 392 val_393 392 val_393 -393 val_393 -393 val_394 -393 val_394 394 val_394 396 val_396 396 val_396 396 val_396 -397 val_397 -397 val_397 -399 val_399 -399 val_399 -399 val_400 -399 val_400 402 val_402 402 val_403 402 val_403 @@ -816,36 +618,13 @@ POSTHOOK: Input: default@dest1 404 val_405 404 val_405 404 val_405 -407 val_407 -407 val_408 -407 val_408 -407 val_408 408 val_409 408 val_409 -409 val_409 -409 val_409 -409 val_409 -409 val_410 -409 val_410 410 val_411 -411 val_411 -411 val_412 414 val_414 414 val_414 414 val_415 -417 val_417 -417 val_417 -417 val_417 -419 val_419 -421 val_421 -421 val_422 -421 val_422 -423 val_424 -425 val_426 426 val_427 -427 val_427 -427 val_428 -427 val_428 428 val_429 430 val_430 430 val_430 @@ -853,24 +632,13 @@ POSTHOOK: Input: default@dest1 430 val_431 432 val_432 432 val_433 -435 val_435 -435 val_436 -437 val_437 -437 val_438 440 val_441 440 val_441 -443 val_443 -443 val_444 -443 val_444 -443 val_444 444 val_444 446 val_446 446 val_447 446 val_447 -449 val_449 452 val_452 -453 val_453 -453 val_454 454 val_454 454 val_454 454 val_454 @@ -878,19 +646,10 @@ POSTHOOK: Input: default@dest1 454 val_455 458 val_458 458 val_458 -459 val_459 -459 val_459 -459 val_460 -463 val_463 -463 val_463 -463 val_464 466 val_466 466 val_466 466 val_466 -467 val_467 -467 val_468 472 val_472 -473 val_474 474 val_475 474 val_475 476 val_477 @@ -899,7 +658,6 @@ POSTHOOK: Input: default@dest1 478 val_478 478 val_479 478 val_479 -479 val_479 480 val_480 480 val_480 480 val_480 @@ -907,23 +665,11 @@ POSTHOOK: Input: default@dest1 480 val_481 482 val_482 482 val_483 -483 val_483 484 val_484 484 val_485 -485 val_485 -485 val_486 -485 val_486 488 val_489 490 val_490 490 val_491 -491 val_491 -491 val_492 -491 val_492 -495 val_495 -495 val_496 -497 val_497 -497 val_498 -497 val_498 498 val_498 498 val_498 498 val_498 diff --git a/ql/src/test/results/clientpositive/sample5.q.out b/ql/src/test/results/clientpositive/sample5.q.out index 6bfb36629a..8fc55b60a4 100644 --- 
a/ql/src/test/results/clientpositive/sample5.q.out +++ b/ql/src/test/results/clientpositive/sample5.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n60(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n60 +POSTHOOK: query: CREATE TABLE dest1_n60(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n60 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 SELECT s.* +INSERT OVERWRITE TABLE dest1_n60 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 SELECT s.* +INSERT OVERWRITE TABLE dest1_n60 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s POSTHOOK: type: QUERY @@ -61,17 +61,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n60 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n60 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n60 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -202,17 +202,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n60 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n60 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n60 Stage: Stage-2 Stats Work @@ -221,7 +221,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n60 Is Table Level Stats: true Stage: Stage-3 @@ -246,17 +246,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n60 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n60 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n60 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -277,11 +277,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n60 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n60 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -299,18 +299,18 @@ STAGE 
PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n60 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n60 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n60 + name: default.dest1_n60 Truncated Path -> Alias: #### A masked pattern was here #### @@ -336,17 +336,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n60 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n60 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n60 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -367,11 +367,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n60 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n60 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -389,18 +389,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n60 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n60 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n60 + name: default.dest1_n60 Truncated Path -> Alias: #### A masked pattern was here #### @@ -410,27 +410,27 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n60 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +PREHOOK: Output: default@dest1_n60 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n60 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT dest1.* FROM dest1 SORT BY key, value +POSTHOOK: Output: default@dest1_n60 +POSTHOOK: Lineage: dest1_n60.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest1_n60.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT dest1_n60.* 
FROM dest1_n60 SORT BY key, value PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n60 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 SORT BY key, value +POSTHOOK: query: SELECT dest1_n60.* FROM dest1_n60 SORT BY key, value POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n60 #### A masked pattern was here #### 1 val_2 103 val_103 diff --git a/ql/src/test/results/clientpositive/sample6.q.out b/ql/src/test/results/clientpositive/sample6.q.out index 97d5dd1744..2a74c8e283 100644 --- a/ql/src/test/results/clientpositive/sample6.q.out +++ b/ql/src/test/results/clientpositive/sample6.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n24(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n24 +POSTHOOK: query: CREATE TABLE dest1_n24(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n24 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 SELECT s.* +INSERT OVERWRITE TABLE dest1_n24 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 SELECT s.* +INSERT OVERWRITE TABLE dest1_n24 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -60,17 +60,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n24 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n24 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n24 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -201,17 +201,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n24 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n24 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n24 Stage: Stage-2 Stats Work @@ -220,7 +220,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n24 Is Table Level Stats: true Stage: Stage-3 @@ -245,17 +245,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n24 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n24 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### 
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n24 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -276,11 +276,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n24 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n24 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -298,18 +298,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n24 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n24 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n24 + name: default.dest1_n24 Truncated Path -> Alias: #### A masked pattern was here #### @@ -335,17 +335,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n24 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n24 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n24 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -366,11 +366,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n24 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n24 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -388,18 +388,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n24 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n24 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n24 + name: default.dest1_n24 Truncated Path -> Alias: #### A masked pattern was here #### @@ -409,27 +409,27 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n24 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +PREHOOK: Output: default@dest1_n24 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n24 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s POSTHOOK: type: QUERY POSTHOOK: Input: 
default@srcbucket -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n24 +POSTHOOK: Lineage: dest1_n24.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest1_n24.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT dest1_n24.* FROM dest1_n24 order by key, value PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n24 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n24.* FROM dest1_n24 order by key, value POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n24 #### A masked pattern was here #### 2 val_2 2 val_3 diff --git a/ql/src/test/results/clientpositive/sample7.q.out b/ql/src/test/results/clientpositive/sample7.q.out index 07b6f0efb0..a90fd5ec12 100644 --- a/ql/src/test/results/clientpositive/sample7.q.out +++ b/ql/src/test/results/clientpositive/sample7.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1_n135(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n135 +POSTHOOK: query: CREATE TABLE dest1_n135(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n135 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 SELECT s.* +INSERT OVERWRITE TABLE dest1_n135 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE dest1 SELECT s.* +INSERT OVERWRITE TABLE dest1_n135 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100 POSTHOOK: type: QUERY @@ -61,17 +61,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n135 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n135 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n135 TotalFiles: 1 GatherStats: true MultiFileSpray: false @@ -202,17 +202,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n135 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n135 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n135 Stage: Stage-2 Stats Work @@ -221,7 +221,7 @@ STAGE PLANS: Column Stats 
Desc: Columns: key, value Column Types: int, string - Table: default.dest1 + Table: default.dest1_n135 Is Table Level Stats: true Stage: Stage-3 @@ -246,17 +246,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n135 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n135 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n135 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -277,11 +277,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n135 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n135 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -299,18 +299,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n135 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n135 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n135 + name: default.dest1_n135 Truncated Path -> Alias: #### A masked pattern was here #### @@ -336,17 +336,17 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n135 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n135 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n135 TotalFiles: 1 GatherStats: false MultiFileSpray: false @@ -367,11 +367,11 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n135 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n135 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 @@ -389,18 +389,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.dest1 + name default.dest1_n135 numFiles 0 numRows 0 rawDataSize 0 - serialization.ddl struct dest1 { i32 key, string value} + serialization.ddl struct dest1_n135 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 0 #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 - name: default.dest1 + name: default.dest1_n135 + name: default.dest1_n135 Truncated Path -> Alias: #### A masked pattern 
was here #### @@ -410,29 +410,29 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n135 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100 PREHOOK: type: QUERY PREHOOK: Input: default@srcbucket -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* +PREHOOK: Output: default@dest1_n135 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n135 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcbucket -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dest1.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n135 +POSTHOOK: Lineage: dest1_n135.key SIMPLE [(srcbucket)s.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest1_n135.value SIMPLE [(srcbucket)s.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT dest1_n135.* FROM dest1_n135 order by key, value PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n135 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n135.* FROM dest1_n135 order by key, value POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n135 #### A masked pattern was here #### 110 val_111 116 val_116 diff --git a/ql/src/test/results/clientpositive/sample_islocalmode_hook_use_metadata.q.out b/ql/src/test/results/clientpositive/sample_islocalmode_hook_use_metadata.q.out index 81b112da54..2f7026806d 100644 --- a/ql/src/test/results/clientpositive/sample_islocalmode_hook_use_metadata.q.out +++ b/ql/src/test/results/clientpositive/sample_islocalmode_hook_use_metadata.q.out @@ -1,74 +1,74 @@ -PREHOOK: query: create table sih_i_part (key int, value string) partitioned by (p string) +PREHOOK: query: create table sih_i_part_n1 (key int, value string) partitioned by (p string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@sih_i_part -POSTHOOK: query: create table sih_i_part (key int, value string) partitioned by (p string) +PREHOOK: Output: default@sih_i_part_n1 +POSTHOOK: query: create table sih_i_part_n1 (key int, value string) partitioned by (p string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@sih_i_part -PREHOOK: query: insert overwrite table sih_i_part partition (p='1') select key, value from src +POSTHOOK: Output: default@sih_i_part_n1 +PREHOOK: query: insert overwrite table sih_i_part_n1 partition (p='1') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@sih_i_part@p=1 -POSTHOOK: query: insert overwrite table sih_i_part partition (p='1') select key, value from src +PREHOOK: Output: default@sih_i_part_n1@p=1 +POSTHOOK: query: insert overwrite table sih_i_part_n1 partition (p='1') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@sih_i_part@p=1 -POSTHOOK: Lineage: sih_i_part PARTITION(p=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: sih_i_part PARTITION(p=1).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table sih_i_part partition (p='2') select key+10000, value from src +POSTHOOK: Output: default@sih_i_part_n1@p=1 +POSTHOOK: Lineage: sih_i_part_n1 PARTITION(p=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: sih_i_part_n1 PARTITION(p=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table sih_i_part_n1 partition (p='2') select key+10000, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@sih_i_part@p=2 -POSTHOOK: query: insert overwrite table sih_i_part partition (p='2') select key+10000, value from src +PREHOOK: Output: default@sih_i_part_n1@p=2 +POSTHOOK: query: insert overwrite table sih_i_part_n1 partition (p='2') select key+10000, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@sih_i_part@p=2 -POSTHOOK: Lineage: sih_i_part PARTITION(p=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: sih_i_part PARTITION(p=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table sih_i_part partition (p='3') select key+20000, value from src +POSTHOOK: Output: default@sih_i_part_n1@p=2 +POSTHOOK: Lineage: sih_i_part_n1 PARTITION(p=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: sih_i_part_n1 PARTITION(p=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table sih_i_part_n1 partition (p='3') select key+20000, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@sih_i_part@p=3 -POSTHOOK: query: insert overwrite table sih_i_part partition (p='3') select key+20000, value from src +PREHOOK: Output: default@sih_i_part_n1@p=3 +POSTHOOK: query: insert overwrite table sih_i_part_n1 partition (p='3') select key+20000, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@sih_i_part@p=3 -POSTHOOK: Lineage: sih_i_part PARTITION(p=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: sih_i_part PARTITION(p=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table sih_src as select key, value from sih_i_part order by key, value +POSTHOOK: Output: default@sih_i_part_n1@p=3 +POSTHOOK: Lineage: sih_i_part_n1 PARTITION(p=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: sih_i_part_n1 PARTITION(p=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table sih_src_n1 as select key, value from sih_i_part_n1 order by key, value PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@sih_i_part -PREHOOK: Input: default@sih_i_part@p=1 -PREHOOK: Input: default@sih_i_part@p=2 -PREHOOK: Input: default@sih_i_part@p=3 +PREHOOK: Input: default@sih_i_part_n1 +PREHOOK: Input: default@sih_i_part_n1@p=1 +PREHOOK: Input: default@sih_i_part_n1@p=2 +PREHOOK: Input: default@sih_i_part_n1@p=3 PREHOOK: Output: database:default -PREHOOK: Output: default@sih_src -POSTHOOK: query: create table sih_src as select key, value from sih_i_part order by key, value +PREHOOK: Output: default@sih_src_n1 +POSTHOOK: query: create table 
sih_src_n1 as select key, value from sih_i_part_n1 order by key, value POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@sih_i_part -POSTHOOK: Input: default@sih_i_part@p=1 -POSTHOOK: Input: default@sih_i_part@p=2 -POSTHOOK: Input: default@sih_i_part@p=3 +POSTHOOK: Input: default@sih_i_part_n1 +POSTHOOK: Input: default@sih_i_part_n1@p=1 +POSTHOOK: Input: default@sih_i_part_n1@p=2 +POSTHOOK: Input: default@sih_i_part_n1@p=3 POSTHOOK: Output: database:default -POSTHOOK: Output: default@sih_src -POSTHOOK: Lineage: sih_src.key SIMPLE [(sih_i_part)sih_i_part.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: sih_src.value SIMPLE [(sih_i_part)sih_i_part.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: create table sih_src2 as select key, value from sih_src order by key, value +POSTHOOK: Output: default@sih_src_n1 +POSTHOOK: Lineage: sih_src_n1.key SIMPLE [(sih_i_part_n1)sih_i_part_n1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: sih_src_n1.value SIMPLE [(sih_i_part_n1)sih_i_part_n1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create table sih_src2_n1 as select key, value from sih_src_n1 order by key, value PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@sih_src +PREHOOK: Input: default@sih_src_n1 PREHOOK: Output: database:default -PREHOOK: Output: default@sih_src2 -POSTHOOK: query: create table sih_src2 as select key, value from sih_src order by key, value +PREHOOK: Output: default@sih_src2_n1 +POSTHOOK: query: create table sih_src2_n1 as select key, value from sih_src_n1 order by key, value POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@sih_src +POSTHOOK: Input: default@sih_src_n1 POSTHOOK: Output: database:default -POSTHOOK: Output: default@sih_src2 -POSTHOOK: Lineage: sih_src2.key SIMPLE [(sih_src)sih_src.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: sih_src2.value SIMPLE [(sih_src)sih_src.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: desc formatted sih_src +POSTHOOK: Output: default@sih_src2_n1 +POSTHOOK: Lineage: sih_src2_n1.key SIMPLE [(sih_src_n1)sih_src_n1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: sih_src2_n1.value SIMPLE [(sih_src_n1)sih_src_n1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: desc formatted sih_src_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@sih_src +PREHOOK: Input: default@sih_src_n1 # col_name data_type comment key int value string @@ -98,7 +98,7 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: explain select count(1) from sih_src +PREHOOK: query: explain select count(1) from sih_src_n1 PREHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -110,12 +110,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(1) from sih_src +PREHOOK: query: select count(1) from sih_src_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@sih_src +PREHOOK: Input: default@sih_src_n1 #### A masked pattern was here #### 1500 -PREHOOK: query: explain select count(1) from sih_src tablesample(1 percent) +PREHOOK: query: explain select count(1) from sih_src_n1 tablesample(1 percent) PREHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -126,7 +126,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: sih_src + alias: sih_src_n1 Statistics: Num rows: 1500 Data size: 18124 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 
1500 Data size: 18124 Basic stats: COMPLETE Column stats: COMPLETE @@ -160,12 +160,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(1) from sih_src tablesample(1 percent) +PREHOOK: query: select count(1) from sih_src_n1 tablesample(1 percent) PREHOOK: type: QUERY -PREHOOK: Input: default@sih_src +PREHOOK: Input: default@sih_src_n1 #### A masked pattern was here #### 25 -PREHOOK: query: explain select count(1) from sih_src tablesample(10 rows) +PREHOOK: query: explain select count(1) from sih_src_n1 tablesample(10 rows) PREHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -176,7 +176,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: sih_src + alias: sih_src_n1 Row Limit Per Split: 10 Statistics: Num rows: 1500 Data size: 18124 Basic stats: COMPLETE Column stats: COMPLETE Select Operator @@ -211,8 +211,8 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(1) from sih_src tablesample(10 rows) +PREHOOK: query: select count(1) from sih_src_n1 tablesample(10 rows) PREHOOK: type: QUERY -PREHOOK: Input: default@sih_src +PREHOOK: Input: default@sih_src_n1 #### A masked pattern was here #### 650 diff --git a/ql/src/test/results/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q.out b/ql/src/test/results/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q.out index 530a622252..28656a4851 100644 --- a/ql/src/test/results/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q.out +++ b/ql/src/test/results/clientpositive/schema_evol_par_vec_table_non_dictionary_encoding.q.out @@ -1,522 +1,522 @@ -PREHOOK: query: drop table test_alter +PREHOOK: query: drop table test_alter_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table test_alter +POSTHOOK: query: drop table test_alter_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table test_alter2 +PREHOOK: query: drop table test_alter2_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table test_alter2 +POSTHOOK: query: drop table test_alter2_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table test_alter3 +PREHOOK: query: drop table test_alter3_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table test_alter3 +POSTHOOK: query: drop table test_alter3_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table test_alter (id string) stored as parquet +PREHOOK: query: create table test_alter_n0 (id string) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_alter -POSTHOOK: query: create table test_alter (id string) stored as parquet +PREHOOK: Output: default@test_alter_n0 +POSTHOOK: query: create table test_alter_n0 (id string) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_alter -PREHOOK: query: insert into test_alter values ('1'), ('2'), ('3') +POSTHOOK: Output: default@test_alter_n0 +PREHOOK: query: insert into test_alter_n0 values ('1'), ('2'), ('3') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test_alter -POSTHOOK: query: insert into test_alter values ('1'), ('2'), ('3') +PREHOOK: Output: default@test_alter_n0 +POSTHOOK: query: insert into test_alter_n0 values ('1'), ('2'), ('3') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test_alter -POSTHOOK: Lineage: test_alter.id SCRIPT [] -PREHOOK: query: select * from test_alter +POSTHOOK: Output: default@test_alter_n0 +POSTHOOK: Lineage: test_alter_n0.id SCRIPT [] 
+PREHOOK: query: select * from test_alter_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@test_alter +PREHOOK: Input: default@test_alter_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from test_alter +POSTHOOK: query: select * from test_alter_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_alter +POSTHOOK: Input: default@test_alter_n0 #### A masked pattern was here #### 1 2 3 -PREHOOK: query: alter table test_alter add columns (newCol string) +PREHOOK: query: alter table test_alter_n0 add columns (newCol string) PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@test_alter -PREHOOK: Output: default@test_alter -POSTHOOK: query: alter table test_alter add columns (newCol string) +PREHOOK: Input: default@test_alter_n0 +PREHOOK: Output: default@test_alter_n0 +POSTHOOK: query: alter table test_alter_n0 add columns (newCol string) POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@test_alter -POSTHOOK: Output: default@test_alter -PREHOOK: query: select * from test_alter +POSTHOOK: Input: default@test_alter_n0 +POSTHOOK: Output: default@test_alter_n0 +PREHOOK: query: select * from test_alter_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@test_alter +PREHOOK: Input: default@test_alter_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from test_alter +POSTHOOK: query: select * from test_alter_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_alter +POSTHOOK: Input: default@test_alter_n0 #### A masked pattern was here #### 1 NULL 2 NULL 3 NULL -PREHOOK: query: insert into test_alter values ('4', '100') +PREHOOK: query: insert into test_alter_n0 values ('4', '100') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test_alter -POSTHOOK: query: insert into test_alter values ('4', '100') +PREHOOK: Output: default@test_alter_n0 +POSTHOOK: query: insert into test_alter_n0 values ('4', '100') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test_alter -POSTHOOK: Lineage: test_alter.id SCRIPT [] -POSTHOOK: Lineage: test_alter.newcol SCRIPT [] -PREHOOK: query: select * from test_alter +POSTHOOK: Output: default@test_alter_n0 +POSTHOOK: Lineage: test_alter_n0.id SCRIPT [] +POSTHOOK: Lineage: test_alter_n0.newcol SCRIPT [] +PREHOOK: query: select * from test_alter_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@test_alter +PREHOOK: Input: default@test_alter_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from test_alter +POSTHOOK: query: select * from test_alter_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_alter +POSTHOOK: Input: default@test_alter_n0 #### A masked pattern was here #### 1 NULL 2 NULL 3 NULL 4 100 -PREHOOK: query: alter table test_alter replace columns (id string) +PREHOOK: query: alter table test_alter_n0 replace columns (id string) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@test_alter -PREHOOK: Output: default@test_alter -POSTHOOK: query: alter table test_alter replace columns (id string) +PREHOOK: Input: default@test_alter_n0 +PREHOOK: Output: default@test_alter_n0 +POSTHOOK: query: alter table test_alter_n0 replace columns (id string) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@test_alter -POSTHOOK: Output: default@test_alter -PREHOOK: query: select * from test_alter +POSTHOOK: Input: default@test_alter_n0 +POSTHOOK: Output: default@test_alter_n0 +PREHOOK: query: select * from test_alter_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@test_alter +PREHOOK: Input: 
default@test_alter_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter
+POSTHOOK: query: select * from test_alter_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter
+POSTHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
 1
 2
 3
 4
-PREHOOK: query: alter table test_alter replace columns (id string, id2 string)
+PREHOOK: query: alter table test_alter_n0 replace columns (id string, id2 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter
-PREHOOK: Output: default@test_alter
-POSTHOOK: query: alter table test_alter replace columns (id string, id2 string)
+PREHOOK: Input: default@test_alter_n0
+PREHOOK: Output: default@test_alter_n0
+POSTHOOK: query: alter table test_alter_n0 replace columns (id string, id2 string)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter
-POSTHOOK: Output: default@test_alter
-PREHOOK: query: select * from test_alter
+POSTHOOK: Input: default@test_alter_n0
+POSTHOOK: Output: default@test_alter_n0
+PREHOOK: query: select * from test_alter_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter
+PREHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter
+POSTHOOK: query: select * from test_alter_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter
+POSTHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
 1	NULL
 2	NULL
 3	NULL
 4	NULL
-PREHOOK: query: insert into test_alter values ('5', '100')
+PREHOOK: query: insert into test_alter_n0 values ('5', '100')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_alter
-POSTHOOK: query: insert into test_alter values ('5', '100')
+PREHOOK: Output: default@test_alter_n0
+POSTHOOK: query: insert into test_alter_n0 values ('5', '100')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_alter
-POSTHOOK: Lineage: test_alter.id SCRIPT []
-POSTHOOK: Lineage: test_alter.id2 SCRIPT []
-PREHOOK: query: select * from test_alter
+POSTHOOK: Output: default@test_alter_n0
+POSTHOOK: Lineage: test_alter_n0.id SCRIPT []
+POSTHOOK: Lineage: test_alter_n0.id2 SCRIPT []
+PREHOOK: query: select * from test_alter_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter
+PREHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter
+POSTHOOK: query: select * from test_alter_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter
+POSTHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
 1	NULL
 2	NULL
 3	NULL
 4	NULL
 5	100
-PREHOOK: query: alter table test_alter replace columns (id string, id2 string)
+PREHOOK: query: alter table test_alter_n0 replace columns (id string, id2 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter
-PREHOOK: Output: default@test_alter
-POSTHOOK: query: alter table test_alter replace columns (id string, id2 string)
+PREHOOK: Input: default@test_alter_n0
+PREHOOK: Output: default@test_alter_n0
+POSTHOOK: query: alter table test_alter_n0 replace columns (id string, id2 string)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter
-POSTHOOK: Output: default@test_alter
-PREHOOK: query: select * from test_alter
+POSTHOOK: Input: default@test_alter_n0
+POSTHOOK: Output: default@test_alter_n0
+PREHOOK: query: select * from test_alter_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter
+PREHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter
+POSTHOOK: query: select * from test_alter_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter
+POSTHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
 1	NULL
 2	NULL
 3	NULL
 4	NULL
 5	100
-PREHOOK: query: alter table test_alter replace columns (id char(10), id2 string)
+PREHOOK: query: alter table test_alter_n0 replace columns (id char(10), id2 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter
-PREHOOK: Output: default@test_alter
-POSTHOOK: query: alter table test_alter replace columns (id char(10), id2 string)
+PREHOOK: Input: default@test_alter_n0
+PREHOOK: Output: default@test_alter_n0
+POSTHOOK: query: alter table test_alter_n0 replace columns (id char(10), id2 string)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter
-POSTHOOK: Output: default@test_alter
-PREHOOK: query: select * from test_alter
+POSTHOOK: Input: default@test_alter_n0
+POSTHOOK: Output: default@test_alter_n0
+PREHOOK: query: select * from test_alter_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter
+PREHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter
+POSTHOOK: query: select * from test_alter_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter
+POSTHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
 1	NULL
 2	NULL
 3	NULL
 4	NULL
 5	100
-PREHOOK: query: alter table test_alter replace columns (id string, id2 string)
+PREHOOK: query: alter table test_alter_n0 replace columns (id string, id2 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter
-PREHOOK: Output: default@test_alter
-POSTHOOK: query: alter table test_alter replace columns (id string, id2 string)
+PREHOOK: Input: default@test_alter_n0
+PREHOOK: Output: default@test_alter_n0
+POSTHOOK: query: alter table test_alter_n0 replace columns (id string, id2 string)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter
-POSTHOOK: Output: default@test_alter
-PREHOOK: query: alter table test_alter replace columns (id varchar(10), id2 string)
+POSTHOOK: Input: default@test_alter_n0
+POSTHOOK: Output: default@test_alter_n0
+PREHOOK: query: alter table test_alter_n0 replace columns (id varchar(10), id2 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter
-PREHOOK: Output: default@test_alter
-POSTHOOK: query: alter table test_alter replace columns (id varchar(10), id2 string)
+PREHOOK: Input: default@test_alter_n0
+PREHOOK: Output: default@test_alter_n0
+POSTHOOK: query: alter table test_alter_n0 replace columns (id varchar(10), id2 string)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter
-POSTHOOK: Output: default@test_alter
-PREHOOK: query: select * from test_alter
+POSTHOOK: Input: default@test_alter_n0
+POSTHOOK: Output: default@test_alter_n0
+PREHOOK: query: select * from test_alter_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter
+PREHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter
+POSTHOOK: query: select * from test_alter_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter
+POSTHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
 1	NULL
 2	NULL
 3	NULL
 4	NULL
 5	100
-PREHOOK: query: alter table test_alter replace columns (id string, id2 string)
+PREHOOK: query: alter table test_alter_n0 replace columns (id string, id2 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter
-PREHOOK: Output: default@test_alter
-POSTHOOK: query: alter table test_alter replace columns (id string, id2 string)
+PREHOOK: Input: default@test_alter_n0
+PREHOOK: Output: default@test_alter_n0
+POSTHOOK: query: alter table test_alter_n0 replace columns (id string, id2 string)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter
-POSTHOOK: Output: default@test_alter
-PREHOOK: query: alter table test_alter replace columns (idv varchar(10), id2 string)
+POSTHOOK: Input: default@test_alter_n0
+POSTHOOK: Output: default@test_alter_n0
+PREHOOK: query: alter table test_alter_n0 replace columns (idv varchar(10), id2 string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter
-PREHOOK: Output: default@test_alter
-POSTHOOK: query: alter table test_alter replace columns (idv varchar(10), id2 string)
+PREHOOK: Input: default@test_alter_n0
+PREHOOK: Output: default@test_alter_n0
+POSTHOOK: query: alter table test_alter_n0 replace columns (idv varchar(10), id2 string)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter
-POSTHOOK: Output: default@test_alter
-PREHOOK: query: select * from test_alter
+POSTHOOK: Input: default@test_alter_n0
+POSTHOOK: Output: default@test_alter_n0
+PREHOOK: query: select * from test_alter_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter
+PREHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter
+POSTHOOK: query: select * from test_alter_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter
+POSTHOOK: Input: default@test_alter_n0
 #### A masked pattern was here ####
 NULL	100
 NULL	NULL
 NULL	NULL
 NULL	NULL
 NULL	NULL
-PREHOOK: query: create table test_alter2 (id int) stored as parquet
+PREHOOK: query: create table test_alter2_n0 (id int) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: create table test_alter2 (id int) stored as parquet
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: create table test_alter2_n0 (id int) stored as parquet
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: insert into test_alter2 values (1)
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: insert into test_alter2_n0 values (1)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: insert into test_alter2 values (1)
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: insert into test_alter2_n0 values (1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_alter2
-POSTHOOK: Lineage: test_alter2.id SCRIPT []
-PREHOOK: query: alter table test_alter2 replace columns (id bigint)
+POSTHOOK: Output: default@test_alter2_n0
+POSTHOOK: Lineage: test_alter2_n0.id SCRIPT []
+PREHOOK: query: alter table test_alter2_n0 replace columns (id bigint)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: alter table test_alter2 replace columns (id bigint)
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: alter table test_alter2_n0 replace columns (id bigint)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: select * from test_alter2
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: select * from test_alter2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter2
+PREHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter2
+POSTHOOK: query: select * from test_alter2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter2
+POSTHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
 1
-PREHOOK: query: drop table test_alter2
+PREHOOK: query: drop table test_alter2_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: drop table test_alter2
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: drop table test_alter2_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: create table test_alter2 (id float) stored as parquet
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: create table test_alter2_n0 (id float) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: create table test_alter2 (id float) stored as parquet
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: create table test_alter2_n0 (id float) stored as parquet
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: insert into test_alter2 values (1.5)
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: insert into test_alter2_n0 values (1.5)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: insert into test_alter2 values (1.5)
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: insert into test_alter2_n0 values (1.5)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_alter2
-POSTHOOK: Lineage: test_alter2.id SCRIPT []
-PREHOOK: query: alter table test_alter2 replace columns (id double)
+POSTHOOK: Output: default@test_alter2_n0
+POSTHOOK: Lineage: test_alter2_n0.id SCRIPT []
+PREHOOK: query: alter table test_alter2_n0 replace columns (id double)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: alter table test_alter2 replace columns (id double)
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: alter table test_alter2_n0 replace columns (id double)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: select * from test_alter2
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: select * from test_alter2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter2
+PREHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter2
+POSTHOOK: query: select * from test_alter2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter2
+POSTHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
 1.5
-PREHOOK: query: drop table test_alter2
+PREHOOK: query: drop table test_alter2_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: drop table test_alter2
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: drop table test_alter2_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: create table test_alter2 (ts timestamp) stored as parquet
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: create table test_alter2_n0 (ts timestamp) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: create table test_alter2 (ts timestamp) stored as parquet
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: create table test_alter2_n0 (ts timestamp) stored as parquet
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_alter2
-POSTHOOK: Lineage: test_alter2.ts SCRIPT []
-PREHOOK: query: select * from test_alter2
+POSTHOOK: Output: default@test_alter2_n0
+POSTHOOK: Lineage: test_alter2_n0.ts SCRIPT []
+PREHOOK: query: select * from test_alter2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter2
+PREHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter2
+POSTHOOK: query: select * from test_alter2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter2
+POSTHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
 2018-01-01 13:14:15.123456
 2018-01-02 14:15:16.123456
 2018-01-03 16:17:18.123456
-PREHOOK: query: alter table test_alter2 replace columns (ts string)
+PREHOOK: query: alter table test_alter2_n0 replace columns (ts string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: alter table test_alter2 replace columns (ts string)
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: alter table test_alter2_n0 replace columns (ts string)
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: select * from test_alter2
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: select * from test_alter2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter2
+PREHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter2
+POSTHOOK: query: select * from test_alter2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter2
+POSTHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
 2018-01-01 13:14:15.123456
 2018-01-02 14:15:16.123456
 2018-01-03 16:17:18.123456
-PREHOOK: query: drop table test_alter2
+PREHOOK: query: drop table test_alter2_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: drop table test_alter2
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: drop table test_alter2_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: create table test_alter2 (ts timestamp) stored as parquet
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: create table test_alter2_n0 (ts timestamp) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: create table test_alter2 (ts timestamp) stored as parquet
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: create table test_alter2_n0 (ts timestamp) stored as parquet
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_alter2
-POSTHOOK: Lineage: test_alter2.ts SCRIPT []
-PREHOOK: query: select * from test_alter2
+POSTHOOK: Output: default@test_alter2_n0
+POSTHOOK: Lineage: test_alter2_n0.ts SCRIPT []
+PREHOOK: query: select * from test_alter2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter2
+PREHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter2
+POSTHOOK: query: select * from test_alter2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter2
+POSTHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
 2018-01-01 13:14:15.123456
 2018-01-02 14:15:16.123456
 2018-01-03 16:17:18.123456
-PREHOOK: query: alter table test_alter2 replace columns (ts varchar(19))
+PREHOOK: query: alter table test_alter2_n0 replace columns (ts varchar(19))
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: alter table test_alter2 replace columns (ts varchar(19))
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: alter table test_alter2_n0 replace columns (ts varchar(19))
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: select * from test_alter2
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: select * from test_alter2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter2
+PREHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter2
+POSTHOOK: query: select * from test_alter2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter2
+POSTHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
 2018-01-01 13:14:15
 2018-01-02 14:15:16
 2018-01-03 16:17:18
-PREHOOK: query: drop table test_alter2
+PREHOOK: query: drop table test_alter2_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: drop table test_alter2
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: drop table test_alter2_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: create table test_alter2 (ts timestamp) stored as parquet
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: create table test_alter2_n0 (ts timestamp) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: create table test_alter2 (ts timestamp) stored as parquet
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: create table test_alter2_n0 (ts timestamp) stored as parquet
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: insert into test_alter2 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: insert into test_alter2_n0 values ('2018-01-01 13:14:15.123456'), ('2018-01-02 14:15:16.123456'), ('2018-01-03 16:17:18.123456')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_alter2
-POSTHOOK: Lineage: test_alter2.ts SCRIPT []
-PREHOOK: query: select * from test_alter2
+POSTHOOK: Output: default@test_alter2_n0
+POSTHOOK: Lineage: test_alter2_n0.ts SCRIPT []
+PREHOOK: query: select * from test_alter2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter2
+PREHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter2
+POSTHOOK: query: select * from test_alter2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter2
+POSTHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
 2018-01-01 13:14:15.123456
 2018-01-02 14:15:16.123456
 2018-01-03 16:17:18.123456
-PREHOOK: query: alter table test_alter2 replace columns (ts char(25))
+PREHOOK: query: alter table test_alter2_n0 replace columns (ts char(25))
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter2
-PREHOOK: Output: default@test_alter2
-POSTHOOK: query: alter table test_alter2 replace columns (ts char(25))
+PREHOOK: Input: default@test_alter2_n0
+PREHOOK: Output: default@test_alter2_n0
+POSTHOOK: query: alter table test_alter2_n0 replace columns (ts char(25))
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter2
-POSTHOOK: Output: default@test_alter2
-PREHOOK: query: select * from test_alter2
+POSTHOOK: Input: default@test_alter2_n0
+POSTHOOK: Output: default@test_alter2_n0
+PREHOOK: query: select * from test_alter2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter2
+PREHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from test_alter2
+POSTHOOK: query: select * from test_alter2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter2
+POSTHOOK: Input: default@test_alter2_n0
 #### A masked pattern was here ####
 2018-01-01 13:14:15.12345
 2018-01-02 14:15:16.12345
 2018-01-03 16:17:18.12345
-PREHOOK: query: create table test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet
+PREHOOK: query: create table test_alter3_n0 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_alter3
-POSTHOOK: query: create table test_alter3 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet
+PREHOOK: Output: default@test_alter3_n0
+POSTHOOK: query: create table test_alter3_n0 (id1 tinyint, id2 smallint, id3 int, id4 bigint) stored as parquet
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_alter3
-PREHOOK: query: insert into test_alter3 values (10, 20, 30, 40)
+POSTHOOK: Output: default@test_alter3_n0
+PREHOOK: query: insert into test_alter3_n0 values (10, 20, 30, 40)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_alter3
-POSTHOOK: query: insert into test_alter3 values (10, 20, 30, 40)
+PREHOOK: Output: default@test_alter3_n0
+POSTHOOK: query: insert into test_alter3_n0 values (10, 20, 30, 40)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_alter3
-POSTHOOK: Lineage: test_alter3.id1 SCRIPT []
-POSTHOOK: Lineage: test_alter3.id2 SCRIPT []
-POSTHOOK: Lineage: test_alter3.id3 SCRIPT []
-POSTHOOK: Lineage: test_alter3.id4 SCRIPT []
-PREHOOK: query: alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4))
+POSTHOOK: Output: default@test_alter3_n0
+POSTHOOK: Lineage: test_alter3_n0.id1 SCRIPT []
+POSTHOOK: Lineage: test_alter3_n0.id2 SCRIPT []
+POSTHOOK: Lineage: test_alter3_n0.id3 SCRIPT []
+POSTHOOK: Lineage: test_alter3_n0.id4 SCRIPT []
+PREHOOK: query: alter table test_alter3_n0 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4))
 PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@test_alter3
-PREHOOK: Output: default@test_alter3
-POSTHOOK: query: alter table test_alter3 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4))
+PREHOOK: Input: default@test_alter3_n0
+PREHOOK: Output: default@test_alter3_n0
+POSTHOOK: query: alter table test_alter3_n0 replace columns (id1 smallint, id2 int, id3 bigint, id4 decimal(10,4))
 POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@test_alter3
-POSTHOOK: Output: default@test_alter3
-PREHOOK: query: select id1, id2, id3 from test_alter3
+POSTHOOK: Input: default@test_alter3_n0
+POSTHOOK: Output: default@test_alter3_n0
+PREHOOK: query: select id1, id2, id3 from test_alter3_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_alter3
+PREHOOK: Input: default@test_alter3_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select id1, id2, id3 from test_alter3
+POSTHOOK: query: select id1, id2, id3 from test_alter3_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_alter3
+POSTHOOK: Input: default@test_alter3_n0
 #### A masked pattern was here ####
 10	20	30
diff --git a/ql/src/test/results/clientpositive/select_unquote_and.q.out b/ql/src/test/results/clientpositive/select_unquote_and.q.out
index 625c121e74..4049ade3c5 100644
--- a/ql/src/test/results/clientpositive/select_unquote_and.q.out
+++ b/ql/src/test/results/clientpositive/select_unquote_and.q.out
@@ -1,47 +1,47 @@
-PREHOOK: query: CREATE TABLE npe_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE npe_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@npe_test
-POSTHOOK: query: CREATE TABLE npe_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@npe_test_n0
+POSTHOOK: query: CREATE TABLE npe_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@npe_test
-PREHOOK: query: INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-11')
+POSTHOOK: Output: default@npe_test_n0
+PREHOOK: query: INSERT OVERWRITE TABLE npe_test_n0 PARTITION(ds='2012-12-11')
 SELECT src.key, src.value FROM src WHERE key < '200'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@npe_test@ds=2012-12-11
-POSTHOOK: query: INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-11')
+PREHOOK: Output: default@npe_test_n0@ds=2012-12-11
+POSTHOOK: query: INSERT OVERWRITE TABLE npe_test_n0 PARTITION(ds='2012-12-11')
 SELECT src.key, src.value FROM src WHERE key < '200'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@npe_test@ds=2012-12-11
-POSTHOOK: Lineage: npe_test PARTITION(ds=2012-12-11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: npe_test PARTITION(ds=2012-12-11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-12')
+POSTHOOK: Output: default@npe_test_n0@ds=2012-12-11
+POSTHOOK: Lineage: npe_test_n0 PARTITION(ds=2012-12-11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: npe_test_n0 PARTITION(ds=2012-12-11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE npe_test_n0 PARTITION(ds='2012-12-12')
 SELECT src.key, src.value FROM src WHERE key > '200'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@npe_test@ds=2012-12-12
-POSTHOOK: query: INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-12')
+PREHOOK: Output: default@npe_test_n0@ds=2012-12-12
+POSTHOOK: query: INSERT OVERWRITE TABLE npe_test_n0 PARTITION(ds='2012-12-12')
 SELECT src.key, src.value FROM src WHERE key > '200'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@npe_test@ds=2012-12-12
-POSTHOOK: Lineage: npe_test PARTITION(ds=2012-12-12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: npe_test PARTITION(ds=2012-12-12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT count(*) FROM npe_test
+POSTHOOK: Output: default@npe_test_n0@ds=2012-12-12
+POSTHOOK: Lineage: npe_test_n0 PARTITION(ds=2012-12-12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: npe_test_n0 PARTITION(ds=2012-12-12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT count(*) FROM npe_test_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@npe_test
+PREHOOK: Input: default@npe_test_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(*) FROM npe_test
+POSTHOOK: query: SELECT count(*) FROM npe_test_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@npe_test
+POSTHOOK: Input: default@npe_test_n0
 #### A masked pattern was here ####
 498
-PREHOOK: query: EXPLAIN SELECT * FROM npe_test WHERE ds > 2012-11-31 AND ds < 2012-12-15
+PREHOOK: query: EXPLAIN SELECT * FROM npe_test_n0 WHERE ds > 2012-11-31 AND ds < 2012-12-15
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM npe_test WHERE ds > 2012-11-31 AND ds < 2012-12-15
+POSTHOOK: query: EXPLAIN SELECT * FROM npe_test_n0 WHERE ds > 2012-11-31 AND ds < 2012-12-15
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -52,7 +52,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: npe_test
+            alias: npe_test_n0
             Statistics: Num rows: 498 Data size: 5290 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
              predicate: ((UDFToDouble(ds) < 1985.0D) and (UDFToDouble(ds) > 1970.0D)) (type: boolean)
@@ -76,24 +76,24 @@ STAGE PLANS:
       Processor Tree:
        ListSink
-PREHOOK: query: SELECT count(*) FROM npe_test WHERE ds > 2012-11-31 AND ds < 2012-12-15
+PREHOOK: query: SELECT count(*) FROM npe_test_n0 WHERE ds > 2012-11-31 AND ds < 2012-12-15
 PREHOOK: type: QUERY
-PREHOOK: Input: default@npe_test
-PREHOOK: Input: default@npe_test@ds=2012-12-11
-PREHOOK: Input: default@npe_test@ds=2012-12-12
+PREHOOK: Input: default@npe_test_n0
+PREHOOK: Input: default@npe_test_n0@ds=2012-12-11
+PREHOOK: Input: default@npe_test_n0@ds=2012-12-12
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(*) FROM npe_test WHERE ds > 2012-11-31 AND ds < 2012-12-15
+POSTHOOK: query: SELECT count(*) FROM npe_test_n0 WHERE ds > 2012-11-31 AND ds < 2012-12-15
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@npe_test
-POSTHOOK: Input: default@npe_test@ds=2012-12-11
-POSTHOOK: Input: default@npe_test@ds=2012-12-12
+POSTHOOK: Input: default@npe_test_n0
+POSTHOOK: Input: default@npe_test_n0@ds=2012-12-11
+POSTHOOK: Input: default@npe_test_n0@ds=2012-12-12
 #### A masked pattern was here ####
 0
-PREHOOK: query: DROP TABLE npe_test
+PREHOOK: query: DROP TABLE npe_test_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@npe_test
-PREHOOK: Output: default@npe_test
-POSTHOOK: query: DROP TABLE npe_test
+PREHOOK: Input: default@npe_test_n0
+PREHOOK: Output: default@npe_test_n0
+POSTHOOK: query: DROP TABLE npe_test_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@npe_test
-POSTHOOK: Output: default@npe_test
+POSTHOOK: Input: default@npe_test_n0
+POSTHOOK: Output: default@npe_test_n0
diff --git a/ql/src/test/results/clientpositive/select_unquote_not.q.out b/ql/src/test/results/clientpositive/select_unquote_not.q.out
index 8dad00798c..28e7d8ad44 100644
--- a/ql/src/test/results/clientpositive/select_unquote_not.q.out
+++ b/ql/src/test/results/clientpositive/select_unquote_not.q.out
@@ -1,47 +1,47 @@
-PREHOOK: query: CREATE TABLE npe_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE npe_test_n1 (key STRING, value STRING) PARTITIONED BY (ds STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@npe_test
-POSTHOOK: query: CREATE TABLE npe_test (key STRING, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@npe_test_n1
+POSTHOOK: query: CREATE TABLE npe_test_n1 (key STRING, value STRING) PARTITIONED BY (ds STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@npe_test
-PREHOOK: query: INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-11')
+POSTHOOK: Output: default@npe_test_n1
+PREHOOK: query: INSERT OVERWRITE TABLE npe_test_n1 PARTITION(ds='2012-12-11')
 SELECT src.key, src.value FROM src WHERE key < '200'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@npe_test@ds=2012-12-11
-POSTHOOK: query: INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-11')
+PREHOOK: Output: default@npe_test_n1@ds=2012-12-11
+POSTHOOK: query: INSERT OVERWRITE TABLE npe_test_n1 PARTITION(ds='2012-12-11')
 SELECT src.key, src.value FROM src WHERE key < '200'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@npe_test@ds=2012-12-11
-POSTHOOK: Lineage: npe_test PARTITION(ds=2012-12-11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: npe_test PARTITION(ds=2012-12-11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-12')
+POSTHOOK: Output: default@npe_test_n1@ds=2012-12-11
+POSTHOOK: Lineage: npe_test_n1 PARTITION(ds=2012-12-11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: npe_test_n1 PARTITION(ds=2012-12-11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE npe_test_n1 PARTITION(ds='2012-12-12')
 SELECT src.key, src.value FROM src WHERE key > '200'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@npe_test@ds=2012-12-12
-POSTHOOK: query: INSERT OVERWRITE TABLE npe_test PARTITION(ds='2012-12-12')
+PREHOOK: Output: default@npe_test_n1@ds=2012-12-12
+POSTHOOK: query: INSERT OVERWRITE TABLE npe_test_n1 PARTITION(ds='2012-12-12')
 SELECT src.key, src.value FROM src WHERE key > '200'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@npe_test@ds=2012-12-12
-POSTHOOK: Lineage: npe_test PARTITION(ds=2012-12-12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: npe_test PARTITION(ds=2012-12-12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT count(*) FROM npe_test
+POSTHOOK: Output: default@npe_test_n1@ds=2012-12-12
+POSTHOOK: Lineage: npe_test_n1 PARTITION(ds=2012-12-12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: npe_test_n1 PARTITION(ds=2012-12-12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT count(*) FROM npe_test_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@npe_test
+PREHOOK: Input: default@npe_test_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(*) FROM npe_test
+POSTHOOK: query: SELECT count(*) FROM npe_test_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@npe_test
+POSTHOOK: Input: default@npe_test_n1
 #### A masked pattern was here ####
 498
-PREHOOK: query: EXPLAIN SELECT * FROM npe_test WHERE NOT ds < 2012-11-31
+PREHOOK: query: EXPLAIN SELECT * FROM npe_test_n1 WHERE NOT ds < 2012-11-31
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
SELECT * FROM npe_test WHERE NOT ds < 2012-11-31 +POSTHOOK: query: EXPLAIN SELECT * FROM npe_test_n1 WHERE NOT ds < 2012-11-31 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -52,7 +52,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: npe_test + alias: npe_test_n1 Statistics: Num rows: 498 Data size: 5290 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (UDFToDouble(ds) >= 1970.0D) (type: boolean) @@ -76,24 +76,24 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT count(*) FROM npe_test WHERE NOT ds < 2012-11-31 +PREHOOK: query: SELECT count(*) FROM npe_test_n1 WHERE NOT ds < 2012-11-31 PREHOOK: type: QUERY -PREHOOK: Input: default@npe_test -PREHOOK: Input: default@npe_test@ds=2012-12-11 -PREHOOK: Input: default@npe_test@ds=2012-12-12 +PREHOOK: Input: default@npe_test_n1 +PREHOOK: Input: default@npe_test_n1@ds=2012-12-11 +PREHOOK: Input: default@npe_test_n1@ds=2012-12-12 #### A masked pattern was here #### -POSTHOOK: query: SELECT count(*) FROM npe_test WHERE NOT ds < 2012-11-31 +POSTHOOK: query: SELECT count(*) FROM npe_test_n1 WHERE NOT ds < 2012-11-31 POSTHOOK: type: QUERY -POSTHOOK: Input: default@npe_test -POSTHOOK: Input: default@npe_test@ds=2012-12-11 -POSTHOOK: Input: default@npe_test@ds=2012-12-12 +POSTHOOK: Input: default@npe_test_n1 +POSTHOOK: Input: default@npe_test_n1@ds=2012-12-11 +POSTHOOK: Input: default@npe_test_n1@ds=2012-12-12 #### A masked pattern was here #### 0 -PREHOOK: query: DROP TABLE npe_test +PREHOOK: query: DROP TABLE npe_test_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@npe_test -PREHOOK: Output: default@npe_test -POSTHOOK: query: DROP TABLE npe_test +PREHOOK: Input: default@npe_test_n1 +PREHOOK: Output: default@npe_test_n1 +POSTHOOK: query: DROP TABLE npe_test_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@npe_test -POSTHOOK: Output: default@npe_test +POSTHOOK: Input: default@npe_test_n1 +POSTHOOK: Output: default@npe_test_n1 diff --git a/ql/src/test/results/clientpositive/semijoin2.q.out b/ql/src/test/results/clientpositive/semijoin2.q.out index d62af3eb74..0e659932a0 100644 --- a/ql/src/test/results/clientpositive/semijoin2.q.out +++ b/ql/src/test/results/clientpositive/semijoin2.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: CREATE TABLE table_1 (boolean_col_1 BOOLEAN, float_col_2 FLOAT, bigint_col_3 BIGINT, varchar0111_col_4 VARCHAR(111), bigint_col_5 BIGINT, float_col_6 FLOAT, boolean_col_7 BOOLEAN, decimal0101_col_8 DECIMAL(1, 1), decimal0904_col_9 DECIMAL(9, 4), char0112_col_10 CHAR(112), double_col_11 DOUBLE, boolean_col_12 BOOLEAN, double_col_13 DOUBLE, varchar0142_col_14 VARCHAR(142), timestamp_col_15 TIMESTAMP, decimal0502_col_16 DECIMAL(5, 2), smallint_col_25 SMALLINT, decimal3222_col_18 DECIMAL(32, 22), boolean_col_19 BOOLEAN, decimal2012_col_20 DECIMAL(20, 12), char0204_col_21 CHAR(204), double_col_61 DOUBLE, timestamp_col_23 TIMESTAMP, int_col_24 INT, float_col_25 FLOAT, smallint_col_26 SMALLINT, double_col_27 DOUBLE, char0180_col_28 CHAR(180), decimal1503_col_29 DECIMAL(15, 3), timestamp_col_30 TIMESTAMP, smallint_col_31 SMALLINT, decimal2020_col_32 DECIMAL(20, 20), timestamp_col_33 TIMESTAMP, boolean_col_34 BOOLEAN, decimal3025_col_35 DECIMAL(30, 25), decimal3117_col_36 DECIMAL(31, 17), timestamp_col_37 TIMESTAMP, varchar0146_col_38 VARCHAR(146), boolean_col_39 BOOLEAN, double_col_40 DOUBLE, float_col_41 FLOAT, timestamp_col_42 TIMESTAMP, double_col_43 DOUBLE, boolean_col_44 BOOLEAN, timestamp_col_45 TIMESTAMP, tinyint_col_8 TINYINT, int_col_47 INT, 
decimal0401_col_48 DECIMAL(4, 1), varchar0064_col_49 VARCHAR(64), string_col_50 STRING, double_col_51 DOUBLE, string_col_52 STRING, boolean_col_53 BOOLEAN, int_col_54 INT, boolean_col_55 BOOLEAN, string_col_56 STRING, double_col_57 DOUBLE, varchar0131_col_58 VARCHAR(131), boolean_col_59 BOOLEAN, bigint_col_22 BIGINT, char0184_col_61 CHAR(184), varchar0173_col_62 VARCHAR(173), timestamp_col_63 TIMESTAMP, decimal1709_col_26 DECIMAL(20, 5), timestamp_col_65 TIMESTAMP, timestamp_col_66 TIMESTAMP, timestamp_col_67 TIMESTAMP, boolean_col_68 BOOLEAN, decimal1208_col_20 DECIMAL(33, 11), decimal1605_col_70 DECIMAL(16, 5), varchar0010_col_71 VARCHAR(10), tinyint_col_72 TINYINT, timestamp_col_10 TIMESTAMP, decimal2714_col_74 DECIMAL(27, 14), double_col_75 DOUBLE, boolean_col_76 BOOLEAN, double_col_77 DOUBLE, string_col_78 STRING, boolean_col_79 BOOLEAN, boolean_col_80 BOOLEAN, decimal0803_col_81 DECIMAL(8, 3), decimal1303_col_82 DECIMAL(13, 3), tinyint_col_83 TINYINT, decimal3424_col_84 DECIMAL(34, 24), float_col_85 FLOAT, boolean_col_86 BOOLEAN, char0233_col_87 CHAR(233)) +PREHOOK: query: CREATE TABLE table_1_n0 (boolean_col_1 BOOLEAN, float_col_2 FLOAT, bigint_col_3 BIGINT, varchar0111_col_4 VARCHAR(111), bigint_col_5 BIGINT, float_col_6 FLOAT, boolean_col_7 BOOLEAN, decimal0101_col_8 DECIMAL(1, 1), decimal0904_col_9 DECIMAL(9, 4), char0112_col_10 CHAR(112), double_col_11 DOUBLE, boolean_col_12 BOOLEAN, double_col_13 DOUBLE, varchar0142_col_14 VARCHAR(142), timestamp_col_15 TIMESTAMP, decimal0502_col_16 DECIMAL(5, 2), smallint_col_25 SMALLINT, decimal3222_col_18 DECIMAL(32, 22), boolean_col_19 BOOLEAN, decimal2012_col_20 DECIMAL(20, 12), char0204_col_21 CHAR(204), double_col_61 DOUBLE, timestamp_col_23 TIMESTAMP, int_col_24 INT, float_col_25 FLOAT, smallint_col_26 SMALLINT, double_col_27 DOUBLE, char0180_col_28 CHAR(180), decimal1503_col_29 DECIMAL(15, 3), timestamp_col_30 TIMESTAMP, smallint_col_31 SMALLINT, decimal2020_col_32 DECIMAL(20, 20), timestamp_col_33 TIMESTAMP, boolean_col_34 BOOLEAN, decimal3025_col_35 DECIMAL(30, 25), decimal3117_col_36 DECIMAL(31, 17), timestamp_col_37 TIMESTAMP, varchar0146_col_38 VARCHAR(146), boolean_col_39 BOOLEAN, double_col_40 DOUBLE, float_col_41 FLOAT, timestamp_col_42 TIMESTAMP, double_col_43 DOUBLE, boolean_col_44 BOOLEAN, timestamp_col_45 TIMESTAMP, tinyint_col_8 TINYINT, int_col_47 INT, decimal0401_col_48 DECIMAL(4, 1), varchar0064_col_49 VARCHAR(64), string_col_50 STRING, double_col_51 DOUBLE, string_col_52 STRING, boolean_col_53 BOOLEAN, int_col_54 INT, boolean_col_55 BOOLEAN, string_col_56 STRING, double_col_57 DOUBLE, varchar0131_col_58 VARCHAR(131), boolean_col_59 BOOLEAN, bigint_col_22 BIGINT, char0184_col_61 CHAR(184), varchar0173_col_62 VARCHAR(173), timestamp_col_63 TIMESTAMP, decimal1709_col_26 DECIMAL(20, 5), timestamp_col_65 TIMESTAMP, timestamp_col_66 TIMESTAMP, timestamp_col_67 TIMESTAMP, boolean_col_68 BOOLEAN, decimal1208_col_20 DECIMAL(33, 11), decimal1605_col_70 DECIMAL(16, 5), varchar0010_col_71 VARCHAR(10), tinyint_col_72 TINYINT, timestamp_col_10 TIMESTAMP, decimal2714_col_74 DECIMAL(27, 14), double_col_75 DOUBLE, boolean_col_76 BOOLEAN, double_col_77 DOUBLE, string_col_78 STRING, boolean_col_79 BOOLEAN, boolean_col_80 BOOLEAN, decimal0803_col_81 DECIMAL(8, 3), decimal1303_col_82 DECIMAL(13, 3), tinyint_col_83 TINYINT, decimal3424_col_84 DECIMAL(34, 24), float_col_85 FLOAT, boolean_col_86 BOOLEAN, char0233_col_87 CHAR(233)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table_1 -POSTHOOK: query: 
CREATE TABLE table_1 (boolean_col_1 BOOLEAN, float_col_2 FLOAT, bigint_col_3 BIGINT, varchar0111_col_4 VARCHAR(111), bigint_col_5 BIGINT, float_col_6 FLOAT, boolean_col_7 BOOLEAN, decimal0101_col_8 DECIMAL(1, 1), decimal0904_col_9 DECIMAL(9, 4), char0112_col_10 CHAR(112), double_col_11 DOUBLE, boolean_col_12 BOOLEAN, double_col_13 DOUBLE, varchar0142_col_14 VARCHAR(142), timestamp_col_15 TIMESTAMP, decimal0502_col_16 DECIMAL(5, 2), smallint_col_25 SMALLINT, decimal3222_col_18 DECIMAL(32, 22), boolean_col_19 BOOLEAN, decimal2012_col_20 DECIMAL(20, 12), char0204_col_21 CHAR(204), double_col_61 DOUBLE, timestamp_col_23 TIMESTAMP, int_col_24 INT, float_col_25 FLOAT, smallint_col_26 SMALLINT, double_col_27 DOUBLE, char0180_col_28 CHAR(180), decimal1503_col_29 DECIMAL(15, 3), timestamp_col_30 TIMESTAMP, smallint_col_31 SMALLINT, decimal2020_col_32 DECIMAL(20, 20), timestamp_col_33 TIMESTAMP, boolean_col_34 BOOLEAN, decimal3025_col_35 DECIMAL(30, 25), decimal3117_col_36 DECIMAL(31, 17), timestamp_col_37 TIMESTAMP, varchar0146_col_38 VARCHAR(146), boolean_col_39 BOOLEAN, double_col_40 DOUBLE, float_col_41 FLOAT, timestamp_col_42 TIMESTAMP, double_col_43 DOUBLE, boolean_col_44 BOOLEAN, timestamp_col_45 TIMESTAMP, tinyint_col_8 TINYINT, int_col_47 INT, decimal0401_col_48 DECIMAL(4, 1), varchar0064_col_49 VARCHAR(64), string_col_50 STRING, double_col_51 DOUBLE, string_col_52 STRING, boolean_col_53 BOOLEAN, int_col_54 INT, boolean_col_55 BOOLEAN, string_col_56 STRING, double_col_57 DOUBLE, varchar0131_col_58 VARCHAR(131), boolean_col_59 BOOLEAN, bigint_col_22 BIGINT, char0184_col_61 CHAR(184), varchar0173_col_62 VARCHAR(173), timestamp_col_63 TIMESTAMP, decimal1709_col_26 DECIMAL(20, 5), timestamp_col_65 TIMESTAMP, timestamp_col_66 TIMESTAMP, timestamp_col_67 TIMESTAMP, boolean_col_68 BOOLEAN, decimal1208_col_20 DECIMAL(33, 11), decimal1605_col_70 DECIMAL(16, 5), varchar0010_col_71 VARCHAR(10), tinyint_col_72 TINYINT, timestamp_col_10 TIMESTAMP, decimal2714_col_74 DECIMAL(27, 14), double_col_75 DOUBLE, boolean_col_76 BOOLEAN, double_col_77 DOUBLE, string_col_78 STRING, boolean_col_79 BOOLEAN, boolean_col_80 BOOLEAN, decimal0803_col_81 DECIMAL(8, 3), decimal1303_col_82 DECIMAL(13, 3), tinyint_col_83 TINYINT, decimal3424_col_84 DECIMAL(34, 24), float_col_85 FLOAT, boolean_col_86 BOOLEAN, char0233_col_87 CHAR(233)) +PREHOOK: Output: default@table_1_n0 +POSTHOOK: query: CREATE TABLE table_1_n0 (boolean_col_1 BOOLEAN, float_col_2 FLOAT, bigint_col_3 BIGINT, varchar0111_col_4 VARCHAR(111), bigint_col_5 BIGINT, float_col_6 FLOAT, boolean_col_7 BOOLEAN, decimal0101_col_8 DECIMAL(1, 1), decimal0904_col_9 DECIMAL(9, 4), char0112_col_10 CHAR(112), double_col_11 DOUBLE, boolean_col_12 BOOLEAN, double_col_13 DOUBLE, varchar0142_col_14 VARCHAR(142), timestamp_col_15 TIMESTAMP, decimal0502_col_16 DECIMAL(5, 2), smallint_col_25 SMALLINT, decimal3222_col_18 DECIMAL(32, 22), boolean_col_19 BOOLEAN, decimal2012_col_20 DECIMAL(20, 12), char0204_col_21 CHAR(204), double_col_61 DOUBLE, timestamp_col_23 TIMESTAMP, int_col_24 INT, float_col_25 FLOAT, smallint_col_26 SMALLINT, double_col_27 DOUBLE, char0180_col_28 CHAR(180), decimal1503_col_29 DECIMAL(15, 3), timestamp_col_30 TIMESTAMP, smallint_col_31 SMALLINT, decimal2020_col_32 DECIMAL(20, 20), timestamp_col_33 TIMESTAMP, boolean_col_34 BOOLEAN, decimal3025_col_35 DECIMAL(30, 25), decimal3117_col_36 DECIMAL(31, 17), timestamp_col_37 TIMESTAMP, varchar0146_col_38 VARCHAR(146), boolean_col_39 BOOLEAN, double_col_40 DOUBLE, float_col_41 FLOAT, timestamp_col_42 TIMESTAMP, 
double_col_43 DOUBLE, boolean_col_44 BOOLEAN, timestamp_col_45 TIMESTAMP, tinyint_col_8 TINYINT, int_col_47 INT, decimal0401_col_48 DECIMAL(4, 1), varchar0064_col_49 VARCHAR(64), string_col_50 STRING, double_col_51 DOUBLE, string_col_52 STRING, boolean_col_53 BOOLEAN, int_col_54 INT, boolean_col_55 BOOLEAN, string_col_56 STRING, double_col_57 DOUBLE, varchar0131_col_58 VARCHAR(131), boolean_col_59 BOOLEAN, bigint_col_22 BIGINT, char0184_col_61 CHAR(184), varchar0173_col_62 VARCHAR(173), timestamp_col_63 TIMESTAMP, decimal1709_col_26 DECIMAL(20, 5), timestamp_col_65 TIMESTAMP, timestamp_col_66 TIMESTAMP, timestamp_col_67 TIMESTAMP, boolean_col_68 BOOLEAN, decimal1208_col_20 DECIMAL(33, 11), decimal1605_col_70 DECIMAL(16, 5), varchar0010_col_71 VARCHAR(10), tinyint_col_72 TINYINT, timestamp_col_10 TIMESTAMP, decimal2714_col_74 DECIMAL(27, 14), double_col_75 DOUBLE, boolean_col_76 BOOLEAN, double_col_77 DOUBLE, string_col_78 STRING, boolean_col_79 BOOLEAN, boolean_col_80 BOOLEAN, decimal0803_col_81 DECIMAL(8, 3), decimal1303_col_82 DECIMAL(13, 3), tinyint_col_83 TINYINT, decimal3424_col_84 DECIMAL(34, 24), float_col_85 FLOAT, boolean_col_86 BOOLEAN, char0233_col_87 CHAR(233)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_1 -PREHOOK: query: CREATE TABLE table_18 (timestamp_col_1 TIMESTAMP, double_col_2 DOUBLE, boolean_col_3 BOOLEAN, timestamp_col_4 TIMESTAMP, decimal2103_col_5 DECIMAL(21, 3), char0221_col_6 CHAR(221), tinyint_col_7 TINYINT, float_col_8 FLOAT, int_col_2 INT, timestamp_col_10 TIMESTAMP, char0228_col_11 CHAR(228), timestamp_col_12 TIMESTAMP, double_col_13 DOUBLE, tinyint_col_6 TINYINT, tinyint_col_33 TINYINT, smallint_col_38 SMALLINT, boolean_col_17 BOOLEAN, double_col_18 DOUBLE, boolean_col_19 BOOLEAN, bigint_col_20 BIGINT, decimal0504_col_37 DECIMAL(37, 34), boolean_col_22 BOOLEAN, double_col_23 DOUBLE, timestamp_col_24 TIMESTAMP, varchar0076_col_25 VARCHAR(76), timestamp_col_18 TIMESTAMP, boolean_col_27 BOOLEAN, decimal1611_col_22 DECIMAL(37, 5), boolean_col_29 BOOLEAN) +POSTHOOK: Output: default@table_1_n0 +PREHOOK: query: CREATE TABLE table_18_n0 (timestamp_col_1 TIMESTAMP, double_col_2 DOUBLE, boolean_col_3 BOOLEAN, timestamp_col_4 TIMESTAMP, decimal2103_col_5 DECIMAL(21, 3), char0221_col_6 CHAR(221), tinyint_col_7 TINYINT, float_col_8 FLOAT, int_col_2 INT, timestamp_col_10 TIMESTAMP, char0228_col_11 CHAR(228), timestamp_col_12 TIMESTAMP, double_col_13 DOUBLE, tinyint_col_6 TINYINT, tinyint_col_33 TINYINT, smallint_col_38 SMALLINT, boolean_col_17 BOOLEAN, double_col_18 DOUBLE, boolean_col_19 BOOLEAN, bigint_col_20 BIGINT, decimal0504_col_37 DECIMAL(37, 34), boolean_col_22 BOOLEAN, double_col_23 DOUBLE, timestamp_col_24 TIMESTAMP, varchar0076_col_25 VARCHAR(76), timestamp_col_18 TIMESTAMP, boolean_col_27 BOOLEAN, decimal1611_col_22 DECIMAL(37, 5), boolean_col_29 BOOLEAN) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table_18 -POSTHOOK: query: CREATE TABLE table_18 (timestamp_col_1 TIMESTAMP, double_col_2 DOUBLE, boolean_col_3 BOOLEAN, timestamp_col_4 TIMESTAMP, decimal2103_col_5 DECIMAL(21, 3), char0221_col_6 CHAR(221), tinyint_col_7 TINYINT, float_col_8 FLOAT, int_col_2 INT, timestamp_col_10 TIMESTAMP, char0228_col_11 CHAR(228), timestamp_col_12 TIMESTAMP, double_col_13 DOUBLE, tinyint_col_6 TINYINT, tinyint_col_33 TINYINT, smallint_col_38 SMALLINT, boolean_col_17 BOOLEAN, double_col_18 DOUBLE, boolean_col_19 BOOLEAN, bigint_col_20 BIGINT, decimal0504_col_37 DECIMAL(37, 34), 
boolean_col_22 BOOLEAN, double_col_23 DOUBLE, timestamp_col_24 TIMESTAMP, varchar0076_col_25 VARCHAR(76), timestamp_col_18 TIMESTAMP, boolean_col_27 BOOLEAN, decimal1611_col_22 DECIMAL(37, 5), boolean_col_29 BOOLEAN) +PREHOOK: Output: default@table_18_n0 +POSTHOOK: query: CREATE TABLE table_18_n0 (timestamp_col_1 TIMESTAMP, double_col_2 DOUBLE, boolean_col_3 BOOLEAN, timestamp_col_4 TIMESTAMP, decimal2103_col_5 DECIMAL(21, 3), char0221_col_6 CHAR(221), tinyint_col_7 TINYINT, float_col_8 FLOAT, int_col_2 INT, timestamp_col_10 TIMESTAMP, char0228_col_11 CHAR(228), timestamp_col_12 TIMESTAMP, double_col_13 DOUBLE, tinyint_col_6 TINYINT, tinyint_col_33 TINYINT, smallint_col_38 SMALLINT, boolean_col_17 BOOLEAN, double_col_18 DOUBLE, boolean_col_19 BOOLEAN, bigint_col_20 BIGINT, decimal0504_col_37 DECIMAL(37, 34), boolean_col_22 BOOLEAN, double_col_23 DOUBLE, timestamp_col_24 TIMESTAMP, varchar0076_col_25 VARCHAR(76), timestamp_col_18 TIMESTAMP, boolean_col_27 BOOLEAN, decimal1611_col_22 DECIMAL(37, 5), boolean_col_29 BOOLEAN) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_18 +POSTHOOK: Output: default@table_18_n0 PREHOOK: query: explain SELECT COALESCE(498, LEAD(COALESCE(-973, -684, 515)) OVER (PARTITION BY (t2.int_col_2 + t1.smallint_col_25) ORDER BY (t2.int_col_2 + t1.smallint_col_25), FLOOR(t1.double_col_61) DESC), 524) AS int_col, (t2.int_col_2) + (t1.smallint_col_25) AS int_col_1, FLOOR(t1.double_col_61) AS float_col, COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_2 + t1.smallint_col_25) ORDER BY (t2.int_col_2 + t1.smallint_col_25) DESC, FLOOR(t1.double_col_61) DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2 -FROM table_1 t1 -INNER JOIN table_18 t2 ON (((t2.tinyint_col_6) = (t1.bigint_col_22)) AND ((t2.decimal0504_col_37) = (t1.decimal1709_col_26))) AND ((t2.tinyint_col_33) = (t1.tinyint_col_8)) +FROM table_1_n0 t1 +INNER JOIN table_18_n0 t2 ON (((t2.tinyint_col_6) = (t1.bigint_col_22)) AND ((t2.decimal0504_col_37) = (t1.decimal1709_col_26))) AND ((t2.tinyint_col_33) = (t1.tinyint_col_8)) WHERE (t2.smallint_col_38) IN (SELECT COALESCE(-92, -994) AS int_col -FROM table_1 tt1 -INNER JOIN table_18 tt2 ON (tt2.decimal1611_col_22) = (tt1.decimal1208_col_20) +FROM table_1_n0 tt1 +INNER JOIN table_18_n0 tt2 ON (tt2.decimal1611_col_22) = (tt1.decimal1208_col_20) WHERE (t1.timestamp_col_10) = (tt2.timestamp_col_18)) PREHOOK: type: QUERY @@ -36,13 +36,13 @@ COALESCE(498, LEAD(COALESCE(-973, -684, 515)) OVER (PARTITION BY (t2.int_col_2 + (t2.int_col_2) + (t1.smallint_col_25) AS int_col_1, FLOOR(t1.double_col_61) AS float_col, COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_2 + t1.smallint_col_25) ORDER BY (t2.int_col_2 + t1.smallint_col_25) DESC, FLOOR(t1.double_col_61) DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2 -FROM table_1 t1 -INNER JOIN table_18 t2 ON (((t2.tinyint_col_6) = (t1.bigint_col_22)) AND ((t2.decimal0504_col_37) = (t1.decimal1709_col_26))) AND ((t2.tinyint_col_33) = (t1.tinyint_col_8)) +FROM table_1_n0 t1 +INNER JOIN table_18_n0 t2 ON (((t2.tinyint_col_6) = (t1.bigint_col_22)) AND ((t2.decimal0504_col_37) = (t1.decimal1709_col_26))) AND ((t2.tinyint_col_33) = (t1.tinyint_col_8)) WHERE (t2.smallint_col_38) IN (SELECT COALESCE(-92, -994) AS int_col -FROM table_1 tt1 -INNER JOIN table_18 tt2 ON (tt2.decimal1611_col_22) = (tt1.decimal1208_col_20) +FROM table_1_n0 tt1 +INNER JOIN table_18_n0 tt2 ON (tt2.decimal1611_col_22) = 
(tt1.decimal1208_col_20)
 WHERE (t1.timestamp_col_10) = (tt2.timestamp_col_18))
 POSTHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/semijoin3.q.out b/ql/src/test/results/clientpositive/semijoin3.q.out
index 5b346fba0f..cbe5f02aab 100644
--- a/ql/src/test/results/clientpositive/semijoin3.q.out
+++ b/ql/src/test/results/clientpositive/semijoin3.q.out
@@ -1,49 +1,49 @@
-PREHOOK: query: create table t1 as select cast(key as int) key, value from src
+PREHOOK: query: create table t1_n21 as select cast(key as int) key, value from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 as select cast(key as int) key, value from src
+PREHOOK: Output: default@t1_n21
+POSTHOOK: query: create table t1_n21 as select cast(key as int) key, value from src
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table t2 as select cast(key as int) key, value from src
+POSTHOOK: Output: default@t1_n21
+POSTHOOK: Lineage: t1_n21.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n21.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table t2_n12 as select cast(key as int) key, value from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2 as select cast(key as int) key, value from src
+PREHOOK: Output: default@t2_n12
+POSTHOOK: query: create table t2_n12 as select cast(key as int) key, value from src
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@t2_n12
+POSTHOOK: Lineage: t2_n12.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2_n12.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain select count(1) from
 (select key
-  from t1
-  where key = 0) t1
+  from t1_n21
+  where key = 0) t1_n21
 left semi join
 (select key
-  from t2
-  where key = 0) t2
+  from t2_n12
+  where key = 0) t2_n12
 on 1 = 1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select count(1) from
 (select key
-  from t1
-  where key = 0) t1
+  from t1_n21
+  where key = 0) t1_n21
 left semi join
 (select key
-  from t2
-  where key = 0) t2
+  from t2_n12
+  where key = 0) t2_n12
 on 1 = 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -56,7 +56,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n21
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (key = 0) (type: boolean)
@@ -69,7 +69,7 @@ STAGE PLANS:
              Map-reduce partition columns: 1 (type: int)
              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
          TableScan
-            alias: t2
+            alias: t2_n12
            Statistics: Num rows: 500 Data size: 5312 Basic stats:
COMPLETE Column stats: NONE Filter Operator predicate: (key = 0) (type: boolean) @@ -133,29 +133,29 @@ STAGE PLANS: PREHOOK: query: select count(1) from (select key - from t1 - where key = 0) t1 + from t1_n21 + where key = 0) t1_n21 left semi join (select key - from t2 - where key = 0) t2 + from t2_n12 + where key = 0) t2_n12 on 1 = 1 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n21 +PREHOOK: Input: default@t2_n12 #### A masked pattern was here #### POSTHOOK: query: select count(1) from (select key - from t1 - where key = 0) t1 + from t1_n21 + where key = 0) t1_n21 left semi join (select key - from t2 - where key = 0) t2 + from t2_n12 + where key = 0) t2_n12 on 1 = 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n21 +POSTHOOK: Input: default@t2_n12 #### A masked pattern was here #### 3 diff --git a/ql/src/test/results/clientpositive/semijoin5.q.out b/ql/src/test/results/clientpositive/semijoin5.q.out index fd2fb089ae..8c94715e70 100644 --- a/ql/src/test/results/clientpositive/semijoin5.q.out +++ b/ql/src/test/results/clientpositive/semijoin5.q.out @@ -1,33 +1,33 @@ -PREHOOK: query: CREATE TABLE table_1 (timestamp_col_1 TIMESTAMP, decimal3003_col_2 DECIMAL(30, 3), tinyint_col_3 TINYINT, decimal0101_col_4 DECIMAL(1, 1), boolean_col_5 BOOLEAN, float_col_6 FLOAT, bigint_col_7 BIGINT, varchar0098_col_8 VARCHAR(98), timestamp_col_9 TIMESTAMP, bigint_col_10 BIGINT, decimal0903_col_11 DECIMAL(9, 3), timestamp_col_12 TIMESTAMP, timestamp_col_13 TIMESTAMP, float_col_14 FLOAT, char0254_col_15 CHAR(254), double_col_16 DOUBLE, timestamp_col_17 TIMESTAMP, boolean_col_18 BOOLEAN, decimal2608_col_19 DECIMAL(26, 8), varchar0216_col_20 VARCHAR(216), string_col_21 STRING, bigint_col_22 BIGINT, boolean_col_23 BOOLEAN, timestamp_col_24 TIMESTAMP, boolean_col_25 BOOLEAN, decimal2016_col_26 DECIMAL(20, 16), string_col_27 STRING, decimal0202_col_28 DECIMAL(2, 2), float_col_29 FLOAT, decimal2020_col_30 DECIMAL(20, 20), boolean_col_31 BOOLEAN, double_col_32 DOUBLE, varchar0148_col_33 VARCHAR(148), decimal2121_col_34 DECIMAL(21, 21), tinyint_col_35 TINYINT, boolean_col_36 BOOLEAN, boolean_col_37 BOOLEAN, string_col_38 STRING, decimal3420_col_39 DECIMAL(34, 20), timestamp_col_40 TIMESTAMP, decimal1408_col_41 DECIMAL(14, 8), string_col_42 STRING, decimal0902_col_43 DECIMAL(9, 2), varchar0204_col_44 VARCHAR(204), boolean_col_45 BOOLEAN, timestamp_col_46 TIMESTAMP, boolean_col_47 BOOLEAN, bigint_col_48 BIGINT, boolean_col_49 BOOLEAN, smallint_col_50 SMALLINT, decimal0704_col_51 DECIMAL(7, 4), timestamp_col_52 TIMESTAMP, boolean_col_53 BOOLEAN, timestamp_col_54 TIMESTAMP, int_col_55 INT, decimal0505_col_56 DECIMAL(5, 5), char0155_col_57 CHAR(155), boolean_col_58 BOOLEAN, bigint_col_59 BIGINT, boolean_col_60 BOOLEAN, boolean_col_61 BOOLEAN, char0249_col_62 CHAR(249), boolean_col_63 BOOLEAN, timestamp_col_64 TIMESTAMP, decimal1309_col_65 DECIMAL(13, 9), int_col_66 INT, float_col_67 FLOAT, timestamp_col_68 TIMESTAMP, timestamp_col_69 TIMESTAMP, boolean_col_70 BOOLEAN, timestamp_col_71 TIMESTAMP, double_col_72 DOUBLE, boolean_col_73 BOOLEAN, char0222_col_74 CHAR(222), float_col_75 FLOAT, string_col_76 STRING, decimal2612_col_77 DECIMAL(26, 12), timestamp_col_78 TIMESTAMP, char0128_col_79 CHAR(128), timestamp_col_80 TIMESTAMP, double_col_81 DOUBLE, timestamp_col_82 TIMESTAMP, float_col_83 FLOAT, decimal2622_col_84 DECIMAL(26, 22), double_col_85 DOUBLE, float_col_86 FLOAT, decimal0907_col_87 DECIMAL(9, 7)) 
STORED AS orc +PREHOOK: query: CREATE TABLE table_1_n1 (timestamp_col_1 TIMESTAMP, decimal3003_col_2 DECIMAL(30, 3), tinyint_col_3 TINYINT, decimal0101_col_4 DECIMAL(1, 1), boolean_col_5 BOOLEAN, float_col_6 FLOAT, bigint_col_7 BIGINT, varchar0098_col_8 VARCHAR(98), timestamp_col_9 TIMESTAMP, bigint_col_10 BIGINT, decimal0903_col_11 DECIMAL(9, 3), timestamp_col_12 TIMESTAMP, timestamp_col_13 TIMESTAMP, float_col_14 FLOAT, char0254_col_15 CHAR(254), double_col_16 DOUBLE, timestamp_col_17 TIMESTAMP, boolean_col_18 BOOLEAN, decimal2608_col_19 DECIMAL(26, 8), varchar0216_col_20 VARCHAR(216), string_col_21 STRING, bigint_col_22 BIGINT, boolean_col_23 BOOLEAN, timestamp_col_24 TIMESTAMP, boolean_col_25 BOOLEAN, decimal2016_col_26 DECIMAL(20, 16), string_col_27 STRING, decimal0202_col_28 DECIMAL(2, 2), float_col_29 FLOAT, decimal2020_col_30 DECIMAL(20, 20), boolean_col_31 BOOLEAN, double_col_32 DOUBLE, varchar0148_col_33 VARCHAR(148), decimal2121_col_34 DECIMAL(21, 21), tinyint_col_35 TINYINT, boolean_col_36 BOOLEAN, boolean_col_37 BOOLEAN, string_col_38 STRING, decimal3420_col_39 DECIMAL(34, 20), timestamp_col_40 TIMESTAMP, decimal1408_col_41 DECIMAL(14, 8), string_col_42 STRING, decimal0902_col_43 DECIMAL(9, 2), varchar0204_col_44 VARCHAR(204), boolean_col_45 BOOLEAN, timestamp_col_46 TIMESTAMP, boolean_col_47 BOOLEAN, bigint_col_48 BIGINT, boolean_col_49 BOOLEAN, smallint_col_50 SMALLINT, decimal0704_col_51 DECIMAL(7, 4), timestamp_col_52 TIMESTAMP, boolean_col_53 BOOLEAN, timestamp_col_54 TIMESTAMP, int_col_55 INT, decimal0505_col_56 DECIMAL(5, 5), char0155_col_57 CHAR(155), boolean_col_58 BOOLEAN, bigint_col_59 BIGINT, boolean_col_60 BOOLEAN, boolean_col_61 BOOLEAN, char0249_col_62 CHAR(249), boolean_col_63 BOOLEAN, timestamp_col_64 TIMESTAMP, decimal1309_col_65 DECIMAL(13, 9), int_col_66 INT, float_col_67 FLOAT, timestamp_col_68 TIMESTAMP, timestamp_col_69 TIMESTAMP, boolean_col_70 BOOLEAN, timestamp_col_71 TIMESTAMP, double_col_72 DOUBLE, boolean_col_73 BOOLEAN, char0222_col_74 CHAR(222), float_col_75 FLOAT, string_col_76 STRING, decimal2612_col_77 DECIMAL(26, 12), timestamp_col_78 TIMESTAMP, char0128_col_79 CHAR(128), timestamp_col_80 TIMESTAMP, double_col_81 DOUBLE, timestamp_col_82 TIMESTAMP, float_col_83 FLOAT, decimal2622_col_84 DECIMAL(26, 22), double_col_85 DOUBLE, float_col_86 FLOAT, decimal0907_col_87 DECIMAL(9, 7)) STORED AS orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table_1 -POSTHOOK: query: CREATE TABLE table_1 (timestamp_col_1 TIMESTAMP, decimal3003_col_2 DECIMAL(30, 3), tinyint_col_3 TINYINT, decimal0101_col_4 DECIMAL(1, 1), boolean_col_5 BOOLEAN, float_col_6 FLOAT, bigint_col_7 BIGINT, varchar0098_col_8 VARCHAR(98), timestamp_col_9 TIMESTAMP, bigint_col_10 BIGINT, decimal0903_col_11 DECIMAL(9, 3), timestamp_col_12 TIMESTAMP, timestamp_col_13 TIMESTAMP, float_col_14 FLOAT, char0254_col_15 CHAR(254), double_col_16 DOUBLE, timestamp_col_17 TIMESTAMP, boolean_col_18 BOOLEAN, decimal2608_col_19 DECIMAL(26, 8), varchar0216_col_20 VARCHAR(216), string_col_21 STRING, bigint_col_22 BIGINT, boolean_col_23 BOOLEAN, timestamp_col_24 TIMESTAMP, boolean_col_25 BOOLEAN, decimal2016_col_26 DECIMAL(20, 16), string_col_27 STRING, decimal0202_col_28 DECIMAL(2, 2), float_col_29 FLOAT, decimal2020_col_30 DECIMAL(20, 20), boolean_col_31 BOOLEAN, double_col_32 DOUBLE, varchar0148_col_33 VARCHAR(148), decimal2121_col_34 DECIMAL(21, 21), tinyint_col_35 TINYINT, boolean_col_36 BOOLEAN, boolean_col_37 BOOLEAN, string_col_38 STRING, decimal3420_col_39 
DECIMAL(34, 20), timestamp_col_40 TIMESTAMP, decimal1408_col_41 DECIMAL(14, 8), string_col_42 STRING, decimal0902_col_43 DECIMAL(9, 2), varchar0204_col_44 VARCHAR(204), boolean_col_45 BOOLEAN, timestamp_col_46 TIMESTAMP, boolean_col_47 BOOLEAN, bigint_col_48 BIGINT, boolean_col_49 BOOLEAN, smallint_col_50 SMALLINT, decimal0704_col_51 DECIMAL(7, 4), timestamp_col_52 TIMESTAMP, boolean_col_53 BOOLEAN, timestamp_col_54 TIMESTAMP, int_col_55 INT, decimal0505_col_56 DECIMAL(5, 5), char0155_col_57 CHAR(155), boolean_col_58 BOOLEAN, bigint_col_59 BIGINT, boolean_col_60 BOOLEAN, boolean_col_61 BOOLEAN, char0249_col_62 CHAR(249), boolean_col_63 BOOLEAN, timestamp_col_64 TIMESTAMP, decimal1309_col_65 DECIMAL(13, 9), int_col_66 INT, float_col_67 FLOAT, timestamp_col_68 TIMESTAMP, timestamp_col_69 TIMESTAMP, boolean_col_70 BOOLEAN, timestamp_col_71 TIMESTAMP, double_col_72 DOUBLE, boolean_col_73 BOOLEAN, char0222_col_74 CHAR(222), float_col_75 FLOAT, string_col_76 STRING, decimal2612_col_77 DECIMAL(26, 12), timestamp_col_78 TIMESTAMP, char0128_col_79 CHAR(128), timestamp_col_80 TIMESTAMP, double_col_81 DOUBLE, timestamp_col_82 TIMESTAMP, float_col_83 FLOAT, decimal2622_col_84 DECIMAL(26, 22), double_col_85 DOUBLE, float_col_86 FLOAT, decimal0907_col_87 DECIMAL(9, 7)) STORED AS orc +PREHOOK: Output: default@table_1_n1 +POSTHOOK: query: CREATE TABLE table_1_n1 (timestamp_col_1 TIMESTAMP, decimal3003_col_2 DECIMAL(30, 3), tinyint_col_3 TINYINT, decimal0101_col_4 DECIMAL(1, 1), boolean_col_5 BOOLEAN, float_col_6 FLOAT, bigint_col_7 BIGINT, varchar0098_col_8 VARCHAR(98), timestamp_col_9 TIMESTAMP, bigint_col_10 BIGINT, decimal0903_col_11 DECIMAL(9, 3), timestamp_col_12 TIMESTAMP, timestamp_col_13 TIMESTAMP, float_col_14 FLOAT, char0254_col_15 CHAR(254), double_col_16 DOUBLE, timestamp_col_17 TIMESTAMP, boolean_col_18 BOOLEAN, decimal2608_col_19 DECIMAL(26, 8), varchar0216_col_20 VARCHAR(216), string_col_21 STRING, bigint_col_22 BIGINT, boolean_col_23 BOOLEAN, timestamp_col_24 TIMESTAMP, boolean_col_25 BOOLEAN, decimal2016_col_26 DECIMAL(20, 16), string_col_27 STRING, decimal0202_col_28 DECIMAL(2, 2), float_col_29 FLOAT, decimal2020_col_30 DECIMAL(20, 20), boolean_col_31 BOOLEAN, double_col_32 DOUBLE, varchar0148_col_33 VARCHAR(148), decimal2121_col_34 DECIMAL(21, 21), tinyint_col_35 TINYINT, boolean_col_36 BOOLEAN, boolean_col_37 BOOLEAN, string_col_38 STRING, decimal3420_col_39 DECIMAL(34, 20), timestamp_col_40 TIMESTAMP, decimal1408_col_41 DECIMAL(14, 8), string_col_42 STRING, decimal0902_col_43 DECIMAL(9, 2), varchar0204_col_44 VARCHAR(204), boolean_col_45 BOOLEAN, timestamp_col_46 TIMESTAMP, boolean_col_47 BOOLEAN, bigint_col_48 BIGINT, boolean_col_49 BOOLEAN, smallint_col_50 SMALLINT, decimal0704_col_51 DECIMAL(7, 4), timestamp_col_52 TIMESTAMP, boolean_col_53 BOOLEAN, timestamp_col_54 TIMESTAMP, int_col_55 INT, decimal0505_col_56 DECIMAL(5, 5), char0155_col_57 CHAR(155), boolean_col_58 BOOLEAN, bigint_col_59 BIGINT, boolean_col_60 BOOLEAN, boolean_col_61 BOOLEAN, char0249_col_62 CHAR(249), boolean_col_63 BOOLEAN, timestamp_col_64 TIMESTAMP, decimal1309_col_65 DECIMAL(13, 9), int_col_66 INT, float_col_67 FLOAT, timestamp_col_68 TIMESTAMP, timestamp_col_69 TIMESTAMP, boolean_col_70 BOOLEAN, timestamp_col_71 TIMESTAMP, double_col_72 DOUBLE, boolean_col_73 BOOLEAN, char0222_col_74 CHAR(222), float_col_75 FLOAT, string_col_76 STRING, decimal2612_col_77 DECIMAL(26, 12), timestamp_col_78 TIMESTAMP, char0128_col_79 CHAR(128), timestamp_col_80 TIMESTAMP, double_col_81 DOUBLE, timestamp_col_82 TIMESTAMP, 
float_col_83 FLOAT, decimal2622_col_84 DECIMAL(26, 22), double_col_85 DOUBLE, float_col_86 FLOAT, decimal0907_col_87 DECIMAL(9, 7)) STORED AS orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_1 -PREHOOK: query: CREATE TABLE table_18 (boolean_col_1 BOOLEAN, boolean_col_2 BOOLEAN, decimal2518_col_3 DECIMAL(25, 18), float_col_4 FLOAT, timestamp_col_5 TIMESTAMP, double_col_6 DOUBLE, double_col_7 DOUBLE, char0035_col_8 CHAR(35), decimal2709_col_9 DECIMAL(27, 9), int_col_10 INT, timestamp_col_11 TIMESTAMP, decimal3604_col_12 DECIMAL(36, 4), string_col_13 STRING, int_col_14 INT, tinyint_col_15 TINYINT, decimal1911_col_16 DECIMAL(19, 11), float_col_17 FLOAT, timestamp_col_18 TIMESTAMP, smallint_col_19 SMALLINT, tinyint_col_20 TINYINT, timestamp_col_21 TIMESTAMP, boolean_col_22 BOOLEAN, int_col_23 INT) STORED AS orc +POSTHOOK: Output: default@table_1_n1 +PREHOOK: query: CREATE TABLE table_18_n1 (boolean_col_1 BOOLEAN, boolean_col_2 BOOLEAN, decimal2518_col_3 DECIMAL(25, 18), float_col_4 FLOAT, timestamp_col_5 TIMESTAMP, double_col_6 DOUBLE, double_col_7 DOUBLE, char0035_col_8 CHAR(35), decimal2709_col_9 DECIMAL(27, 9), int_col_10 INT, timestamp_col_11 TIMESTAMP, decimal3604_col_12 DECIMAL(36, 4), string_col_13 STRING, int_col_14 INT, tinyint_col_15 TINYINT, decimal1911_col_16 DECIMAL(19, 11), float_col_17 FLOAT, timestamp_col_18 TIMESTAMP, smallint_col_19 SMALLINT, tinyint_col_20 TINYINT, timestamp_col_21 TIMESTAMP, boolean_col_22 BOOLEAN, int_col_23 INT) STORED AS orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table_18 -POSTHOOK: query: CREATE TABLE table_18 (boolean_col_1 BOOLEAN, boolean_col_2 BOOLEAN, decimal2518_col_3 DECIMAL(25, 18), float_col_4 FLOAT, timestamp_col_5 TIMESTAMP, double_col_6 DOUBLE, double_col_7 DOUBLE, char0035_col_8 CHAR(35), decimal2709_col_9 DECIMAL(27, 9), int_col_10 INT, timestamp_col_11 TIMESTAMP, decimal3604_col_12 DECIMAL(36, 4), string_col_13 STRING, int_col_14 INT, tinyint_col_15 TINYINT, decimal1911_col_16 DECIMAL(19, 11), float_col_17 FLOAT, timestamp_col_18 TIMESTAMP, smallint_col_19 SMALLINT, tinyint_col_20 TINYINT, timestamp_col_21 TIMESTAMP, boolean_col_22 BOOLEAN, int_col_23 INT) STORED AS orc +PREHOOK: Output: default@table_18_n1 +POSTHOOK: query: CREATE TABLE table_18_n1 (boolean_col_1 BOOLEAN, boolean_col_2 BOOLEAN, decimal2518_col_3 DECIMAL(25, 18), float_col_4 FLOAT, timestamp_col_5 TIMESTAMP, double_col_6 DOUBLE, double_col_7 DOUBLE, char0035_col_8 CHAR(35), decimal2709_col_9 DECIMAL(27, 9), int_col_10 INT, timestamp_col_11 TIMESTAMP, decimal3604_col_12 DECIMAL(36, 4), string_col_13 STRING, int_col_14 INT, tinyint_col_15 TINYINT, decimal1911_col_16 DECIMAL(19, 11), float_col_17 FLOAT, timestamp_col_18 TIMESTAMP, smallint_col_19 SMALLINT, tinyint_col_20 TINYINT, timestamp_col_21 TIMESTAMP, boolean_col_22 BOOLEAN, int_col_23 INT) STORED AS orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_18 +POSTHOOK: Output: default@table_18_n1 PREHOOK: query: explain SELECT COALESCE(498, LEAD(COALESCE(-973, -684, 515)) OVER (PARTITION BY (t2.int_col_10 + t1.smallint_col_50) ORDER BY (t2.int_col_10 + t1.smallint_col_50), FLOOR(t1.double_col_16) DESC), 524) AS int_col, (t2.int_col_10) + (t1.smallint_col_50) AS int_col_1, FLOOR(t1.double_col_16) AS float_col, COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_10 + t1.smallint_col_50) ORDER BY (t2.int_col_10 + t1.smallint_col_50) DESC, FLOOR(t1.double_col_16) 
DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2 -FROM table_1 t1 -INNER JOIN table_18 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND +FROM table_1_n1 t1 +INNER JOIN table_18_n1 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND ((t2.decimal2709_col_9) = (t1.decimal2016_col_26))) AND ((t2.tinyint_col_20) = (t1.tinyint_col_3)) WHERE (t2.smallint_col_19) IN (SELECT COALESCE(-92, -994) AS int_col - FROM table_1 tt1 - INNER JOIN table_18 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77) + FROM table_1_n1 tt1 + INNER JOIN table_18_n1 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77) WHERE (t1.timestamp_col_9) = (tt2.timestamp_col_18)) PREHOOK: type: QUERY POSTHOOK: query: explain @@ -36,14 +36,14 @@ SELECT (t2.int_col_10) + (t1.smallint_col_50) AS int_col_1, FLOOR(t1.double_col_16) AS float_col, COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_10 + t1.smallint_col_50) ORDER BY (t2.int_col_10 + t1.smallint_col_50) DESC, FLOOR(t1.double_col_16) DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2 -FROM table_1 t1 -INNER JOIN table_18 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND +FROM table_1_n1 t1 +INNER JOIN table_18_n1 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND ((t2.decimal2709_col_9) = (t1.decimal2016_col_26))) AND ((t2.tinyint_col_20) = (t1.tinyint_col_3)) WHERE (t2.smallint_col_19) IN (SELECT COALESCE(-92, -994) AS int_col - FROM table_1 tt1 - INNER JOIN table_18 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77) + FROM table_1_n1 tt1 + INNER JOIN table_18_n1 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77) WHERE (t1.timestamp_col_9) = (tt2.timestamp_col_18)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -303,34 +303,34 @@ PREHOOK: query: SELECT (t2.int_col_10) + (t1.smallint_col_50) AS int_col_1, FLOOR(t1.double_col_16) AS float_col, COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_10 + t1.smallint_col_50) ORDER BY (t2.int_col_10 + t1.smallint_col_50) DESC, FLOOR(t1.double_col_16) DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2 -FROM table_1 t1 -INNER JOIN table_18 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND +FROM table_1_n1 t1 +INNER JOIN table_18_n1 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND ((t2.decimal2709_col_9) = (t1.decimal2016_col_26))) AND ((t2.tinyint_col_20) = (t1.tinyint_col_3)) WHERE (t2.smallint_col_19) IN (SELECT COALESCE(-92, -994) AS int_col - FROM table_1 tt1 - INNER JOIN table_18 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77) + FROM table_1_n1 tt1 + INNER JOIN table_18_n1 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77) WHERE (t1.timestamp_col_9) = (tt2.timestamp_col_18)) PREHOOK: type: QUERY -PREHOOK: Input: default@table_1 -PREHOOK: Input: default@table_18 +PREHOOK: Input: default@table_18_n1 +PREHOOK: Input: default@table_1_n1 #### A masked pattern was here #### POSTHOOK: query: SELECT COALESCE(498, LEAD(COALESCE(-973, -684, 515)) OVER (PARTITION BY (t2.int_col_10 + t1.smallint_col_50) ORDER BY (t2.int_col_10 + t1.smallint_col_50), FLOOR(t1.double_col_16) DESC), 524) AS int_col, (t2.int_col_10) + (t1.smallint_col_50) AS int_col_1, FLOOR(t1.double_col_16) AS float_col, COALESCE(SUM(COALESCE(62, -380, -435)) OVER (PARTITION BY (t2.int_col_10 + t1.smallint_col_50) ORDER BY (t2.int_col_10 + t1.smallint_col_50) DESC, FLOOR(t1.double_col_16) DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 48 FOLLOWING), 704) AS int_col_2 -FROM table_1 t1 -INNER JOIN table_18 t2 ON 
(((t2.tinyint_col_15) = (t1.bigint_col_7)) AND +FROM table_1_n1 t1 +INNER JOIN table_18_n1 t2 ON (((t2.tinyint_col_15) = (t1.bigint_col_7)) AND ((t2.decimal2709_col_9) = (t1.decimal2016_col_26))) AND ((t2.tinyint_col_20) = (t1.tinyint_col_3)) WHERE (t2.smallint_col_19) IN (SELECT COALESCE(-92, -994) AS int_col - FROM table_1 tt1 - INNER JOIN table_18 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77) + FROM table_1_n1 tt1 + INNER JOIN table_18_n1 tt2 ON (tt2.decimal1911_col_16) = (tt1.decimal2612_col_77) WHERE (t1.timestamp_col_9) = (tt2.timestamp_col_18)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@table_1 -POSTHOOK: Input: default@table_18 +POSTHOOK: Input: default@table_18_n1 +POSTHOOK: Input: default@table_1_n1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/setop_no_distinct.q.out b/ql/src/test/results/clientpositive/setop_no_distinct.q.out index e759245cd1..31ca7d4338 100644 --- a/ql/src/test/results/clientpositive/setop_no_distinct.q.out +++ b/ql/src/test/results/clientpositive/setop_no_distinct.q.out @@ -1,196 +1,196 @@ -PREHOOK: query: create table a(key int, value int) +PREHOOK: query: create table a_n1(key int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@a -POSTHOOK: query: create table a(key int, value int) +PREHOOK: Output: default@a_n1 +POSTHOOK: query: create table a_n1(key int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@a -PREHOOK: query: insert into table a values (1,2),(1,2),(1,2),(1,3),(2,3) +POSTHOOK: Output: default@a_n1 +PREHOOK: query: insert into table a_n1 values (1,2),(1,2),(1,2),(1,3),(2,3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@a -POSTHOOK: query: insert into table a values (1,2),(1,2),(1,2),(1,3),(2,3) +PREHOOK: Output: default@a_n1 +POSTHOOK: query: insert into table a_n1 values (1,2),(1,2),(1,2),(1,3),(2,3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@a -POSTHOOK: Lineage: a.key SCRIPT [] -POSTHOOK: Lineage: a.value SCRIPT [] -PREHOOK: query: create table b(key int, value int) +POSTHOOK: Output: default@a_n1 +POSTHOOK: Lineage: a_n1.key SCRIPT [] +POSTHOOK: Lineage: a_n1.value SCRIPT [] +PREHOOK: query: create table b_n1(key int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@b -POSTHOOK: query: create table b(key int, value int) +PREHOOK: Output: default@b_n1 +POSTHOOK: query: create table b_n1(key int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@b -PREHOOK: query: insert into table b values (1,2),(1,2),(2,3) +POSTHOOK: Output: default@b_n1 +PREHOOK: query: insert into table b_n1 values (1,2),(1,2),(2,3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@b -POSTHOOK: query: insert into table b values (1,2),(1,2),(2,3) +PREHOOK: Output: default@b_n1 +POSTHOOK: query: insert into table b_n1 values (1,2),(1,2),(2,3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@b -POSTHOOK: Lineage: b.key SCRIPT [] -POSTHOOK: Lineage: b.value SCRIPT [] -PREHOOK: query: select * from a intersect select * from b +POSTHOOK: Output: default@b_n1 +POSTHOOK: Lineage: b_n1.key SCRIPT [] +POSTHOOK: Lineage: b_n1.value SCRIPT [] +PREHOOK: query: select * from a_n1 intersect select * from b_n1 PREHOOK: type: QUERY -PREHOOK: Input: 
default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from a intersect select * from b +POSTHOOK: query: select * from a_n1 intersect select * from b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 1 2 2 3 -PREHOOK: query: (select * from b intersect (select * from a)) intersect select * from b +PREHOOK: query: (select * from b_n1 intersect (select * from a_n1)) intersect select * from b_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: (select * from b intersect (select * from a)) intersect select * from b +POSTHOOK: query: (select * from b_n1 intersect (select * from a_n1)) intersect select * from b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 1 2 2 3 -PREHOOK: query: select * from b intersect all select * from a intersect select * from b +PREHOOK: query: select * from b_n1 intersect all select * from a_n1 intersect select * from b_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from b intersect all select * from a intersect select * from b +POSTHOOK: query: select * from b_n1 intersect all select * from a_n1 intersect select * from b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 1 2 2 3 -PREHOOK: query: (select * from b) intersect all ((select * from a) intersect select * from b) +PREHOOK: query: (select * from b_n1) intersect all ((select * from a_n1) intersect select * from b_n1) PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: (select * from b) intersect all ((select * from a) intersect select * from b) +POSTHOOK: query: (select * from b_n1) intersect all ((select * from a_n1) intersect select * from b_n1) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 1 2 2 3 -PREHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +PREHOOK: query: select * from (select a_n1.key, b_n1.value from a_n1 join b_n1 on a_n1.key=b_n1.key)sub1 intersect -select * from (select a.key, b.value from a join b on a.key=b.key)sub2 +select * from (select a_n1.key, b_n1.value from a_n1 join b_n1 on a_n1.key=b_n1.key)sub2 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from (select a.key, b.value from a join b on a.key=b.key)sub1 +POSTHOOK: query: select * from (select a_n1.key, b_n1.value from a_n1 join b_n1 on a_n1.key=b_n1.key)sub1 intersect -select * from (select a.key, b.value from a join b on a.key=b.key)sub2 +select * from (select a_n1.key, b_n1.value from a_n1 join b_n1 on 
a_n1.key=b_n1.key)sub2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 1 2 2 3 -PREHOOK: query: drop table a +PREHOOK: query: drop table a_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@a -PREHOOK: Output: default@a -POSTHOOK: query: drop table a +PREHOOK: Input: default@a_n1 +PREHOOK: Output: default@a_n1 +POSTHOOK: query: drop table a_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@a -POSTHOOK: Output: default@a -PREHOOK: query: drop table b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Output: default@a_n1 +PREHOOK: query: drop table b_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@b -PREHOOK: Output: default@b -POSTHOOK: query: drop table b +PREHOOK: Input: default@b_n1 +PREHOOK: Output: default@b_n1 +POSTHOOK: query: drop table b_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@b -POSTHOOK: Output: default@b -PREHOOK: query: create table a(key int) +POSTHOOK: Input: default@b_n1 +POSTHOOK: Output: default@b_n1 +PREHOOK: query: create table a_n1(key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@a -POSTHOOK: query: create table a(key int) +PREHOOK: Output: default@a_n1 +POSTHOOK: query: create table a_n1(key int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@a -PREHOOK: query: insert into table a values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL),(NULL),(NULL),(NULL) +POSTHOOK: Output: default@a_n1 +PREHOOK: query: insert into table a_n1 values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL),(NULL),(NULL),(NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@a -POSTHOOK: query: insert into table a values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL),(NULL),(NULL),(NULL) +PREHOOK: Output: default@a_n1 +POSTHOOK: query: insert into table a_n1 values (0),(1),(2),(2),(2),(2),(3),(NULL),(NULL),(NULL),(NULL),(NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@a -POSTHOOK: Lineage: a.key SCRIPT [] -PREHOOK: query: create table b(key bigint) +POSTHOOK: Output: default@a_n1 +POSTHOOK: Lineage: a_n1.key SCRIPT [] +PREHOOK: query: create table b_n1(key bigint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@b -POSTHOOK: query: create table b(key bigint) +PREHOOK: Output: default@b_n1 +POSTHOOK: query: create table b_n1(key bigint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@b -PREHOOK: query: insert into table b values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL) +POSTHOOK: Output: default@b_n1 +PREHOOK: query: insert into table b_n1 values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@b -POSTHOOK: query: insert into table b values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL) +PREHOOK: Output: default@b_n1 +POSTHOOK: query: insert into table b_n1 values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@b -POSTHOOK: Lineage: b.key SCRIPT [] -PREHOOK: query: select * from a except select * from b +POSTHOOK: Output: default@b_n1 +POSTHOOK: Lineage: b_n1.key SCRIPT [] +PREHOOK: query: select * from a_n1 except select * from b_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: 
default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from a except select * from b +POSTHOOK: query: select * from a_n1 except select * from b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 0 -PREHOOK: query: (select * from a) minus select * from b union (select * from a) minus select * from b +PREHOOK: query: (select * from a_n1) minus select * from b_n1 union (select * from a_n1) minus select * from b_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: (select * from a) minus select * from b union (select * from a) minus select * from b +POSTHOOK: query: (select * from a_n1) minus select * from b_n1 union (select * from a_n1) minus select * from b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 0 -PREHOOK: query: (select * from a) minus select * from b union all ((select * from a) minus select * from b) +PREHOOK: query: (select * from a_n1) minus select * from b_n1 union all ((select * from a_n1) minus select * from b_n1) PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: (select * from a) minus select * from b union all ((select * from a) minus select * from b) +POSTHOOK: query: (select * from a_n1) minus select * from b_n1 union all ((select * from a_n1) minus select * from b_n1) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 0 0 -PREHOOK: query: (select * from a) minus select * from b union all (select * from a) minus all select * from b +PREHOOK: query: (select * from a_n1) minus select * from b_n1 union all (select * from a_n1) minus all select * from b_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: (select * from a) minus select * from b union all (select * from a) minus all select * from b +POSTHOOK: query: (select * from a_n1) minus select * from b_n1 union all (select * from a_n1) minus all select * from b_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### NULL NULL @@ -198,40 +198,40 @@ NULL 0 2 2 -PREHOOK: query: select * from a minus select * from b minus (select * from a minus select * from b) +PREHOOK: query: select * from a_n1 minus select * from b_n1 minus (select * from a_n1 minus select * from b_n1) PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from a minus select * from b minus (select * from a minus select * from b) +POSTHOOK: query: select * from a_n1 minus select * from b_n1 minus (select * from a_n1 minus select * from b_n1) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 
+POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### -PREHOOK: query: (select * from a) minus (select * from b minus (select * from a minus select * from b)) +PREHOOK: query: (select * from a_n1) minus (select * from b_n1 minus (select * from a_n1 minus select * from b_n1)) PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n1 +PREHOOK: Input: default@b_n1 #### A masked pattern was here #### -POSTHOOK: query: (select * from a) minus (select * from b minus (select * from a minus select * from b)) +POSTHOOK: query: (select * from a_n1) minus (select * from b_n1 minus (select * from a_n1 minus select * from b_n1)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Input: default@b_n1 #### A masked pattern was here #### 0 -PREHOOK: query: drop table a +PREHOOK: query: drop table a_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@a -PREHOOK: Output: default@a -POSTHOOK: query: drop table a +PREHOOK: Input: default@a_n1 +PREHOOK: Output: default@a_n1 +POSTHOOK: query: drop table a_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@a -POSTHOOK: Output: default@a -PREHOOK: query: drop table b +POSTHOOK: Input: default@a_n1 +POSTHOOK: Output: default@a_n1 +PREHOOK: query: drop table b_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@b -PREHOOK: Output: default@b -POSTHOOK: query: drop table b +PREHOOK: Input: default@b_n1 +PREHOOK: Output: default@b_n1 +POSTHOOK: query: drop table b_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@b -POSTHOOK: Output: default@b +POSTHOOK: Input: default@b_n1 +POSTHOOK: Output: default@b_n1 diff --git a/ql/src/test/results/clientpositive/show_columns.q.out b/ql/src/test/results/clientpositive/show_columns.q.out index 7ad2686d6c..83d68b6df9 100644 --- a/ql/src/test/results/clientpositive/show_columns.q.out +++ b/ql/src/test/results/clientpositive/show_columns.q.out @@ -48,26 +48,26 @@ PREHOOK: Input: database:test_db POSTHOOK: query: USE test_db POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test_db -PREHOOK: query: CREATE TABLE foo(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT) +PREHOOK: query: CREATE TABLE foo_n7(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:test_db -PREHOOK: Output: test_db@foo -POSTHOOK: query: CREATE TABLE foo(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT) +PREHOOK: Output: test_db@foo_n7 +POSTHOOK: query: CREATE TABLE foo_n7(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test_db -POSTHOOK: Output: test_db@foo +POSTHOOK: Output: test_db@foo_n7 PREHOOK: query: USE test_db PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:test_db POSTHOOK: query: USE test_db POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test_db -PREHOOK: query: SHOW COLUMNS from foo +PREHOOK: query: SHOW COLUMNS from foo_n7 PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS from foo +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS from foo_n7 POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 a b c @@ -77,12 +77,12 @@ col3 cola colb colc -PREHOOK: query: SHOW COLUMNS in foo +PREHOOK: query: SHOW COLUMNS in foo_n7 PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo 
-POSTHOOK: query: SHOW COLUMNS in foo +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS in foo_n7 POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 a b c @@ -92,60 +92,60 @@ col3 cola colb colc -PREHOOK: query: SHOW COLUMNS in foo 'col*' +PREHOOK: query: SHOW COLUMNS in foo_n7 'col*' PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS in foo 'col*' +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS in foo_n7 'col*' POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 col1 col2 col3 cola colb colc -PREHOOK: query: SHOW COLUMNS in foo "col*" +PREHOOK: query: SHOW COLUMNS in foo_n7 "col*" PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS in foo "col*" +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS in foo_n7 "col*" POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 col1 col2 col3 cola colb colc -PREHOOK: query: SHOW COLUMNS from foo 'col*' +PREHOOK: query: SHOW COLUMNS from foo_n7 'col*' PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS from foo 'col*' +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS from foo_n7 'col*' POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 col1 col2 col3 cola colb colc -PREHOOK: query: SHOW COLUMNS from foo "col*" +PREHOOK: query: SHOW COLUMNS from foo_n7 "col*" PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS from foo "col*" +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS from foo_n7 "col*" POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 col1 col2 col3 cola colb colc -PREHOOK: query: SHOW COLUMNS from foo "col1|cola" +PREHOOK: query: SHOW COLUMNS from foo_n7 "col1|cola" PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS from foo "col1|cola" +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS from foo_n7 "col1|cola" POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 col1 cola PREHOOK: query: CREATE DATABASE `database` @@ -160,20 +160,20 @@ PREHOOK: Input: database:database POSTHOOK: query: USE `database` POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:database -PREHOOK: query: CREATE TABLE foo(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT) +PREHOOK: query: CREATE TABLE foo_n7(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:database -PREHOOK: Output: database@foo -POSTHOOK: query: CREATE TABLE foo(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT) +PREHOOK: Output: database@foo_n7 +POSTHOOK: query: CREATE TABLE foo_n7(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a INT, b INT, c INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:database -POSTHOOK: Output: database@foo -PREHOOK: query: SHOW COLUMNS from foo +POSTHOOK: Output: database@foo_n7 +PREHOOK: query: SHOW COLUMNS from foo_n7 PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: database@foo -POSTHOOK: query: SHOW COLUMNS from foo +PREHOOK: Input: database@foo_n7 +POSTHOOK: query: SHOW COLUMNS from foo_n7 POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: database@foo +POSTHOOK: Input: database@foo_n7 a b c @@ -183,48 +183,48 @@ col3 cola colb colc 
-PREHOOK: query: SHOW COLUMNS in foo "col*" +PREHOOK: query: SHOW COLUMNS in foo_n7 "col*" PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: database@foo -POSTHOOK: query: SHOW COLUMNS in foo "col*" +PREHOOK: Input: database@foo_n7 +POSTHOOK: query: SHOW COLUMNS in foo_n7 "col*" POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: database@foo +POSTHOOK: Input: database@foo_n7 col1 col2 col3 cola colb colc -PREHOOK: query: SHOW COLUMNS in foo "nomatch*" +PREHOOK: query: SHOW COLUMNS in foo_n7 "nomatch*" PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: database@foo -POSTHOOK: query: SHOW COLUMNS in foo "nomatch*" +PREHOOK: Input: database@foo_n7 +POSTHOOK: query: SHOW COLUMNS in foo_n7 "nomatch*" POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: database@foo -PREHOOK: query: SHOW COLUMNS in foo "col+" +POSTHOOK: Input: database@foo_n7 +PREHOOK: query: SHOW COLUMNS in foo_n7 "col+" PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: database@foo -POSTHOOK: query: SHOW COLUMNS in foo "col+" +PREHOOK: Input: database@foo_n7 +POSTHOOK: query: SHOW COLUMNS in foo_n7 "col+" POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: database@foo -PREHOOK: query: SHOW COLUMNS in foo "nomatch" +POSTHOOK: Input: database@foo_n7 +PREHOOK: query: SHOW COLUMNS in foo_n7 "nomatch" PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: database@foo -POSTHOOK: query: SHOW COLUMNS in foo "nomatch" +PREHOOK: Input: database@foo_n7 +POSTHOOK: query: SHOW COLUMNS in foo_n7 "nomatch" POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: database@foo +POSTHOOK: Input: database@foo_n7 PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: SHOW COLUMNS from test_db.foo +PREHOOK: query: SHOW COLUMNS from test_db.foo_n7 PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS from test_db.foo +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS from test_db.foo_n7 POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 a b c @@ -234,12 +234,12 @@ col3 cola colb colc -PREHOOK: query: SHOW COLUMNS from foo from test_db +PREHOOK: query: SHOW COLUMNS from foo_n7 from test_db PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS from foo from test_db +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS from foo_n7 from test_db POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 a b c @@ -249,24 +249,24 @@ col3 cola colb colc -PREHOOK: query: SHOW COLUMNS from foo from test_db "col*" +PREHOOK: query: SHOW COLUMNS from foo_n7 from test_db "col*" PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS from foo from test_db "col*" +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS from foo_n7 from test_db "col*" POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 col1 col2 col3 cola colb colc -PREHOOK: query: SHOW COLUMNS from foo from test_db like 'col*' +PREHOOK: query: SHOW COLUMNS from foo_n7 from test_db like 'col*' PREHOOK: type: SHOWCOLUMNS -PREHOOK: Input: test_db@foo -POSTHOOK: query: SHOW COLUMNS from foo from test_db like 'col*' +PREHOOK: Input: test_db@foo_n7 +POSTHOOK: query: SHOW COLUMNS from foo_n7 from test_db like 'col*' POSTHOOK: type: SHOWCOLUMNS -POSTHOOK: Input: test_db@foo +POSTHOOK: Input: test_db@foo_n7 col1 col2 col3 diff --git 
a/ql/src/test/results/clientpositive/show_create_table_alter.q.out b/ql/src/test/results/clientpositive/show_create_table_alter.q.out index 74be374ff7..2c75c36a20 100644 --- a/ql/src/test/results/clientpositive/show_create_table_alter.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_alter.q.out @@ -1,20 +1,20 @@ -PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key smallint, value float) +PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1_n1 (key smallint, value float) CLUSTERED BY (key) SORTED BY (value DESC) INTO 5 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key smallint, value float) +PREHOOK: Output: default@tmp_showcrt1_n1 +POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1_n1 (key smallint, value float) CLUSTERED BY (key) SORTED BY (value DESC) INTO 5 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Output: default@tmp_showcrt1_n1 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE EXTERNAL TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n1 +CREATE EXTERNAL TABLE `tmp_showcrt1_n1`( `key` smallint, `value` float) CLUSTERED BY ( @@ -33,21 +33,21 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE') +PREHOOK: query: ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE') +PREHOOK: Input: default@tmp_showcrt1_n1 +PREHOOK: Output: default@tmp_showcrt1_n1 +POSTHOOK: query: ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: Output: default@tmp_showcrt1_n1 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n1 +CREATE TABLE `tmp_showcrt1_n1`( `key` smallint, `value` float) COMMENT 'temporary table' @@ -68,21 +68,21 @@ TBLPROPERTIES ( 'EXTERNAL'='FALSE', 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE') +PREHOOK: query: ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: 
default@tmp_showcrt1 -POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE') +PREHOOK: Input: default@tmp_showcrt1_n1 +PREHOOK: Output: default@tmp_showcrt1_n1 +POSTHOOK: query: ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: Output: default@tmp_showcrt1_n1 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE EXTERNAL TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n1 +CREATE EXTERNAL TABLE `tmp_showcrt1_n1`( `key` smallint, `value` float) COMMENT 'changed comment' @@ -102,21 +102,21 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE') +PREHOOK: query: ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE') +PREHOOK: Input: default@tmp_showcrt1_n1 +PREHOOK: Output: default@tmp_showcrt1_n1 +POSTHOOK: query: ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: Output: default@tmp_showcrt1_n1 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE EXTERNAL TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n1 +CREATE EXTERNAL TABLE `tmp_showcrt1_n1`( `key` smallint, `value` float) COMMENT 'changed comment' @@ -136,21 +136,21 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler') +PREHOOK: query: ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler') +PREHOOK: Input: default@tmp_showcrt1_n1 +PREHOOK: Output: default@tmp_showcrt1_n1 +POSTHOOK: query: ALTER TABLE tmp_showcrt1_n1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW 
CREATE TABLE tmp_showcrt1 +POSTHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: Output: default@tmp_showcrt1_n1 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n1 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE EXTERNAL TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n1 +CREATE EXTERNAL TABLE `tmp_showcrt1_n1`( `key` smallint, `value` float) COMMENT 'changed comment' @@ -170,11 +170,11 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: query: DROP TABLE tmp_showcrt1_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n1 +PREHOOK: Output: default@tmp_showcrt1_n1 +POSTHOOK: query: DROP TABLE tmp_showcrt1_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 +POSTHOOK: Input: default@tmp_showcrt1_n1 +POSTHOOK: Output: default@tmp_showcrt1_n1 diff --git a/ql/src/test/results/clientpositive/show_create_table_partitioned.q.out b/ql/src/test/results/clientpositive/show_create_table_partitioned.q.out index bb2c6c7073..e554a18e6b 100644 --- a/ql/src/test/results/clientpositive/show_create_table_partitioned.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_partitioned.q.out @@ -1,22 +1,22 @@ -PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key string, newvalue boolean COMMENT 'a new value') +PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1_n2 (key string, newvalue boolean COMMENT 'a new value') COMMENT 'temporary table' PARTITIONED BY (value bigint COMMENT 'some value') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key string, newvalue boolean COMMENT 'a new value') +PREHOOK: Output: default@tmp_showcrt1_n2 +POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1_n2 (key string, newvalue boolean COMMENT 'a new value') COMMENT 'temporary table' PARTITIONED BY (value bigint COMMENT 'some value') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Output: default@tmp_showcrt1_n2 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n2 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n2 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n2 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE EXTERNAL TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n2 +CREATE EXTERNAL TABLE `tmp_showcrt1_n2`( `key` string, `newvalue` boolean COMMENT 'a new value') COMMENT 'temporary table' @@ -33,11 +33,11 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: query: DROP TABLE tmp_showcrt1_n2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n2 +PREHOOK: Output: default@tmp_showcrt1_n2 +POSTHOOK: query: DROP TABLE 
tmp_showcrt1_n2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 +POSTHOOK: Input: default@tmp_showcrt1_n2 +POSTHOOK: Output: default@tmp_showcrt1_n2 diff --git a/ql/src/test/results/clientpositive/show_create_table_serde.q.out b/ql/src/test/results/clientpositive/show_create_table_serde.q.out index aa93fa16d7..8b95c9b98e 100644 --- a/ql/src/test/results/clientpositive/show_create_table_serde.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_serde.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +PREHOOK: query: CREATE TABLE tmp_showcrt1_n0 (key int, value string, newvalue bigint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: CREATE TABLE tmp_showcrt1_n0 (key int, value string, newvalue bigint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: ALTER TABLE tmp_showcrt1 SET SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2') +POSTHOOK: Output: default@tmp_showcrt1_n0 +PREHOOK: query: ALTER TABLE tmp_showcrt1_n0 SET SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2') +PREHOOK: Input: default@tmp_showcrt1_n0 +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: ALTER TABLE tmp_showcrt1_n0 SET SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: Output: default@tmp_showcrt1_n0 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n0 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n0 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n0 +CREATE TABLE `tmp_showcrt1_n0`( `key` int, `value` string, `newvalue` bigint) @@ -38,37 +38,37 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: query: DROP TABLE tmp_showcrt1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n0 +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: DROP TABLE tmp_showcrt1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +POSTHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: Output: default@tmp_showcrt1_n0 +PREHOOK: query: CREATE TABLE 
tmp_showcrt1_n0 (key int, value string, newvalue bigint) COMMENT 'temporary table' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: CREATE TABLE tmp_showcrt1_n0 (key int, value string, newvalue bigint) COMMENT 'temporary table' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Output: default@tmp_showcrt1_n0 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n0 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n0 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n0 +CREATE TABLE `tmp_showcrt1_n0`( `key` int, `value` string, `newvalue` bigint) @@ -84,37 +84,37 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: query: DROP TABLE tmp_showcrt1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n0 +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: DROP TABLE tmp_showcrt1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +POSTHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: Output: default@tmp_showcrt1_n0 +PREHOOK: query: CREATE TABLE tmp_showcrt1_n0 (key int, value string, newvalue bigint) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' WITH SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2') STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: CREATE TABLE tmp_showcrt1_n0 (key int, value string, newvalue bigint) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' WITH SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2') STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Output: default@tmp_showcrt1_n0 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n0 
PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n0 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n0 +CREATE TABLE `tmp_showcrt1_n0`( `key` int, `value` string, `newvalue` bigint) @@ -132,35 +132,35 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: query: DROP TABLE tmp_showcrt1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n0 +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: DROP TABLE tmp_showcrt1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key string, value boolean) +POSTHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: Output: default@tmp_showcrt1_n0 +PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1_n0 (key string, value boolean) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler' WITH SERDEPROPERTIES ('field.delim'=',', 'serialization.format'='$') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key string, value boolean) +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1_n0 (key string, value boolean) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler' WITH SERDEPROPERTIES ('field.delim'=',', 'serialization.format'='$') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +POSTHOOK: Output: default@tmp_showcrt1_n0 +PREHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n0 PREHOOK: type: SHOW_CREATETABLE -PREHOOK: Input: default@tmp_showcrt1 -POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1_n0 POSTHOOK: type: SHOW_CREATETABLE -POSTHOOK: Input: default@tmp_showcrt1 -CREATE EXTERNAL TABLE `tmp_showcrt1`( +POSTHOOK: Input: default@tmp_showcrt1_n0 +CREATE EXTERNAL TABLE `tmp_showcrt1_n0`( `key` string, `value` boolean) ROW FORMAT SERDE @@ -175,11 +175,11 @@ LOCATION TBLPROPERTIES ( 'bucketing_version'='2', #### A masked pattern was here #### -PREHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: query: DROP TABLE tmp_showcrt1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tmp_showcrt1 -PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: DROP TABLE tmp_showcrt1 +PREHOOK: Input: default@tmp_showcrt1_n0 +PREHOOK: Output: default@tmp_showcrt1_n0 +POSTHOOK: query: DROP TABLE tmp_showcrt1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tmp_showcrt1 -POSTHOOK: Output: default@tmp_showcrt1 +POSTHOOK: Input: default@tmp_showcrt1_n0 +POSTHOOK: Output: default@tmp_showcrt1_n0 diff --git a/ql/src/test/results/clientpositive/show_materialized_views.q.out b/ql/src/test/results/clientpositive/show_materialized_views.q.out index 2fb1ad7a85..21d8721e42 100644 --- 
a/ql/src/test/results/clientpositive/show_materialized_views.q.out +++ b/ql/src/test/results/clientpositive/show_materialized_views.q.out @@ -201,27 +201,27 @@ PREHOOK: Input: database:database POSTHOOK: query: USE `database` POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:database -PREHOOK: query: CREATE TABLE foo(a INT) +PREHOOK: query: CREATE TABLE foo_n0(a INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:database -PREHOOK: Output: database@foo -POSTHOOK: query: CREATE TABLE foo(a INT) +PREHOOK: Output: database@foo_n0 +POSTHOOK: query: CREATE TABLE foo_n0(a INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:database -POSTHOOK: Output: database@foo +POSTHOOK: Output: database@foo_n0 PREHOOK: query: CREATE VIEW fooview AS -SELECT * FROM foo +SELECT * FROM foo_n0 PREHOOK: type: CREATEVIEW -PREHOOK: Input: database@foo +PREHOOK: Input: database@foo_n0 PREHOOK: Output: database:database PREHOOK: Output: database@fooview POSTHOOK: query: CREATE VIEW fooview AS -SELECT * FROM foo +SELECT * FROM foo_n0 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: database@foo +POSTHOOK: Input: database@foo_n0 POSTHOOK: Output: database:database POSTHOOK: Output: database@fooview -POSTHOOK: Lineage: fooview.a SIMPLE [(foo)foo.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: fooview.a SIMPLE [(foo_n0)foo_n0.FieldSchema(name:a, type:int, comment:null), ] PREHOOK: query: USE default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default @@ -236,9 +236,9 @@ PREHOOK: query: DROP MATERIALIZED VIEW fooview PREHOOK: type: DROP_MATERIALIZED_VIEW POSTHOOK: query: DROP MATERIALIZED VIEW fooview POSTHOOK: type: DROP_MATERIALIZED_VIEW -PREHOOK: query: DROP TABLE foo +PREHOOK: query: DROP TABLE foo_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE foo +POSTHOOK: query: DROP TABLE foo_n0 POSTHOOK: type: DROPTABLE PREHOOK: query: USE test1 PREHOOK: type: SWITCHDATABASE diff --git a/ql/src/test/results/clientpositive/show_tables.q.out b/ql/src/test/results/clientpositive/show_tables.q.out index 5cdecb58e0..38cc280a20 100644 --- a/ql/src/test/results/clientpositive/show_tables.q.out +++ b/ql/src/test/results/clientpositive/show_tables.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: CREATE TABLE shtb_test1(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE shtb_test1_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@shtb_test1 -POSTHOOK: query: CREATE TABLE shtb_test1(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: Output: default@shtb_test1_n0 +POSTHOOK: query: CREATE TABLE shtb_test1_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@shtb_test1 -PREHOOK: query: CREATE TABLE shtb_test2(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@shtb_test1_n0 +PREHOOK: query: CREATE TABLE shtb_test2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@shtb_test2 -POSTHOOK: query: CREATE TABLE shtb_test2(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: Output: default@shtb_test2_n0 +POSTHOOK: query: CREATE TABLE shtb_test2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE 
POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@shtb_test2 +POSTHOOK: Output: default@shtb_test2_n0 PREHOOK: query: EXPLAIN SHOW TABLES 'shtb_*' PREHOOK: type: SHOWTABLES @@ -43,13 +43,13 @@ PREHOOK: Input: database:default POSTHOOK: query: SHOW TABLES 'shtb_*' POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:default -shtb_test1 -shtb_test2 +shtb_test1_n0 +shtb_test2_n0 PREHOOK: query: EXPLAIN -SHOW TABLES LIKE 'shtb_test1|shtb_test2' +SHOW TABLES LIKE 'shtb_test1_n0|shtb_test2_n0' PREHOOK: type: SHOWTABLES POSTHOOK: query: EXPLAIN -SHOW TABLES LIKE 'shtb_test1|shtb_test2' +SHOW TABLES LIKE 'shtb_test1_n0|shtb_test2_n0' POSTHOOK: type: SHOWTABLES STAGE DEPENDENCIES: Stage-0 is a root stage @@ -60,7 +60,7 @@ STAGE PLANS: Show Table Operator: Show Tables database name: default - pattern: shtb_test1|shtb_test2 + pattern: shtb_test1_n0|shtb_test2_n0 Stage: Stage-1 Fetch Operator @@ -68,14 +68,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SHOW TABLES LIKE 'shtb_test1|shtb_test2' +PREHOOK: query: SHOW TABLES LIKE 'shtb_test1_n0|shtb_test2_n0' PREHOOK: type: SHOWTABLES PREHOOK: Input: database:default -POSTHOOK: query: SHOW TABLES LIKE 'shtb_test1|shtb_test2' +POSTHOOK: query: SHOW TABLES LIKE 'shtb_test1_n0|shtb_test2_n0' POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:default -shtb_test1 -shtb_test2 +shtb_test1_n0 +shtb_test2_n0 PREHOOK: query: CREATE DATABASE test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db @@ -88,22 +88,22 @@ PREHOOK: Input: database:test_db POSTHOOK: query: USE test_db POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test_db -PREHOOK: query: CREATE TABLE foo(a INT) +PREHOOK: query: CREATE TABLE foo_n4(a INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:test_db -PREHOOK: Output: test_db@foo -POSTHOOK: query: CREATE TABLE foo(a INT) +PREHOOK: Output: test_db@foo_n4 +POSTHOOK: query: CREATE TABLE foo_n4(a INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test_db -POSTHOOK: Output: test_db@foo -PREHOOK: query: CREATE TABLE bar(a INT) +POSTHOOK: Output: test_db@foo_n4 +PREHOOK: query: CREATE TABLE bar_n0(a INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:test_db -PREHOOK: Output: test_db@bar -POSTHOOK: query: CREATE TABLE bar(a INT) +PREHOOK: Output: test_db@bar_n0 +POSTHOOK: query: CREATE TABLE bar_n0(a INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test_db -POSTHOOK: Output: test_db@bar +POSTHOOK: Output: test_db@bar_n0 PREHOOK: query: CREATE TABLE baz(a INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:test_db @@ -124,18 +124,18 @@ PREHOOK: Input: database:test_db POSTHOOK: query: SHOW TABLES FROM test_db POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:test_db -bar +bar_n0 baz -foo +foo_n4 PREHOOK: query: SHOW TABLES IN test_db PREHOOK: type: SHOWTABLES PREHOOK: Input: database:test_db POSTHOOK: query: SHOW TABLES IN test_db POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:test_db -bar +bar_n0 baz -foo +foo_n4 PREHOOK: query: SHOW TABLES IN test_db "test*" PREHOOK: type: SHOWTABLES PREHOOK: Input: database:test_db @@ -148,11 +148,11 @@ PREHOOK: Input: database:test_db POSTHOOK: query: SHOW TABLES IN test_db LIKE "nomatch" POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:test_db -PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE foo +PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE foo_n4 PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE foo +POSTHOOK: query: SHOW TABLE EXTENDED IN 
test_db LIKE foo_n4 POSTHOOK: type: SHOW_TABLESTATUS -tableName:foo +tableName:foo_n4 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -165,11 +165,11 @@ maxFileSize:0 minFileSize:0 #### A masked pattern was here #### -PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE "foo" +PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE "foo_n4" PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE "foo" +POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE "foo_n4" POSTHOOK: type: SHOW_TABLESTATUS -tableName:foo +tableName:foo_n4 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -182,11 +182,11 @@ maxFileSize:0 minFileSize:0 #### A masked pattern was here #### -PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE 'foo' +PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE 'foo_n4' PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE 'foo' +POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE 'foo_n4' POSTHOOK: type: SHOW_TABLESTATUS -tableName:foo +tableName:foo_n4 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -199,11 +199,11 @@ maxFileSize:0 minFileSize:0 #### A masked pattern was here #### -PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE `foo` +PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE `foo_n4` PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE `foo` +POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE `foo_n4` POSTHOOK: type: SHOW_TABLESTATUS -tableName:foo +tableName:foo_n4 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -220,7 +220,7 @@ PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE 'ba*' PREHOOK: type: SHOW_TABLESTATUS POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE 'ba*' POSTHOOK: type: SHOW_TABLESTATUS -tableName:bar +tableName:bar_n0 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -250,7 +250,7 @@ PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE "ba*" PREHOOK: type: SHOW_TABLESTATUS POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE "ba*" POSTHOOK: type: SHOW_TABLESTATUS -tableName:bar +tableName:bar_n0 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -280,7 +280,7 @@ PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE `ba*` PREHOOK: type: SHOW_TABLESTATUS POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE `ba*` POSTHOOK: type: SHOW_TABLESTATUS -tableName:bar +tableName:bar_n0 #### A masked pattern was here #### inputformat:org.apache.hadoop.mapred.TextInputFormat outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -318,24 +318,24 @@ PREHOOK: Input: database:database POSTHOOK: query: USE `database` POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:database -PREHOOK: query: CREATE TABLE foo(a INT) +PREHOOK: query: CREATE TABLE foo_n4(a INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:database -PREHOOK: Output: database@foo -POSTHOOK: query: 
CREATE TABLE foo(a INT) +PREHOOK: Output: database@foo_n4 +POSTHOOK: query: CREATE TABLE foo_n4(a INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:database -POSTHOOK: Output: database@foo +POSTHOOK: Output: database@foo_n4 PREHOOK: query: USE default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: SHOW TABLES FROM `database` LIKE "foo" +PREHOOK: query: SHOW TABLES FROM `database` LIKE "foo_n4" PREHOOK: type: SHOWTABLES PREHOOK: Input: database:database -POSTHOOK: query: SHOW TABLES FROM `database` LIKE "foo" +POSTHOOK: query: SHOW TABLES FROM `database` LIKE "foo_n4" POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:database -foo +foo_n4 diff --git a/ql/src/test/results/clientpositive/show_views.q.out b/ql/src/test/results/clientpositive/show_views.q.out index 41f1e65857..311f700cb1 100644 --- a/ql/src/test/results/clientpositive/show_views.q.out +++ b/ql/src/test/results/clientpositive/show_views.q.out @@ -16,101 +16,101 @@ PREHOOK: Input: database:test1 POSTHOOK: query: USE test1 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test1 -PREHOOK: query: CREATE TABLE shtb_test1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE shtb_test1_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:test1 -PREHOOK: Output: test1@shtb_test1 -POSTHOOK: query: CREATE TABLE shtb_test1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: Output: test1@shtb_test1_n1 +POSTHOOK: query: CREATE TABLE shtb_test1_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test1 -POSTHOOK: Output: test1@shtb_test1 -PREHOOK: query: CREATE VIEW shtb_test1_view1 AS SELECT * FROM shtb_test1 where KEY > 1000 and KEY < 2000 +POSTHOOK: Output: test1@shtb_test1_n1 +PREHOOK: query: CREATE VIEW shtb_test1_view1_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 1000 and KEY < 2000 PREHOOK: type: CREATEVIEW -PREHOOK: Input: test1@shtb_test1 +PREHOOK: Input: test1@shtb_test1_n1 PREHOOK: Output: database:test1 -PREHOOK: Output: test1@shtb_test1_view1 -POSTHOOK: query: CREATE VIEW shtb_test1_view1 AS SELECT * FROM shtb_test1 where KEY > 1000 and KEY < 2000 +PREHOOK: Output: test1@shtb_test1_view1_n0 +POSTHOOK: query: CREATE VIEW shtb_test1_view1_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 1000 and KEY < 2000 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: test1@shtb_test1 +POSTHOOK: Input: test1@shtb_test1_n1 POSTHOOK: Output: database:test1 -POSTHOOK: Output: test1@shtb_test1_view1 -POSTHOOK: Lineage: shtb_test1_view1.ds SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: shtb_test1_view1.key SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: shtb_test1_view1.value SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW shtb_test1_view2 AS SELECT * FROM shtb_test1 where KEY > 100 and KEY < 200 +POSTHOOK: Output: test1@shtb_test1_view1_n0 +POSTHOOK: Lineage: shtb_test1_view1_n0.ds SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: shtb_test1_view1_n0.key SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: 
shtb_test1_view1_n0.value SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW shtb_test1_view2_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 100 and KEY < 200 PREHOOK: type: CREATEVIEW -PREHOOK: Input: test1@shtb_test1 +PREHOOK: Input: test1@shtb_test1_n1 PREHOOK: Output: database:test1 -PREHOOK: Output: test1@shtb_test1_view2 -POSTHOOK: query: CREATE VIEW shtb_test1_view2 AS SELECT * FROM shtb_test1 where KEY > 100 and KEY < 200 +PREHOOK: Output: test1@shtb_test1_view2_n0 +POSTHOOK: query: CREATE VIEW shtb_test1_view2_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 100 and KEY < 200 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: test1@shtb_test1 +POSTHOOK: Input: test1@shtb_test1_n1 POSTHOOK: Output: database:test1 -POSTHOOK: Output: test1@shtb_test1_view2 -POSTHOOK: Lineage: shtb_test1_view2.ds SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: shtb_test1_view2.key SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: shtb_test1_view2.value SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW shtb_full_view2 AS SELECT * FROM shtb_test1 +POSTHOOK: Output: test1@shtb_test1_view2_n0 +POSTHOOK: Lineage: shtb_test1_view2_n0.ds SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: shtb_test1_view2_n0.key SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: shtb_test1_view2_n0.value SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW shtb_full_view2_n0 AS SELECT * FROM shtb_test1_n1 PREHOOK: type: CREATEVIEW -PREHOOK: Input: test1@shtb_test1 +PREHOOK: Input: test1@shtb_test1_n1 PREHOOK: Output: database:test1 -PREHOOK: Output: test1@shtb_full_view2 -POSTHOOK: query: CREATE VIEW shtb_full_view2 AS SELECT * FROM shtb_test1 +PREHOOK: Output: test1@shtb_full_view2_n0 +POSTHOOK: query: CREATE VIEW shtb_full_view2_n0 AS SELECT * FROM shtb_test1_n1 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: test1@shtb_test1 +POSTHOOK: Input: test1@shtb_test1_n1 POSTHOOK: Output: database:test1 -POSTHOOK: Output: test1@shtb_full_view2 -POSTHOOK: Lineage: shtb_full_view2.ds SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: shtb_full_view2.key SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: shtb_full_view2.value SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Output: test1@shtb_full_view2_n0 +POSTHOOK: Lineage: shtb_full_view2_n0.ds SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: shtb_full_view2_n0.key SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: shtb_full_view2_n0.value SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: USE test2 PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:test2 POSTHOOK: query: USE test2 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test2 -PREHOOK: query: CREATE TABLE shtb_test1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE shtb_test1_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE PREHOOK: 
type: CREATETABLE PREHOOK: Output: database:test2 -PREHOOK: Output: test2@shtb_test1 -POSTHOOK: query: CREATE TABLE shtb_test1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: Output: test2@shtb_test1_n1 +POSTHOOK: query: CREATE TABLE shtb_test1_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test2 -POSTHOOK: Output: test2@shtb_test1 -PREHOOK: query: CREATE TABLE shtb_test2(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +POSTHOOK: Output: test2@shtb_test1_n1 +PREHOOK: query: CREATE TABLE shtb_test2_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:test2 -PREHOOK: Output: test2@shtb_test2 -POSTHOOK: query: CREATE TABLE shtb_test2(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: Output: test2@shtb_test2_n1 +POSTHOOK: query: CREATE TABLE shtb_test2_n1(KEY INT, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test2 -POSTHOOK: Output: test2@shtb_test2 -PREHOOK: query: CREATE VIEW shtb_test1_view1 AS SELECT * FROM shtb_test1 where KEY > 1000 and KEY < 2000 +POSTHOOK: Output: test2@shtb_test2_n1 +PREHOOK: query: CREATE VIEW shtb_test1_view1_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 1000 and KEY < 2000 PREHOOK: type: CREATEVIEW -PREHOOK: Input: test2@shtb_test1 +PREHOOK: Input: test2@shtb_test1_n1 PREHOOK: Output: database:test2 -PREHOOK: Output: test2@shtb_test1_view1 -POSTHOOK: query: CREATE VIEW shtb_test1_view1 AS SELECT * FROM shtb_test1 where KEY > 1000 and KEY < 2000 +PREHOOK: Output: test2@shtb_test1_view1_n0 +POSTHOOK: query: CREATE VIEW shtb_test1_view1_n0 AS SELECT * FROM shtb_test1_n1 where KEY > 1000 and KEY < 2000 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: test2@shtb_test1 +POSTHOOK: Input: test2@shtb_test1_n1 POSTHOOK: Output: database:test2 -POSTHOOK: Output: test2@shtb_test1_view1 -POSTHOOK: Lineage: shtb_test1_view1.ds SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: shtb_test1_view1.key SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: shtb_test1_view1.value SIMPLE [(shtb_test1)shtb_test1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW shtb_test2_view2 AS SELECT * FROM shtb_test2 where KEY > 100 and KEY < 200 +POSTHOOK: Output: test2@shtb_test1_view1_n0 +POSTHOOK: Lineage: shtb_test1_view1_n0.ds SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: shtb_test1_view1_n0.key SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: shtb_test1_view1_n0.value SIMPLE [(shtb_test1_n1)shtb_test1_n1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW shtb_test2_view2_n0 AS SELECT * FROM shtb_test2_n1 where KEY > 100 and KEY < 200 PREHOOK: type: CREATEVIEW -PREHOOK: Input: test2@shtb_test2 +PREHOOK: Input: test2@shtb_test2_n1 PREHOOK: Output: database:test2 -PREHOOK: Output: test2@shtb_test2_view2 -POSTHOOK: query: CREATE VIEW shtb_test2_view2 AS SELECT * FROM shtb_test2 where KEY > 100 and KEY < 200 +PREHOOK: Output: test2@shtb_test2_view2_n0 +POSTHOOK: query: CREATE VIEW shtb_test2_view2_n0 AS SELECT * FROM shtb_test2_n1 where KEY > 100 and KEY < 200 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: test2@shtb_test2 +POSTHOOK: 
Input: test2@shtb_test2_n1 POSTHOOK: Output: database:test2 -POSTHOOK: Output: test2@shtb_test2_view2 -POSTHOOK: Lineage: shtb_test2_view2.ds SIMPLE [(shtb_test2)shtb_test2.FieldSchema(name:ds, type:string, comment:null), ] -POSTHOOK: Lineage: shtb_test2_view2.key SIMPLE [(shtb_test2)shtb_test2.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: shtb_test2_view2.value SIMPLE [(shtb_test2)shtb_test2.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Output: test2@shtb_test2_view2_n0 +POSTHOOK: Lineage: shtb_test2_view2_n0.ds SIMPLE [(shtb_test2_n1)shtb_test2_n1.FieldSchema(name:ds, type:string, comment:null), ] +POSTHOOK: Lineage: shtb_test2_view2_n0.key SIMPLE [(shtb_test2_n1)shtb_test2_n1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: shtb_test2_view2_n0.value SIMPLE [(shtb_test2_n1)shtb_test2_n1.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: USE test1 PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:test1 @@ -121,9 +121,9 @@ PREHOOK: query: SHOW VIEWS PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS POSTHOOK: type: SHOWVIEWS -shtb_full_view2 -shtb_test1_view1 -shtb_test1_view2 +shtb_full_view2_n0 +shtb_test1_view1_n0 +shtb_test1_view2_n0 PREHOOK: query: SHOW VIEWS 'test_*' PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS 'test_*' @@ -132,8 +132,6 @@ PREHOOK: query: SHOW VIEWS '*view2' PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS '*view2' POSTHOOK: type: SHOWVIEWS -shtb_full_view2 -shtb_test1_view2 PREHOOK: query: SHOW VIEWS LIKE 'test_view1|test_view2' PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS LIKE 'test_view1|test_view2' @@ -148,8 +146,8 @@ PREHOOK: query: SHOW VIEWS 'shtb_*' PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS 'shtb_*' POSTHOOK: type: SHOWVIEWS -shtb_test1_view1 -shtb_test2_view2 +shtb_test1_view1_n0 +shtb_test2_view2_n0 PREHOOK: query: USE default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default @@ -160,22 +158,22 @@ PREHOOK: query: SHOW VIEWS FROM test1 PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS FROM test1 POSTHOOK: type: SHOWVIEWS -shtb_full_view2 -shtb_test1_view1 -shtb_test1_view2 +shtb_full_view2_n0 +shtb_test1_view1_n0 +shtb_test1_view2_n0 PREHOOK: query: SHOW VIEWS FROM test2 PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS FROM test2 POSTHOOK: type: SHOWVIEWS -shtb_test1_view1 -shtb_test2_view2 +shtb_test1_view1_n0 +shtb_test2_view2_n0 PREHOOK: query: SHOW VIEWS IN test1 PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS IN test1 POSTHOOK: type: SHOWVIEWS -shtb_full_view2 -shtb_test1_view1 -shtb_test1_view2 +shtb_full_view2_n0 +shtb_test1_view1_n0 +shtb_test1_view2_n0 PREHOOK: query: SHOW VIEWS IN default PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS IN default @@ -200,43 +198,43 @@ PREHOOK: Input: database:database POSTHOOK: query: USE `database` POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:database -PREHOOK: query: CREATE TABLE foo(a INT) +PREHOOK: query: CREATE TABLE foo_n8(a INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:database -PREHOOK: Output: database@foo -POSTHOOK: query: CREATE TABLE foo(a INT) +PREHOOK: Output: database@foo_n8 +POSTHOOK: query: CREATE TABLE foo_n8(a INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:database -POSTHOOK: Output: database@foo -PREHOOK: query: CREATE VIEW fooview AS SELECT * FROM foo +POSTHOOK: Output: database@foo_n8 +PREHOOK: query: CREATE VIEW fooview_n0 AS SELECT * FROM foo_n8 PREHOOK: type: CREATEVIEW -PREHOOK: Input: database@foo 
+PREHOOK: Input: database@foo_n8 PREHOOK: Output: database:database -PREHOOK: Output: database@fooview -POSTHOOK: query: CREATE VIEW fooview AS SELECT * FROM foo +PREHOOK: Output: database@fooview_n0 +POSTHOOK: query: CREATE VIEW fooview_n0 AS SELECT * FROM foo_n8 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: database@foo +POSTHOOK: Input: database@foo_n8 POSTHOOK: Output: database:database -POSTHOOK: Output: database@fooview -POSTHOOK: Lineage: fooview.a SIMPLE [(foo)foo.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Output: database@fooview_n0 +POSTHOOK: Lineage: fooview_n0.a SIMPLE [(foo_n8)foo_n8.FieldSchema(name:a, type:int, comment:null), ] PREHOOK: query: USE default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: SHOW VIEWS FROM `database` LIKE "fooview" +PREHOOK: query: SHOW VIEWS FROM `database` LIKE "fooview_n0" PREHOOK: type: SHOWVIEWS -POSTHOOK: query: SHOW VIEWS FROM `database` LIKE "fooview" +POSTHOOK: query: SHOW VIEWS FROM `database` LIKE "fooview_n0" POSTHOOK: type: SHOWVIEWS -fooview -PREHOOK: query: DROP VIEW fooview +fooview_n0 +PREHOOK: query: DROP VIEW fooview_n0 PREHOOK: type: DROPVIEW -POSTHOOK: query: DROP VIEW fooview +POSTHOOK: query: DROP VIEW fooview_n0 POSTHOOK: type: DROPVIEW -PREHOOK: query: DROP TABLE foo +PREHOOK: query: DROP TABLE foo_n8 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE foo +POSTHOOK: query: DROP TABLE foo_n8 POSTHOOK: type: DROPTABLE PREHOOK: query: USE test1 PREHOOK: type: SWITCHDATABASE @@ -244,38 +242,38 @@ PREHOOK: Input: database:test1 POSTHOOK: query: USE test1 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test1 -PREHOOK: query: DROP VIEW shtb_test1_view1 +PREHOOK: query: DROP VIEW shtb_test1_view1_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: test1@shtb_test1_view1 -PREHOOK: Output: test1@shtb_test1_view1 -POSTHOOK: query: DROP VIEW shtb_test1_view1 +PREHOOK: Input: test1@shtb_test1_view1_n0 +PREHOOK: Output: test1@shtb_test1_view1_n0 +POSTHOOK: query: DROP VIEW shtb_test1_view1_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: test1@shtb_test1_view1 -POSTHOOK: Output: test1@shtb_test1_view1 -PREHOOK: query: DROP VIEW shtb_test1_view2 +POSTHOOK: Input: test1@shtb_test1_view1_n0 +POSTHOOK: Output: test1@shtb_test1_view1_n0 +PREHOOK: query: DROP VIEW shtb_test1_view2_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: test1@shtb_test1_view2 -PREHOOK: Output: test1@shtb_test1_view2 -POSTHOOK: query: DROP VIEW shtb_test1_view2 +PREHOOK: Input: test1@shtb_test1_view2_n0 +PREHOOK: Output: test1@shtb_test1_view2_n0 +POSTHOOK: query: DROP VIEW shtb_test1_view2_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: test1@shtb_test1_view2 -POSTHOOK: Output: test1@shtb_test1_view2 -PREHOOK: query: DROP VIEW shtb_full_view2 +POSTHOOK: Input: test1@shtb_test1_view2_n0 +POSTHOOK: Output: test1@shtb_test1_view2_n0 +PREHOOK: query: DROP VIEW shtb_full_view2_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: test1@shtb_full_view2 -PREHOOK: Output: test1@shtb_full_view2 -POSTHOOK: query: DROP VIEW shtb_full_view2 +PREHOOK: Input: test1@shtb_full_view2_n0 +PREHOOK: Output: test1@shtb_full_view2_n0 +POSTHOOK: query: DROP VIEW shtb_full_view2_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: test1@shtb_full_view2 -POSTHOOK: Output: test1@shtb_full_view2 -PREHOOK: query: DROP TABLE shtb_test1 +POSTHOOK: Input: test1@shtb_full_view2_n0 +POSTHOOK: Output: test1@shtb_full_view2_n0 +PREHOOK: query: DROP TABLE shtb_test1_n1 PREHOOK: type: 
DROPTABLE -PREHOOK: Input: test1@shtb_test1 -PREHOOK: Output: test1@shtb_test1 -POSTHOOK: query: DROP TABLE shtb_test1 +PREHOOK: Input: test1@shtb_test1_n1 +PREHOOK: Output: test1@shtb_test1_n1 +POSTHOOK: query: DROP TABLE shtb_test1_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: test1@shtb_test1 -POSTHOOK: Output: test1@shtb_test1 +POSTHOOK: Input: test1@shtb_test1_n1 +POSTHOOK: Output: test1@shtb_test1_n1 PREHOOK: query: DROP DATABASE test1 PREHOOK: type: DROPDATABASE PREHOOK: Input: database:test1 @@ -290,38 +288,38 @@ PREHOOK: Input: database:test2 POSTHOOK: query: USE test2 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test2 -PREHOOK: query: DROP VIEW shtb_test1_view1 +PREHOOK: query: DROP VIEW shtb_test1_view1_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: test2@shtb_test1_view1 -PREHOOK: Output: test2@shtb_test1_view1 -POSTHOOK: query: DROP VIEW shtb_test1_view1 +PREHOOK: Input: test2@shtb_test1_view1_n0 +PREHOOK: Output: test2@shtb_test1_view1_n0 +POSTHOOK: query: DROP VIEW shtb_test1_view1_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: test2@shtb_test1_view1 -POSTHOOK: Output: test2@shtb_test1_view1 -PREHOOK: query: DROP VIEW shtb_test2_view2 +POSTHOOK: Input: test2@shtb_test1_view1_n0 +POSTHOOK: Output: test2@shtb_test1_view1_n0 +PREHOOK: query: DROP VIEW shtb_test2_view2_n0 PREHOOK: type: DROPVIEW -PREHOOK: Input: test2@shtb_test2_view2 -PREHOOK: Output: test2@shtb_test2_view2 -POSTHOOK: query: DROP VIEW shtb_test2_view2 +PREHOOK: Input: test2@shtb_test2_view2_n0 +PREHOOK: Output: test2@shtb_test2_view2_n0 +POSTHOOK: query: DROP VIEW shtb_test2_view2_n0 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: test2@shtb_test2_view2 -POSTHOOK: Output: test2@shtb_test2_view2 -PREHOOK: query: DROP TABLE shtb_test1 +POSTHOOK: Input: test2@shtb_test2_view2_n0 +POSTHOOK: Output: test2@shtb_test2_view2_n0 +PREHOOK: query: DROP TABLE shtb_test1_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: test2@shtb_test1 -PREHOOK: Output: test2@shtb_test1 -POSTHOOK: query: DROP TABLE shtb_test1 +PREHOOK: Input: test2@shtb_test1_n1 +PREHOOK: Output: test2@shtb_test1_n1 +POSTHOOK: query: DROP TABLE shtb_test1_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: test2@shtb_test1 -POSTHOOK: Output: test2@shtb_test1 -PREHOOK: query: DROP TABLE shtb_test2 +POSTHOOK: Input: test2@shtb_test1_n1 +POSTHOOK: Output: test2@shtb_test1_n1 +PREHOOK: query: DROP TABLE shtb_test2_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: test2@shtb_test2 -PREHOOK: Output: test2@shtb_test2 -POSTHOOK: query: DROP TABLE shtb_test2 +PREHOOK: Input: test2@shtb_test2_n1 +PREHOOK: Output: test2@shtb_test2_n1 +POSTHOOK: query: DROP TABLE shtb_test2_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: test2@shtb_test2 -POSTHOOK: Output: test2@shtb_test2 +POSTHOOK: Input: test2@shtb_test2_n1 +POSTHOOK: Output: test2@shtb_test2_n1 PREHOOK: query: DROP DATABASE test2 PREHOOK: type: DROPDATABASE PREHOOK: Input: database:test2 diff --git a/ql/src/test/results/clientpositive/skewjoin.q.out b/ql/src/test/results/clientpositive/skewjoin.q.out index 9bd30144c0..0aa294e20b 100644 --- a/ql/src/test/results/clientpositive/skewjoin.q.out +++ b/ql/src/test/results/clientpositive/skewjoin.q.out @@ -1,82 +1,82 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n83(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n83 +POSTHOOK: 
query: CREATE TABLE T1_n83(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@T1_n83 +PREHOOK: query: CREATE TABLE T2_n51(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T2_n51 +POSTHOOK: query: CREATE TABLE T2_n51(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@T2_n51 +PREHOOK: query: CREATE TABLE T3_n22(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T3_n22 +POSTHOOK: query: CREATE TABLE T3_n22(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 -PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@T3_n22 +PREHOOK: query: CREATE TABLE T4_n8(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T4 -POSTHOOK: query: CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T4_n8 +POSTHOOK: query: CREATE TABLE T4_n8(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T4 -PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@T4_n8 +PREHOOK: query: CREATE TABLE dest_j1_n17(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest_j1 -POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest_j1_n17 +POSTHOOK: query: CREATE TABLE dest_j1_n17(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest_j1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@dest_j1_n17 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n83 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n83 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n83 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@t1_n83 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n51 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n51 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n51 POSTHOOK: type: LOAD #### A masked pattern was here 
#### -POSTHOOK: Output: default@t2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +POSTHOOK: Output: default@t2_n51 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n22 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t3 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +PREHOOK: Output: default@t3_n22 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n22 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t3 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4 +POSTHOOK: Output: default@t3_n22 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n8 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t4 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4 +PREHOOK: Output: default@t4_n8 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n8 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t4 +POSTHOOK: Output: default@t4_n8 PREHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n17 SELECT src1.key, src2.value PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n17 SELECT src1.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -143,7 +143,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n17 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -199,7 +199,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n17 Select Operator expressions: _col0 (type: int), _col1 (type: string) outputColumnNames: key, value @@ -226,7 +226,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest_j1 + name: default.dest_j1_n17 Stage: Stage-2 Stats Work @@ -234,7 +234,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.dest_j1 + Table: default.dest_j1_n17 Stage: Stage-3 Map Reduce @@ -259,37 +259,37 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n17 SELECT src1.key, src2.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest_j1 +PREHOOK: Output: default@dest_j1_n17 POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) -INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value +INSERT OVERWRITE TABLE dest_j1_n17 SELECT src1.key, src2.value POSTHOOK: type: QUERY POSTHOOK: Input: 
default@src -POSTHOOK: Output: default@dest_j1 -POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT sum(hash(key)), sum(hash(value)) FROM dest_j1 +POSTHOOK: Output: default@dest_j1_n17 +POSTHOOK: Lineage: dest_j1_n17.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest_j1_n17.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT sum(hash(key)), sum(hash(value)) FROM dest_j1_n17 PREHOOK: type: QUERY -PREHOOK: Input: default@dest_j1 +PREHOOK: Input: default@dest_j1_n17 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(key)), sum(hash(value)) FROM dest_j1 +POSTHOOK: query: SELECT sum(hash(key)), sum(hash(value)) FROM dest_j1_n17 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest_j1 +POSTHOOK: Input: default@dest_j1_n17 #### A masked pattern was here #### 278697 101852390308 PREHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key +FROM T1_n83 a JOIN T2_n51 b ON a.key = b.key + JOIN T3_n22 c ON b.key = c.key + JOIN T4_n8 d ON c.key = d.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key +FROM T1_n83 a JOIN T2_n51 b ON a.key = b.key + JOIN T3_n22 c ON b.key = c.key + JOIN T4_n8 d ON c.key = d.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -379,37 +379,37 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key +FROM T1_n83 a JOIN T2_n51 b ON a.key = b.key + JOIN T3_n22 c ON b.key = c.key + JOIN T4_n8 d ON c.key = d.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 -PREHOOK: Input: default@t4 +PREHOOK: Input: default@t1_n83 +PREHOOK: Input: default@t2_n51 +PREHOOK: Input: default@t3_n22 +PREHOOK: Input: default@t4_n8 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ STREAMTABLE(a) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key +FROM T1_n83 a JOIN T2_n51 b ON a.key = b.key + JOIN T3_n22 c ON b.key = c.key + JOIN T4_n8 d ON c.key = d.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 -POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t1_n83 +POSTHOOK: Input: default@t2_n51 +POSTHOOK: Input: default@t3_n22 +POSTHOOK: Input: default@t4_n8 #### A masked pattern was here #### 2 12 2 22 2 12 2 12 PREHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key +FROM T1_n83 a JOIN T2_n51 b ON a.key = b.key + JOIN T3_n22 c ON b.key = c.key + JOIN T4_n8 d ON c.key = d.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key +FROM T1_n83 a JOIN T2_n51 b ON a.key = b.key + JOIN T3_n22 c ON b.key = c.key + JOIN T4_n8 d ON c.key = d.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -499,29 +499,29 @@ STAGE PLANS: ListSink PREHOOK: 
query: SELECT /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key +FROM T1_n83 a JOIN T2_n51 b ON a.key = b.key + JOIN T3_n22 c ON b.key = c.key + JOIN T4_n8 d ON c.key = d.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 -PREHOOK: Input: default@t4 +PREHOOK: Input: default@t1_n83 +PREHOOK: Input: default@t2_n51 +PREHOOK: Input: default@t3_n22 +PREHOOK: Input: default@t4_n8 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ STREAMTABLE(a,c) */ * -FROM T1 a JOIN T2 b ON a.key = b.key - JOIN T3 c ON b.key = c.key - JOIN T4 d ON c.key = d.key +FROM T1_n83 a JOIN T2_n51 b ON a.key = b.key + JOIN T3_n22 c ON b.key = c.key + JOIN T4_n8 d ON c.key = d.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 -POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t1_n83 +POSTHOOK: Input: default@t2_n51 +POSTHOOK: Input: default@t3_n22 +POSTHOOK: Input: default@t4_n8 #### A masked pattern was here #### 2 12 2 22 2 12 2 12 -PREHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +PREHOOK: query: EXPLAIN FROM T1_n83 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +POSTHOOK: query: EXPLAIN FROM T1_n83 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -605,15 +605,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +PREHOOK: query: FROM T1_n83 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n83 #### A masked pattern was here #### -POSTHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +POSTHOOK: query: FROM T1_n83 a JOIN src c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n83 #### A masked pattern was here #### 198 6274 194 PREHOOK: query: EXPLAIN FROM @@ -1219,10 +1219,10 @@ POSTHOOK: Input: default@src #### A masked pattern was here #### 293143 -136853010385 PREHOOK: query: EXPLAIN -SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1 k LEFT OUTER JOIN T1 v ON k.key+1=v.key +SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1_n83 k LEFT OUTER JOIN T1_n83 v ON k.key+1=v.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1 k LEFT OUTER JOIN T1 v ON k.key+1=v.key +SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1_n83 k LEFT OUTER JOIN T1_n83 v ON k.key+1=v.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -1291,112 +1291,112 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT /*+ mapjoin(v)*/ 
sum(hash(k.key)), sum(hash(v.val)) FROM T1 k LEFT OUTER JOIN T1 v ON k.key+1=v.key +PREHOOK: query: SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1_n83 k LEFT OUTER JOIN T1_n83 v ON k.key+1=v.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n83 #### A masked pattern was here #### -POSTHOOK: query: SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1 k LEFT OUTER JOIN T1 v ON k.key+1=v.key +POSTHOOK: query: SELECT /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) FROM T1_n83 k LEFT OUTER JOIN T1_n83 v ON k.key+1=v.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n83 #### A masked pattern was here #### 372 6320 -PREHOOK: query: select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val +PREHOOK: query: select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n83 k join T1_n83 v on k.key=v.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n83 #### A masked pattern was here #### -POSTHOOK: query: select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val +POSTHOOK: query: select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n83 k join T1_n83 v on k.key=v.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n83 #### A masked pattern was here #### NULL NULL -PREHOOK: query: select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key +PREHOOK: query: select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n83 k join T1_n83 v on k.key=v.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n83 #### A masked pattern was here #### -POSTHOOK: query: select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key +POSTHOOK: query: select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n83 k join T1_n83 v on k.key=v.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n83 #### A masked pattern was here #### 429 12643 -PREHOOK: query: select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key +PREHOOK: query: select sum(hash(k.key)), sum(hash(v.val)) from T1_n83 k join T1_n83 v on k.key=v.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n83 #### A masked pattern was here #### -POSTHOOK: query: select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key +POSTHOOK: query: select sum(hash(k.key)), sum(hash(v.val)) from T1_n83 k join T1_n83 v on k.key=v.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n83 #### A masked pattern was here #### 429 12643 -PREHOOK: query: select count(1) from T1 a join T1 b on a.key = b.key +PREHOOK: query: select count(1) from T1_n83 a join T1_n83 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n83 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from T1 a join T1 b on a.key = b.key +POSTHOOK: query: select count(1) from T1_n83 a join T1_n83 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n83 #### A masked pattern was here #### 8 -PREHOOK: query: FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key SELECT sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +PREHOOK: query: FROM T1_n83 a LEFT OUTER JOIN T2_n51 c ON c.key+1=a.key SELECT sum(hash(a.key)), 
sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n83 +PREHOOK: Input: default@t2_n51 #### A masked pattern was here #### -POSTHOOK: query: FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key SELECT sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +POSTHOOK: query: FROM T1_n83 a LEFT OUTER JOIN T2_n51 c ON c.key+1=a.key SELECT sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n83 +POSTHOOK: Input: default@t2_n51 #### A masked pattern was here #### 317 9462 50 -PREHOOK: query: FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +PREHOOK: query: FROM T1_n83 a RIGHT OUTER JOIN T2_n51 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n83 +PREHOOK: Input: default@t2_n51 #### A masked pattern was here #### -POSTHOOK: query: FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +POSTHOOK: query: FROM T1_n83 a RIGHT OUTER JOIN T2_n51 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n83 +POSTHOOK: Input: default@t2_n51 #### A masked pattern was here #### 51 1570 318 -PREHOOK: query: FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +PREHOOK: query: FROM T1_n83 a FULL OUTER JOIN T2_n51 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n83 +PREHOOK: Input: default@t2_n51 #### A masked pattern was here #### -POSTHOOK: query: FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) +POSTHOOK: query: FROM T1_n83 a FULL OUTER JOIN T2_n51 c ON c.key+1=a.key SELECT /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n83 +POSTHOOK: Input: default@t2_n51 #### A masked pattern was here #### 317 9462 318 -PREHOOK: query: SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1 src1 LEFT OUTER JOIN T2 src2 ON src1.key+1 = src2.key RIGHT OUTER JOIN T2 src3 ON src2.key = src3.key +PREHOOK: query: SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1_n83 src1 LEFT OUTER JOIN T2_n51 src2 ON src1.key+1 = src2.key RIGHT OUTER JOIN T2_n51 src3 ON src2.key = src3.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n83 +PREHOOK: Input: default@t2_n51 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1 src1 LEFT OUTER JOIN T2 src2 ON src1.key+1 = src2.key RIGHT OUTER JOIN T2 src3 ON src2.key = src3.key +POSTHOOK: query: SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1_n83 src1 LEFT OUTER JOIN T2_n51 src2 ON src1.key+1 = 
src2.key RIGHT OUTER JOIN T2_n51 src3 ON src2.key = src3.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n83 +POSTHOOK: Input: default@t2_n51 #### A masked pattern was here #### 370 11003 377 -PREHOOK: query: SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1 src1 JOIN T2 src2 ON src1.key+1 = src2.key JOIN T2 src3 ON src2.key = src3.key +PREHOOK: query: SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1_n83 src1 JOIN T2_n51 src2 ON src1.key+1 = src2.key JOIN T2_n51 src3 ON src2.key = src3.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n83 +PREHOOK: Input: default@t2_n51 #### A masked pattern was here #### -POSTHOOK: query: SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1 src1 JOIN T2 src2 ON src1.key+1 = src2.key JOIN T2 src3 ON src2.key = src3.key +POSTHOOK: query: SELECT sum(hash(src1.key)), sum(hash(src1.val)), sum(hash(src2.key)) FROM T1_n83 src1 JOIN T2_n51 src2 ON src1.key+1 = src2.key JOIN T2_n51 src3 ON src2.key = src3.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n83 +POSTHOOK: Input: default@t2_n51 #### A masked pattern was here #### 370 11003 377 -PREHOOK: query: select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key +PREHOOK: query: select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n83 k left outer join T1_n83 v on k.key+1=v.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n83 #### A masked pattern was here #### -POSTHOOK: query: select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key +POSTHOOK: query: select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n83 k left outer join T1_n83 v on k.key+1=v.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n83 #### A masked pattern was here #### 372 6320 diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out index bf7b8b2b72..2e97259fec 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out @@ -1,44 +1,44 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n41(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n41 +POSTHOOK: query: CREATE TABLE T1_n41(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n41 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n41 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n41 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: 
CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n41
+PREHOOK: query: CREATE TABLE T2_n26(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n26
+POSTHOOK: query: CREATE TABLE T2_n26(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n26
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n26
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n26
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n26
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n26
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n41 a JOIN T2_n26 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n41 a JOIN T2_n26 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -153,17 +153,17 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n41 a JOIN T2_n26 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n41
+PREHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n41 a JOIN T2_n26 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n41
+POSTHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
 2 12 2 22
 3 13 3 13
@@ -172,10 +172,10 @@ POSTHOOK: Input: default@t2
 8 28 8 18
 8 28 8 18
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n41 a RIGHT OUTER JOIN T2_n26 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n41 a RIGHT OUTER JOIN T2_n26 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -290,17 +290,17 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n41 a RIGHT OUTER JOIN T2_n26 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n41
+PREHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n41 a RIGHT OUTER JOIN T2_n26 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n41
+POSTHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
 NULL NULL 4 14
 NULL NULL 5 15
@@ -311,10 +311,10 @@ NULL NULL 5 15
 8 28 8 18
 8 28 8 18
 PREHOOK: query: EXPLAIN
-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT count(1) FROM T1_n41 a JOIN T2_n26 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT count(1) FROM T1_n41 a JOIN T2_n26 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -444,22 +444,22 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT count(1) FROM T1_n41 a JOIN T2_n26 b ON a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n41
+PREHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT count(1) FROM T1_n41 a JOIN T2_n26 b ON a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n41
+POSTHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
 6
 PREHOOK: query: EXPLAIN
-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT count(1) FROM T1_n41 a RIGHT OUTER JOIN T2_n26 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT count(1) FROM T1_n41 a RIGHT OUTER JOIN T2_n26 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -589,14 +589,14 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT count(1) FROM T1_n41 a RIGHT OUTER JOIN T2_n26 b ON a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n41
+PREHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT count(1) FROM T1_n41 a RIGHT OUTER JOIN T2_n26 b ON a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n41
+POSTHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
 8
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out
index 3768732040..615df22ce0 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out
@@ -1,76 +1,76 @@
-PREHOOK: query: CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE tmpT1_n0(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tmpT1
-POSTHOOK: query: CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@tmpT1_n0
+POSTHOOK: query: CREATE TABLE tmpT1_n0(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmpT1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1
+POSTHOOK: Output: default@tmpT1_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tmpt1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1
+PREHOOK: Output: default@tmpt1_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tmpt1
-PREHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2))
+POSTHOOK: Output: default@tmpt1_n0
+PREHOOK: query: CREATE TABLE T1_n95(key INT, val STRING) SKEWED BY (key) ON ((2))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2))
+PREHOOK: Output: default@T1_n95
+POSTHOOK: query: CREATE TABLE T1_n95(key INT, val STRING) SKEWED BY (key) ON ((2))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1
+POSTHOOK: Output: default@T1_n95
+PREHOOK: query: INSERT OVERWRITE TABLE T1_n95 SELECT key, val FROM tmpT1_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tmpt1
-PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1
+PREHOOK: Input: default@tmpt1_n0
+PREHOOK: Output: default@t1_n95
+POSTHOOK: query: INSERT OVERWRITE TABLE T1_n95 SELECT key, val FROM tmpT1_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tmpt1
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key EXPRESSION [(tmpt1)tmpt1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t1.val SIMPLE [(tmpt1)tmpt1.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Input: default@tmpt1_n0
+POSTHOOK: Output: default@t1_n95
+POSTHOOK: Lineage: t1_n95.key EXPRESSION [(tmpt1_n0)tmpt1_n0.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t1_n95.val SIMPLE [(tmpt1_n0)tmpt1_n0.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE tmpT2_n0(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tmpT2
-POSTHOOK: query: CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@tmpT2_n0
+POSTHOOK: query: CREATE TABLE tmpT2_n0(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmpT2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2
+POSTHOOK: Output: default@tmpT2_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tmpt2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2
+PREHOOK: Output: default@tmpt2_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tmpt2
-PREHOOK: query: CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3))
+POSTHOOK: Output: default@tmpt2_n0
+PREHOOK: query: CREATE TABLE T2_n57(key INT, val STRING) SKEWED BY (key) ON ((3))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3))
+PREHOOK: Output: default@T2_n57
+POSTHOOK: query: CREATE TABLE T2_n57(key INT, val STRING) SKEWED BY (key) ON ((3))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2
+POSTHOOK: Output: default@T2_n57
+PREHOOK: query: INSERT OVERWRITE TABLE T2_n57 SELECT key, val FROM tmpT2_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tmpt2
-PREHOOK: Output: default@t2
-POSTHOOK: query: INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2
+PREHOOK: Input: default@tmpt2_n0
+PREHOOK: Output: default@t2_n57
+POSTHOOK: query: INSERT OVERWRITE TABLE T2_n57 SELECT key, val FROM tmpT2_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tmpt2
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t2.key EXPRESSION [(tmpt2)tmpt2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t2.val SIMPLE [(tmpt2)tmpt2.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Input: default@tmpt2_n0
+POSTHOOK: Output: default@t2_n57
+POSTHOOK: Lineage: t2_n57.key EXPRESSION [(tmpt2_n0)tmpt2_n0.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2_n57.val SIMPLE [(tmpt2_n0)tmpt2_n0.FieldSchema(name:val, type:string, comment:null), ]
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n95 a JOIN T2_n57 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n95 a JOIN T2_n57 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -185,17 +185,17 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n95 a JOIN T2_n57 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n95
+PREHOOK: Input: default@t2_n57
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n95 a JOIN T2_n57 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n95
+POSTHOOK: Input: default@t2_n57
 #### A masked pattern was here ####
 2 12 2 22
 3 13 3 13
@@ -204,10 +204,10 @@ POSTHOOK: Input: default@t2
 8 28 8 18
 8 28 8 18
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n95 a RIGHT OUTER JOIN T2_n57 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n95 a RIGHT OUTER JOIN T2_n57 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -322,17 +322,17 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n95 a RIGHT OUTER JOIN T2_n57 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n95
+PREHOOK: Input: default@t2_n57
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n95 a RIGHT OUTER JOIN T2_n57 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n95
+POSTHOOK: Input: default@t2_n57
 #### A masked pattern was here ####
 NULL NULL 4 14
 NULL NULL 5 15
@@ -343,10 +343,10 @@ NULL NULL 5 15
 8 28 8 18
 8 28 8 18
 PREHOOK: query: EXPLAIN
-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT count(1) FROM T1_n95 a JOIN T2_n57 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT count(1) FROM T1_n95 a JOIN T2_n57 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -476,22 +476,22 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT count(1) FROM T1_n95 a JOIN T2_n57 b ON a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n95
+PREHOOK: Input: default@t2_n57
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT count(1) FROM T1_n95 a JOIN T2_n57 b ON a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n95
+POSTHOOK: Input: default@t2_n57
 #### A masked pattern was here ####
 6
 PREHOOK: query: EXPLAIN
-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT count(1) FROM T1_n95 a RIGHT OUTER JOIN T2_n57 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT count(1) FROM T1_n95 a RIGHT OUTER JOIN T2_n57 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -621,14 +621,14 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT count(1) FROM T1_n95 a RIGHT OUTER JOIN T2_n57 b ON a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n95
+PREHOOK: Input: default@t2_n57
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT count(1) FROM T1_n95 a RIGHT OUTER JOIN T2_n57 b ON a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n95
+POSTHOOK: Input: default@t2_n57
 #### A masked pattern was here ####
 8
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out
index be88dad58e..3795cc808d 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n18(key STRING, val STRING)
 CLUSTERED BY (key) INTO 4 BUCKETS
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n18
+POSTHOOK: query: CREATE TABLE T1_n18(key STRING, val STRING)
 CLUSTERED BY (key) INTO 4 BUCKETS
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+POSTHOOK: Output: default@T1_n18
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n18
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+PREHOOK: Output: default@t1_n18
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n18
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n18
+PREHOOK: query: CREATE TABLE T2_n12(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n12
+POSTHOOK: query: CREATE TABLE T2_n12(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n12
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n12
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n12
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n12
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n12
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n18 a JOIN T2_n12 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n18 a JOIN T2_n12 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -153,17 +153,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n18 a JOIN T2_n12 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n18
+PREHOOK: Input: default@t2_n12
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n18 a JOIN T2_n12 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n18
+POSTHOOK: Input: default@t2_n12
 #### A masked pattern was here ####
 2 12 2 22
 3 13 3 13
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out
index 214f635bcc..338b8f3b69 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n14(key STRING, val STRING)
 SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n14
+POSTHOOK: query: CREATE TABLE T1_n14(key STRING, val STRING)
 SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n14
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n14
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n14
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n14
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n14
+PREHOOK: query: CREATE TABLE T2_n9(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n9
+POSTHOOK: query: CREATE TABLE T2_n9(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n9
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n9
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n9
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n9
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n9
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n14 a JOIN T2_n9 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n14 a JOIN T2_n9 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -153,17 +153,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n14 a JOIN T2_n9 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n14
+PREHOOK: Input: default@t2_n9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n14 a JOIN T2_n9 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n14
+POSTHOOK: Input: default@t2_n9
 #### A masked pattern was here ####
 2 12 2 22
 3 13 3 13
@@ -172,10 +172,10 @@ POSTHOOK: Input: default@t2
 8 28 8 18
 8 28 8 18
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n14 a FULL OUTER JOIN T2_n9 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n14 a FULL OUTER JOIN T2_n9 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -313,17 +313,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n14 a FULL OUTER JOIN T2_n9 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n14
+PREHOOK: Input: default@t2_n9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n14 a FULL OUTER JOIN T2_n9 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n14
+POSTHOOK: Input: default@t2_n9
 #### A masked pattern was here ####
 NULL NULL 4 14
 NULL NULL 5 15
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out
index 074671507a..3251409500 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n99(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n99
+POSTHOOK: query: CREATE TABLE T1_n99(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n99
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n99
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n99
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n99
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n99
+PREHOOK: query: CREATE TABLE T2_n60(key STRING, val STRING)
 SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n60
+POSTHOOK: query: CREATE TABLE T2_n60(key STRING, val STRING)
 SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n60
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n60
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n60
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n60
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n60
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n99 a JOIN T2_n60 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n99 a JOIN T2_n60 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -153,17 +153,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n99 a JOIN T2_n60 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n99
+PREHOOK: Input: default@t2_n60
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n99 a JOIN T2_n60 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n99
+POSTHOOK: Input: default@t2_n60
 #### A masked pattern was here ####
 2 12 2 22
 3 13 3 13
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out
index f7d1784aac..540cbb1778 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out
@@ -1,60 +1,60 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n80(key STRING, val STRING)
 SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n80
+POSTHOOK: query: CREATE TABLE T1_n80(key STRING, val STRING)
 SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n80
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n80
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n80
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n80
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n80
+PREHOOK: query: CREATE TABLE T2_n48(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n48
+POSTHOOK: query: CREATE TABLE T2_n48(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n48
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n48
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n48
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n48
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t2_n48
+PREHOOK: query: CREATE TABLE T3_n19(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T3_n19
+POSTHOOK: query: CREATE TABLE T3_n19(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: Output: default@T3_n19
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n19
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n19
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n19
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
+POSTHOOK: Output: default@t3_n19
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+SELECT a.*, b.*, c.* FROM T1_n80 a JOIN T2_n48 b ON a.key = b.key JOIN T3_n19 c on a.key = c.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+SELECT a.*, b.*, c.* FROM T1_n80 a JOIN T2_n48 b ON a.key = b.key JOIN T3_n19 c on a.key = c.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-10 is a root stage
@@ -213,18 +213,18 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+PREHOOK: query: SELECT a.*, b.*, c.* FROM T1_n80 a JOIN T2_n48 b ON a.key = b.key JOIN T3_n19 c on a.key = c.key
 ORDER BY a.key, b.key, c.key, a.val, b.val, c.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n80
+PREHOOK: Input: default@t2_n48
+PREHOOK: Input: default@t3_n19
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+POSTHOOK: query: SELECT a.*, b.*, c.* FROM T1_n80 a JOIN T2_n48 b ON a.key = b.key JOIN T3_n19 c on a.key = c.key
 ORDER BY a.key, b.key, c.key, a.val, b.val, c.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n80
+POSTHOOK: Input: default@t2_n48
+POSTHOOK: Input: default@t3_n19
 #### A masked pattern was here ####
 2 12 2 22 2 12
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out
index 170a5bb59d..7802b773f6 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out
@@ -1,54 +1,54 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n55(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n55
+POSTHOOK: query: CREATE TABLE T1_n55(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n55
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n55
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n55
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n55
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n55
+PREHOOK: query: CREATE TABLE T2_n35(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n35
+POSTHOOK: query: CREATE TABLE T2_n35(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n35
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n35
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n35
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n35
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n35
 PREHOOK: query: EXPLAIN
 select * from
 (
-select key, val from T1
+select key, val from T1_n55
 union all
-select key, val from T1
+select key, val from T1_n55
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n35 b on subq1.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 select * from
 (
-select key, val from T1
+select key, val from T1_n55
 union all
-select key, val from T1
+select key, val from T1_n55
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n35 b on subq1.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-5 is a root stage
@@ -83,7 +83,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-           alias: t1
+           alias: t1_n55
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -110,7 +110,7 @@ STAGE PLANS:
              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
          TableScan
-           alias: t1
+           alias: t1_n55
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -147,27 +147,27 @@ STAGE PLANS:
 PREHOOK: query: select * from
 (
-select key, val from T1
+select key, val from T1_n55
 union all
-select key, val from T1
+select key, val from T1_n55
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n35 b on subq1.key = b.key
 ORDER BY subq1.key, b.key, subq1.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n55
+PREHOOK: Input: default@t2_n35
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
 (
-select key, val from T1
+select key, val from T1_n55
 union all
-select key, val from T1
+select key, val from T1_n55
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n35 b on subq1.key = b.key
 ORDER BY subq1.key, b.key, subq1.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n55
+POSTHOOK: Input: default@t2_n35
 #### A masked pattern was here ####
 2 12 2 22
 2 12 2 22
@@ -184,16 +184,16 @@ POSTHOOK: Input: default@t2
 PREHOOK: query: EXPLAIN
 select * from
 (
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n55 group by key
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n35 b on subq1.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 select * from
 (
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n55 group by key
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n35 b on subq1.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -206,7 +206,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-           alias: t1
+           alias: t1_n55
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -292,23 +292,23 @@ STAGE PLANS:
 PREHOOK: query: select * from
 (
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n55 group by key
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n35 b on subq1.key = b.key
 ORDER BY subq1.key, b.key, subq1.cnt, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n55
+PREHOOK: Input: default@t2_n35
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
 (
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n55 group by key
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n35 b on subq1.key = b.key
 ORDER BY subq1.key, b.key, subq1.cnt, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n55
+POSTHOOK: Input: default@t2_n35
 #### A masked pattern was here ####
 2 1 2 22
 3 1 3 13
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out
index 7707a70926..7f8cddca58 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out
@@ -1,46 +1,46 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n24(key STRING, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n24
+POSTHOOK: query: CREATE TABLE T1_n24(key STRING, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n24
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n24
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n24
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n24
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: drop table array_valued_T1
+POSTHOOK: Output: default@t1_n24
+PREHOOK: query: drop table array_valued_T1_n24
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table array_valued_T1
+POSTHOOK: query: drop table array_valued_T1_n24
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table array_valued_T1 (key string, value array<string>) SKEWED BY (key) ON ((8))
+PREHOOK: query: create table array_valued_T1_n24 (key string, value array<string>) SKEWED BY (key) ON ((8))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@array_valued_T1
-POSTHOOK: query: create table array_valued_T1 (key string, value array<string>) SKEWED BY (key) ON ((8))
+PREHOOK: Output: default@array_valued_T1_n24
+POSTHOOK: query: create table array_valued_T1_n24 (key string, value array<string>) SKEWED BY (key) ON ((8))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@array_valued_T1
-PREHOOK: query: insert overwrite table array_valued_T1 select key, array(value) from T1
+POSTHOOK: Output: default@array_valued_T1_n24
+PREHOOK: query: insert overwrite table array_valued_T1_n24 select key, array(value) from T1_n24
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@array_valued_t1
-POSTHOOK: query: insert overwrite table array_valued_T1 select key, array(value) from T1
+PREHOOK: Input: default@t1_n24
+PREHOOK: Output: default@array_valued_t1_n24
+POSTHOOK: query: insert overwrite table array_valued_T1_n24 select key, array(value) from T1_n24
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@array_valued_t1
-POSTHOOK: Lineage: array_valued_t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: array_valued_t1.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Input: default@t1_n24
+POSTHOOK: Output: default@array_valued_t1_n24
+POSTHOOK: Lineage: array_valued_t1_n24.key SIMPLE [(t1_n24)t1_n24.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: array_valued_t1_n24.value EXPRESSION [(t1_n24)t1_n24.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain
-select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val
+select * from (select a.key as key, b.value as array_val from T1_n24 a join array_valued_T1_n24 b on a.key=b.key) i lateral view explode (array_val) c as val
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val
+select * from (select a.key as key, b.value as array_val from T1_n24 a join array_valued_T1_n24 b on a.key=b.key) i lateral view explode (array_val) c as val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-8 is a root stage
@@ -199,17 +199,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val
+PREHOOK: query: select * from (select a.key as key, b.value as array_val from T1_n24 a join array_valued_T1_n24 b on a.key=b.key) i lateral view explode (array_val) c as val
 ORDER BY key, val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@array_valued_t1
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@array_valued_t1_n24
+PREHOOK: Input: default@t1_n24
 #### A masked pattern was here ####
-POSTHOOK: query: select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val
+POSTHOOK: query: select * from (select a.key as key, b.value as array_val from T1_n24 a join array_valued_T1_n24 b on a.key=b.key) i lateral view explode (array_val) c as val
 ORDER BY key, val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@array_valued_t1
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@array_valued_t1_n24
+POSTHOOK: Input: default@t1_n24
 #### A masked pattern was here ####
 1 ["11"] 11
 2 ["12"] 12
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out
index ee92f9801c..ac5e7df249 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out
@@ -1,51 +1,51 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n100(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n100
+POSTHOOK: query: CREATE TABLE T1_n100(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n100
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n100
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n100
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n100
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n100
+PREHOOK: query: CREATE TABLE T2_n61(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n61
+POSTHOOK: query: CREATE TABLE T2_n61(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n61
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n61
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n61
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n61
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n61
 PREHOOK: query: EXPLAIN
 select * from
 (
-  select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+  select a.key, a.val as val1, b.val as val2 from T1_n100 a join T2_n61 b on a.key = b.key
 union all
-  select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+  select a.key, a.val as val1, b.val as val2 from T1_n100 a join T2_n61 b on a.key = b.key
 ) subq1
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 select * from
 (
-  select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+  select a.key, a.val as val1, b.val as val2 from T1_n100 a join T2_n61 b on a.key = b.key
 union all
-  select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+  select a.key, a.val as val1, b.val as val2 from T1_n100 a join T2_n61 b on a.key = b.key
 ) subq1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -303,25 +303,25 @@ STAGE PLANS:
 PREHOOK: query: select * from
 (
-  select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+  select a.key, a.val as val1, b.val as val2 from T1_n100 a join T2_n61 b on a.key = b.key
 union all
-  select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+  select a.key, a.val as val1, b.val as val2 from T1_n100 a join T2_n61 b on a.key = b.key
 ) subq1
 ORDER BY key, val1, val2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n100
+PREHOOK: Input: default@t2_n61
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
 (
-  select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+  select a.key, a.val as val1, b.val as val2 from T1_n100 a join T2_n61 b on a.key = b.key
 union all
-  select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key
+  select a.key, a.val as val1, b.val as val2 from T1_n100 a join T2_n61 b on a.key = b.key
 ) subq1
 ORDER BY key, val1, val2
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n100
+POSTHOOK: Input: default@t2_n61
 #### A masked pattern was here ####
 2 12 22
 2 12 22
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out
index 78b75b84b0..1c884068db 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out
@@ -1,64 +1,64 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n29(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n29
+POSTHOOK: query: CREATE TABLE T1_n29(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n29
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n29
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n29
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n29
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n29
+PREHOOK: query: CREATE TABLE T2_n20(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n20
+POSTHOOK: query: CREATE TABLE T2_n20(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n20
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n20
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n20
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n20
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: CREATE TABLE T3(key STRING, val STRING)
+POSTHOOK: Output: default@t2_n20
+PREHOOK: query: CREATE TABLE T3_n7(key STRING, val STRING)
 SKEWED BY (val) ON ((12)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING)
+PREHOOK: Output: default@T3_n7
+POSTHOOK: query: CREATE TABLE T3_n7(key STRING, val STRING)
 SKEWED BY (val) ON ((12)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: Output: default@T3_n7
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n7
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n7
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n7
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
+POSTHOOK: Output: default@t3_n7
 PREHOOK: query: EXPLAIN
 select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n29 a join T2_n20 b on a.key = b.key
+join T3_n7 c on a.val = c.val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n29 a join T2_n20 b on a.key = b.key
+join T3_n7 c on a.val = c.val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-7 is a root stage
@@ -155,22 +155,22 @@ STAGE PLANS:
 PREHOOK: query: select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n29 a join T2_n20 b on a.key = b.key
+join T3_n7 c on a.val = c.val
 order by a.key, b.key, c.key, a.val, b.val, c.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n29
+PREHOOK: Input: default@t2_n20
+PREHOOK: Input: default@t3_n7
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n29 a join T2_n20 b on a.key = b.key
+join T3_n7 c on a.val = c.val
 order by a.key, b.key, c.key, a.val, b.val, c.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n29
+POSTHOOK: Input: default@t2_n20
+POSTHOOK: Input: default@t3_n7
 #### A masked pattern was here ####
 2 12 2 22 2 12
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out
index bf5d2ab750..7373948141 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out
@@ -1,66 +1,66 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n152(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n152
+POSTHOOK: query: CREATE TABLE T1_n152(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n152
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n152
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n152
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n152
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n152
+PREHOOK: query: CREATE TABLE T2_n89(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n89
+POSTHOOK: query: CREATE TABLE T2_n89(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n89
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n89
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n89
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n89
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: CREATE TABLE T3(key STRING, val STRING)
+POSTHOOK: Output: default@t2_n89
+PREHOOK: query: CREATE TABLE T3_n36(key STRING, val STRING)
 SKEWED BY (val) ON ((12)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING)
+PREHOOK: Output: default@T3_n36
+POSTHOOK: query: CREATE TABLE T3_n36(key STRING, val STRING)
 SKEWED BY (val) ON ((12)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: Output: default@T3_n36
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n36
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n36
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n36
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
+POSTHOOK: Output: default@t3_n36
 PREHOOK: query: EXPLAIN
 select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n152 a join T2_n89 b on a.key = b.key
+join T3_n36 c on a.val = c.val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n152 a join T2_n89 b on a.key = b.key
+join T3_n36 c on a.val = c.val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-10 is a root stage
@@ -211,22 +211,22 @@ STAGE PLANS:
 PREHOOK: query: select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n152 a join T2_n89 b on a.key = b.key
+join T3_n36 c on a.val = c.val
 order by a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n152
+PREHOOK: Input: default@t2_n89
+PREHOOK: Input: default@t3_n36
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
-T1 a join T2 b on a.key = b.key
-join T3 c on a.val = c.val
+T1_n152 a join T2_n89 b on a.key = b.key
+join T3_n36 c on a.val = c.val
 order by a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n152
+POSTHOOK: Input: default@t2_n89
+POSTHOOK: Input: default@t3_n36
 #### A masked pattern was here ####
 2 12 2 22 2 12
diff --git a/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out b/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
index c1e24d6fc6..4f3707c17b 100644
--- a/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
@@ -73,10 +73,10 @@ POSTHOOK: Output: default@nonskewtable
 POSTHOOK: Lineage: nonskewtable.key SCRIPT []
 POSTHOOK: Lineage: nonskewtable.value SCRIPT []
 PREHOOK: query: EXPLAIN
-CREATE TABLE result AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
+CREATE TABLE result_n1 AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
 PREHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: query: EXPLAIN
-CREATE TABLE result AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
+CREATE TABLE result_n1 AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
 POSTHOOK: type: CREATETABLE_AS_SELECT
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -139,7 +139,7 @@ STAGE PLANS:
                input format: org.apache.hadoop.mapred.TextInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-               name: default.result
+               name: default.result_n1
   Stage: Stage-5
    Conditional Operator
@@ -176,7 +176,7 @@ STAGE PLANS:
                input format: org.apache.hadoop.mapred.TextInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-               name: default.result
+               name: default.result_n1
      Local Work:
        Map Reduce Local Work
@@ -193,31 +193,31 @@ STAGE PLANS:
          input format: org.apache.hadoop.mapred.TextInputFormat
          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-         name: default.result
+         name: default.result_n1
  Stage: Stage-2
    Stats Work
      Basic Stats Work:
-PREHOOK: query: CREATE TABLE result AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
+PREHOOK: query: CREATE TABLE result_n1 AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@nonskewtable
 PREHOOK: Input: default@skewtable
 PREHOOK: Output: database:default
-PREHOOK: Output: default@result
-POSTHOOK: query: CREATE TABLE result AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
+PREHOOK: Output: default@result_n1
+POSTHOOK: query: CREATE TABLE result_n1 AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@nonskewtable
 POSTHOOK: Input: default@skewtable
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@result
-POSTHOOK: Lineage: result.key SIMPLE [(skewtable)a.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: result.value SIMPLE [(skewtable)a.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM result
+POSTHOOK: Output: default@result_n1
+POSTHOOK: Lineage: result_n1.key SIMPLE [(skewtable)a.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: result_n1.value SIMPLE [(skewtable)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM result_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@result
+PREHOOK: Input: default@result_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM result
+POSTHOOK: query: SELECT * FROM result_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@result
+POSTHOOK: Input: default@result_n1
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out b/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out
index 23c81b3fb9..21ab5deae0 100644
--- a/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n36(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n36
+POSTHOOK: query: CREATE TABLE T1_n36(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n36
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n36
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n36
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n36
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n36
+PREHOOK: query: CREATE TABLE T2_n23(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n23
+POSTHOOK: query: CREATE TABLE T2_n23(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n23
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n23
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n23
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n23
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n23
 PREHOOK: query: EXPLAIN
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT * FROM T1_n36 a JOIN T2_n23 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT * FROM T1_n36 a JOIN T2_n23 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -153,17 +153,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT * FROM T1_n36 a JOIN T2_n23 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n36
+PREHOOK: Input: default@t2_n23
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT * FROM T1_n36 a JOIN T2_n23 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n36
+POSTHOOK: Input: default@t2_n23
 #### A masked pattern was here ####
 2 12 2 22
 3 13 3 13
@@ -172,10 +172,10 @@ POSTHOOK: Input: default@t2
 8 28 8 18
 8 28 8 18
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n36 a RIGHT OUTER JOIN T2_n23 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n36 a RIGHT OUTER JOIN T2_n23 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -290,17 +290,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
-PREHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n36 a RIGHT OUTER JOIN T2_n23 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n36
+PREHOOK: Input: default@t2_n23
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n36 a RIGHT OUTER JOIN T2_n23 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n36
+POSTHOOK: Input: default@t2_n23
 #### A masked pattern was here ####
 NULL NULL 4 14
 NULL NULL 5 15
@@ -310,21 +310,21 @@ NULL NULL 5 15
 8 18 8 18
 8 28 8 18
 8 28 8 18
-PREHOOK: query: create table DEST1(key1 STRING, val1 STRING, key2 STRING, val2 STRING)
+PREHOOK: query: create table DEST1_n7(key1 STRING, val1 STRING, key2 STRING, val2 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1
-POSTHOOK: query: create table DEST1(key1 STRING, val1 STRING, key2 STRING, val2 STRING)
+PREHOOK: Output: default@DEST1_n7
+POSTHOOK: query: create table DEST1_n7(key1 STRING, val1 STRING, key2 STRING, val2 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1
+POSTHOOK: Output: default@DEST1_n7
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE DEST1
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+INSERT OVERWRITE TABLE DEST1_n7
+SELECT * FROM T1_n36 a JOIN T2_n23 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE DEST1
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+INSERT OVERWRITE TABLE DEST1_n7
+SELECT * FROM T1_n36 a JOIN T2_n23 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -383,7 +383,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.dest1
+             name: default.dest1_n7
  Stage: Stage-0
    Move Operator
@@ -393,7 +393,7 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.dest1
+             name: default.dest1_n7
  Stage: Stage-3
    Map Reduce
@@ -443,33 +443,33 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-             name: default.dest1
+             name: default.dest1_n7
-PREHOOK: query: INSERT OVERWRITE TABLE DEST1
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: INSERT OVERWRITE TABLE DEST1_n7
+SELECT * FROM T1_n36 a JOIN T2_n23 b ON a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@dest1
+PREHOOK: Input: default@t1_n36
+PREHOOK: Input: default@t2_n23
+PREHOOK: Output: default@dest1_n7
-POSTHOOK: query: INSERT OVERWRITE TABLE DEST1
-SELECT * FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: INSERT OVERWRITE TABLE DEST1_n7
+SELECT * FROM T1_n36 a JOIN T2_n23 b ON a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key1 SIMPLE [(t1)a.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.key2 SIMPLE [(t2)b.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.val1 SIMPLE [(t1)a.FieldSchema(name:val, type:string, comment:null), ]
-POSTHOOK: Lineage: dest1.val2 SIMPLE [(t2)b.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM DEST1
+POSTHOOK: Input: default@t1_n36
+POSTHOOK: Input: default@t2_n23
+POSTHOOK: Output: default@dest1_n7
+POSTHOOK: Lineage: dest1_n7.key1 SIMPLE [(t1_n36)a.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n7.key2 SIMPLE [(t2_n23)b.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n7.val1 SIMPLE [(t1_n36)a.FieldSchema(name:val, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1_n7.val2 SIMPLE [(t2_n23)b.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: SELECT * FROM DEST1_n7
 ORDER BY key1, key2, val1, val2
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n7
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DEST1
+POSTHOOK: query: SELECT * FROM
DEST1_n7 ORDER BY key1, key2, val1, val2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n7 #### A masked pattern was here #### 2 12 2 22 3 13 3 13 @@ -478,12 +478,12 @@ POSTHOOK: Input: default@dest1 8 28 8 18 8 28 8 18 PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE DEST1 -SELECT * FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +INSERT OVERWRITE TABLE DEST1_n7 +SELECT * FROM T1_n36 a RIGHT OUTER JOIN T2_n23 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE DEST1 -SELECT * FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +INSERT OVERWRITE TABLE DEST1_n7 +SELECT * FROM T1_n36 a RIGHT OUTER JOIN T2_n23 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -542,7 +542,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n7 Stage: Stage-0 Move Operator @@ -552,7 +552,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n7 Stage: Stage-3 Map Reduce @@ -602,33 +602,33 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n7 -PREHOOK: query: INSERT OVERWRITE TABLE DEST1 -SELECT * FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: query: INSERT OVERWRITE TABLE DEST1_n7 +SELECT * FROM T1_n36 a RIGHT OUTER JOIN T2_n23 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE DEST1 -SELECT * FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: Input: default@t1_n36 +PREHOOK: Input: default@t2_n23 +PREHOOK: Output: default@dest1_n7 +POSTHOOK: query: INSERT OVERWRITE TABLE DEST1_n7 +SELECT * FROM T1_n36 a RIGHT OUTER JOIN T2_n23 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key1 SIMPLE [(t1)a.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest1.key2 SIMPLE [(t2)b.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: dest1.val1 SIMPLE [(t1)a.FieldSchema(name:val, type:string, comment:null), ] -POSTHOOK: Lineage: dest1.val2 SIMPLE [(t2)b.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM DEST1 +POSTHOOK: Input: default@t1_n36 +POSTHOOK: Input: default@t2_n23 +POSTHOOK: Output: default@dest1_n7 +POSTHOOK: Lineage: dest1_n7.key1 SIMPLE [(t1_n36)a.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest1_n7.key2 SIMPLE [(t2_n23)b.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: dest1_n7.val1 SIMPLE [(t1_n36)a.FieldSchema(name:val, type:string, comment:null), ] +POSTHOOK: Lineage: dest1_n7.val2 SIMPLE [(t2_n23)b.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM DEST1_n7 ORDER BY key1, key2, val1, val2 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n7 #### A masked pattern was here #### -POSTHOOK: query: 
SELECT * FROM DEST1 +POSTHOOK: query: SELECT * FROM DEST1_n7 ORDER BY key1, key2, val1, val2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n7 #### A masked pattern was here #### NULL NULL 4 14 NULL NULL 5 15 diff --git a/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out b/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out index 212be24dd1..843786a2b5 100644 --- a/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out @@ -1,60 +1,60 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n4(key STRING, val STRING) SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n4 +POSTHOOK: query: CREATE TABLE T1_n4(key STRING, val STRING) SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n4 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n4 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +POSTHOOK: Output: default@t1_n4 +PREHOOK: query: CREATE TABLE T2_n2(key STRING, val STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: Output: default@T2_n2 +POSTHOOK: query: CREATE TABLE T2_n2(key STRING, val STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 -PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@t2_n2 +PREHOOK: query: CREATE TABLE T3_n1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T3_n1 +POSTHOOK: query: CREATE TABLE T3_n1(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +POSTHOOK: Output: default@T3_n1 +PREHOOK: query: LOAD DATA 
LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t3 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +PREHOOK: Output: default@t3_n1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t3 +POSTHOOK: Output: default@t3_n1 PREHOOK: query: EXPLAIN -SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +SELECT a.*, b.*, c.* FROM T1_n4 a JOIN T2_n2 b ON a.key = b.key JOIN T3_n1 c on a.key = c.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +SELECT a.*, b.*, c.* FROM T1_n4 a JOIN T2_n2 b ON a.key = b.key JOIN T3_n1 c on a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -204,18 +204,18 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +PREHOOK: query: SELECT a.*, b.*, c.* FROM T1_n4 a JOIN T2_n2 b ON a.key = b.key JOIN T3_n1 c on a.key = c.key ORDER BY a.key, b.key, c.key, a.val, b.val, c.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t1_n4 +PREHOOK: Input: default@t2_n2 +PREHOOK: Input: default@t3_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key +POSTHOOK: query: SELECT a.*, b.*, c.* FROM T1_n4 a JOIN T2_n2 b ON a.key = b.key JOIN T3_n1 c on a.key = c.key ORDER BY a.key, b.key, c.key, a.val, b.val, c.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t1_n4 +POSTHOOK: Input: default@t2_n2 +POSTHOOK: Input: default@t3_n1 #### A masked pattern was here #### 2 12 2 22 2 12 diff --git a/ql/src/test/results/clientpositive/skewjoinopt1.q.out b/ql/src/test/results/clientpositive/skewjoinopt1.q.out index 6307e4b944..31b0e7ab2d 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt1.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt1.q.out @@ -1,44 +1,44 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n64(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n64 +POSTHOOK: query: CREATE TABLE T1_n64(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n64 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n64 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n64 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n64 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +POSTHOOK: Output: default@t1_n64 +PREHOOK: query: 
CREATE TABLE T2_n40(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: Output: default@T2_n40 +POSTHOOK: query: CREATE TABLE T2_n40(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n40 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n40 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n40 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n40 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2_n40 PREHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n64 a JOIN T2_n40 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n64 a JOIN T2_n40 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -176,17 +176,17 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: query: SELECT a.*, b.* FROM T1_n64 a JOIN T2_n40 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n64 +PREHOOK: Input: default@t2_n40 #### A masked pattern was here #### -POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: query: SELECT a.*, b.* FROM T1_n64 a JOIN T2_n40 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n64 +POSTHOOK: Input: default@t2_n40 #### A masked pattern was here #### 2 12 2 22 3 13 3 13 @@ -195,10 +195,10 @@ POSTHOOK: Input: default@t2 8 28 8 18 8 28 8 18 PREHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n64 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n64 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -336,17 +336,17 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: query: SELECT a.*, b.* FROM T1_n64 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n64 +PREHOOK: Input: default@t2_n40 #### A masked pattern was here #### -POSTHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: query: SELECT a.*, b.* FROM T1_n64 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n64 +POSTHOOK: Input: default@t2_n40 #### A masked 
pattern was here #### NULL NULL 4 14 NULL NULL 5 15 @@ -357,10 +357,10 @@ NULL NULL 5 15 8 28 8 18 8 28 8 18 PREHOOK: query: EXPLAIN -SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +SELECT count(1) FROM T1_n64 a JOIN T2_n40 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +SELECT count(1) FROM T1_n64 a JOIN T2_n40 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -509,22 +509,22 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: query: SELECT count(1) FROM T1_n64 a JOIN T2_n40 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n64 +PREHOOK: Input: default@t2_n40 #### A masked pattern was here #### -POSTHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: query: SELECT count(1) FROM T1_n64 a JOIN T2_n40 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n64 +POSTHOOK: Input: default@t2_n40 #### A masked pattern was here #### 6 PREHOOK: query: EXPLAIN -SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +SELECT count(1) FROM T1_n64 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +SELECT count(1) FROM T1_n64 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -673,14 +673,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +PREHOOK: query: SELECT count(1) FROM T1_n64 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n64 +PREHOOK: Input: default@t2_n40 #### A masked pattern was here #### -POSTHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key +POSTHOOK: query: SELECT count(1) FROM T1_n64 a RIGHT OUTER JOIN T2_n40 b ON a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n64 +POSTHOOK: Input: default@t2_n40 #### A masked pattern was here #### 8 diff --git a/ql/src/test/results/clientpositive/skewjoinopt10.q.out b/ql/src/test/results/clientpositive/skewjoinopt10.q.out index 72a8f15661..472eaac4c4 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt10.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt10.q.out @@ -1,46 +1,46 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n56(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n56 +POSTHOOK: query: CREATE TABLE T1_n56(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n56 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n56 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n56 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n56 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: drop table array_valued_T1 +POSTHOOK: Output: default@t1_n56 +PREHOOK: query: drop table array_valued_T1_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table array_valued_T1 +POSTHOOK: query: drop table array_valued_T1_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table array_valued_T1 (key string, value array) SKEWED BY (key) ON ((8)) +PREHOOK: query: create table array_valued_T1_n0 (key string, value array) SKEWED BY (key) ON ((8)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@array_valued_T1 -POSTHOOK: query: create table array_valued_T1 (key string, value array) SKEWED BY (key) ON ((8)) +PREHOOK: Output: default@array_valued_T1_n0 +POSTHOOK: query: create table array_valued_T1_n0 (key string, value array) SKEWED BY (key) ON ((8)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@array_valued_T1 -PREHOOK: query: insert overwrite table array_valued_T1 select key, array(value) from T1 +POSTHOOK: Output: default@array_valued_T1_n0 +PREHOOK: query: insert overwrite table array_valued_T1_n0 select key, array(value) from T1_n56 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@array_valued_t1 -POSTHOOK: query: insert overwrite table array_valued_T1 select key, array(value) from T1 +PREHOOK: Input: default@t1_n56 +PREHOOK: Output: default@array_valued_t1_n0 +POSTHOOK: query: insert overwrite table array_valued_T1_n0 select key, array(value) from T1_n56 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@array_valued_t1 -POSTHOOK: Lineage: array_valued_t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: array_valued_t1.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Input: default@t1_n56 +POSTHOOK: Output: default@array_valued_t1_n0 +POSTHOOK: Lineage: array_valued_t1_n0.key SIMPLE [(t1_n56)t1_n56.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: array_valued_t1_n0.value EXPRESSION [(t1_n56)t1_n56.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain -select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +select * from (select a.key as key, b.value as array_val from T1_n56 a join array_valued_T1_n0 b on a.key=b.key) i lateral view explode (array_val) c as val PREHOOK: type: QUERY POSTHOOK: query: explain -select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +select * from (select a.key as key, b.value as array_val from T1_n56 a join array_valued_T1_n0 b on a.key=b.key) i lateral view explode (array_val) c as val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -220,17 +220,17 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +PREHOOK: query: select * from (select a.key as key, b.value as array_val from T1_n56 a join array_valued_T1_n0 b on a.key=b.key) i lateral view explode (array_val) c as val ORDER BY 
key, val PREHOOK: type: QUERY -PREHOOK: Input: default@array_valued_t1 -PREHOOK: Input: default@t1 +PREHOOK: Input: default@array_valued_t1_n0 +PREHOOK: Input: default@t1_n56 #### A masked pattern was here #### -POSTHOOK: query: select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val +POSTHOOK: query: select * from (select a.key as key, b.value as array_val from T1_n56 a join array_valued_T1_n0 b on a.key=b.key) i lateral view explode (array_val) c as val ORDER BY key, val POSTHOOK: type: QUERY -POSTHOOK: Input: default@array_valued_t1 -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@array_valued_t1_n0 +POSTHOOK: Input: default@t1_n56 #### A masked pattern was here #### 1 ["11"] 11 2 ["12"] 12 diff --git a/ql/src/test/results/clientpositive/skewjoinopt11.q.out b/ql/src/test/results/clientpositive/skewjoinopt11.q.out index de172204cc..713254416e 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt11.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt11.q.out @@ -1,51 +1,51 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n78(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n78 +POSTHOOK: query: CREATE TABLE T1_n78(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n78 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n78 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n78 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n78 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@t1_n78 +PREHOOK: query: CREATE TABLE T2_n47(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T2_n47 +POSTHOOK: query: CREATE TABLE T2_n47(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n47 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n47 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n47 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n47 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2_n47 PREHOOK: query: EXPLAIN select * from ( - select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + select a.key, a.val as val1, b.val as val2 from 
T1_n78 a join T2_n47 b on a.key = b.key union all - select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + select a.key, a.val as val1, b.val as val2 from T1_n78 a join T2_n47 b on a.key = b.key ) subq1 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN select * from ( - select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + select a.key, a.val as val1, b.val as val2 from T1_n78 a join T2_n47 b on a.key = b.key union all - select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + select a.key, a.val as val1, b.val as val2 from T1_n78 a join T2_n47 b on a.key = b.key ) subq1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -349,25 +349,25 @@ STAGE PLANS: PREHOOK: query: select * from ( - select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + select a.key, a.val as val1, b.val as val2 from T1_n78 a join T2_n47 b on a.key = b.key union all - select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + select a.key, a.val as val1, b.val as val2 from T1_n78 a join T2_n47 b on a.key = b.key ) subq1 ORDER BY key, val1, val2 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n78 +PREHOOK: Input: default@t2_n47 #### A masked pattern was here #### POSTHOOK: query: select * from ( - select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + select a.key, a.val as val1, b.val as val2 from T1_n78 a join T2_n47 b on a.key = b.key union all - select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key + select a.key, a.val as val1, b.val as val2 from T1_n78 a join T2_n47 b on a.key = b.key ) subq1 ORDER BY key, val1, val2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n78 +POSTHOOK: Input: default@t2_n47 #### A masked pattern was here #### 2 12 22 2 12 22 diff --git a/ql/src/test/results/clientpositive/skewjoinopt12.q.out b/ql/src/test/results/clientpositive/skewjoinopt12.q.out index 10b59646d5..26420b22f8 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt12.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt12.q.out @@ -1,44 +1,44 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n102(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n102 +POSTHOOK: query: CREATE TABLE T1_n102(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n102 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n102 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n102 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n102 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +POSTHOOK: Output: default@t1_n102 +PREHOOK: query: CREATE TABLE T2_n62(key STRING, val STRING) 
SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: Output: default@T2_n62 +POSTHOOK: query: CREATE TABLE T2_n62(key STRING, val STRING) SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n62 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n62 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n62 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n62 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2_n62 PREHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +SELECT a.*, b.* FROM T1_n102 a JOIN T2_n62 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +SELECT a.*, b.* FROM T1_n102 a JOIN T2_n62 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -172,17 +172,17 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +PREHOOK: query: SELECT a.*, b.* FROM T1_n102 a JOIN T2_n62 b ON a.key = b.key and a.val = b.val ORDER BY a.key, b.key, a.val, b.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n102 +PREHOOK: Input: default@t2_n62 #### A masked pattern was here #### -POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +POSTHOOK: query: SELECT a.*, b.* FROM T1_n102 a JOIN T2_n62 b ON a.key = b.key and a.val = b.val ORDER BY a.key, b.key, a.val, b.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n102 +POSTHOOK: Input: default@t2_n62 #### A masked pattern was here #### 3 13 3 13 8 18 8 18 diff --git a/ql/src/test/results/clientpositive/skewjoinopt13.q.out b/ql/src/test/results/clientpositive/skewjoinopt13.q.out index 1b8798f300..b373e2a0ae 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt13.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt13.q.out @@ -1,64 +1,64 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n38(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n38 +POSTHOOK: query: CREATE TABLE T1_n38(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n38 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n38 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n38 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n38 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@t1_n38 +PREHOOK: query: CREATE TABLE T2_n25(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T2_n25 +POSTHOOK: query: CREATE TABLE T2_n25(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n25 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n25 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n25 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n25 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 -PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) +POSTHOOK: Output: default@t2_n25 +PREHOOK: query: CREATE TABLE T3_n9(key STRING, val STRING) SKEWED BY (val) ON ((12)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) +PREHOOK: Output: default@T3_n9 +POSTHOOK: query: CREATE TABLE T3_n9(key STRING, val STRING) SKEWED BY (val) ON ((12)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +POSTHOOK: Output: default@T3_n9 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n9 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t3 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +PREHOOK: Output: default@t3_n9 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n9 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t3 +POSTHOOK: Output: default@t3_n9 PREHOOK: query: EXPLAIN select * from -T1 a join T2 b on a.key = b.key -join T3 c on a.val = c.val +T1_n38 a join T2_n25 b on a.key = b.key +join T3_n9 c on a.val = c.val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN select * from -T1 a join T2 b on a.key = b.key -join T3 c on a.val = c.val +T1_n38 a join T2_n25 b on a.key = b.key +join T3_n9 c on a.val = c.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -168,22 +168,22 @@ STAGE PLANS: PREHOOK: query: select * from -T1 a join T2 b on a.key = b.key -join T3 c on a.val = c.val +T1_n38 a join T2_n25 b on a.key = b.key +join T3_n9 c on a.val = c.val order by a.key, b.key, c.key, a.val, b.val, c.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t1_n38 +PREHOOK: Input: default@t2_n25 +PREHOOK: Input: default@t3_n9 #### A masked pattern was here #### POSTHOOK: query: select * from -T1 a join T2 b on a.key = b.key -join T3 c on a.val = 
c.val +T1_n38 a join T2_n25 b on a.key = b.key +join T3_n9 c on a.val = c.val order by a.key, b.key, c.key, a.val, b.val, c.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t1_n38 +POSTHOOK: Input: default@t2_n25 +POSTHOOK: Input: default@t3_n9 #### A masked pattern was here #### 2 12 2 22 2 12 diff --git a/ql/src/test/results/clientpositive/skewjoinopt14.q.out b/ql/src/test/results/clientpositive/skewjoinopt14.q.out index 147bd5f507..6a720b48d0 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt14.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt14.q.out @@ -1,66 +1,66 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n65(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n65 +POSTHOOK: query: CREATE TABLE T1_n65(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n65 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n65 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n65 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n65 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@t1_n65 +PREHOOK: query: CREATE TABLE T2_n39(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T2_n39 +POSTHOOK: query: CREATE TABLE T2_n39(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n39 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n39 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n39 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n39 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 -PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) +POSTHOOK: Output: default@t2_n39 +PREHOOK: query: CREATE TABLE T3_n14(key STRING, val STRING) SKEWED BY (val) ON ((12)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) +PREHOOK: Output: default@T3_n14 +POSTHOOK: query: CREATE TABLE T3_n14(key STRING, val STRING) SKEWED BY (val) ON ((12)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 -PREHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/T3.txt' INTO TABLE T3 +POSTHOOK: Output: default@T3_n14 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n14 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t3 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +PREHOOK: Output: default@t3_n14 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n14 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t3 +POSTHOOK: Output: default@t3_n14 PREHOOK: query: EXPLAIN select * from -T1 a join T2 b on a.key = b.key -join T3 c on a.val = c.val +T1_n65 a join T2_n39 b on a.key = b.key +join T3_n14 c on a.val = c.val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN select * from -T1 a join T2 b on a.key = b.key -join T3 c on a.val = c.val +T1_n65 a join T2_n39 b on a.key = b.key +join T3_n14 c on a.val = c.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -230,22 +230,22 @@ STAGE PLANS: PREHOOK: query: select * from -T1 a join T2 b on a.key = b.key -join T3 c on a.val = c.val +T1_n65 a join T2_n39 b on a.key = b.key +join T3_n14 c on a.val = c.val order by a.key, b.key, a.val, b.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t1_n65 +PREHOOK: Input: default@t2_n39 +PREHOOK: Input: default@t3_n14 #### A masked pattern was here #### POSTHOOK: query: select * from -T1 a join T2 b on a.key = b.key -join T3 c on a.val = c.val +T1_n65 a join T2_n39 b on a.key = b.key +join T3_n14 c on a.val = c.val order by a.key, b.key, a.val, b.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t1_n65 +POSTHOOK: Input: default@t2_n39 +POSTHOOK: Input: default@t3_n14 #### A masked pattern was here #### 2 12 2 22 2 12 diff --git a/ql/src/test/results/clientpositive/skewjoinopt16.q.out b/ql/src/test/results/clientpositive/skewjoinopt16.q.out index f2210e9fd7..80da0c1320 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt16.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt16.q.out @@ -1,44 +1,44 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n98(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n98 +POSTHOOK: query: CREATE TABLE T1_n98(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n98 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n98 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n98 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n98 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +POSTHOOK: Output: default@t1_n98 +PREHOOK: query: CREATE TABLE T2_n59(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE 
PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: Output: default@T2_n59 +POSTHOOK: query: CREATE TABLE T2_n59(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n59 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n59 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n59 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n59 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2_n59 PREHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +SELECT a.*, b.* FROM T1_n98 a JOIN T2_n59 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +SELECT a.*, b.* FROM T1_n98 a JOIN T2_n59 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -172,17 +172,17 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +PREHOOK: query: SELECT a.*, b.* FROM T1_n98 a JOIN T2_n59 b ON a.key = b.key and a.val = b.val ORDER BY a.key, b.key, a.val, b.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n98 +PREHOOK: Input: default@t2_n59 #### A masked pattern was here #### -POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val +POSTHOOK: query: SELECT a.*, b.* FROM T1_n98 a JOIN T2_n59 b ON a.key = b.key and a.val = b.val ORDER BY a.key, b.key, a.val, b.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n98 +POSTHOOK: Input: default@t2_n59 #### A masked pattern was here #### 3 13 3 13 8 18 8 18 diff --git a/ql/src/test/results/clientpositive/skewjoinopt17.q.out b/ql/src/test/results/clientpositive/skewjoinopt17.q.out index fef87d06c3..d5d897fbe7 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt17.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt17.q.out @@ -1,44 +1,44 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1_n15(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) +PREHOOK: Output: default@T1_n15 +POSTHOOK: query: CREATE TABLE T1_n15(key STRING, val STRING) SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T1_n15 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n15 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: 
default@t1_n15 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n15 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) +POSTHOOK: Output: default@t1_n15 +PREHOOK: query: CREATE TABLE T2_n10(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) +PREHOOK: Output: default@T2_n10 +POSTHOOK: query: CREATE TABLE T2_n10(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@T2_n10 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n10 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n10 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n10 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2_n10 PREHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n15 a JOIN T2_n10 b ON a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +SELECT a.*, b.* FROM T1_n15 a JOIN T2_n10 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -176,17 +176,17 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +PREHOOK: query: SELECT a.*, b.* FROM T1_n15 a JOIN T2_n10 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n15 +PREHOOK: Input: default@t2_n10 #### A masked pattern was here #### -POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key +POSTHOOK: query: SELECT a.*, b.* FROM T1_n15 a JOIN T2_n10 b ON a.key = b.key ORDER BY a.key, b.key, a.val, b.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n15 +POSTHOOK: Input: default@t2_n10 #### A masked pattern was here #### 2 12 2 22 3 13 3 13 @@ -194,63 +194,63 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: DROP TABLE T1 +PREHOOK: query: DROP TABLE T1_n15 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: DROP TABLE T1 +PREHOOK: Input: default@t1_n15 +PREHOOK: Output: default@t1_n15 +POSTHOOK: query: DROP TABLE T1_n15 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: DROP TABLE T2 +POSTHOOK: Input: default@t1_n15 +POSTHOOK: Output: default@t1_n15 +PREHOOK: query: DROP TABLE T2_n10 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: DROP TABLE T2 +PREHOOK: Input: default@t2_n10 +PREHOOK: Output: default@t2_n10 +POSTHOOK: query: DROP TABLE T2_n10 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: Input: default@t2_n10 +POSTHOOK: Output: default@t2_n10 
+PREHOOK: query: CREATE TABLE T1_n15(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n15
+POSTHOOK: query: CREATE TABLE T1_n15(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n15
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n15
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n15
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n15
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n15
+PREHOOK: query: CREATE TABLE T2_n10(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n10
+POSTHOOK: query: CREATE TABLE T2_n10(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n10
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n10
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n10
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n10
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n10
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+SELECT a.*, b.* FROM T1_n15 a JOIN T2_n10 b ON a.key = b.key and a.val = b.val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+SELECT a.*, b.* FROM T1_n15 a JOIN T2_n10 b ON a.key = b.key and a.val = b.val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -384,17 +384,17 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+PREHOOK: query: SELECT a.*, b.* FROM T1_n15 a JOIN T2_n10 b ON a.key = b.key and a.val = b.val
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n15
+PREHOOK: Input: default@t2_n10
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n15 a JOIN T2_n10 b ON a.key = b.key and a.val = b.val
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n15
+POSTHOOK: Input: default@t2_n10
 #### A masked pattern was here ####
 3	13	3	13
 8	18	8	18
diff --git a/ql/src/test/results/clientpositive/skewjoinopt18.q.out b/ql/src/test/results/clientpositive/skewjoinopt18.q.out
index 163d4866fc..15939660c3 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt18.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt18.q.out
@@ -1,60 +1,60 @@
-PREHOOK: query: CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE tmpT1_n1(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@tmpT1
-POSTHOOK: query: CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@tmpT1_n1
+POSTHOOK: query: CREATE TABLE tmpT1_n1(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tmpT1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1
+POSTHOOK: Output: default@tmpT1_n1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@tmpt1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1
+PREHOOK: Output: default@tmpt1_n1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@tmpt1
-PREHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2))
+POSTHOOK: Output: default@tmpt1_n1
+PREHOOK: query: CREATE TABLE T1_n103(key INT, val STRING) SKEWED BY (key) ON ((2))
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2))
+PREHOOK: Output: default@T1_n103
+POSTHOOK: query: CREATE TABLE T1_n103(key INT, val STRING) SKEWED BY (key) ON ((2))
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1
+POSTHOOK: Output: default@T1_n103
+PREHOOK: query: INSERT OVERWRITE TABLE T1_n103 SELECT key, val FROM tmpT1_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@tmpt1
-PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1
+PREHOOK: Input: default@tmpt1_n1
+PREHOOK: Output: default@t1_n103
+POSTHOOK: query: INSERT OVERWRITE TABLE T1_n103 SELECT key, val FROM tmpT1_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tmpt1
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key EXPRESSION [(tmpt1)tmpt1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t1.val SIMPLE [(tmpt1)tmpt1.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Input: default@tmpt1_n1
+POSTHOOK: Output: default@t1_n103
+POSTHOOK: Lineage: t1_n103.key EXPRESSION [(tmpt1_n1)tmpt1_n1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t1_n103.val SIMPLE [(tmpt1_n1)tmpt1_n1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE T2_n63(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n63
+POSTHOOK: query: CREATE TABLE T2_n63(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n63
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n63
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n63
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n63
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n63
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n103 a JOIN T2_n63 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n103 a JOIN T2_n63 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -119,17 +119,17 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n103 a JOIN T2_n63 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n103
+PREHOOK: Input: default@t2_n63
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n103 a JOIN T2_n63 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n103
+POSTHOOK: Input: default@t2_n63
 #### A masked pattern was here ####
 2	12	2	22
 3	13	3	13
diff --git a/ql/src/test/results/clientpositive/skewjoinopt19.q.out b/ql/src/test/results/clientpositive/skewjoinopt19.q.out
index 73e7943864..4c4c1be5e5 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt19.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt19.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n20(key STRING, val STRING)
 CLUSTERED BY (key) INTO 4 BUCKETS
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n20
+POSTHOOK: query: CREATE TABLE T1_n20(key STRING, val STRING)
 CLUSTERED BY (key) INTO 4 BUCKETS
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+POSTHOOK: Output: default@T1_n20
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n20
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+PREHOOK: Output: default@t1_n20
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n20
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n20
+PREHOOK: query: CREATE TABLE T2_n13(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n13
+POSTHOOK: query: CREATE TABLE T2_n13(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n13
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n13
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n13
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n13
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n13
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n20 a JOIN T2_n13 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n20 a JOIN T2_n13 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -176,17 +176,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n20 a JOIN T2_n13 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n20
+PREHOOK: Input: default@t2_n13
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n20 a JOIN T2_n13 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n20
+POSTHOOK: Input: default@t2_n13
 #### A masked pattern was here ####
 2	12	2	22
 3	13	3	13
diff --git a/ql/src/test/results/clientpositive/skewjoinopt2.q.out b/ql/src/test/results/clientpositive/skewjoinopt2.q.out
index 4e863b1751..a96a20c9eb 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt2.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt2.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n89(key STRING, val STRING)
 SKEWED BY (key) ON ((2), (7)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n89
+POSTHOOK: query: CREATE TABLE T1_n89(key STRING, val STRING)
 SKEWED BY (key) ON ((2), (7)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n89
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n89
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n89
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n89
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n89
+PREHOOK: query: CREATE TABLE T2_n54(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n54
+POSTHOOK: query: CREATE TABLE T2_n54(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n54
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n54
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n54
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n54
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n54
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+SELECT a.*, b.* FROM T1_n89 a JOIN T2_n54 b ON a.key = b.key and a.val = b.val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+SELECT a.*, b.* FROM T1_n89 a JOIN T2_n54 b ON a.key = b.key and a.val = b.val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -172,26 +172,26 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+PREHOOK: query: SELECT a.*, b.* FROM T1_n89 a JOIN T2_n54 b ON a.key = b.key and a.val = b.val
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n89
+PREHOOK: Input: default@t2_n54
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n89 a JOIN T2_n54 b ON a.key = b.key and a.val = b.val
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n89
+POSTHOOK: Input: default@t2_n54
 #### A masked pattern was here ####
 3	13	3	13
 8	18	8	18
 8	18	8	18
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val
+SELECT a.*, b.* FROM T1_n89 a LEFT OUTER JOIN T2_n54 b ON a.key = b.key and a.val = b.val
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val
+SELECT a.*, b.* FROM T1_n89 a LEFT OUTER JOIN T2_n54 b ON a.key = b.key and a.val = b.val
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -325,17 +325,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val
+PREHOOK: query: SELECT a.*, b.* FROM T1_n89 a LEFT OUTER JOIN T2_n54 b ON a.key = b.key and a.val = b.val
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n89
+PREHOOK: Input: default@t2_n54
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n89 a LEFT OUTER JOIN T2_n54 b ON a.key = b.key and a.val = b.val
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n89
+POSTHOOK: Input: default@t2_n54
 #### A masked pattern was here ####
 1	11	NULL	NULL
 2	12	NULL	NULL
@@ -345,10 +345,10 @@ POSTHOOK: Input: default@t2
 8	18	8	18
 8	28	NULL	NULL
 PREHOOK: query: EXPLAIN
-SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key
+SELECT a.key, count(1) FROM T1_n89 a JOIN T2_n54 b ON a.key = b.key and a.val = b.val group by a.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key
+SELECT a.key, count(1) FROM T1_n89 a JOIN T2_n54 b ON a.key = b.key and a.val = b.val group by a.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -506,23 +506,23 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key
+PREHOOK: query: SELECT a.key, count(1) FROM T1_n89 a JOIN T2_n54 b ON a.key = b.key and a.val = b.val group by a.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n89
+PREHOOK: Input: default@t2_n54
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key
+POSTHOOK: query: SELECT a.key, count(1) FROM T1_n89 a JOIN T2_n54 b ON a.key = b.key and a.val = b.val group by a.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n89
+POSTHOOK: Input: default@t2_n54
 #### A masked pattern was here ####
 3	1
 8	2
 PREHOOK: query: EXPLAIN
-SELECT a.key, count(1) FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key
+SELECT a.key, count(1) FROM T1_n89 a LEFT OUTER JOIN T2_n54 b ON a.key = b.key and a.val = b.val group by a.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.key, count(1) FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key
+SELECT a.key, count(1) FROM T1_n89 a LEFT OUTER JOIN T2_n54 b ON a.key = b.key and a.val = b.val group by a.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -680,15 +680,15 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.key, count(1) FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key
+PREHOOK: query: SELECT a.key, count(1) FROM T1_n89 a LEFT OUTER JOIN T2_n54 b ON a.key = b.key and a.val = b.val group by a.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n89
+PREHOOK: Input: default@t2_n54
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, count(1) FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key
+POSTHOOK: query: SELECT a.key, count(1) FROM T1_n89 a LEFT OUTER JOIN T2_n54 b ON a.key = b.key and a.val = b.val group by a.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n89
+POSTHOOK: Input: default@t2_n54
 #### A masked pattern was here ####
 1	1
 2	1
diff --git a/ql/src/test/results/clientpositive/skewjoinopt20.q.out b/ql/src/test/results/clientpositive/skewjoinopt20.q.out
index 3db5527d7d..2ac53fde00 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt20.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt20.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n65(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n65
+POSTHOOK: query: CREATE TABLE T1_n65(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+POSTHOOK: Output: default@T1_n65
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n65
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1
+PREHOOK: Output: default@t1_n65
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n65
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n65
+PREHOOK: query: CREATE TABLE T2_n41(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n41
+POSTHOOK: query: CREATE TABLE T2_n41(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n41
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n41
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n41
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n41
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n41
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n65 a JOIN T2_n41 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n65 a JOIN T2_n41 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -176,17 +176,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n65 a JOIN T2_n41 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n65
+PREHOOK: Input: default@t2_n41
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n65 a JOIN T2_n41 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n65
+POSTHOOK: Input: default@t2_n41
 #### A masked pattern was here ####
 2	12	2	22
 3	13	3	13
diff --git a/ql/src/test/results/clientpositive/skewjoinopt21.q.out b/ql/src/test/results/clientpositive/skewjoinopt21.q.out
index 1b577ed84c..4909aa05c8 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt21.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt21.q.out
@@ -1,53 +1,53 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n38(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n38
+POSTHOOK: query: CREATE TABLE T1_n38(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n38
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n38
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n38
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n38
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n38
+PREHOOK: query: CREATE TABLE T2_n24(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n24
+POSTHOOK: query: CREATE TABLE T2_n24(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n24
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n24
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n24
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n24
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n24
 PREHOOK: query: EXPLAIN
 SELECT a.*, b.*
 FROM
-  (SELECT key as k, val as v FROM T1) a
+  (SELECT key as k, val as v FROM T1_n38) a
 JOIN
-  (SELECT key as k, val as v FROM T2) b
+  (SELECT key as k, val as v FROM T2_n24) b
 ON a.k = b.k
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT a.*, b.*
 FROM
-  (SELECT key as k, val as v FROM T1) a
+  (SELECT key as k, val as v FROM T1_n38) a
 JOIN
-  (SELECT key as k, val as v FROM T2) b
+  (SELECT key as k, val as v FROM T2_n24) b
 ON a.k = b.k
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -61,7 +61,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: t1
+            alias: t1_n38
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (((key = '2') or (key = '3')) and key is not null) (type: boolean)
@@ -77,7 +77,7 @@ STAGE PLANS:
                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col1 (type: string)
          TableScan
-            alias: t2
+            alias: t2_n24
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (((key = '2') or (key = '3')) and key is not null) (type: boolean)
@@ -136,7 +136,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n38
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((not ((key = '2') or (key = '3'))) and key is not null) (type: boolean)
@@ -152,7 +152,7 @@ STAGE PLANS:
                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col1 (type: string)
          TableScan
-            alias: t2
+            alias: t2_n24
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((not ((key = '2') or (key = '3'))) and key is not null) (type: boolean)
@@ -188,25 +188,25 @@ STAGE PLANS:
 
 PREHOOK: query: SELECT a.*, b.*
 FROM
-  (SELECT key as k, val as v FROM T1) a
+  (SELECT key as k, val as v FROM T1_n38) a
 JOIN
-  (SELECT key as k, val as v FROM T2) b
+  (SELECT key as k, val as v FROM T2_n24) b
 ON a.k = b.k
 ORDER BY a.k, b.k, a.v, b.v
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n38
+PREHOOK: Input: default@t2_n24
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT a.*, b.*
 FROM
-  (SELECT key as k, val as v FROM T1) a
+  (SELECT key as k, val as v FROM T1_n38) a
 JOIN
-  (SELECT key as k, val as v FROM T2) b
+  (SELECT key as k, val as v FROM T2_n24) b
 ON a.k = b.k
 ORDER BY a.k, b.k, a.v, b.v
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n38
+POSTHOOK: Input: default@t2_n24
 #### A masked pattern was here ####
 2	12	2	22
 3	13	3	13
diff --git a/ql/src/test/results/clientpositive/skewjoinopt3.q.out b/ql/src/test/results/clientpositive/skewjoinopt3.q.out
index 0730cb1e0e..e2a0832f06 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt3.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt3.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n6(key STRING, val STRING)
 SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n6
+POSTHOOK: query: CREATE TABLE T1_n6(key STRING, val STRING)
 SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n6
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n6
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n6
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n6
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n6
+PREHOOK: query: CREATE TABLE T2_n4(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n4
+POSTHOOK: query: CREATE TABLE T2_n4(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n4
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n4
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n4
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n4
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n4
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n6 a JOIN T2_n4 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n6 a JOIN T2_n4 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -176,17 +176,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n6 a JOIN T2_n4 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n6
+PREHOOK: Input: default@t2_n4
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n6 a JOIN T2_n4 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n6
+POSTHOOK: Input: default@t2_n4
 #### A masked pattern was here ####
 2	12	2	22
 3	13	3	13
@@ -195,10 +195,10 @@ POSTHOOK: Input: default@t2
 8	28	8	18
 8	28	8	18
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n6 a FULL OUTER JOIN T2_n4 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n6 a FULL OUTER JOIN T2_n4 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -336,17 +336,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n6 a FULL OUTER JOIN T2_n4 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n6
+PREHOOK: Input: default@t2_n4
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n6 a FULL OUTER JOIN T2_n4 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n6
+POSTHOOK: Input: default@t2_n4
 #### A masked pattern was here ####
 NULL	NULL	4	14
 NULL	NULL	5	15
diff --git a/ql/src/test/results/clientpositive/skewjoinopt4.q.out b/ql/src/test/results/clientpositive/skewjoinopt4.q.out
index 431af9347b..14efacb933 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt4.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt4.q.out
@@ -1,42 +1,42 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n34(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n34
+POSTHOOK: query: CREATE TABLE T1_n34(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n34
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n34
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n34
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n34
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n34
+PREHOOK: query: CREATE TABLE T2_n21(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n21
+POSTHOOK: query: CREATE TABLE T2_n21(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n21
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n21
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n21
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n21
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n21
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n34 a JOIN T2_n21 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n34 a JOIN T2_n21 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -174,17 +174,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n34 a JOIN T2_n21 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n34
+PREHOOK: Input: default@t2_n21
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n34 a JOIN T2_n21 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n34
+POSTHOOK: Input: default@t2_n21
 #### A masked pattern was here ####
 2	12	2	22
 3	13	3	13
@@ -193,10 +193,10 @@ POSTHOOK: Input: default@t2
 8	28	8	18
 8	28	8	18
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key
+SELECT a.*, b.* FROM T2_n21 a JOIN T1_n34 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key
+SELECT a.*, b.* FROM T2_n21 a JOIN T1_n34 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -334,17 +334,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T2_n21 a JOIN T1_n34 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n34
+PREHOOK: Input: default@t2_n21
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T2_n21 a JOIN T1_n34 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n34
+POSTHOOK: Input: default@t2_n21
 #### A masked pattern was here ####
 2	22	2	12
 3	13	3	13
diff --git a/ql/src/test/results/clientpositive/skewjoinopt5.q.out b/ql/src/test/results/clientpositive/skewjoinopt5.q.out
index 08342f5bcd..c6ac7fe657 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt5.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt5.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n63(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n63
+POSTHOOK: query: CREATE TABLE T1_n63(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n63
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n63
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n63
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n63
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n63
+PREHOOK: query: CREATE TABLE T2_n39(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n39
+POSTHOOK: query: CREATE TABLE T2_n39(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n39
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n39
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n39
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n39
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n39
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n63 a JOIN T2_n39 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n63 a JOIN T2_n39 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -176,17 +176,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n63 a JOIN T2_n39 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n63
+PREHOOK: Input: default@t2_n39
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n63 a JOIN T2_n39 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n63
+POSTHOOK: Input: default@t2_n39
 #### A masked pattern was here ####
 2	12	2	22
 3	13	3	13
diff --git a/ql/src/test/results/clientpositive/skewjoinopt6.q.out b/ql/src/test/results/clientpositive/skewjoinopt6.q.out
index b84fe19797..454344d0d6 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt6.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt6.q.out
@@ -1,44 +1,44 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n84(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n84
+POSTHOOK: query: CREATE TABLE T1_n84(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n84
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n84
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n84
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n84
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n84
+PREHOOK: query: CREATE TABLE T2_n52(key STRING, val STRING)
 SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n52
+POSTHOOK: query: CREATE TABLE T2_n52(key STRING, val STRING)
 SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n52
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n52
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n52
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n52
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n52
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n84 a JOIN T2_n52 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n84 a JOIN T2_n52 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -176,17 +176,17 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: query: SELECT a.*, b.* FROM T1_n84 a JOIN T2_n52 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n84
+PREHOOK: Input: default@t2_n52
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: query: SELECT a.*, b.* FROM T1_n84 a JOIN T2_n52 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n84
+POSTHOOK: Input: default@t2_n52
 #### A masked pattern was here ####
 2	12	2	22
 3	13	3	13
diff --git a/ql/src/test/results/clientpositive/skewjoinopt8.q.out b/ql/src/test/results/clientpositive/skewjoinopt8.q.out
index 89648f6884..4b74407a84 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt8.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt8.q.out
@@ -1,58 +1,58 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE T1_n90(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T1_n90
+POSTHOOK: query: CREATE TABLE T1_n90(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n90
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n90
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n90
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n90
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+POSTHOOK: Output: default@t1_n90
+PREHOOK: query: CREATE TABLE T2_n55(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING)
+PREHOOK: Output: default@T2_n55
+POSTHOOK: query: CREATE TABLE T2_n55(key STRING, val STRING)
 SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n55
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n55
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n55
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n55
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
-PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t2_n55
+PREHOOK: query: CREATE TABLE T3_n24(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T3
-POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T3_n24
+POSTHOOK: query: CREATE TABLE T3_n24(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: Output: default@T3_n24
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n24
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t3
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3
+PREHOOK: Output: default@t3_n24
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n24
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t3
+POSTHOOK: Output: default@t3_n24
 PREHOOK: query: EXPLAIN
-SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+SELECT a.*, b.*, c.* FROM T1_n90 a JOIN T2_n55 b ON a.key = b.key JOIN T3_n24 c on a.key = c.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+SELECT a.*, b.*, c.* FROM T1_n90 a JOIN T2_n55 b ON a.key = b.key JOIN T3_n24 c on a.key = c.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -225,18 +225,18 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+PREHOOK: query: SELECT a.*, b.*, c.* FROM T1_n90 a JOIN T2_n55 b ON a.key = b.key JOIN T3_n24 c on a.key = c.key
 ORDER BY a.key, b.key, c.key, a.val, b.val, c.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1_n90
+PREHOOK: Input: default@t2_n55
+PREHOOK: Input: default@t3_n24
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key
+POSTHOOK: query: SELECT a.*, b.*, c.* FROM T1_n90 a JOIN T2_n55 b ON a.key = b.key JOIN T3_n24 c on a.key = c.key
 ORDER BY a.key, b.key, c.key, a.val, b.val, c.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1_n90
+POSTHOOK: Input: default@t2_n55
+POSTHOOK: Input: default@t3_n24
 #### A masked pattern was here ####
 2	12	2	22	2	12
diff --git a/ql/src/test/results/clientpositive/skewjoinopt9.q.out b/ql/src/test/results/clientpositive/skewjoinopt9.q.out
index def40f761c..defe516694 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt9.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt9.q.out
@@ -1,54 +1,54 @@
-PREHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: query: CREATE TABLE T1_n5(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T1
-POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING)
+PREHOOK: Output: default@T1_n5
+POSTHOOK: query: CREATE TABLE T1_n5(key STRING, val STRING)
 SKEWED BY (key) ON ((2)) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: Output: default@T1_n5
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n5
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: Output: default@t1_n5
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n5
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t1
-PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: Output: default@t1_n5
+PREHOOK: query: CREATE TABLE T2_n3(key STRING, val STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@T2
-POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@T2_n3
+POSTHOOK: query: CREATE TABLE T2_n3(key STRING, val STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: Output: default@T2_n3
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n3
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@t2
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2
+PREHOOK: Output: default@t2_n3
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n3
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@t2_n3
 PREHOOK: query: EXPLAIN
 select * from
 (
-select key, val from T1
+select key, val from T1_n5
 union all
-select key, val from T1
+select key, val from T1_n5
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n3 b on subq1.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 select * from
 (
-select key, val from T1
+select key, val from T1_n5
 union all
-select key, val from T1
+select key, val from T1_n5
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n3 b on subq1.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -59,7 +59,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n5
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -77,7 +77,7 @@ STAGE PLANS:
                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col1 (type: string)
          TableScan
-            alias: t1
+            alias: t1_n5
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -135,27 +135,27 @@ STAGE PLANS:
 
 PREHOOK: query: select * from
 (
-select key, val from T1
+select key, val from T1_n5
 union all
-select key, val from T1
+select key, val from T1_n5
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n3 b on subq1.key = b.key
 ORDER BY subq1.key, b.key, subq1.val, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n5
+PREHOOK: Input: default@t2_n3
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
 (
-select key, val from T1
+select key, val from T1_n5
 union all
-select key, val from T1
+select key, val from T1_n5
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n3 b on subq1.key = b.key
 ORDER BY subq1.key, b.key, subq1.val, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n5
+POSTHOOK: Input: default@t2_n3
 #### A masked pattern was here ####
 2	12	2	22
 2	12	2	22
@@ -172,16 +172,16 @@ POSTHOOK: Input: default@t2
 PREHOOK: query: EXPLAIN
 select * from
 (
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n5 group by key
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n3 b on subq1.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 select * from
 (
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n5 group by key
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n3 b on subq1.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -193,7 +193,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: t1
+            alias: t1_n5
            Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: key is not null (type: boolean)
@@ -276,23 +276,23 @@ STAGE PLANS:
 
 PREHOOK: query: select * from
 (
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n5 group by key
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n3 b on subq1.key = b.key
 ORDER BY subq1.key, b.key, subq1.cnt, b.val
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n5
+PREHOOK: Input: default@t2_n3
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
 (
-select key, count(1) as cnt from T1 group by key
+select key, count(1) as cnt from T1_n5 group by key
 ) subq1
-join T2 b on subq1.key = b.key
+join T2_n3 b on subq1.key = b.key
 ORDER BY subq1.key, b.key, subq1.cnt, b.val
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n5
+POSTHOOK: Input: default@t2_n3
 #### A masked pattern was here ####
 2	1	2	22
 3	1	3	13
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out
index 8e43c88fc0..fcc50dfd07 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out
@@ -1,57 +1,57 @@
-PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: query: create table smb_bucket_1_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@smb_bucket_1
-POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: Output: default@smb_bucket_1_n3
+POSTHOOK: query: create table smb_bucket_1_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@smb_bucket_1
-PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: Output: default@smb_bucket_1_n3
+PREHOOK: query: create table smb_bucket_2_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@smb_bucket_2
-POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: Output: default@smb_bucket_2_n3
+POSTHOOK: query: create table smb_bucket_2_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@smb_bucket_2
-PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: Output: default@smb_bucket_2_n3
+PREHOOK: query: create table smb_bucket_3_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@smb_bucket_3
-POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: Output: default@smb_bucket_3_n3
+POSTHOOK: query: create table smb_bucket_3_n3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@smb_bucket_3
-PREHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1
+POSTHOOK: Output: default@smb_bucket_3_n3
+PREHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n3
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@smb_bucket_1
-POSTHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1
+PREHOOK: Output: default@smb_bucket_1_n3
+POSTHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n3
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@smb_bucket_1
-PREHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2
+POSTHOOK: Output: default@smb_bucket_1_n3
+PREHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n3
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@smb_bucket_2
-POSTHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2
+PREHOOK: Output: default@smb_bucket_2_n3
+POSTHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n3
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@smb_bucket_2
-PREHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3
+POSTHOOK: Output: default@smb_bucket_2_n3
+PREHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n3
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@smb_bucket_3
-POSTHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3
+PREHOOK: Output: default@smb_bucket_3_n3
+POSTHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n3
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@smb_bucket_3
-PREHOOK: query: desc formatted smb_bucket_1
+POSTHOOK: Output: default@smb_bucket_3_n3
+PREHOOK: query: desc formatted smb_bucket_1_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@smb_bucket_1
-POSTHOOK: query: desc formatted smb_bucket_1
+PREHOOK: Input: default@smb_bucket_1_n3
+POSTHOOK: query: desc formatted smb_bucket_1_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_1_n3
 # col_name	data_type	comment
 key	int
 value	string
@@ -81,20 +81,20 @@ Bucket Columns:	[key]
 Sort Columns:	[Order(col:key, order:1)]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: select count(*) from smb_bucket_1
+PREHOOK: query: select count(*) from smb_bucket_1_n3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_1_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from smb_bucket_1
+POSTHOOK: query: select count(*) from smb_bucket_1_n3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_1_n3
 #### A masked pattern was here ####
 5
 PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -133,21 +133,21 @@ STAGE PLANS:
      Processor Tree:
        ListSink
 
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_1_n3
+PREHOOK: Input: default@smb_bucket_2_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_1_n3
+POSTHOOK: Input: default@smb_bucket_2_n3
 #### A masked pattern was here ####
 PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on
a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -183,15 +183,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_1 -PREHOOK: Input: default@smb_bucket_2 +PREHOOK: Input: default@smb_bucket_1_n3 +PREHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_1 -POSTHOOK: Input: default@smb_bucket_2 +POSTHOOK: Input: default@smb_bucket_1_n3 +POSTHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### 1 val_1 NULL NULL 10 val_10 NULL NULL @@ -199,10 +199,10 @@ POSTHOOK: Input: default@smb_bucket_2 4 val_4 NULL NULL 5 val_5 NULL NULL PREHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -238,25 +238,25 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_1 -PREHOOK: Input: default@smb_bucket_2 +PREHOOK: Input: default@smb_bucket_1_n3 +PREHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_1 -POSTHOOK: Input: default@smb_bucket_2 +POSTHOOK: Input: default@smb_bucket_1_n3 +POSTHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### NULL NULL 20 val_20 NULL NULL 23 val_23 NULL NULL 25 val_25 NULL NULL 30 val_30 PREHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -292,15 +292,15 @@ STAGE PLANS: Processor Tree: ListSink 
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_1 -PREHOOK: Input: default@smb_bucket_2 +PREHOOK: Input: default@smb_bucket_1_n3 +PREHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_1 -POSTHOOK: Input: default@smb_bucket_2 +POSTHOOK: Input: default@smb_bucket_1_n3 +POSTHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### 1 val_1 NULL NULL 10 val_10 NULL NULL @@ -312,10 +312,10 @@ NULL NULL 23 val_23 NULL NULL 25 val_25 NULL NULL 30 val_30 PREHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -354,21 +354,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_1 -PREHOOK: Input: default@smb_bucket_2 +PREHOOK: Input: default@smb_bucket_1_n3 +PREHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_1 -POSTHOOK: Input: default@smb_bucket_2 +POSTHOOK: Input: default@smb_bucket_1_n3 +POSTHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### PREHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -404,15 +404,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_1 -PREHOOK: Input: default@smb_bucket_2 +PREHOOK: Input: default@smb_bucket_1_n3 +PREHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key +POSTHOOK: query: select 
/*+mapjoin(b)*/ * from smb_bucket_1_n3 a left outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_1 -POSTHOOK: Input: default@smb_bucket_2 +POSTHOOK: Input: default@smb_bucket_1_n3 +POSTHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### 1 val_1 NULL NULL 10 val_10 NULL NULL @@ -420,10 +420,10 @@ POSTHOOK: Input: default@smb_bucket_2 4 val_4 NULL NULL 5 val_5 NULL NULL PREHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -459,25 +459,25 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_1 -PREHOOK: Input: default@smb_bucket_2 +PREHOOK: Input: default@smb_bucket_1_n3 +PREHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a right outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_1 -POSTHOOK: Input: default@smb_bucket_2 +POSTHOOK: Input: default@smb_bucket_1_n3 +POSTHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### NULL NULL 20 val_20 NULL NULL 23 val_23 NULL NULL 25 val_25 NULL NULL 30 val_30 PREHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -513,15 +513,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_1 -PREHOOK: Input: default@smb_bucket_2 +PREHOOK: Input: default@smb_bucket_1_n3 +PREHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n3 a full outer join smb_bucket_2_n3 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_1 -POSTHOOK: Input: default@smb_bucket_2 +POSTHOOK: Input: default@smb_bucket_1_n3 +POSTHOOK: Input: default@smb_bucket_2_n3 #### A masked pattern was here #### 1 val_1 NULL NULL 10 val_10 NULL NULL diff --git 
a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out index afbe1ab2e6..d35a2077b4 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out @@ -1,50 +1,50 @@ -PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table1 -POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: Output: default@test_table1_n1 +POSTHOOK: query: CREATE TABLE test_table1_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table1 -PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: Output: default@test_table1_n1 +PREHOOK: query: CREATE TABLE test_table2_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table2 -POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: Output: default@test_table2_n1 +POSTHOOK: query: CREATE TABLE test_table2_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table2 +POSTHOOK: Output: default@test_table2_n1 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table1_n1 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n1 PARTITION (ds = '1') SELECT * PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_table1@ds=1 -PREHOOK: Output: default@test_table2@ds=1 +PREHOOK: Output: default@test_table1_n1@ds=1 +PREHOOK: Output: default@test_table2_n1@ds=1 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table1_n1 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n1 PARTITION (ds = '1') SELECT * POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_table1@ds=1 -POSTHOOK: Output: default@test_table2@ds=1 -POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE TABLE test_table3 (key INT, 
value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS +POSTHOOK: Output: default@test_table1_n1@ds=1 +POSTHOOK: Output: default@test_table2_n1@ds=1 +POSTHOOK: Lineage: test_table1_n1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1_n1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE TABLE test_table3_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table3 -POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS +PREHOOK: Output: default@test_table3_n1 +POSTHOOK: query: CREATE TABLE test_table3_n1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table3 +POSTHOOK: Output: default@test_table3_n1 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' +INSERT OVERWRITE TABLE test_table3_n1 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n1 a JOIN test_table2_n1 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' +INSERT OVERWRITE TABLE test_table3_n1 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n1 a JOIN test_table2_n1 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -102,13 +102,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table1 + name default.test_table1_n1 numFiles 16 numRows 500 partition_columns ds partition_columns.types string rawDataSize 5312 - serialization.ddl struct test_table1 { i32 key, string value} + serialization.ddl struct test_table1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -127,18 +127,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table1 + name default.test_table1_n1 partition_columns ds partition_columns.types string - serialization.ddl struct test_table1 { i32 key, string value} + serialization.ddl struct test_table1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table1 - name: default.test_table1 + name: default.test_table1_n1 + name: default.test_table1_n1 Truncated Path -> Alias: - /test_table1/ds=1 [a] + /test_table1_n1/ds=1 [a] Needs Tagging: false Reduce Operator Tree: Select 
Operator @@ -163,15 +163,15 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n1 partition_columns ds partition_columns.types string - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n1 TotalFiles: 16 GatherStats: true MultiFileSpray: true @@ -221,15 +221,15 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n1 partition_columns ds partition_columns.types string - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n1 Stage: Stage-2 Stats Work @@ -238,7 +238,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.test_table3 + Table: default.test_table3_n1 Is Table Level Stats: false Stage: Stage-3 @@ -314,31 +314,31 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' +PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n1 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n1 a JOIN test_table2_n1 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Input: default@test_table2 -PREHOOK: Input: default@test_table2@ds=1 -PREHOOK: Output: default@test_table3@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' +PREHOOK: Input: default@test_table1_n1 +PREHOOK: Input: default@test_table1_n1@ds=1 +PREHOOK: Input: default@test_table2_n1 +PREHOOK: Input: default@test_table2_n1@ds=1 +PREHOOK: Output: default@test_table3_n1@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n1 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n1 a JOIN test_table2_n1 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 -POSTHOOK: Input: default@test_table2 -POSTHOOK: Input: default@test_table2@ds=1 -POSTHOOK: Output: default@test_table3@ds=1 -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table2)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT * FROM test_table1 ORDER BY key +POSTHOOK: Input: default@test_table1_n1 +POSTHOOK: Input: default@test_table1_n1@ds=1 +POSTHOOK: Input: default@test_table2_n1 +POSTHOOK: Input: default@test_table2_n1@ds=1 +POSTHOOK: Output: 
default@test_table3_n1@ds=1 +POSTHOOK: Lineage: test_table3_n1 PARTITION(ds=1).key SIMPLE [(test_table1_n1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3_n1 PARTITION(ds=1).value SIMPLE [(test_table2_n1)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM test_table1_n1 ORDER BY key PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table1_n1 +PREHOOK: Input: default@test_table1_n1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM test_table1 ORDER BY key +POSTHOOK: query: SELECT * FROM test_table1_n1 ORDER BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table1_n1 +POSTHOOK: Input: default@test_table1_n1@ds=1 #### A masked pattern was here #### 0 val_0 1 0 val_0 1 @@ -840,15 +840,15 @@ POSTHOOK: Input: default@test_table1@ds=1 498 val_498 1 498 val_498 1 498 val_498 1 -PREHOOK: query: SELECT * FROM test_table3 ORDER BY key +PREHOOK: query: SELECT * FROM test_table3_n1 ORDER BY key PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n1 +PREHOOK: Input: default@test_table3_n1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM test_table3 ORDER BY key +POSTHOOK: query: SELECT * FROM test_table3_n1 ORDER BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n1 +POSTHOOK: Input: default@test_table3_n1@ds=1 #### A masked pattern was here #### 0 val_0 1 0 val_0 1 @@ -1878,9 +1878,9 @@ POSTHOOK: Input: default@test_table3@ds=1 498 val_498 1 498 val_498 1 498 val_498 1 -PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1891,7 +1891,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_table1 + alias: test_table1_n1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -1945,13 +1945,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table1 + name default.test_table1_n1 numFiles 16 numRows 500 partition_columns ds partition_columns.types string rawDataSize 5312 - serialization.ddl struct test_table1 { i32 key, string value} + serialization.ddl struct test_table1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -1970,18 +1970,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table1 + name default.test_table1_n1 partition_columns ds partition_columns.types string - serialization.ddl struct test_table1 { i32 key, string value} + serialization.ddl struct test_table1_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table1 - name: default.test_table1 + name: default.test_table1_n1 + name: default.test_table1_n1 Truncated Path -> Alias: - /test_table1/ds=1/000001_0 [test_table1] + /test_table1_n1/ds=1/000001_0 [test_table1_n1] Stage: Stage-0 Fetch Operator @@ -1989,9 +1989,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2002,7 +2002,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_table3 + alias: test_table3_n1 Statistics: Num rows: 1028 Data size: 10968 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -2056,13 +2056,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n1 numFiles 16 numRows 1028 partition_columns ds partition_columns.types string rawDataSize 10968 - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 11996 @@ -2080,18 +2080,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n1 partition_columns ds partition_columns.types string - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n1 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 - name: default.test_table3 + name: default.test_table3_n1 + name: default.test_table3_n1 Truncated Path -> Alias: - /test_table3/ds=1/000001_0 [test_table3] + /test_table3_n1/ds=1/000001_0 [test_table3_n1] Stage: Stage-0 Fetch Operator @@ -2099,15 +2099,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: query: SELECT * FROM test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 +PREHOOK: Input: default@test_table1_n1 +PREHOOK: Input: default@test_table1_n1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: query: SELECT * FROM test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 +POSTHOOK: Input: default@test_table1_n1 +POSTHOOK: Input: default@test_table1_n1@ds=1 #### A masked pattern was here #### 8 val_8 1 26 val_26 1 @@ -2141,15 +2141,15 @@ POSTHOOK: Input: default@test_table1@ds=1 455 val_455 1 462 val_462 1 462 val_462 1 -PREHOOK: query: SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: query: SELECT * FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 
-PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table3_n1 +PREHOOK: Input: default@test_table3_n1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: query: SELECT * FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table3_n1 +POSTHOOK: Input: default@test_table3_n1@ds=1 #### A masked pattern was here #### 462 val_462 1 462 val_462 1 @@ -2215,18 +2215,18 @@ POSTHOOK: Input: default@test_table3@ds=1 26 val_26 1 26 val_26 1 8 val_8 1 -PREHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +PREHOOK: query: SELECT COUNT(*) FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table1_n1 +PREHOOK: Input: default@test_table1_n1@ds=1 +PREHOOK: Input: default@test_table3_n1 +PREHOOK: Input: default@test_table3_n1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +POSTHOOK: query: SELECT COUNT(*) FROM test_table3_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1_n1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table1_n1 +POSTHOOK: Input: default@test_table1_n1@ds=1 +POSTHOOK: Input: default@test_table3_n1 +POSTHOOK: Input: default@test_table3_n1@ds=1 #### A masked pattern was here #### 140 diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out index 1672d1a942..d56ae7fe76 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out @@ -1,62 +1,62 @@ -PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1_n6 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table1 -POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: Output: default@test_table1_n6 +POSTHOOK: query: CREATE TABLE test_table1_n6 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table1 -PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: Output: default@test_table1_n6 +PREHOOK: query: CREATE TABLE test_table2_n6 
(key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table2 -POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: Output: default@test_table2_n6 +POSTHOOK: query: CREATE TABLE test_table2_n6 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table2 +POSTHOOK: Output: default@test_table2_n6 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT * +INSERT OVERWRITE TABLE test_table1_n6 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '2') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '3') SELECT * PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_table1@ds=1 -PREHOOK: Output: default@test_table2@ds=1 -PREHOOK: Output: default@test_table2@ds=2 -PREHOOK: Output: default@test_table2@ds=3 +PREHOOK: Output: default@test_table1_n6@ds=1 +PREHOOK: Output: default@test_table2_n6@ds=1 +PREHOOK: Output: default@test_table2_n6@ds=2 +PREHOOK: Output: default@test_table2_n6@ds=3 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * -INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT * +INSERT OVERWRITE TABLE test_table1_n6 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '1') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '2') SELECT * +INSERT OVERWRITE TABLE test_table2_n6 PARTITION (ds = '3') SELECT * POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_table1@ds=1 -POSTHOOK: Output: default@test_table2@ds=1 -POSTHOOK: Output: default@test_table2@ds=2 -POSTHOOK: Output: default@test_table2@ds=3 -POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_table2 PARTITION(ds=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds 
STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: Output: default@test_table1_n6@ds=1 +POSTHOOK: Output: default@test_table2_n6@ds=1 +POSTHOOK: Output: default@test_table2_n6@ds=2 +POSTHOOK: Output: default@test_table2_n6@ds=3 +POSTHOOK: Lineage: test_table1_n6 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table1_n6 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n6 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n6 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n6 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n6 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n6 PARTITION(ds=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_table2_n6 PARTITION(ds=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE TABLE test_table3_n4 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table3 -POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: Output: default@test_table3_n4 +POSTHOOK: query: CREATE TABLE test_table3_n4 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_table3 +POSTHOOK: Output: default@test_table3_n4 PREHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' +INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n6 a JOIN test_table2_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' +INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n6 a JOIN test_table2_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -107,15 +107,15 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n4 partition_columns ds partition_columns.types string - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n4 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n4 TotalFiles: 1 GatherStats: true 
MultiFileSpray: false @@ -138,13 +138,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table1 + name default.test_table1_n6 numFiles 16 numRows 500 partition_columns ds partition_columns.types string rawDataSize 5312 - serialization.ddl struct test_table1 { i32 key, string value} + serialization.ddl struct test_table1_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -163,18 +163,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table1 + name default.test_table1_n6 partition_columns ds partition_columns.types string - serialization.ddl struct test_table1 { i32 key, string value} + serialization.ddl struct test_table1_n6 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table1 - name: default.test_table1 + name: default.test_table1_n6 + name: default.test_table1_n6 Truncated Path -> Alias: - /test_table1/ds=1 [a] + /test_table1_n6/ds=1 [a] Stage: Stage-0 Move Operator @@ -196,15 +196,15 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n4 partition_columns ds partition_columns.types string - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n4 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n4 Stage: Stage-2 Stats Work @@ -213,51 +213,51 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.test_table3 + Table: default.test_table3_n4 Is Table Level Stats: false -PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' +PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n6 a JOIN test_table2_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Input: default@test_table2 -PREHOOK: Input: default@test_table2@ds=1 -PREHOOK: Input: default@test_table2@ds=2 -PREHOOK: Input: default@test_table2@ds=3 -PREHOOK: Output: default@test_table3@ds=1 -POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' +PREHOOK: Input: default@test_table1_n6 +PREHOOK: Input: default@test_table1_n6@ds=1 +PREHOOK: Input: default@test_table2_n6 +PREHOOK: Input: default@test_table2_n6@ds=1 +PREHOOK: Input: default@test_table2_n6@ds=2 +PREHOOK: Input: default@test_table2_n6@ds=3 +PREHOOK: Output: default@test_table3_n4@ds=1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1_n6 a JOIN test_table2_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 -POSTHOOK: Input: default@test_table2 -POSTHOOK: Input: default@test_table2@ds=1 -POSTHOOK: Input: default@test_table2@ds=2 -POSTHOOK: Input: default@test_table2@ds=3 -POSTHOOK: Output: default@test_table3@ds=1 -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table2)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +POSTHOOK: Input: default@test_table1_n6 +POSTHOOK: Input: default@test_table1_n6@ds=1 +POSTHOOK: Input: default@test_table2_n6 +POSTHOOK: Input: default@test_table2_n6@ds=1 +POSTHOOK: Input: default@test_table2_n6@ds=2 +POSTHOOK: Input: default@test_table2_n6@ds=3 +POSTHOOK: Output: default@test_table3_n4@ds=1 +POSTHOOK: Lineage: test_table3_n4 PARTITION(ds=1).key SIMPLE [(test_table1_n6)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3_n4 PARTITION(ds=1).value SIMPLE [(test_table2_n6)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT COUNT(*) FROM test_table3_n4 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1_n6 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 +PREHOOK: Input: default@test_table1_n6 +PREHOOK: Input: default@test_table1_n6@ds=1 +PREHOOK: Input: default@test_table3_n4 +PREHOOK: Input: default@test_table3_n4@ds=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +POSTHOOK: query: SELECT COUNT(*) FROM test_table3_n4 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1_n6 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 +POSTHOOK: Input: default@test_table1_n6 +POSTHOOK: Input: default@test_table1_n6@ds=1 +POSTHOOK: Input: default@test_table3_n4 +POSTHOOK: Input: default@test_table3_n4@ds=1 #### A masked pattern was here #### 420 PREHOOK: query: explain extended -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') -SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '2') +SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3_n4 a JOIN test_table1_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY POSTHOOK: query: explain extended -INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') -SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '2') +SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3_n4 a JOIN test_table1_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: 
QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -316,13 +316,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n4 numFiles 16 numRows 3084 partition_columns ds partition_columns.types string rawDataSize 32904 - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n4 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 35988 @@ -341,18 +341,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n4 partition_columns ds partition_columns.types string - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n4 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 - name: default.test_table3 + name: default.test_table3_n4 + name: default.test_table3_n4 Truncated Path -> Alias: - /test_table3/ds=1 [a] + /test_table3_n4/ds=1 [a] Needs Tagging: false Reduce Operator Tree: Select Operator @@ -378,15 +378,15 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n4 partition_columns ds partition_columns.types string - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n4 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n4 TotalFiles: 16 GatherStats: true MultiFileSpray: true @@ -437,15 +437,15 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.test_table3 + name default.test_table3_n4 partition_columns ds partition_columns.types string - serialization.ddl struct test_table3 { i32 key, string value} + serialization.ddl struct test_table3_n4 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_table3 + name: default.test_table3_n4 Stage: Stage-2 Stats Work @@ -454,7 +454,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.test_table3 + Table: default.test_table3_n4 Is Table Level Stats: false Stage: Stage-3 @@ -530,32 +530,32 @@ STAGE PLANS: GatherStats: false MultiFileSpray: false -PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') -SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +PREHOOK: query: INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '2') +SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3_n4 a JOIN test_table1_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table1 -PREHOOK: Input: default@test_table1@ds=1 -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=1 -PREHOOK: Output: default@test_table3@ds=2 
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') -SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +PREHOOK: Input: default@test_table1_n6 +PREHOOK: Input: default@test_table1_n6@ds=1 +PREHOOK: Input: default@test_table3_n4 +PREHOOK: Input: default@test_table3_n4@ds=1 +PREHOOK: Output: default@test_table3_n4@ds=2 +POSTHOOK: query: INSERT OVERWRITE TABLE test_table3_n4 PARTITION (ds = '2') +SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3_n4 a JOIN test_table1_n6 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table1 -POSTHOOK: Input: default@test_table1@ds=1 -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=1 -POSTHOOK: Output: default@test_table3@ds=2 -POSTHOOK: Lineage: test_table3 PARTITION(ds=2).key SIMPLE [(test_table3)a.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test_table3 PARTITION(ds=2).value EXPRESSION [(test_table3)a.FieldSchema(name:value, type:string, comment:null), (test_table1)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2' +POSTHOOK: Input: default@test_table1_n6 +POSTHOOK: Input: default@test_table1_n6@ds=1 +POSTHOOK: Input: default@test_table3_n4 +POSTHOOK: Input: default@test_table3_n4@ds=1 +POSTHOOK: Output: default@test_table3_n4@ds=2 +POSTHOOK: Lineage: test_table3_n4 PARTITION(ds=2).key SIMPLE [(test_table3_n4)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test_table3_n4 PARTITION(ds=2).value EXPRESSION [(test_table3_n4)a.FieldSchema(name:value, type:string, comment:null), (test_table1_n6)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT count(*) from test_table3_n4 tablesample (bucket 2 out of 16) a where ds = '2' PREHOOK: type: QUERY -PREHOOK: Input: default@test_table3 -PREHOOK: Input: default@test_table3@ds=2 +PREHOOK: Input: default@test_table3_n4 +PREHOOK: Input: default@test_table3_n4@ds=2 #### A masked pattern was here #### -POSTHOOK: query: SELECT count(*) from test_table3 tablesample (bucket 2 out of 16) a where ds = '2' +POSTHOOK: query: SELECT count(*) from test_table3_n4 tablesample (bucket 2 out of 16) a where ds = '2' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_table3 -POSTHOOK: Input: default@test_table3@ds=2 +POSTHOOK: Input: default@test_table3_n4 +POSTHOOK: Input: default@test_table3_n4@ds=2 #### A masked pattern was here #### 420 diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out index cf36279bac..2dd921f8d9 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out @@ -1,70 +1,70 @@ -PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1_n12 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_table1 -POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS +PREHOOK: Output: default@test_table1_n12 +POSTHOOK: query: CREATE TABLE test_table1_n12 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 
 16 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
+POSTHOOK: Output: default@test_table1_n12
+PREHOOK: query: CREATE TABLE test_table2_n12 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
+PREHOOK: Output: default@test_table2_n12
+POSTHOOK: query: CREATE TABLE test_table2_n12 (value INT, key STRING) CLUSTERED BY (value) SORTED BY (value ASC) INTO 16 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+POSTHOOK: Output: default@test_table2_n12
+PREHOOK: query: CREATE TABLE test_table3_n6 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table3
-POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+PREHOOK: Output: default@test_table3_n6
+POSTHOOK: query: CREATE TABLE test_table3_n6 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table3
-PREHOOK: query: CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
+POSTHOOK: Output: default@test_table3_n6
+PREHOOK: query: CREATE TABLE test_table4_n0 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table4
-POSTHOOK: query: CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
+PREHOOK: Output: default@test_table4_n0
+POSTHOOK: query: CREATE TABLE test_table4_n0 (key INT, value STRING) CLUSTERED BY (key, value) SORTED BY (value ASC, key ASC) INTO 16 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table4
+POSTHOOK: Output: default@test_table4_n0
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 SELECT *
-INSERT OVERWRITE TABLE test_table2 SELECT *
-INSERT OVERWRITE TABLE test_table3 SELECT *
-INSERT OVERWRITE TABLE test_table4 SELECT *
+INSERT OVERWRITE TABLE test_table1_n12 SELECT *
+INSERT OVERWRITE TABLE test_table2_n12 SELECT *
+INSERT OVERWRITE TABLE test_table3_n6 SELECT *
+INSERT OVERWRITE TABLE test_table4_n0 SELECT *
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1
-PREHOOK: Output: default@test_table2
-PREHOOK: Output: default@test_table3
-PREHOOK: Output: default@test_table4
+PREHOOK: Output: default@test_table1_n12
+PREHOOK: Output: default@test_table2_n12
+PREHOOK: Output: default@test_table3_n6
+PREHOOK: Output: default@test_table4_n0
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 SELECT *
-INSERT OVERWRITE TABLE test_table2 SELECT *
-INSERT OVERWRITE TABLE test_table3 SELECT *
-INSERT OVERWRITE TABLE test_table4 SELECT *
+INSERT OVERWRITE TABLE test_table1_n12 SELECT *
+INSERT OVERWRITE TABLE test_table2_n12 SELECT *
+INSERT OVERWRITE TABLE test_table3_n6 SELECT *
+INSERT OVERWRITE TABLE test_table4_n0 SELECT *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1
-POSTHOOK: Output: default@test_table2
-POSTHOOK: Output: default@test_table3
-POSTHOOK: Output: default@test_table4
-POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table1_n12
+POSTHOOK: Output: default@test_table2_n12
+POSTHOOK: Output: default@test_table3_n6
+POSTHOOK: Output: default@test_table4_n0
+POSTHOOK: Lineage: test_table1_n12.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1_n12.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2_n12.key SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2_n12.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table3_n6.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table3_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table4_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table4_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN EXTENDED
-SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+SELECT /*+ MAPJOIN(b) */ * FROM test_table1_n12 a JOIN test_table2_n12 b ON a.key = b.value ORDER BY a.key LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+SELECT /*+ MAPJOIN(b) */ * FROM test_table1_n12 a JOIN test_table2_n12 b ON a.key = b.value ORDER BY a.key LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -108,7 +108,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
         Partition
-          base file name: test_table1
+          base file name: test_table1_n12
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           properties:
@@ -122,11 +122,11 @@ STAGE PLANS:
             columns.comments
             columns.types int:string
 #### A masked pattern was here ####
-            name default.test_table1
+            name default.test_table1_n12
             numFiles 16
             numRows 500
             rawDataSize 5312
-            serialization.ddl struct test_table1 { i32 key, string value}
+            serialization.ddl struct test_table1_n12 { i32 key, string value}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             totalSize 5812
@@ -146,20 +146,20 @@ STAGE PLANS:
             columns.comments
             columns.types int:string
 #### A masked pattern was here ####
-            name default.test_table1
+            name default.test_table1_n12
             numFiles 16
             numRows 500
             rawDataSize 5312
-            serialization.ddl struct test_table1 { i32 key, string value}
+            serialization.ddl struct test_table1_n12 { i32 key, string value}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             totalSize 5812
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table1
-        name: default.test_table1
+          name: default.test_table1_n12
+        name: default.test_table1_n12
       Truncated Path -> Alias:
-        /test_table1 [a]
+        /test_table1_n12 [a]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -195,15 +195,15 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table1_n12 a JOIN test_table2_n12 b ON a.key = b.value ORDER BY a.key LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table1_n12
+PREHOOK: Input: default@test_table2_n12
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table1_n12 a JOIN test_table2_n12 b ON a.key = b.value ORDER BY a.key LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table1_n12
+POSTHOOK: Input: default@test_table2_n12
 #### A masked pattern was here ####
 0 val_0 0 val_0
 0 val_0 0 val_0
@@ -216,10 +216,10 @@ POSTHOOK: Input: default@test_table2
 0 val_0 0 val_0
 2 val_2 2 val_2
 PREHOOK: query: EXPLAIN EXTENDED
-SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+SELECT /*+ MAPJOIN(b) */ * FROM test_table3_n6 a JOIN test_table4_n0 b ON a.key = b.value ORDER BY a.key LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN EXTENDED
-SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+SELECT /*+ MAPJOIN(b) */ * FROM test_table3_n6 a JOIN test_table4_n0 b ON a.key = b.value ORDER BY a.key LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
@@ -291,7 +291,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
         Partition
-          base file name: test_table3
+          base file name: test_table3_n6
           input format: org.apache.hadoop.mapred.TextInputFormat
           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
           properties:
@@ -305,11 +305,11 @@ STAGE PLANS:
             columns.comments
             columns.types int:string
 #### A masked pattern was here ####
-            name default.test_table3
+            name default.test_table3_n6
             numFiles 16
             numRows 500
             rawDataSize 5312
-            serialization.ddl struct test_table3 { i32 key, string value}
+            serialization.ddl struct test_table3_n6 { i32 key, string value}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             totalSize 5812
@@ -329,20 +329,20 @@ STAGE PLANS:
             columns.comments
             columns.types int:string
 #### A masked pattern was here ####
-            name default.test_table3
+            name default.test_table3_n6
             numFiles 16
             numRows 500
             rawDataSize 5312
-            serialization.ddl struct test_table3 { i32 key, string value}
+            serialization.ddl struct test_table3_n6 { i32 key, string value}
             serialization.format 1
             serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             totalSize 5812
 #### A masked pattern was here ####
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          name: default.test_table3
-        name: default.test_table3
+          name: default.test_table3_n6
+        name: default.test_table3_n6
       Truncated Path -> Alias:
-        /test_table3 [a]
+        /test_table3_n6 [a]
       Needs Tagging: false
      Reduce Operator Tree:
        Select Operator
@@ -381,13 +381,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table3_n6 a JOIN test_table4_n0 b ON a.key = b.value ORDER BY a.key LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table3
-PREHOOK: Input: default@test_table4
+PREHOOK: Input: default@test_table3_n6
+PREHOOK: Input: default@test_table4_n0
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM test_table3_n6 a JOIN test_table4_n0 b ON a.key = b.value ORDER BY a.key LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table3
-POSTHOOK: Input: default@test_table4
+POSTHOOK: Input: default@test_table3_n6
+POSTHOOK: Input: default@test_table4_n0
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_16.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_16.q.out
index ab2b323ff2..4f6406b0d3 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_16.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_16.q.out
@@ -1,42 +1,42 @@
-PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: query: CREATE TABLE test_table1_n9 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: Output: default@test_table1_n9
+POSTHOOK: query: CREATE TABLE test_table1_n9 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: Output: default@test_table1_n9
+PREHOOK: query: CREATE TABLE test_table2_n9 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: Output: default@test_table2_n9
+POSTHOOK: query: CREATE TABLE test_table2_n9 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n9
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 SELECT *
-INSERT OVERWRITE TABLE test_table2 SELECT *
+INSERT OVERWRITE TABLE test_table1_n9 SELECT *
+INSERT OVERWRITE TABLE test_table2_n9 SELECT *
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1
-PREHOOK: Output: default@test_table2
+PREHOOK: Output: default@test_table1_n9
+PREHOOK: Output: default@test_table2_n9
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 SELECT *
-INSERT OVERWRITE TABLE test_table2 SELECT *
+INSERT OVERWRITE TABLE test_table1_n9 SELECT *
+INSERT OVERWRITE TABLE test_table2_n9 SELECT *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1
-POSTHOOK: Output: default@test_table2
-POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table1_n9
+POSTHOOK: Output: default@test_table2_n9
+POSTHOOK: Lineage: test_table1_n9.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1_n9.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2_n9.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2_n9.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
-SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1_n9 a JOIN test_table2_n9 b ON a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+SELECT /*+mapjoin(b)*/ count(*) FROM test_table1_n9 a JOIN test_table2_n9 b ON a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -83,14 +83,14 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+PREHOOK: query: SELECT /*+mapjoin(b)*/ count(*) FROM test_table1_n9 a JOIN test_table2_n9 b ON a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table1_n9
+PREHOOK: Input: default@test_table2_n9
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+POSTHOOK: query: SELECT /*+mapjoin(b)*/ count(*) FROM test_table1_n9 a JOIN test_table2_n9 b ON a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table1_n9
+POSTHOOK: Input: default@test_table2_n9
 #### A masked pattern was here ####
 1028
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out
index 1dfacda93c..3353eb58bd 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out
@@ -1,56 +1,56 @@
-PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: query: create table smb_bucket_1_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@smb_bucket_1
-POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: Output: default@smb_bucket_1_n1
+POSTHOOK: query: create table smb_bucket_1_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@smb_bucket_1
-PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: Output: default@smb_bucket_1_n1
+PREHOOK: query: create table smb_bucket_2_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@smb_bucket_2
-POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: Output: default@smb_bucket_2_n1
+POSTHOOK: query: create table smb_bucket_2_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@smb_bucket_2
-PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: Output: default@smb_bucket_2_n1
+PREHOOK: query: create table smb_bucket_3_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@smb_bucket_3
-POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: Output: default@smb_bucket_3_n1
+POSTHOOK: query: create table smb_bucket_3_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@smb_bucket_3
-PREHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1
+POSTHOOK: Output: default@smb_bucket_3_n1
+PREHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@smb_bucket_1
-POSTHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1
+PREHOOK: Output: default@smb_bucket_1_n1
+POSTHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@smb_bucket_1
-PREHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2
+POSTHOOK: Output: default@smb_bucket_1_n1
+PREHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@smb_bucket_2
-POSTHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2
+PREHOOK: Output: default@smb_bucket_2_n1
+POSTHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@smb_bucket_2
-PREHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3
+POSTHOOK: Output: default@smb_bucket_2_n1
+PREHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@smb_bucket_3
-POSTHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3
+PREHOOK: Output: default@smb_bucket_3_n1
+POSTHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@smb_bucket_3
+POSTHOOK: Output: default@smb_bucket_3_n1
 PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -89,23 +89,23 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_3
+PREHOOK: Input: default@smb_bucket_1_n1
+PREHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_3
+POSTHOOK: Input: default@smb_bucket_1_n1
+POSTHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
 10 val_10 10 val_10
 4 val_4 4 val_4
 PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -141,15 +141,15 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_3
+PREHOOK: Input: default@smb_bucket_1_n1
+PREHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_3
+POSTHOOK: Input: default@smb_bucket_1_n1
+POSTHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
 1 val_1 NULL NULL
 10 val_10 10 val_10
@@ -157,10 +157,10 @@ POSTHOOK: Input: default@smb_bucket_3
 4 val_4 4 val_4
 5 val_5 NULL NULL
 PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -196,15 +196,15 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_3
+PREHOOK: Input: default@smb_bucket_1_n1
+PREHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_3
+POSTHOOK: Input: default@smb_bucket_1_n1
+POSTHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
 10 val_10 10 val_10
 4 val_4 4 val_4
@@ -213,10 +213,10 @@ NULL NULL 19 val_19
 NULL NULL 20 val_20
 NULL NULL 23 val_23
 PREHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -252,15 +252,15 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_3
+PREHOOK: Input: default@smb_bucket_1_n1
+PREHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_3
+POSTHOOK: Input: default@smb_bucket_1_n1
+POSTHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
 1 val_1 NULL NULL
 10 val_10 10 val_10
@@ -272,10 +272,10 @@ NULL NULL 19 val_19
 NULL NULL 20 val_20
 NULL NULL 23 val_23
 PREHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -314,23 +314,23 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_3
+PREHOOK: Input: default@smb_bucket_1_n1
+PREHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_3
+POSTHOOK: Input: default@smb_bucket_1_n1
+POSTHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
 10 val_10 10 val_10
 4 val_4 4 val_4
 PREHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -366,15 +366,15 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_3
+PREHOOK: Input: default@smb_bucket_1_n1
+PREHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a left outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_3
+POSTHOOK: Input: default@smb_bucket_1_n1
+POSTHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
 1 val_1 NULL NULL
 10 val_10 10 val_10
@@ -382,10 +382,10 @@ POSTHOOK: Input: default@smb_bucket_3
 4 val_4 4 val_4
 5 val_5 NULL NULL
 PREHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -421,15 +421,15 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_3
+PREHOOK: Input: default@smb_bucket_1_n1
+PREHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a right outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_3
+POSTHOOK: Input: default@smb_bucket_1_n1
+POSTHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
 10 val_10 10 val_10
 4 val_4 4 val_4
@@ -438,10 +438,10 @@ NULL NULL 19 val_19
 NULL NULL 20 val_20
 NULL NULL 23 val_23
 PREHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -477,15 +477,15 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key
 PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket_1
-PREHOOK: Input: default@smb_bucket_3
+PREHOOK: Input: default@smb_bucket_1_n1
+PREHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1 a full outer join smb_bucket_3 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_1_n1 a full outer join smb_bucket_3_n1 b on a.key = b.key
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket_1
-POSTHOOK: Input: default@smb_bucket_3
+POSTHOOK: Input: default@smb_bucket_1_n1
+POSTHOOK: Input: default@smb_bucket_3_n1
 #### A masked pattern was here ####
 1 val_1 NULL NULL
 10 val_10 10 val_10
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out
index d05a42b05a..810fa11e5c 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out
@@ -1,42 +1,42 @@
-PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: query: CREATE TABLE test_table1_n17 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table1_n17
+POSTHOOK: query: CREATE TABLE test_table1_n17 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Output: default@test_table1_n17
+PREHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n16
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table1_n17 PARTITION (ds = '1') SELECT *
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1@ds=1
+PREHOOK: Output: default@test_table1_n17@ds=1
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+INSERT OVERWRITE TABLE test_table1_n17 PARTITION (ds = '1') SELECT *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1@ds=1
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@test_table1_n17@ds=1
+POSTHOOK: Lineage: test_table1_n17 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1_n17 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -61,7 +61,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
       Execution mode: vectorized
 
   Stage: Stage-0
@@ -74,7 +74,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
 
   Stage: Stage-2
     Stats Work
@@ -82,33 +82,33 @@ STAGE PLANS:
       Column Stats Desc:
           Columns: key, value
          Column Types: int, string
-          Table: default.test_table2
+          Table: default.test_table2_n16
 
-PREHOOK: query: drop table test_table2
+PREHOOK: query: drop table test_table2_n16
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table2
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: drop table test_table2
+PREHOOK: Input: default@test_table2_n16
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: drop table test_table2_n16
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Input: default@test_table2_n16
+POSTHOOK: Output: default@test_table2_n16
+PREHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key desc) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key desc) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n16
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -146,7 +146,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string), '1' (type: string)
             outputColumnNames: key, value, ds
@@ -174,7 +174,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
 
   Stage: Stage-2
     Stats Work
@@ -182,7 +182,7 @@ STAGE PLANS:
       Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.test_table2
+          Table: default.test_table2_n16
 
   Stage: Stage-3
     Map Reduce
@@ -214,31 +214,31 @@ STAGE PLANS:
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: drop table test_table2
+PREHOOK: query: drop table test_table2_n16
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table2
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: drop table test_table2
+PREHOOK: Input: default@test_table2_n16
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: drop table test_table2_n16
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Input: default@test_table2_n16
+POSTHOOK: Output: default@test_table2_n16
+PREHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key, value) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key, value) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n16
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -275,7 +275,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), '1' (type: string)
            outputColumnNames: key, value, ds
@@ -303,7 +303,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
 
   Stage: Stage-2
     Stats Work
@@ -311,7 +311,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.test_table2
+          Table: default.test_table2_n16
 
   Stage: Stage-3
     Map Reduce
@@ -343,31 +343,31 @@ STAGE PLANS:
            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: drop table test_table2
+PREHOOK: query: drop table test_table2_n16
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table2
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: drop table test_table2
+PREHOOK: Input: default@test_table2_n16
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: drop table test_table2_n16
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Input: default@test_table2_n16
+POSTHOOK: Output: default@test_table2_n16
+PREHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (value) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (value) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n16
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -405,7 +405,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), '1' (type: string)
            outputColumnNames: key, value, ds
@@ -433,7 +433,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
 
   Stage: Stage-2
     Stats Work
@@ -441,7 +441,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.test_table2
+          Table: default.test_table2_n16
 
   Stage: Stage-3
     Map Reduce
@@ -473,31 +473,31 @@ STAGE PLANS:
            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: drop table test_table2
+PREHOOK: query: drop table test_table2_n16
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table2
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: drop table test_table2
+PREHOOK: Input: default@test_table2_n16
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: drop table test_table2_n16
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Input: default@test_table2_n16
+POSTHOOK: Output: default@test_table2_n16
+PREHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n16
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -535,7 +535,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), '1' (type: string)
            outputColumnNames: key, value, ds
@@ -563,7 +563,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
 
   Stage: Stage-2
    Stats Work
@@ -571,7 +571,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.test_table2
+          Table: default.test_table2_n16
 
   Stage: Stage-3
    Map Reduce
@@ -603,31 +603,31 @@ STAGE PLANS:
            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-PREHOOK: query: drop table test_table2
+PREHOOK: query: drop table test_table2_n16
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table2
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: drop table test_table2
+PREHOOK: Input: default@test_table2_n16
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: drop table test_table2_n16
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+POSTHOOK: Input: default@test_table2_n16
+POSTHOOK: Output: default@test_table2_n16
+PREHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+PREHOOK: Output: default@test_table2_n16
+POSTHOOK: query: CREATE TABLE test_table2_n16 (key INT, value STRING) PARTITIONED BY (ds STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n16
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
-SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+INSERT OVERWRITE TABLE test_table2_n16 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1_n17 a WHERE a.ds = '1'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -664,7 +664,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
          Select Operator
            expressions: _col0 (type: int), _col1 (type: string), '1' (type: string)
            outputColumnNames: key, value, ds
@@ -692,7 +692,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n16
 
   Stage: Stage-2
    Stats Work
@@ -700,7 +700,7 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.test_table2
+          Table: default.test_table2_n16
 
   Stage: Stage-3
    Map Reduce
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out
index 57edfa5b22..6652e71adb 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out
@@ -1,40 +1,40 @@
-PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING)
+PREHOOK: query: CREATE TABLE test_table1_n10 (key INT, value STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING)
+PREHOOK: Output: default@test_table1_n10
+POSTHOOK: query: CREATE TABLE test_table1_n10 (key INT, value STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING)
+POSTHOOK: Output: default@test_table1_n10
+PREHOOK: query: CREATE TABLE test_table2_n10 (key INT, value STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING)
+PREHOOK: Output: default@test_table2_n10
+POSTHOOK: query: CREATE TABLE test_table2_n10 (key INT, value STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n10
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table1_n10 SELECT *
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1
+PREHOOK: Output: default@test_table1_n10
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table1_n10 SELECT *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1
-POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2
-SELECT * FROM test_table1
+POSTHOOK: Output: default@test_table1_n10
+POSTHOOK: Lineage: test_table1_n10.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1_n10.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2_n10
+SELECT * FROM test_table1_n10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2
-SELECT * FROM test_table1
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2_n10
+SELECT * FROM test_table1_n10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -46,7 +46,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: test_table1
+            alias: test_table1_n10
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: int), value (type: string)
@@ -59,7 +59,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n10
      Execution mode: vectorized
 
  Stage: Stage-0
@@ -70,7 +70,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n10
 
  Stage: Stage-2
    Stats Work
@@ -78,109 +78,109 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.test_table2
+          Table: default.test_table2_n10
 
-PREHOOK: query: INSERT OVERWRITE TABLE test_table2
-SELECT * FROM test_table1
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2_n10
+SELECT * FROM test_table1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table2
-SELECT * FROM test_table1
+PREHOOK: Input: default@test_table1_n10
+PREHOOK: Output: default@test_table2_n10
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2_n10
+SELECT * FROM test_table1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Output: default@test_table2
-POSTHOOK: Lineage: test_table2.key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table2.value SIMPLE [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select count(*) from test_table1
+POSTHOOK: Input: default@test_table1_n10
+POSTHOOK: Output: default@test_table2_n10
+POSTHOOK: Lineage: test_table2_n10.key SIMPLE [(test_table1_n10)test_table1_n10.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2_n10.value SIMPLE [(test_table1_n10)test_table1_n10.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1_n10
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table1
+POSTHOOK: query: select count(*) from test_table1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1_n10
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table1 tablesample (bucket 2 out of 2) s
+PREHOOK: query: select count(*) from test_table1_n10 tablesample (bucket 2 out of 2) s
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1_n10
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table1 tablesample (bucket 2 out of 2) s
+POSTHOOK: query: select count(*) from test_table1_n10 tablesample (bucket 2 out of 2) s
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1_n10
 #### A masked pattern was here ####
 257
-PREHOOK: query: select count(*) from test_table2
+PREHOOK: query: select count(*) from test_table2_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2_n10
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2
+POSTHOOK: query: select count(*) from test_table2_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2_n10
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s
+PREHOOK: query: select count(*) from test_table2_n10 tablesample (bucket 2 out of 2) s
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2_n10
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s
+POSTHOOK: query: select count(*) from test_table2_n10 tablesample (bucket 2 out of 2) s
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2_n10
 #### A masked pattern was here ####
 257
-PREHOOK: query: drop table test_table1
+PREHOOK: query: drop table test_table1_n10
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table1
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: drop table test_table1
+PREHOOK: Input: default@test_table1_n10
+PREHOOK: Output: default@test_table1_n10
+POSTHOOK: query: drop table test_table1_n10
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: drop table test_table2
+POSTHOOK: Input: default@test_table1_n10
+POSTHOOK: Output: default@test_table1_n10
+PREHOOK: query: drop table test_table2_n10
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@test_table2
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: drop table test_table2
+PREHOOK: Input: default@test_table2_n10
+PREHOOK: Output: default@test_table2_n10
+POSTHOOK: query: drop table test_table2_n10
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@test_table2
-POSTHOOK: Output: default@test_table2
-PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING)
+POSTHOOK: Input: default@test_table2_n10
+POSTHOOK: Output: default@test_table2_n10
+PREHOOK: query: CREATE TABLE test_table1_n10 (key INT, value STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING)
+PREHOOK: Output: default@test_table1_n10
+POSTHOOK: query: CREATE TABLE test_table1_n10 (key INT, value STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING)
+POSTHOOK: Output: default@test_table1_n10
+PREHOOK: query: CREATE TABLE test_table2_n10 (key INT, value STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING)
+PREHOOK: Output: default@test_table2_n10
+POSTHOOK: query: CREATE TABLE test_table2_n10 (key INT, value STRING)
 CLUSTERED BY (key) INTO 2 BUCKETS
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table2
+POSTHOOK: Output: default@test_table2_n10
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table1_n10 SELECT *
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@test_table1
+PREHOOK: Output: default@test_table1_n10
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table1_n10 SELECT *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_table1
-POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2
-SELECT * FROM test_table1
+POSTHOOK: Output: default@test_table1_n10
+POSTHOOK: Lineage: test_table1_n10.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1_n10.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2_n10
+SELECT * FROM test_table1_n10
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2
-SELECT * FROM test_table1
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2_n10
+SELECT * FROM test_table1_n10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -192,7 +192,7 @@ STAGE PLANS:
    Map Reduce
      Map Operator Tree:
          TableScan
-            alias: test_table1
+            alias: test_table1_n10
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: int), value (type: string)
@@ -205,7 +205,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n10
      Execution mode: vectorized
 
  Stage: Stage-0
@@ -216,7 +216,7 @@ STAGE PLANS:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.test_table2
+            name: default.test_table2_n10
 
  Stage: Stage-2
    Stats Work
@@ -224,53 +224,53 @@ STAGE PLANS:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
-          Table: default.test_table2
+          Table: default.test_table2_n10
 
-PREHOOK: query: INSERT OVERWRITE TABLE test_table2
-SELECT * FROM test_table1
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2_n10
+SELECT * FROM test_table1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
-PREHOOK: Output: default@test_table2
-POSTHOOK: query: INSERT OVERWRITE TABLE test_table2
-SELECT * FROM test_table1
+PREHOOK: Input: default@test_table1_n10
+PREHOOK: Output: default@test_table2_n10
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2_n10
+SELECT * FROM test_table1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
-POSTHOOK: Output: default@test_table2
-POSTHOOK: Lineage: test_table2.key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: test_table2.value SIMPLE [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Input: default@test_table1_n10
+POSTHOOK: Output: default@test_table2_n10
+POSTHOOK: Lineage: test_table2_n10.key SIMPLE [(test_table1_n10)test_table1_n10.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2_n10.value SIMPLE [(test_table1_n10)test_table1_n10.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table1_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1_n10
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table1
+POSTHOOK: query: select count(*) from test_table1_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1_n10
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table1 tablesample (bucket 2 out of 2) s
+PREHOOK: query: select count(*) from test_table1_n10 tablesample (bucket 2 out of 2) s
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1_n10
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table1 tablesample (bucket 2 out of 2) s
+POSTHOOK: query: select count(*) from test_table1_n10 tablesample (bucket 2 out of 2) s
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1_n10
 #### A masked pattern was here ####
 257
-PREHOOK: query: select count(*) from test_table2
+PREHOOK: query: select count(*) from test_table2_n10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2_n10
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2
+POSTHOOK: query: select count(*) from test_table2_n10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2_n10
 #### A masked pattern was here ####
 500
-PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s
+PREHOOK: query: select count(*) from test_table2_n10 tablesample (bucket 2 out of 2) s
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2_n10
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s
+POSTHOOK: query: select count(*) from test_table2_n10 tablesample (bucket 2 out of 2) s
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2_n10
 #### A masked pattern was here ####
 257
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
index 8fe738a11b..6e57bcad13 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
@@ -1,59 +1,59 @@
-PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: query: create table smb_bucket_1_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@smb_bucket_1
-POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: Output: default@smb_bucket_1_n4
+POSTHOOK: query: create table smb_bucket_1_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@smb_bucket_1
-PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: Output: default@smb_bucket_1_n4
+PREHOOK: query: create table smb_bucket_2_n4(key int, value string) CLUSTERED BY (key) SORTED BY
(key) INTO 1 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smb_bucket_2 -POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +PREHOOK: Output: default@smb_bucket_2_n4 +POSTHOOK: query: create table smb_bucket_2_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@smb_bucket_2 -PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +POSTHOOK: Output: default@smb_bucket_2_n4 +PREHOOK: query: create table smb_bucket_3_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smb_bucket_3 -POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +PREHOOK: Output: default@smb_bucket_3_n4 +POSTHOOK: query: create table smb_bucket_3_n4(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@smb_bucket_3 -PREHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1 +POSTHOOK: Output: default@smb_bucket_3_n4 +PREHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@smb_bucket_1 -POSTHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1 +PREHOOK: Output: default@smb_bucket_1_n4 +POSTHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@smb_bucket_1 -PREHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2 +POSTHOOK: Output: default@smb_bucket_1_n4 +PREHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@smb_bucket_2 -POSTHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2 +PREHOOK: Output: default@smb_bucket_2_n4 +POSTHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@smb_bucket_2 -PREHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3 +POSTHOOK: Output: default@smb_bucket_2_n4 +PREHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@smb_bucket_3 -POSTHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3 +PREHOOK: Output: default@smb_bucket_3_n4 +POSTHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: 
default@smb_bucket_3 +POSTHOOK: Output: default@smb_bucket_3_n4 Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product Warning: Shuffle Join JOIN[18][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-4:MAPRED' is a cross product PREHOOK: query: explain -select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 +select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY POSTHOOK: query: explain -select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 +select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -185,10 +185,10 @@ Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MA Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Stage-9:MAPRED' is a cross product Warning: Map Join MAPJOIN[36][bigTable=?] in task 'Stage-10:MAPRED' is a cross product PREHOOK: query: explain -select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 +select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY POSTHOOK: query: explain -select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 +select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-13 is a root stage @@ -425,15 +425,15 @@ Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-6:MAPRED' is a cross pr Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Stage-9:MAPRED' is a cross product Warning: Map Join MAPJOIN[36][bigTable=?] 
in task 'Stage-10:MAPRED' is a cross product -PREHOOK: query: select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 +PREHOOK: query: select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_1 -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_1_n4 +PREHOOK: Input: default@smb_bucket_2_n4 +PREHOOK: Input: default@smb_bucket_3_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 +POSTHOOK: query: select * from (select a.key from smb_bucket_1_n4 a join smb_bucket_2_n4 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2_n4 c join smb_bucket_3_n4 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_1 -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_1_n4 +POSTHOOK: Input: default@smb_bucket_2_n4 +POSTHOOK: Input: default@smb_bucket_3_n4 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out index cf4c74410f..e116fa7779 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out @@ -1,56 +1,56 @@ -PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +PREHOOK: query: create table smb_bucket_1_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smb_bucket_1 -POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +PREHOOK: Output: default@smb_bucket_1_n0 +POSTHOOK: query: create table smb_bucket_1_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@smb_bucket_1 -PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +POSTHOOK: Output: default@smb_bucket_1_n0 +PREHOOK: query: create table smb_bucket_2_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smb_bucket_2 -POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +PREHOOK: Output: default@smb_bucket_2_n0 +POSTHOOK: query: create table smb_bucket_2_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE POSTHOOK: type: 
CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@smb_bucket_2 -PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +POSTHOOK: Output: default@smb_bucket_2_n0 +PREHOOK: query: create table smb_bucket_3_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smb_bucket_3 -POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +PREHOOK: Output: default@smb_bucket_3_n0 +POSTHOOK: query: create table smb_bucket_3_n0(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@smb_bucket_3 -PREHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1 +POSTHOOK: Output: default@smb_bucket_3_n0 +PREHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@smb_bucket_1 -POSTHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1 +PREHOOK: Output: default@smb_bucket_1_n0 +POSTHOOK: query: load data local inpath '../../data/files/smb_rc1/000000_0' overwrite into table smb_bucket_1_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@smb_bucket_1 -PREHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2 +POSTHOOK: Output: default@smb_bucket_1_n0 +PREHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@smb_bucket_2 -POSTHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2 +PREHOOK: Output: default@smb_bucket_2_n0 +POSTHOOK: query: load data local inpath '../../data/files/smb_rc2/000000_0' overwrite into table smb_bucket_2_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@smb_bucket_2 -PREHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3 +POSTHOOK: Output: default@smb_bucket_2_n0 +PREHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@smb_bucket_3 -POSTHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3 +PREHOOK: Output: default@smb_bucket_3_n0 +POSTHOOK: query: load data local inpath '../../data/files/smb_rc3/000000_0' overwrite into table smb_bucket_3_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@smb_bucket_3 +POSTHOOK: Output: default@smb_bucket_3_n0 PREHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key 
POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -89,23 +89,23 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_2_n0 +PREHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_2_n0 +POSTHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### 20 val_20 20 val_20 23 val_23 23 val_23 PREHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -141,25 +141,25 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_2_n0 +PREHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_2_n0 +POSTHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### 20 val_20 20 val_20 23 val_23 23 val_23 25 val_25 NULL NULL 30 val_30 NULL NULL PREHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -195,15 +195,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_2_n0 +PREHOOK: Input: 
default@smb_bucket_3_n0 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_2_n0 +POSTHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### 20 val_20 20 val_20 23 val_23 23 val_23 @@ -212,10 +212,10 @@ NULL NULL 17 val_17 NULL NULL 19 val_19 NULL NULL 4 val_4 PREHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -251,15 +251,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_2_n0 +PREHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_2_n0 +POSTHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### 20 val_20 20 val_20 23 val_23 23 val_23 @@ -270,10 +270,10 @@ NULL NULL 17 val_17 NULL NULL 19 val_19 NULL NULL 4 val_4 PREHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -312,23 +312,23 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_2_n0 +PREHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_2_n0 +POSTHOOK: Input: 
default@smb_bucket_3_n0 #### A masked pattern was here #### 20 val_20 20 val_20 23 val_23 23 val_23 PREHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -364,25 +364,25 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_2_n0 +PREHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a left outer join smb_bucket_3 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a left outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_2_n0 +POSTHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### 20 val_20 20 val_20 23 val_23 23 val_23 25 val_25 NULL NULL 30 val_30 NULL NULL PREHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -418,15 +418,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_2_n0 +PREHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a right outer join smb_bucket_3 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a right outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_2_n0 +POSTHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### 20 val_20 20 val_20 23 val_23 23 val_23 @@ -435,10 +435,10 @@ NULL NULL 17 val_17 NULL NULL 19 val_19 NULL NULL 4 val_4 PREHOOK: query: explain -select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+mapjoin(b)*/ * from 
smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key +select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -474,15 +474,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key +PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key PREHOOK: type: QUERY -PREHOOK: Input: default@smb_bucket_2 -PREHOOK: Input: default@smb_bucket_3 +PREHOOK: Input: default@smb_bucket_2_n0 +PREHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### -POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key +POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket_2_n0 a full outer join smb_bucket_3_n0 b on a.key = b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@smb_bucket_2 -POSTHOOK: Input: default@smb_bucket_3 +POSTHOOK: Input: default@smb_bucket_2_n0 +POSTHOOK: Input: default@smb_bucket_3_n0 #### A masked pattern was here #### 20 val_20 20 val_20 23 val_23 23 val_23 diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_46.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_46.q.out index f259c0638b..e61c4b88ac 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_46.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_46.q.out @@ -19,27 +19,27 @@ POSTHOOK: Output: default@aux1 POSTHOOK: Lineage: aux1.col_1 SCRIPT [] POSTHOOK: Lineage: aux1.key SCRIPT [] POSTHOOK: Lineage: aux1.value SCRIPT [] -PREHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE test1_n5 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test1 -POSTHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: Output: default@test1_n5 +POSTHOOK: query: CREATE TABLE test1_n5 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test1 -PREHOOK: query: INSERT OVERWRITE TABLE test1 +POSTHOOK: Output: default@test1_n5 +PREHOOK: query: INSERT OVERWRITE TABLE test1_n5 SELECT * FROM aux1 PREHOOK: type: QUERY PREHOOK: Input: default@aux1 -PREHOOK: Output: default@test1 -POSTHOOK: query: INSERT OVERWRITE TABLE test1 +PREHOOK: Output: default@test1_n5 +POSTHOOK: query: INSERT OVERWRITE TABLE test1_n5 SELECT * FROM aux1 POSTHOOK: type: QUERY POSTHOOK: Input: default@aux1 -POSTHOOK: Output: default@test1 -POSTHOOK: Lineage: test1.col_1 SIMPLE [(aux1)aux1.FieldSchema(name:col_1, type:string, comment:null), ] -POSTHOOK: Lineage: test1.key SIMPLE [(aux1)aux1.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test1.value SIMPLE [(aux1)aux1.FieldSchema(name:value, type:int, comment:null), ] +POSTHOOK: Output: default@test1_n5 +POSTHOOK: Lineage: test1_n5.col_1 SIMPLE [(aux1)aux1.FieldSchema(name:col_1, type:string, comment:null), ] +POSTHOOK: Lineage: test1_n5.key SIMPLE [(aux1)aux1.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test1_n5.value SIMPLE [(aux1)aux1.FieldSchema(name:value, type:int, comment:null), ] PREHOOK: query: CREATE 
TABLE aux2 (key INT, value INT, col_2 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -61,36 +61,36 @@ POSTHOOK: Output: default@aux2 POSTHOOK: Lineage: aux2.col_2 SCRIPT [] POSTHOOK: Lineage: aux2.key SCRIPT [] POSTHOOK: Lineage: aux2.value SCRIPT [] -PREHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE test2_n3 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test2 -POSTHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: Output: default@test2_n3 +POSTHOOK: query: CREATE TABLE test2_n3 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test2 -PREHOOK: query: INSERT OVERWRITE TABLE test2 +POSTHOOK: Output: default@test2_n3 +PREHOOK: query: INSERT OVERWRITE TABLE test2_n3 SELECT * FROM aux2 PREHOOK: type: QUERY PREHOOK: Input: default@aux2 -PREHOOK: Output: default@test2 -POSTHOOK: query: INSERT OVERWRITE TABLE test2 +PREHOOK: Output: default@test2_n3 +POSTHOOK: query: INSERT OVERWRITE TABLE test2_n3 SELECT * FROM aux2 POSTHOOK: type: QUERY POSTHOOK: Input: default@aux2 -POSTHOOK: Output: default@test2 -POSTHOOK: Lineage: test2.col_2 SIMPLE [(aux2)aux2.FieldSchema(name:col_2, type:string, comment:null), ] -POSTHOOK: Lineage: test2.key SIMPLE [(aux2)aux2.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test2.value SIMPLE [(aux2)aux2.FieldSchema(name:value, type:int, comment:null), ] +POSTHOOK: Output: default@test2_n3 +POSTHOOK: Lineage: test2_n3.col_2 SIMPLE [(aux2)aux2.FieldSchema(name:col_2, type:string, comment:null), ] +POSTHOOK: Lineage: test2_n3.key SIMPLE [(aux2)aux2.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test2_n3.value SIMPLE [(aux2)aux2.FieldSchema(name:value, type:int, comment:null), ] PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -101,7 +101,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -128,18 +128,18 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 
#### A masked pattern was here #### 98 NULL None NULL NULL NULL NULL NULL None NULL NULL NULL @@ -151,17 +151,17 @@ NULL NULL None NULL NULL NULL 100 1 Bob NULL NULL NULL PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -172,7 +172,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -202,22 +202,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 98 NULL None NULL NULL NULL NULL NULL None NULL NULL NULL @@ -228,15 +228,15 @@ NULL NULL None NULL NULL NULL Warning: Map Join MAPJOIN[11][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -247,13 +247,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n3 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n3 TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key BETWEEN 100 AND 102 (type: boolean) @@ -274,7 +274,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -310,20 +310,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102 + AND test2_n3.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 98 NULL None NULL NULL NULL NULL NULL None NULL NULL NULL @@ -333,13 +333,13 @@ NULL NULL None NULL NULL NULL 100 1 Bob 102 2 Del PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value AND true) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value AND true) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -350,7 +350,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -377,18 +377,18 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value AND true) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 
+PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value AND true) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### NULL NULL NULL 105 NULL None 101 2 Car 103 2 Ema @@ -399,13 +399,13 @@ NULL NULL NULL 104 3 Fli Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -416,13 +416,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n3 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n3 TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -440,7 +440,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -476,18 +476,18 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 98 NULL None NULL NULL NULL NULL NULL None NULL NULL NULL @@ -504,17 +504,17 @@ NULL NULL None NULL NULL NULL Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -525,13 +525,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n3 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n3 TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -546,7 +546,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -579,22 +579,22 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 98 NULL None 102 2 Del NULL NULL None 102 2 Del @@ -612,15 +612,15 @@ NULL NULL None 102 2 Del Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -631,13 +631,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n3 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n3 TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -652,7 +652,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -685,20 +685,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 98 NULL None NULL NULL NULL NULL NULL None NULL NULL NULL @@ -716,15 +716,15 @@ NULL NULL None NULL NULL NULL Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -735,13 +735,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n3 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n3 TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -756,7 +756,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -789,20 +789,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 98 NULL None 102 2 Del NULL NULL None 102 2 Del @@ -814,17 +814,17 @@ NULL NULL None 102 2 Del 100 1 Bob 102 2 Del PREHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -835,7 +835,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -863,22 +863,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON 
(test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 LEFT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 LEFT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 98 NULL None NULL NULL NULL NULL NULL None NULL NULL NULL @@ -890,17 +890,17 @@ NULL NULL None NULL NULL NULL Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -911,13 +911,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n5 TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -932,7 +932,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -965,22 +965,22 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 101 2 Car 105 NULL None 100 1 Bob 105 NULL None @@ -998,15 +998,15 @@ NULL NULL None 102 2 Del Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -1017,13 +1017,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n5 TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1038,7 +1038,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1071,20 +1071,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 101 2 Car 105 NULL None 100 1 Bob 105 NULL None @@ -1099,15 +1099,15 @@ POSTHOOK: Input: default@test2 Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -1118,13 +1118,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test1 + $hdt$_0:test1_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test1 + $hdt$_0:test1_n5 TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1139,7 +1139,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1172,20 +1172,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[9][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### NULL NULL NULL 105 NULL None 101 2 Car 103 2 Ema @@ -1199,17 +1199,17 @@ NULL NULL None 102 2 Del NULL NULL NULL 104 3 Fli PREHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1220,7 +1220,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1248,22 +1248,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 RIGHT OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 RIGHT OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### NULL NULL NULL 105 NULL None 101 2 Car 103 2 Ema @@ -1273,17 +1273,17 @@ NULL NULL NULL 104 3 Fli Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR 
test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1294,7 +1294,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1305,7 +1305,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1341,22 +1341,22 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 100 1 Bob 104 3 Fli 100 1 Bob 102 2 Del @@ -1374,15 +1374,15 @@ NULL NULL None 102 2 Del Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1393,7 +1393,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1404,7 +1404,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: 
test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1440,20 +1440,20 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test1_n5.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 100 1 Bob 104 3 Fli 100 1 Bob 102 2 Del @@ -1471,15 +1471,15 @@ NULL NULL None NULL NULL NULL Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1490,7 +1490,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1501,7 +1501,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1537,20 +1537,20 @@ STAGE PLANS: Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - OR test2.key between 100 and 102) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + OR test2_n3.key between 100 and 102) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: 
default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### 100 1 Bob 102 2 Del 99 0 Alice 102 2 Del @@ -1564,17 +1564,17 @@ NULL NULL NULL 104 3 Fli NULL NULL NULL 105 NULL None PREHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1585,7 +1585,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n5 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -1598,7 +1598,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col2 (type: string) TableScan - alias: test2 + alias: test2_n3 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1635,22 +1635,22 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n5 +PREHOOK: Input: default@test2_n3 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 FULL OUTER JOIN test2 -ON (test1.value=test2.value - AND (test1.key between 100 and 102 - OR test2.key between 100 and 102)) +FROM test1_n5 FULL OUTER JOIN test2_n3 +ON (test1_n5.value=test2_n3.value + AND (test1_n5.key between 100 and 102 + OR test2_n3.key between 100 and 102)) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n5 +POSTHOOK: Input: default@test2_n3 #### A masked pattern was here #### NULL NULL None NULL NULL NULL 98 NULL None NULL NULL NULL diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_47.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_47.q.out index 73b295cf13..a708af2e67 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_47.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_47.q.out @@ -1,101 +1,101 @@ -PREHOOK: query: CREATE TABLE aux1 (key INT, value INT, col_1 STRING) +PREHOOK: query: CREATE TABLE aux1_n0 (key INT, value INT, col_1 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@aux1 -POSTHOOK: query: CREATE TABLE aux1 (key INT, value INT, col_1 STRING) +PREHOOK: Output: default@aux1_n0 +POSTHOOK: query: CREATE TABLE aux1_n0 (key INT, value INT, col_1 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@aux1 -PREHOOK: query: 
INSERT INTO aux1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +POSTHOOK: Output: default@aux1_n0 +PREHOOK: query: INSERT INTO aux1_n0 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@aux1 -POSTHOOK: query: INSERT INTO aux1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), +PREHOOK: Output: default@aux1_n0 +POSTHOOK: query: INSERT INTO aux1_n0 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'), (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@aux1 -POSTHOOK: Lineage: aux1.col_1 SCRIPT [] -POSTHOOK: Lineage: aux1.key SCRIPT [] -POSTHOOK: Lineage: aux1.value SCRIPT [] -PREHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +POSTHOOK: Output: default@aux1_n0 +POSTHOOK: Lineage: aux1_n0.col_1 SCRIPT [] +POSTHOOK: Lineage: aux1_n0.key SCRIPT [] +POSTHOOK: Lineage: aux1_n0.value SCRIPT [] +PREHOOK: query: CREATE TABLE test1_n7 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test1 -POSTHOOK: query: CREATE TABLE test1 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: Output: default@test1_n7 +POSTHOOK: query: CREATE TABLE test1_n7 (key INT, value INT, col_1 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test1 -PREHOOK: query: INSERT OVERWRITE TABLE test1 -SELECT * FROM aux1 +POSTHOOK: Output: default@test1_n7 +PREHOOK: query: INSERT OVERWRITE TABLE test1_n7 +SELECT * FROM aux1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@aux1 -PREHOOK: Output: default@test1 -POSTHOOK: query: INSERT OVERWRITE TABLE test1 -SELECT * FROM aux1 +PREHOOK: Input: default@aux1_n0 +PREHOOK: Output: default@test1_n7 +POSTHOOK: query: INSERT OVERWRITE TABLE test1_n7 +SELECT * FROM aux1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@aux1 -POSTHOOK: Output: default@test1 -POSTHOOK: Lineage: test1.col_1 SIMPLE [(aux1)aux1.FieldSchema(name:col_1, type:string, comment:null), ] -POSTHOOK: Lineage: test1.key SIMPLE [(aux1)aux1.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test1.value SIMPLE [(aux1)aux1.FieldSchema(name:value, type:int, comment:null), ] -PREHOOK: query: CREATE TABLE aux2 (key INT, value INT, col_2 STRING) +POSTHOOK: Input: default@aux1_n0 +POSTHOOK: Output: default@test1_n7 +POSTHOOK: Lineage: test1_n7.col_1 SIMPLE [(aux1_n0)aux1_n0.FieldSchema(name:col_1, type:string, comment:null), ] +POSTHOOK: Lineage: test1_n7.key SIMPLE [(aux1_n0)aux1_n0.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test1_n7.value SIMPLE [(aux1_n0)aux1_n0.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: CREATE TABLE aux2_n0 (key INT, value INT, col_2 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@aux2 -POSTHOOK: query: CREATE TABLE aux2 (key INT, value INT, col_2 STRING) +PREHOOK: Output: default@aux2_n0 +POSTHOOK: query: CREATE TABLE aux2_n0 (key INT, value INT, col_2 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@aux2 -PREHOOK: query: INSERT INTO aux2 VALUES (102, 
2, 'Del'), (103, 2, 'Ema'), +POSTHOOK: Output: default@aux2_n0 +PREHOOK: query: INSERT INTO aux2_n0 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@aux2 -POSTHOOK: query: INSERT INTO aux2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), +PREHOOK: Output: default@aux2_n0 +POSTHOOK: query: INSERT INTO aux2_n0 VALUES (102, 2, 'Del'), (103, 2, 'Ema'), (104, 3, 'Fli'), (105, NULL, 'None') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@aux2 -POSTHOOK: Lineage: aux2.col_2 SCRIPT [] -POSTHOOK: Lineage: aux2.key SCRIPT [] -POSTHOOK: Lineage: aux2.value SCRIPT [] -PREHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +POSTHOOK: Output: default@aux2_n0 +POSTHOOK: Lineage: aux2_n0.col_2 SCRIPT [] +POSTHOOK: Lineage: aux2_n0.key SCRIPT [] +POSTHOOK: Lineage: aux2_n0.value SCRIPT [] +PREHOOK: query: CREATE TABLE test2_n5 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test2 -POSTHOOK: query: CREATE TABLE test2 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: Output: default@test2_n5 +POSTHOOK: query: CREATE TABLE test2_n5 (key INT, value INT, col_2 STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test2 -PREHOOK: query: INSERT OVERWRITE TABLE test2 -SELECT * FROM aux2 +POSTHOOK: Output: default@test2_n5 +PREHOOK: query: INSERT OVERWRITE TABLE test2_n5 +SELECT * FROM aux2_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@aux2 -PREHOOK: Output: default@test2 -POSTHOOK: query: INSERT OVERWRITE TABLE test2 -SELECT * FROM aux2 +PREHOOK: Input: default@aux2_n0 +PREHOOK: Output: default@test2_n5 +POSTHOOK: query: INSERT OVERWRITE TABLE test2_n5 +SELECT * FROM aux2_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@aux2 -POSTHOOK: Output: default@test2 -POSTHOOK: Lineage: test2.col_2 SIMPLE [(aux2)aux2.FieldSchema(name:col_2, type:string, comment:null), ] -POSTHOOK: Lineage: test2.key SIMPLE [(aux2)aux2.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: test2.value SIMPLE [(aux2)aux2.FieldSchema(name:value, type:int, comment:null), ] +POSTHOOK: Input: default@aux2_n0 +POSTHOOK: Output: default@test2_n5 +POSTHOOK: Lineage: test2_n5.col_2 SIMPLE [(aux2_n0)aux2_n0.FieldSchema(name:col_2, type:string, comment:null), ] +POSTHOOK: Lineage: test2_n5.key SIMPLE [(aux2_n0)aux2_n0.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: test2_n5.value SIMPLE [(aux2_n0)aux2_n0.FieldSchema(name:value, type:int, comment:null), ] PREHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value + AND test1_n7.key between 100 and 102 + AND test2_n5.key between 100 and 102) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value + AND test1_n7.key between 100 and 102 + AND test2_n5.key between 100 and 102) LIMIT 10 POSTHOOK: type: QUERY STAGE 
DEPENDENCIES: @@ -107,7 +107,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n7 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key BETWEEN 100 AND 102 and value is not null) (type: boolean) @@ -139,36 +139,36 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value + AND test1_n7.key between 100 and 102 + AND test2_n5.key between 100 and 102) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - AND test1.key between 100 and 102 - AND test2.key between 100 and 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value + AND test1_n7.key between 100 and 102 + AND test2_n5.key between 100 and 102) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### 101 2 Car 102 2 Del PREHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value AND true) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value AND true) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -180,7 +180,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n7 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: value is not null (type: boolean) @@ -212,20 +212,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value AND true) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value AND true) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value AND true) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### 101 2 Car 103 2 Ema 101 2 Car 102 2 Del @@ -234,17 +234,17 @@ POSTHOOK: Input: default@test2 Warning: Map Join MAPJOIN[14][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102 +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key between 100 and 102 + AND test2_n5.key between 100 and 102 AND true) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102 +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key between 100 and 102 + AND test2_n5.key between 100 and 102 AND true) LIMIT 10 POSTHOOK: type: QUERY @@ -257,13 +257,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n5 TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key BETWEEN 100 AND 102 (type: boolean) @@ -281,7 +281,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n7 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key BETWEEN 100 AND 102 (type: boolean) @@ -320,42 +320,42 @@ STAGE PLANS: Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102 +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key between 100 and 102 + AND test2_n5.key between 100 and 102 AND true) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.key between 100 and 102 - AND test2.key between 100 and 102 +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key between 100 and 102 + AND test2_n5.key between 100 and 102 AND true) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### 101 2 Car 102 2 Del 100 1 Bob 102 2 Del Warning: Map Join MAPJOIN[10][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value + OR test1_n7.key between 100 and 102 + OR test2_n5.key between 100 and 102) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value + OR test1_n7.key between 100 and 102 + OR test2_n5.key between 100 and 102) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -367,13 +367,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n5 TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -388,7 +388,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n7 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -424,24 +424,24 @@ STAGE PLANS: Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value + OR test1_n7.key between 100 and 102 + OR test2_n5.key between 100 and 102) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.value=test2.value - OR test1.key between 100 and 102 - OR test2.key between 100 and 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.value=test2_n5.value + OR test1_n7.key between 100 and 102 + OR test2_n5.key between 100 and 102) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### 98 NULL None 102 2 Del NULL NULL None 102 2 Del @@ -456,16 +456,16 @@ NULL NULL None 102 2 Del Warning: Map Join MAPJOIN[10][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - AND test1.key+test2.key <= 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key+test2_n5.key >= 100 + AND test1_n7.key+test2_n5.key <= 102) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - AND test1.key+test2.key <= 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key+test2_n5.key >= 100 + AND test1_n7.key+test2_n5.key <= 102) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -477,13 +477,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n5 TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -498,7 +498,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n7 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -534,36 +534,36 @@ STAGE PLANS: Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - AND test1.key+test2.key <= 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key+test2_n5.key >= 100 + AND test1_n7.key+test2_n5.key <= 102) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - AND test1.key+test2.key <= 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key+test2_n5.key >= 100 + AND test1_n7.key+test2_n5.key <= 102) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### Warning: Map Join MAPJOIN[10][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - OR test1.key+test2.key <= 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key+test2_n5.key >= 100 + OR test1_n7.key+test2_n5.key <= 102) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - OR test1.key+test2.key <= 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key+test2_n5.key >= 100 + OR test1_n7.key+test2_n5.key <= 102) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -575,13 +575,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n5 TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -596,7 +596,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n7 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -632,22 +632,22 @@ STAGE PLANS: Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - OR test1.key+test2.key <= 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key+test2_n5.key >= 100 + OR test1_n7.key+test2_n5.key <= 102) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 JOIN test2 -ON (test1.key+test2.key >= 100 - OR test1.key+test2.key <= 102) +FROM test1_n7 JOIN test2_n5 +ON (test1_n7.key+test2_n5.key >= 100 + OR test1_n7.key+test2_n5.key <= 102) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### 98 NULL None 105 NULL None 98 NULL None 103 2 Ema @@ -662,14 +662,14 @@ POSTHOOK: Input: default@test2 Warning: Map Join MAPJOIN[10][bigTable=?] 
in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON ((test1.key,test2.key) IN ((100,100),(101,101),(102,102))) +FROM test1_n7 JOIN test2_n5 +ON ((test1_n7.key,test2_n5.key) IN ((100,100),(101,101),(102,102))) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test1 JOIN test2 -ON ((test1.key,test2.key) IN ((100,100),(101,101),(102,102))) +FROM test1_n7 JOIN test2_n5 +ON ((test1_n7.key,test2_n5.key) IN ((100,100),(101,101),(102,102))) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -681,13 +681,13 @@ STAGE PLANS: Stage: Stage-4 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_1:test2 + $hdt$_1:test2_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_1:test2 + $hdt$_1:test2_n5 TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -702,7 +702,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test1 + alias: test1_n7 Statistics: Num rows: 6 Data size: 56 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_1 (type: string) @@ -738,34 +738,34 @@ STAGE PLANS: Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test1 JOIN test2 -ON ((test1.key,test2.key) IN ((100,100),(101,101),(102,102))) +FROM test1_n7 JOIN test2_n5 +ON ((test1_n7.key,test2_n5.key) IN ((100,100),(101,101),(102,102))) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test1 JOIN test2 -ON ((test1.key,test2.key) IN ((100,100),(101,101),(102,102))) +FROM test1_n7 JOIN test2_n5 +ON ((test1_n7.key,test2_n5.key) IN ((100,100),(101,101),(102,102))) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -LEFT OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +LEFT OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -LEFT OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +LEFT OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -777,7 +777,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -833,22 +833,22 @@ STAGE PLANS: Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -LEFT OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +LEFT OUTER JOIN 
test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -LEFT OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +LEFT OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### 104 3 Fli 100 1 Bob NULL NULL NULL 104 3 Fli 99 0 Alice NULL NULL NULL @@ -863,16 +863,16 @@ POSTHOOK: Input: default@test2 Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test2 -LEFT OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +LEFT OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test2 -LEFT OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +LEFT OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -884,7 +884,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -937,36 +937,36 @@ STAGE PLANS: Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test2 -LEFT OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +LEFT OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test2 -LEFT OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +LEFT OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### Warning: Map Join MAPJOIN[18][bigTable=?] 
in task 'Stage-7:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -RIGHT OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +RIGHT OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -RIGHT OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +RIGHT OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -982,13 +982,13 @@ STAGE PLANS: Stage: Stage-9 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test2 + $hdt$_0:test2_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test2 + $hdt$_0:test2_n5 TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1127,22 +1127,22 @@ STAGE PLANS: Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-7:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -RIGHT OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +RIGHT OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -RIGHT OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +RIGHT OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### NULL NULL NULL NULL NULL NULL 98 NULL None NULL NULL NULL NULL NULL NULL NULL NULL None @@ -1157,16 +1157,16 @@ NULL NULL NULL NULL NULL NULL NULL NULL None Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test2 -RIGHT OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +RIGHT OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test2 -RIGHT OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +RIGHT OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1231,36 +1231,36 @@ STAGE PLANS: Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test2 -RIGHT OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +RIGHT OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 
#### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test2 -RIGHT OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +RIGHT OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -FULL OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +FULL OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -FULL OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +FULL OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1272,13 +1272,13 @@ STAGE PLANS: Stage: Stage-6 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:test2 + $hdt$_0:test2_n5 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:test2 + $hdt$_0:test2_n5 TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1357,22 +1357,22 @@ STAGE PLANS: Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -FULL OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +FULL OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test2 -JOIN test1 a ON (a.key+test2.key >= 100) -FULL OUTER JOIN test1 b ON (b.value = test2.value) +FROM test2_n5 +JOIN test1_n7 a ON (a.key+test2_n5.key >= 100) +FULL OUTER JOIN test1_n7 b ON (b.value = test2_n5.value) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### 105 NULL None 100 1 Bob NULL NULL NULL 105 NULL None 99 0 Alice NULL NULL NULL @@ -1387,16 +1387,16 @@ NULL NULL NULL NULL NULL NULL 100 1 Bob Warning: Map Join MAPJOIN[17][bigTable=?] 
in task 'Stage-5:MAPRED' is a cross product PREHOOK: query: EXPLAIN SELECT * -FROM test2 -FULL OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +FULL OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * -FROM test2 -FULL OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +FULL OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1410,7 +1410,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test2 + alias: test2_n5 Statistics: Num rows: 4 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: int), col_2 (type: string) @@ -1505,20 +1505,20 @@ STAGE PLANS: Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-5:MAPRED' is a cross product PREHOOK: query: SELECT * -FROM test2 -FULL OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +FULL OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test2 +PREHOOK: Input: default@test1_n7 +PREHOOK: Input: default@test2_n5 #### A masked pattern was here #### POSTHOOK: query: SELECT * -FROM test2 -FULL OUTER JOIN test1 a ON (a.value = test2.value) -JOIN test1 b ON (b.key+test2.key<= 102) +FROM test2_n5 +FULL OUTER JOIN test1_n7 a ON (a.value = test2_n5.value) +JOIN test1_n7 b ON (b.key+test2_n5.key<= 102) LIMIT 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test2 +POSTHOOK: Input: default@test1_n7 +POSTHOOK: Input: default@test2_n5 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_8.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_8.q.out index 20cfc0d074..26b7eb98b7 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_8.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_8.q.out @@ -14,22 +14,22 @@ POSTHOOK: query: load data local inpath '../../data/files/smb_bucket_input.rc' i POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_input -PREHOOK: query: CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS +PREHOOK: query: CREATE TABLE smb_bucket4_1_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smb_bucket4_1 -POSTHOOK: query: CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS +PREHOOK: Output: default@smb_bucket4_1_n1 +POSTHOOK: query: CREATE TABLE smb_bucket4_1_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@smb_bucket4_1 -PREHOOK: query: CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS +POSTHOOK: Output: default@smb_bucket4_1_n1 +PREHOOK: query: CREATE TABLE smb_bucket4_2_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@smb_bucket4_2 -POSTHOOK: query: CREATE 
TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: CREATE TABLE smb_bucket4_2_n1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@smb_bucket4_2
+POSTHOOK: Output: default@smb_bucket4_2_n1
PREHOOK: query: CREATE TABLE smb_bucket4_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -38,35 +38,35 @@ POSTHOOK: query: CREATE TABLE smb_bucket4_3(key int, value string) CLUSTERED BY
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@smb_bucket4_3
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4 or key=2000 or key=4000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4 or key=2000 or key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4 or key=2000 or key=4000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4 or key=2000 or key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=484 or key=3000 or key=5000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=484 or key=3000 or key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=484 or key=3000 or key=5000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=484 or key=3000 or key=5000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
2000 val_169 NULL NULL
4 val_356 NULL NULL
@@ -74,15 +74,15 @@ POSTHOOK: Input: default@smb_bucket4_2
NULL NULL 3000 val_169
NULL NULL 484 val_169
NULL NULL 5000 val_125
-PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
2000 val_169 NULL NULL
4 val_356 NULL NULL
@@ -90,164 +90,164 @@ POSTHOOK: Input: default@smb_bucket4_2
NULL NULL 3000 val_169
NULL NULL 484 val_169
NULL NULL 5000 val_125
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=2000 or key=4000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=2000 or key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=2000 or key=4000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=2000 or key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=3000 or key=5000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=3000 or key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=3000 or key=5000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=3000 or key=5000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
2000 val_169 NULL NULL
4000 val_125 NULL NULL
NULL NULL 3000 val_169
NULL NULL 5000 val_125
-PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
2000 val_169 NULL NULL
4000 val_125 NULL NULL
NULL NULL 3000 val_169
NULL NULL 5000 val_125
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE []
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE []
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=5000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE []
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE []
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
4000 val_125 NULL NULL
NULL NULL 5000 val_125
-PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
4000 val_125 NULL NULL
NULL NULL 5000 val_125
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
1000 val_1000 1000 val_1000
4000 val_125 NULL NULL
NULL NULL 5000 val_125
-PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
#### A masked pattern was here ####
1000 val_1000 1000 val_1000
4000 val_125 NULL NULL
NULL NULL 5000 val_125
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=1000 or key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
@@ -258,44 +258,44 @@ POSTHOOK: Input: default@smb_bucket_input
POSTHOOK: Output: default@smb_bucket4_3
POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
PREHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
POSTHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
1000 val_1000 1000 val_1000 1000 val_1000
4000 val_125 NULL NULL NULL NULL
NULL NULL 5000 val_125 NULL NULL
NULL NULL NULL NULL 5000 val_125
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000 or key=4000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000 or key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=1000 or key=5000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=1000 or key=5000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=1000 or key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
@@ -306,43 +306,43 @@ POSTHOOK: Input: default@smb_bucket_input
POSTHOOK: Output: default@smb_bucket4_3
POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, comment:null), ]
POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
PREHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
POSTHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
1000 val_1000 1000 val_1000 1000 val_1000
4000 val_125 NULL NULL 4000 val_125
NULL NULL 5000 val_125 NULL NULL
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=4000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE []
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE []
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=5000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=5000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE []
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE []
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
@@ -353,42 +353,42 @@ POSTHOOK: Input: default@smb_bucket_input
POSTHOOK: Output: default@smb_bucket4_3
POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE []
POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
PREHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
POSTHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
4000 val_125 NULL NULL 4000 val_125
NULL NULL 5000 val_125 NULL NULL
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=00000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=00000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=00000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=00000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE []
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE []
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE []
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE []
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
@@ -399,42 +399,42 @@ POSTHOOK: Input: default@smb_bucket_input
POSTHOOK: Output: default@smb_bucket4_3
POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE []
POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
PREHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
POSTHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
NULL NULL 4000 val_125 NULL NULL
NULL NULL NULL NULL 5000 val_125
-PREHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000
+PREHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_1
-POSTHOOK: query: insert overwrite table smb_bucket4_1 select * from smb_bucket_input where key=1000
+PREHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_1_n1 select * from smb_bucket_input where key=1000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE []
-POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000
+POSTHOOK: Output: default@smb_bucket4_1_n1
+POSTHOOK: Lineage: smb_bucket4_1_n1.key SIMPLE []
+POSTHOOK: Lineage: smb_bucket4_1_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=4000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
-PREHOOK: Output: default@smb_bucket4_2
-POSTHOOK: query: insert overwrite table smb_bucket4_2 select * from smb_bucket_input where key=4000
+PREHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: query: insert overwrite table smb_bucket4_2_n1 select * from smb_bucket_input where key=4000
POSTHOOK: type: QUERY
POSTHOOK: Input: default@smb_bucket_input
-POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE []
-POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Output: default@smb_bucket4_2_n1
+POSTHOOK: Lineage: smb_bucket4_2_n1.key SIMPLE []
+POSTHOOK: Lineage: smb_bucket4_2_n1.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: insert overwrite table smb_bucket4_3 select * from smb_bucket_input where key=5000
PREHOOK: type: QUERY
PREHOOK: Input: default@smb_bucket_input
@@ -445,18 +445,18 @@ POSTHOOK: Input: default@smb_bucket_input
POSTHOOK: Output: default@smb_bucket4_3
POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE []
POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE [(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
PREHOOK: type: QUERY
-PREHOOK: Input: default@smb_bucket4_1
-PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Input: default@smb_bucket4_1_n1
+PREHOOK: Input: default@smb_bucket4_2_n1
PREHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
-POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1_n1 a full outer join smb_bucket4_2_n1 b on a.key = b.key
full outer join smb_bucket4_3 c on a.key=c.key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@smb_bucket4_1
-POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Input: default@smb_bucket4_1_n1
+POSTHOOK: Input: default@smb_bucket4_2_n1
POSTHOOK: Input: default@smb_bucket4_3
#### A masked pattern was here ####
1000 val_1000 NULL NULL NULL NULL
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_1.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_1.q.out
index c108fca1e8..80ffff2a17 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_1.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_1.q.out
@@ -1,52 +1,52 @@
-PREHOOK: query: drop table table_desc1
+PREHOOK: query: drop table table_desc1_n3
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table table_desc1
+POSTHOOK: query: drop table table_desc1_n3
POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table table_desc2
+PREHOOK: query: drop table table_desc2_n3
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table table_desc2
+POSTHOOK: query: drop table table_desc2_n3
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table table_desc1(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
+PREHOOK: query: create table table_desc1_n3(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table_desc1
-POSTHOOK: query: create table table_desc1(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
+PREHOOK: Output: default@table_desc1_n3
+POSTHOOK: query: create table table_desc1_n3(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table_desc1
-PREHOOK: query: create table table_desc2(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
+POSTHOOK: Output: default@table_desc1_n3
+PREHOOK: query: create table table_desc2_n3(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table_desc2
-POSTHOOK: query: create table table_desc2(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
+PREHOOK: Output: default@table_desc2_n3
+POSTHOOK: query: create table table_desc2_n3(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table_desc2
-PREHOOK: query: insert overwrite table table_desc1 select key, value from src
+POSTHOOK: Output: default@table_desc2_n3
+PREHOOK: query: insert overwrite table table_desc1_n3 select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@table_desc1
-POSTHOOK: query: insert overwrite table table_desc1 select key, value from src
+PREHOOK: Output: default@table_desc1_n3
+POSTHOOK: query: insert overwrite table table_desc1_n3 select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table_desc1
-POSTHOOK: Lineage: table_desc1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: table_desc1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table table_desc2 select key, value from src
+POSTHOOK: Output: default@table_desc1_n3
+POSTHOOK: Lineage: table_desc1_n3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc1_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table table_desc2_n3 select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@table_desc2
-POSTHOOK: query: insert overwrite table table_desc2 select key, value from src
+PREHOOK: Output: default@table_desc2_n3
+POSTHOOK: query: insert overwrite table table_desc2_n3 select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table_desc2
-POSTHOOK: Lineage: table_desc2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: table_desc2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@table_desc2_n3
+POSTHOOK: Lineage: table_desc2_n3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc2_n3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10
+select /*+ mapjoin(b) */ count(*) from table_desc1_n3 a join table_desc2_n3 b on a.key=b.key where a.key < 10
PREHOOK: type: QUERY
POSTHOOK: query: explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10
+select /*+ mapjoin(b) */ count(*) from table_desc1_n3 a join table_desc2_n3 b on a.key=b.key where a.key < 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -93,14 +93,14 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10
+PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1_n3 a join table_desc2_n3 b on a.key=b.key where a.key < 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@table_desc1
-PREHOOK: Input: default@table_desc2
+PREHOOK: Input: default@table_desc1_n3
+PREHOOK: Input: default@table_desc2_n3
#### A masked pattern was here ####
-POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10
+POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1_n3 a join table_desc2_n3 b on a.key=b.key where a.key < 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table_desc1
-POSTHOOK: Input: default@table_desc2
+POSTHOOK: Input: default@table_desc1_n3
+POSTHOOK: Input: default@table_desc2_n3
#### A masked pattern was here ####
22
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out
index b9270ea4fa..1aba918616 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out
@@ -1,57 +1,57 @@
-PREHOOK: query: drop table table_desc1
+PREHOOK: query: drop table table_desc1_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table table_desc1
+POSTHOOK: query: drop table table_desc1_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table table_desc2
+PREHOOK: query: drop table table_desc2_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table table_desc2
+POSTHOOK: query: drop table table_desc2_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table table_desc1(key string, value string) clustered by (key, value)
+PREHOOK: query: create table table_desc1_n1(key string, value string) clustered by (key, value)
sorted by (key DESC, value DESC) into 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table_desc1
-POSTHOOK: query: create table table_desc1(key string, value string) clustered by (key, value)
+PREHOOK: Output: default@table_desc1_n1
+POSTHOOK: query: create table table_desc1_n1(key string, value string) clustered by (key, value)
sorted by (key DESC, value DESC) into 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table_desc1
-PREHOOK: query: create table table_desc2(key string, value string) clustered by (key, value)
+POSTHOOK: Output: default@table_desc1_n1
+PREHOOK: query: create table table_desc2_n1(key string, value string) clustered by (key, value)
sorted by (key DESC, value DESC) into 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table_desc2
-POSTHOOK: query: create table table_desc2(key string, value string) clustered by (key, value)
+PREHOOK: Output: default@table_desc2_n1
+POSTHOOK: query: create table table_desc2_n1(key string, value string) clustered by (key, value)
sorted by (key DESC, value DESC) into 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table_desc2
-PREHOOK: query: insert overwrite table table_desc1 select key, value from src
+POSTHOOK: Output: default@table_desc2_n1
+PREHOOK: query: insert overwrite table table_desc1_n1 select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@table_desc1
-POSTHOOK: query: insert overwrite table table_desc1 select key, value from src
+PREHOOK: Output: default@table_desc1_n1
+POSTHOOK: query: insert overwrite table table_desc1_n1 select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table_desc1
-POSTHOOK: Lineage: table_desc1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: table_desc1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table table_desc2 select key, value from src
+POSTHOOK: Output: default@table_desc1_n1
+POSTHOOK: Lineage: table_desc1_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc1_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table table_desc2_n1 select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@table_desc2
-POSTHOOK: query: insert overwrite table table_desc2 select key, value from src
+PREHOOK: Output: default@table_desc2_n1
+POSTHOOK: query: insert overwrite table table_desc2_n1 select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table_desc2
-POSTHOOK: Lineage: table_desc2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: table_desc2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@table_desc2_n1
+POSTHOOK: Lineage: table_desc2_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc2_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n1 a join table_desc2_n1 b
on a.key=b.key and a.value=b.value where a.key < 10
PREHOOK: type: QUERY
POSTHOOK: query: explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n1 a join table_desc2_n1 b
on a.key=b.key and a.value=b.value where a.key < 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -99,16 +99,16 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1_n1 a join table_desc2_n1 b
on a.key=b.key and a.value=b.value where a.key < 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@table_desc1
-PREHOOK: Input: default@table_desc2
+PREHOOK: Input: default@table_desc1_n1
+PREHOOK: Input: default@table_desc2_n1
#### A masked pattern was here ####
-POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1_n1 a join table_desc2_n1 b
on a.key=b.key and a.value=b.value where a.key < 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table_desc1
-POSTHOOK: Input: default@table_desc2
+POSTHOOK: Input: default@table_desc1_n1
+POSTHOOK: Input: default@table_desc2_n1
#### A masked pattern was here ####
22
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out
index 86381a0cfa..6d8125709a 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out
@@ -1,57 +1,57 @@
-PREHOOK: query: drop table table_desc1
+PREHOOK: query: drop table table_desc1_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table table_desc1
+POSTHOOK: query: drop table table_desc1_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table table_desc2
+PREHOOK: query: drop table table_desc2_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table table_desc2
+POSTHOOK: query: drop table table_desc2_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table table_desc1(key string, value string) clustered by (key, value)
+PREHOOK: query: create table table_desc1_n0(key string, value string) clustered by (key, value)
sorted by (key DESC, value ASC) into 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table_desc1
-POSTHOOK: query: create table table_desc1(key string, value string) clustered by (key, value)
+PREHOOK: Output: default@table_desc1_n0
+POSTHOOK: query: create table table_desc1_n0(key string, value string) clustered by (key, value)
sorted by (key DESC, value ASC) into 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table_desc1
-PREHOOK: query: create table table_desc2(key string, value string) clustered by (key, value)
+POSTHOOK: Output: default@table_desc1_n0
+PREHOOK: query: create table table_desc2_n0(key string, value string) clustered by (key, value)
sorted by (key DESC, value ASC) into 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@table_desc2
-POSTHOOK: query: create table table_desc2(key string, value string) clustered by (key, value)
+PREHOOK: Output: default@table_desc2_n0
+POSTHOOK: query: create table table_desc2_n0(key string, value string) clustered by (key, value)
sorted by (key DESC, value ASC) into 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@table_desc2
-PREHOOK: query: insert overwrite table table_desc1 select key, value from src
+POSTHOOK: Output: default@table_desc2_n0
+PREHOOK: query: insert overwrite table table_desc1_n0 select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@table_desc1
-POSTHOOK: query: insert overwrite table table_desc1 select key, value from src
+PREHOOK: Output: default@table_desc1_n0
+POSTHOOK: query: insert overwrite table table_desc1_n0 select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table_desc1
-POSTHOOK: Lineage: table_desc1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: table_desc1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table table_desc2 select key, value from src
+POSTHOOK: Output: default@table_desc1_n0
+POSTHOOK: Lineage: table_desc1_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc1_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table table_desc2_n0 select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@table_desc2
-POSTHOOK: query: insert overwrite table table_desc2 select key, value from src
+PREHOOK: Output: default@table_desc2_n0
+POSTHOOK: query: insert overwrite table table_desc2_n0 select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@table_desc2
-POSTHOOK: Lineage: table_desc2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: table_desc2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@table_desc2_n0
+POSTHOOK: Lineage: table_desc2_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc2_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n0 a join table_desc2_n0 b
on a.key=b.key and a.value=b.value where a.key < 10
PREHOOK: type: QUERY
POSTHOOK: query: explain
-select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+select /*+ mapjoin(b) */ count(*) from table_desc1_n0 a join table_desc2_n0 b
on a.key=b.key and a.value=b.value where a.key < 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -99,16 +99,16 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1_n0 a join table_desc2_n0 b
on a.key=b.key and a.value=b.value where a.key < 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@table_desc1
-PREHOOK: Input: default@table_desc2
+PREHOOK: Input: default@table_desc1_n0
+PREHOOK: Input: default@table_desc2_n0
#### A masked pattern was here ####
-POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b
+POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1_n0 a join table_desc2_n0 b
on a.key=b.key and a.value=b.value where a.key < 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@table_desc1
-POSTHOOK: Input: default@table_desc2
+POSTHOOK: Input: default@table_desc1_n0
+POSTHOOK: Input: default@table_desc2_n0
#### A masked pattern was here ####
22
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out
index 38d2b96789..f733fe28c0 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out
@@ -1,59 +1,59 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n7 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n7
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n7 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n7
+PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n7 PARTITION (part='1') SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n7@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n7 PARTITION (part='1') SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n7@part=1
+POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n7 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n7 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n17 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n17
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n17 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n17
+PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n17 PARTITION (part='1') SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n17@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n17 PARTITION (part='1') SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) SORTED BY (value DESC) INTO 1 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n17@part=1
+POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n17 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n17 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n17 CLUSTERED BY (key) SORTED BY (value DESC) INTO 1 BUCKETS
PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) SORTED BY (value DESC) INTO 1 BUCKETS
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n17
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n17
+POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n17 CLUSTERED BY (key) SORTED BY (value DESC) INTO 1 BUCKETS
POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n17
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n17
PREHOOK: query: EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n7 a JOIN srcbucket_mapjoin_part_2_n17 b
ON a.key = b.key AND a.part = '1' AND b.part = '1'
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN EXTENDED
SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n7 a JOIN srcbucket_mapjoin_part_2_n17 b
ON a.key = b.key AND a.part = '1' AND b.part = '1'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -109,13 +109,13 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_1
+ name default.srcbucket_mapjoin_part_1_n7
numFiles 1
numRows 500
partition_columns part
partition_columns.types string
rawDataSize 5312
- serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_1_n7 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 5812
@@ -134,18 +134,18 @@ STAGE PLANS:
columns.comments
columns.types int:string
#### A masked pattern was here ####
- name default.srcbucket_mapjoin_part_1
+ name default.srcbucket_mapjoin_part_1_n7
partition_columns part
partition_columns.types string
- serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
+ serialization.ddl struct srcbucket_mapjoin_part_1_n7 { i32 key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcbucket_mapjoin_part_1
- name: default.srcbucket_mapjoin_part_1
+ name: default.srcbucket_mapjoin_part_1_n7
+ name: default.srcbucket_mapjoin_part_1_n7
Truncated Path -> Alias:
- /srcbucket_mapjoin_part_1/part=1 [a]
+ /srcbucket_mapjoin_part_1_n7/part=1 [a]
Needs Tagging: false
Reduce Operator Tree:
Group By Operator
@@ -181,21 +181,21 @@ STAGE PLANS:
ListSink
PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n7 a JOIN srcbucket_mapjoin_part_2_n17 b
ON a.key = b.key AND a.part = '1' AND b.part = '1'
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_part_1
-PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-PREHOOK: Input: default@srcbucket_mapjoin_part_2
-PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n7
+PREHOOK: Input: default@srcbucket_mapjoin_part_1_n7@part=1
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n17
+PREHOOK: Input: default@srcbucket_mapjoin_part_2_n17@part=1
#### A masked pattern was here ####
POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*)
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
+FROM srcbucket_mapjoin_part_1_n7 a JOIN srcbucket_mapjoin_part_2_n17 b
ON a.key = b.key AND a.part = '1' AND b.part = '1'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2
-POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n7
+POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n7@part=1
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n17
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n17@part=1
#### A masked pattern was here ####
1028
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out
index 64bf678654..4730e2c63b 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out
@@ -1,59 +1,59 @@
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n3 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_1
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n3
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n3 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1
-PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n3
+PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n3 PARTITION (part='1') SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src
+PREHOOK: Output: default@srcbucket_mapjoin_part_1_n3@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n3 PARTITION (part='1') SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1
-POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n3@part=1
+POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n3 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n3 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n8 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) SORTED BY (value DESC) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@srcbucket_mapjoin_part_2
-POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n8
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n8 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) SORTED BY (value DESC) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2
-PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n8
+PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n8 PARTITION (part='1') SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src
+PREHOOK: Output: default@srcbucket_mapjoin_part_2_n8@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n8 PARTITION (part='1') SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1
-POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n8@part=1
+POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n8 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n8 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n8 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS
PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: 
default@srcbucket_mapjoin_part_2_n8 +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n8 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n8 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n8 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n8 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n3 a JOIN srcbucket_mapjoin_part_2_n8 b ON a.key = b.key AND a.part = '1' AND b.part = '1' PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n3 a JOIN srcbucket_mapjoin_part_2_n8 b ON a.key = b.key AND a.part = '1' AND b.part = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -83,13 +83,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n8 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -107,16 +107,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n8 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n8 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n8 + name: default.srcbucket_mapjoin_part_2_n8 Alias -> Map Local Operator Tree: b TableScan @@ -194,13 +194,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n3 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n3 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -219,18 +219,18 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n3 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n3 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: 
default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n3 + name: default.srcbucket_mapjoin_part_1_n3 Truncated Path -> Alias: - /srcbucket_mapjoin_part_1/part=1 [a] + /srcbucket_mapjoin_part_1_n3/part=1 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -268,21 +268,21 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n3 a JOIN srcbucket_mapjoin_part_2_n8 b ON a.key = b.key AND a.part = '1' AND b.part = '1' PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n3 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n3@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n8 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n8@part=1 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n3 a JOIN srcbucket_mapjoin_part_2_n8 b ON a.key = b.key AND a.part = '1' AND b.part = '1' POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n3 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n3@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n8 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n8@part=1 #### A masked pattern was here #### 1028 diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out index c5e8605f33..5249fb6623 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out @@ -1,95 +1,95 @@ -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n0 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@srcbucket_mapjoin_part_1 -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n0 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_1_n0 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n0 +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n0 PARTITION (part='1') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src +PREHOOK: Output: 
default@srcbucket_mapjoin_part_1_n0@part=1 +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n0 PARTITION (part='1') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n0@part=1 +POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n0 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n0 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n0 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Output: default@srcbucket_mapjoin_part_1 -POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n0 +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n0 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1_n0 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') SELECT * FROM src +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n0 +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n0 +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n0 PARTITION (part='2') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') SELECT * FROM src +PREHOOK: Output: default@srcbucket_mapjoin_part_1_n0@part=2 +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1_n0 PARTITION (part='2') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcbucket_mapjoin_part_1 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: Output: default@srcbucket_mapjoin_part_1_n0@part=2 +POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n0 PARTITION(part=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcbucket_mapjoin_part_1_n0 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n2 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n2 +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2_n2 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n2 +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n2 PARTITION (part='1') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n2@part=1 +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n2 PARTITION (part='1') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n2@part=1 +POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n2 CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n2 +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n2 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n2 CLUSTERED BY (key, value) SORTED BY (key DESC) INTO 2 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='2') SELECT * FROM src +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n2 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n2 +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n2 PARTITION (part='2') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=2 -POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='2') SELECT * FROM src +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n2@part=2 +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2_n2 PARTITION (part='2') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: 
default@srcbucket_mapjoin_part_2@part=2 -POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n2@part=2 +POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n2 PARTITION(part=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcbucket_mapjoin_part_2_n2 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n2 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n2 +PREHOOK: Output: default@srcbucket_mapjoin_part_2_n2 +POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2_n2 CLUSTERED BY (key, value) SORTED BY (value DESC) INTO 2 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Output: default@srcbucket_mapjoin_part_2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n2 +POSTHOOK: Output: default@srcbucket_mapjoin_part_2_n2 PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n0 a JOIN srcbucket_mapjoin_part_2_n2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n0 a JOIN srcbucket_mapjoin_part_2_n2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -119,13 +119,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n2 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -143,16 +143,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n2 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n2 + name: 
default.srcbucket_mapjoin_part_2_n2 Partition input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -167,13 +167,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n2 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -191,16 +191,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_2 + name default.srcbucket_mapjoin_part_2_n2 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_2_n2 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_2 - name: default.srcbucket_mapjoin_part_2 + name: default.srcbucket_mapjoin_part_2_n2 + name: default.srcbucket_mapjoin_part_2_n2 Alias -> Map Local Operator Tree: b TableScan @@ -270,13 +270,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n0 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -294,16 +294,16 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n0 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n0 + name: default.srcbucket_mapjoin_part_1_n0 #### A masked pattern was here #### Partition base file name: part=2 @@ -320,13 +320,13 @@ STAGE PLANS: columns.comments columns.types int:string #### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n0 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 5312 - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 5812 @@ -344,19 +344,19 @@ STAGE PLANS: columns.comments columns.types int:string 
#### A masked pattern was here #### - name default.srcbucket_mapjoin_part_1 + name default.srcbucket_mapjoin_part_1_n0 partition_columns part partition_columns.types string - serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value} + serialization.ddl struct srcbucket_mapjoin_part_1_n0 { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket_mapjoin_part_1 - name: default.srcbucket_mapjoin_part_1 + name: default.srcbucket_mapjoin_part_1_n0 + name: default.srcbucket_mapjoin_part_1_n0 Truncated Path -> Alias: - /srcbucket_mapjoin_part_1/part=1 [a] - /srcbucket_mapjoin_part_1/part=2 [a] + /srcbucket_mapjoin_part_1_n0/part=1 [a] + /srcbucket_mapjoin_part_1_n0/part=2 [a] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -394,25 +394,25 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n0 a JOIN srcbucket_mapjoin_part_2_n2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY -PREHOOK: Input: default@srcbucket_mapjoin_part_1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 -PREHOOK: Input: default@srcbucket_mapjoin_part_2@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n0 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n0@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_1_n0@part=2 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n2 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n2@part=1 +PREHOOK: Input: default@srcbucket_mapjoin_part_2_n2@part=2 #### A masked pattern was here #### POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ count(*) -FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b +FROM srcbucket_mapjoin_part_1_n0 a JOIN srcbucket_mapjoin_part_2_n2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcbucket_mapjoin_part_1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n0 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n0@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_1_n0@part=2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n2 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n2@part=1 +POSTHOOK: Input: default@srcbucket_mapjoin_part_2_n2@part=2 #### A masked pattern was here #### 4112 diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out index 490b6cd4c1..1bb81b9209 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out @@ -1,10 +1,10 @@ -PREHOOK: query: drop table table_desc1 +PREHOOK: query: drop table table_desc1_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table table_desc1 +POSTHOOK: query: drop table table_desc1_n2 POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table table_desc2 
+PREHOOK: query: drop table table_desc2_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table table_desc2 +POSTHOOK: query: drop table table_desc2_n2 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table table_desc3 PREHOOK: type: DROPTABLE @@ -14,26 +14,26 @@ PREHOOK: query: drop table table_desc4 PREHOOK: type: DROPTABLE POSTHOOK: query: drop table table_desc4 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table table_desc1(key string, value string) clustered by (key) +PREHOOK: query: create table table_desc1_n2(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table_desc1 -POSTHOOK: query: create table table_desc1(key string, value string) clustered by (key) +PREHOOK: Output: default@table_desc1_n2 +POSTHOOK: query: create table table_desc1_n2(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_desc1 -PREHOOK: query: create table table_desc2(key string, value string) clustered by (key) +POSTHOOK: Output: default@table_desc1_n2 +PREHOOK: query: create table table_desc2_n2(key string, value string) clustered by (key) sorted by (key DESC, value DESC) into 1 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@table_desc2 -POSTHOOK: query: create table table_desc2(key string, value string) clustered by (key) +PREHOOK: Output: default@table_desc2_n2 +POSTHOOK: query: create table table_desc2_n2(key string, value string) clustered by (key) sorted by (key DESC, value DESC) into 1 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@table_desc2 +POSTHOOK: Output: default@table_desc2_n2 PREHOOK: query: create table table_desc3(key string, value1 string, value2 string) clustered by (key) sorted by (key DESC, value1 DESC,value2 DESC) into 1 BUCKETS PREHOOK: type: CREATETABLE @@ -54,26 +54,26 @@ sorted by (key DESC, value2 DESC) into 1 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@table_desc4 -PREHOOK: query: insert overwrite table table_desc1 select key, value from src sort by key DESC +PREHOOK: query: insert overwrite table table_desc1_n2 select key, value from src sort by key DESC PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@table_desc1 -POSTHOOK: query: insert overwrite table table_desc1 select key, value from src sort by key DESC +PREHOOK: Output: default@table_desc1_n2 +POSTHOOK: query: insert overwrite table table_desc1_n2 select key, value from src sort by key DESC POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@table_desc1 -POSTHOOK: Lineage: table_desc1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: table_desc1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table table_desc2 select key, value from src sort by key DESC +POSTHOOK: Output: default@table_desc1_n2 +POSTHOOK: Lineage: table_desc1_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_desc1_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table table_desc2_n2 select key, value from src sort by key DESC PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: 
Output: default@table_desc2 -POSTHOOK: query: insert overwrite table table_desc2 select key, value from src sort by key DESC +PREHOOK: Output: default@table_desc2_n2 +POSTHOOK: query: insert overwrite table table_desc2_n2 select key, value from src sort by key DESC POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@table_desc2 -POSTHOOK: Lineage: table_desc2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: table_desc2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@table_desc2_n2 +POSTHOOK: Lineage: table_desc2_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_desc2_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: insert overwrite table table_desc3 select key, value, concat(value,"_2") as value2 from src sort by key, value, value2 DESC PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -96,11 +96,11 @@ POSTHOOK: Output: default@table_desc4 POSTHOOK: Lineage: table_desc4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: table_desc4.value2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain -select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b +select /*+ mapjoin(b) */ count(*) from table_desc1_n2 a join table_desc2_n2 b on a.key=b.key where a.key < 10 PREHOOK: type: QUERY POSTHOOK: query: explain -select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b +select /*+ mapjoin(b) */ count(*) from table_desc1_n2 a join table_desc2_n2 b on a.key=b.key where a.key < 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -148,17 +148,17 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b +PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1_n2 a join table_desc2_n2 b on a.key=b.key where a.key < 10 PREHOOK: type: QUERY -PREHOOK: Input: default@table_desc1 -PREHOOK: Input: default@table_desc2 +PREHOOK: Input: default@table_desc1_n2 +PREHOOK: Input: default@table_desc2_n2 #### A masked pattern was here #### -POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b +POSTHOOK: query: select /*+ mapjoin(b) */ count(*) from table_desc1_n2 a join table_desc2_n2 b on a.key=b.key where a.key < 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@table_desc1 -POSTHOOK: Input: default@table_desc2 +POSTHOOK: Input: default@table_desc1_n2 +POSTHOOK: Input: default@table_desc2_n2 #### A masked pattern was here #### 22 PREHOOK: query: explain diff --git a/ql/src/test/results/clientpositive/stats1.q.out b/ql/src/test/results/clientpositive/stats1.q.out index 05e0cebeb6..10291ce4b5 100644 --- a/ql/src/test/results/clientpositive/stats1.q.out +++ b/ql/src/test/results/clientpositive/stats1.q.out @@ -1,20 +1,20 @@ -PREHOOK: query: create table tmptable(key string, value string) +PREHOOK: query: create table tmptable_n4(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string, value string) +PREHOOK: Output: default@tmptable_n4 +POSTHOOK: query: create table tmptable_n4(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable +POSTHOOK: Output: default@tmptable_n4 
PREHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE tmptable +INSERT OVERWRITE TABLE tmptable_n4 SELECT unionsrc.key, unionsrc.value FROM (SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1 UNION ALL SELECT s2.key AS key, s2.value AS value FROM src1 s2) unionsrc PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE tmptable +INSERT OVERWRITE TABLE tmptable_n4 SELECT unionsrc.key, unionsrc.value FROM (SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1 UNION ALL @@ -75,7 +75,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -105,7 +105,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n4 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -141,7 +141,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n4 Stage: Stage-3 Stats Work @@ -149,9 +149,9 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.tmptable + Table: default.tmptable_n4 -PREHOOK: query: INSERT OVERWRITE TABLE tmptable +PREHOOK: query: INSERT OVERWRITE TABLE tmptable_n4 SELECT unionsrc.key, unionsrc.value FROM (SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1 UNION ALL @@ -159,8 +159,8 @@ FROM (SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@tmptable -POSTHOOK: query: INSERT OVERWRITE TABLE tmptable +PREHOOK: Output: default@tmptable_n4 +POSTHOOK: query: INSERT OVERWRITE TABLE tmptable_n4 SELECT unionsrc.key, unionsrc.value FROM (SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1 UNION ALL @@ -168,16 +168,16 @@ FROM (SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key EXPRESSION [(src1)s2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmptable.value EXPRESSION [(src)s1.null, (src1)s2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT * FROM tmptable x SORT BY x.key, x.value +POSTHOOK: Output: default@tmptable_n4 +POSTHOOK: Lineage: tmptable_n4.key EXPRESSION [(src1)s2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable_n4.value EXPRESSION [(src)s1.null, (src1)s2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT * FROM tmptable_n4 x SORT BY x.key, x.value PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM tmptable x SORT BY x.key, x.value +POSTHOOK: query: SELECT * FROM tmptable_n4 x SORT BY x.key, x.value POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n4 #### A masked pattern was here #### @@ -205,12 +205,12 @@ POSTHOOK: Input: default@tmptable 66 val_66 98 val_98 tst1 500 -PREHOOK: query: DESCRIBE FORMATTED tmptable +PREHOOK: query: DESCRIBE FORMATTED tmptable_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tmptable -POSTHOOK: query: DESCRIBE FORMATTED tmptable +PREHOOK: Input: default@tmptable_n4 +POSTHOOK: query: DESCRIBE FORMATTED tmptable_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n4 # col_name data_type comment key string value string @@ -240,20 +240,20 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@tmptable -POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable +PREHOOK: Output: default@tmptable_n4 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@tmptable -PREHOOK: query: DESCRIBE FORMATTED tmptable +POSTHOOK: Output: default@tmptable_n4 +PREHOOK: query: DESCRIBE FORMATTED tmptable_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tmptable -POSTHOOK: query: DESCRIBE FORMATTED tmptable +PREHOOK: Input: default@tmptable_n4 +POSTHOOK: query: DESCRIBE FORMATTED tmptable_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n4 # col_name data_type comment key string value string diff --git a/ql/src/test/results/clientpositive/stats10.q.out b/ql/src/test/results/clientpositive/stats10.q.out index 24c2cf7f54..dc81e22c1e 100644 --- a/ql/src/test/results/clientpositive/stats10.q.out +++ b/ql/src/test/results/clientpositive/stats10.q.out @@ -1,17 +1,17 @@ -PREHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE bucket3_1_n0(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket3_1 -POSTHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: Output: default@bucket3_1_n0 +POSTHOOK: query: CREATE TABLE bucket3_1_n0(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket3_1 +POSTHOOK: Output: default@bucket3_1_n0 PREHOOK: query: explain -insert overwrite table bucket3_1 partition (ds='1') +insert overwrite table bucket3_1_n0 partition (ds='1') select * from src PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table bucket3_1 partition (ds='1') +insert overwrite table bucket3_1_n0 partition (ds='1') select * from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -49,7 +49,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket3_1 + name: default.bucket3_1_n0 Select Operator expressions: _col0 (type: 
int), _col1 (type: string), '1' (type: string) outputColumnNames: key, value, ds @@ -77,7 +77,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.bucket3_1 + name: default.bucket3_1_n0 Stage: Stage-2 Stats Work @@ -85,7 +85,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: int, string - Table: default.bucket3_1 + Table: default.bucket3_1_n0 Stage: Stage-3 Map Reduce @@ -117,51 +117,51 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1') +PREHOOK: query: insert overwrite table bucket3_1_n0 partition (ds='1') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@bucket3_1@ds=1 -POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='1') +PREHOOK: Output: default@bucket3_1_n0@ds=1 +POSTHOOK: query: insert overwrite table bucket3_1_n0 partition (ds='1') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@bucket3_1@ds=1 -POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1') +POSTHOOK: Output: default@bucket3_1_n0@ds=1 +POSTHOOK: Lineage: bucket3_1_n0 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table bucket3_1_n0 partition (ds='1') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@bucket3_1@ds=1 -POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='1') +PREHOOK: Output: default@bucket3_1_n0@ds=1 +POSTHOOK: query: insert overwrite table bucket3_1_n0 partition (ds='1') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@bucket3_1@ds=1 -POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table bucket3_1 partition (ds='2') +POSTHOOK: Output: default@bucket3_1_n0@ds=1 +POSTHOOK: Lineage: bucket3_1_n0 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1_n0 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table bucket3_1_n0 partition (ds='2') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@bucket3_1@ds=2 -POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='2') +PREHOOK: Output: default@bucket3_1_n0@ds=2 +POSTHOOK: query: insert overwrite table bucket3_1_n0 partition (ds='2') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@bucket3_1@ds=2 -POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key +POSTHOOK: Output: default@bucket3_1_n0@ds=2 +POSTHOOK: Lineage: bucket3_1_n0 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bucket3_1_n0 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from bucket3_1_n0 tablesample (bucket 1 out of 2) s where ds = '1' order by key PREHOOK: type: QUERY -PREHOOK: Input: default@bucket3_1 -PREHOOK: Input: default@bucket3_1@ds=1 +PREHOOK: Input: default@bucket3_1_n0 +PREHOOK: Input: default@bucket3_1_n0@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key +POSTHOOK: query: select * from bucket3_1_n0 tablesample (bucket 1 out of 2) s where ds = '1' order by key POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket3_1 -POSTHOOK: Input: default@bucket3_1@ds=1 +POSTHOOK: Input: default@bucket3_1_n0 +POSTHOOK: Input: default@bucket3_1_n0@ds=1 #### A masked pattern was here #### 2 val_2 1 10 val_10 1 @@ -406,9 +406,9 @@ POSTHOOK: Input: default@bucket3_1@ds=1 498 val_498 1 498 val_498 1 498 val_498 1 -PREHOOK: query: explain analyze table bucket3_1 partition (ds) compute statistics +PREHOOK: query: explain analyze table bucket3_1_n0 partition (ds) compute statistics PREHOOK: type: QUERY -POSTHOOK: query: explain analyze table bucket3_1 partition (ds) compute statistics +POSTHOOK: query: explain analyze table bucket3_1_n0 partition (ds) compute statistics POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -419,35 +419,35 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: bucket3_1 + alias: bucket3_1_n0 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Stage: Stage-1 Stats Work Basic Stats Work: -PREHOOK: query: analyze table bucket3_1 partition (ds) compute statistics +PREHOOK: query: analyze table bucket3_1_n0 partition (ds) compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@bucket3_1 -PREHOOK: Input: default@bucket3_1@ds=1 -PREHOOK: Input: default@bucket3_1@ds=2 -PREHOOK: Output: default@bucket3_1 -PREHOOK: Output: default@bucket3_1@ds=1 -PREHOOK: Output: default@bucket3_1@ds=2 -POSTHOOK: query: analyze table bucket3_1 partition (ds) compute statistics +PREHOOK: Input: default@bucket3_1_n0 +PREHOOK: Input: default@bucket3_1_n0@ds=1 +PREHOOK: Input: default@bucket3_1_n0@ds=2 +PREHOOK: Output: default@bucket3_1_n0 +PREHOOK: Output: default@bucket3_1_n0@ds=1 +PREHOOK: Output: default@bucket3_1_n0@ds=2 +POSTHOOK: query: analyze table bucket3_1_n0 partition (ds) compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket3_1 -POSTHOOK: Input: default@bucket3_1@ds=1 -POSTHOOK: Input: default@bucket3_1@ds=2 -POSTHOOK: Output: default@bucket3_1 -POSTHOOK: Output: default@bucket3_1@ds=1 -POSTHOOK: Output: default@bucket3_1@ds=2 -PREHOOK: query: describe formatted bucket3_1 partition (ds='1') +POSTHOOK: Input: default@bucket3_1_n0 +POSTHOOK: Input: default@bucket3_1_n0@ds=1 +POSTHOOK: Input: default@bucket3_1_n0@ds=2 +POSTHOOK: Output: default@bucket3_1_n0 +POSTHOOK: Output: default@bucket3_1_n0@ds=1 +POSTHOOK: Output: default@bucket3_1_n0@ds=2 +PREHOOK: query: 
describe formatted bucket3_1_n0 partition (ds='1')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@bucket3_1
-POSTHOOK: query: describe formatted bucket3_1 partition (ds='1')
+PREHOOK: Input: default@bucket3_1_n0
+POSTHOOK: query: describe formatted bucket3_1_n0 partition (ds='1')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@bucket3_1
+POSTHOOK: Input: default@bucket3_1_n0
# col_name data_type comment
key int
value string
@@ -459,7 +459,7 @@ ds string
# Detailed Partition Information
Partition Value: [1]
Database: default
-Table: bucket3_1
+Table: bucket3_1_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -479,12 +479,12 @@ Bucket Columns: [key]
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: describe formatted bucket3_1 partition (ds='2')
+PREHOOK: query: describe formatted bucket3_1_n0 partition (ds='2')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@bucket3_1
-POSTHOOK: query: describe formatted bucket3_1 partition (ds='2')
+PREHOOK: Input: default@bucket3_1_n0
+POSTHOOK: query: describe formatted bucket3_1_n0 partition (ds='2')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@bucket3_1
+POSTHOOK: Input: default@bucket3_1_n0
# col_name data_type comment
key int
value string
@@ -496,7 +496,7 @@ ds string
# Detailed Partition Information
Partition Value: [2]
Database: default
-Table: bucket3_1
+Table: bucket3_1_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -516,12 +516,12 @@ Bucket Columns: [key]
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: describe formatted bucket3_1
+PREHOOK: query: describe formatted bucket3_1_n0
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@bucket3_1
-POSTHOOK: query: describe formatted bucket3_1
+PREHOOK: Input: default@bucket3_1_n0
+POSTHOOK: query: describe formatted bucket3_1_n0
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@bucket3_1
+POSTHOOK: Input: default@bucket3_1_n0
# col_name data_type comment
key int
value string
diff --git a/ql/src/test/results/clientpositive/stats12.q.out b/ql/src/test/results/clientpositive/stats12.q.out
index cf6bc03bbe..ec0143efcd 100644
--- a/ql/src/test/results/clientpositive/stats12.q.out
+++ b/ql/src/test/results/clientpositive/stats12.q.out
@@ -1,43 +1,43 @@
-PREHOOK: query: create table analyze_srcpart like srcpart
+PREHOOK: query: create table analyze_srcpart_n3 like srcpart
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@analyze_srcpart
-POSTHOOK: query: create table analyze_srcpart like srcpart
+PREHOOK: Output: default@analyze_srcpart_n3
+POSTHOOK: query: create table analyze_srcpart_n3 like srcpart
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@analyze_srcpart
-PREHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null
+POSTHOOK: Output: default@analyze_srcpart_n3
+PREHOOK: query: insert overwrite table analyze_srcpart_n3 partition (ds, hr) select * from srcpart where ds is not null
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@analyze_srcpart
-POSTHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null
+PREHOOK: Output: default@analyze_srcpart_n3
+POSTHOOK: query: insert overwrite table analyze_srcpart_n3 partition (ds, hr) select * from srcpart where ds is not null
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@analyze_srcpart_n3@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n3@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@analyze_srcpart_n3@ds=2008-04-09/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n3@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: analyze_srcpart_n3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n3 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n3 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n3 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n3 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n3 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n3 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: explain extended
-analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+analyze table analyze_srcpart_n3 PARTITION(ds='2008-04-08',hr) compute statistics
PREHOOK: type: QUERY
POSTHOOK: query: explain extended
-analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+analyze table analyze_srcpart_n3 PARTITION(ds='2008-04-08',hr) compute statistics
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -48,9 +48,9 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: analyze_srcpart
+ alias: analyze_srcpart_n3
Statistics: Num rows: 392 Data size: 232480 Basic stats: COMPLETE Column stats: NONE
- Statistics Aggregation Key Prefix: default.analyze_srcpart/
+ Statistics Aggregation Key Prefix: default.analyze_srcpart_n3/
GatherStats: true
Path -> Alias:
#### A masked pattern was here ####
@@ -70,11 +70,11 @@ STAGE PLANS:
columns.comments 'default','default'
columns.types string:string
#### A masked pattern was here ####
- name default.analyze_srcpart
+ name default.analyze_srcpart_n3
numFiles 1
partition_columns ds/hr
partition_columns.types string:string
- serialization.ddl struct analyze_srcpart { string key, string value}
+ serialization.ddl struct analyze_srcpart_n3 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 5812
@@ -90,16 +90,16 @@ STAGE PLANS:
columns.comments 'default','default'
columns.types string:string
#### A masked pattern was here ####
- name default.analyze_srcpart
+ name default.analyze_srcpart_n3
partition_columns ds/hr
partition_columns.types string:string
- serialization.ddl struct analyze_srcpart { string key, string value}
+ serialization.ddl struct analyze_srcpart_n3 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.analyze_srcpart
- name: default.analyze_srcpart
+ name: default.analyze_srcpart_n3
+ name: default.analyze_srcpart_n3
#### A masked pattern was here ####
Partition
base file name: hr=12
@@ -115,11 +115,11 @@ STAGE PLANS:
columns.comments 'default','default'
columns.types string:string
#### A masked pattern was here ####
- name default.analyze_srcpart
+ name default.analyze_srcpart_n3
numFiles 1
partition_columns ds/hr
partition_columns.types string:string
- serialization.ddl struct analyze_srcpart { string key, string value}
+ serialization.ddl struct analyze_srcpart_n3 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 5812
@@ -135,47 +135,47 @@ STAGE PLANS:
columns.comments 'default','default'
columns.types string:string
#### A masked pattern was here ####
- name default.analyze_srcpart
+ name default.analyze_srcpart_n3
partition_columns ds/hr
partition_columns.types string:string
- serialization.ddl struct analyze_srcpart { string key, string value}
+ serialization.ddl struct analyze_srcpart_n3 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.analyze_srcpart
- name: default.analyze_srcpart
+ name: default.analyze_srcpart_n3
+ name: default.analyze_srcpart_n3
Truncated Path -> Alias:
- /analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart]
- /analyze_srcpart/ds=2008-04-08/hr=12 [analyze_srcpart]
+ /analyze_srcpart_n3/ds=2008-04-08/hr=11 [analyze_srcpart_n3]
+ /analyze_srcpart_n3/ds=2008-04-08/hr=12 [analyze_srcpart_n3]
Stage: Stage-1
Stats Work
Basic Stats Work:
- Stats Aggregation Key Prefix: default.analyze_srcpart/
+ Stats Aggregation Key Prefix: default.analyze_srcpart_n3/
-PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+PREHOOK: query: analyze table analyze_srcpart_n3 PARTITION(ds='2008-04-08',hr) compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@analyze_srcpart
-PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@analyze_srcpart
-PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+PREHOOK: Input: default@analyze_srcpart_n3
+PREHOOK: Input: default@analyze_srcpart_n3@ds=2008-04-08/hr=11
+PREHOOK: Input: default@analyze_srcpart_n3@ds=2008-04-08/hr=12
+PREHOOK: Output: default@analyze_srcpart_n3
+PREHOOK: Output: default@analyze_srcpart_n3@ds=2008-04-08/hr=11
+PREHOOK: Output: default@analyze_srcpart_n3@ds=2008-04-08/hr=12
+POSTHOOK: query: analyze table analyze_srcpart_n3 PARTITION(ds='2008-04-08',hr) compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@analyze_srcpart
-POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@analyze_srcpart
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-PREHOOK: query: desc formatted analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n3
+POSTHOOK: Input: default@analyze_srcpart_n3@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@analyze_srcpart_n3@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@analyze_srcpart_n3
+POSTHOOK: Output: default@analyze_srcpart_n3@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n3@ds=2008-04-08/hr=12
+PREHOOK: query: desc formatted analyze_srcpart_n3
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: desc formatted analyze_srcpart
+PREHOOK: Input: default@analyze_srcpart_n3
+POSTHOOK: query: desc formatted analyze_srcpart_n3
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n3
# col_name data_type comment
key string default
value string default
@@ -209,12 +209,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: desc formatted analyze_srcpart partition (ds='2008-04-08', hr=11)
+PREHOOK: query: desc formatted analyze_srcpart_n3 partition (ds='2008-04-08', hr=11)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: desc formatted analyze_srcpart partition (ds='2008-04-08', hr=11)
+PREHOOK: Input: default@analyze_srcpart_n3
+POSTHOOK: query: desc formatted analyze_srcpart_n3 partition (ds='2008-04-08', hr=11)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n3
# col_name data_type comment
key string default
value string default
@@ -227,7 +227,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-08, 11]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n3
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
@@ -247,12 +247,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: desc formatted analyze_srcpart partition (ds='2008-04-08', hr=12)
+PREHOOK: query: desc formatted analyze_srcpart_n3 partition (ds='2008-04-08', hr=12)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: desc formatted analyze_srcpart partition (ds='2008-04-08', hr=12)
+PREHOOK: Input: default@analyze_srcpart_n3
+POSTHOOK: query: desc formatted analyze_srcpart_n3 partition (ds='2008-04-08', hr=12)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n3
# col_name data_type comment
key string default
value string default
@@ -265,7 +265,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-08, 12]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n3
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
@@ -285,12 +285,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: desc formatted analyze_srcpart partition (ds='2008-04-09', hr=11)
+PREHOOK: query: desc formatted analyze_srcpart_n3 partition (ds='2008-04-09', hr=11)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: desc formatted analyze_srcpart partition (ds='2008-04-09', hr=11)
+PREHOOK: Input: default@analyze_srcpart_n3
+POSTHOOK: query: desc formatted analyze_srcpart_n3 partition (ds='2008-04-09', hr=11)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n3
# col_name data_type comment
key string default
value string default
@@ -303,7 +303,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-09, 11]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n3
#### A masked pattern was here ####
Partition Parameters:
numFiles 1
@@ -320,12 +320,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: desc formatted analyze_srcpart partition (ds='2008-04-09', hr=12)
+PREHOOK: query: desc formatted analyze_srcpart_n3 partition (ds='2008-04-09', hr=12)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: desc formatted analyze_srcpart partition (ds='2008-04-09', hr=12)
+PREHOOK: Input: default@analyze_srcpart_n3
+POSTHOOK: query: desc formatted analyze_srcpart_n3 partition (ds='2008-04-09', hr=12)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n3
# col_name data_type comment
key string default
value string default
@@ -338,7 +338,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-09, 12]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n3
#### A masked pattern was here ####
Partition Parameters:
numFiles 1
diff --git a/ql/src/test/results/clientpositive/stats15.q.out b/ql/src/test/results/clientpositive/stats15.q.out
index faebe8afd8..2bfc6751e9 100644
--- a/ql/src/test/results/clientpositive/stats15.q.out
+++ b/ql/src/test/results/clientpositive/stats15.q.out
@@ -1,35 +1,35 @@
-PREHOOK: query: create table stats_src like src
+PREHOOK: query: create table stats_src_n0 like src
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_src
-POSTHOOK: query: create table stats_src like src
+PREHOOK: Output: default@stats_src_n0
+POSTHOOK: query: create table stats_src_n0 like src
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_src
-PREHOOK: query: insert overwrite table stats_src select * from src
+POSTHOOK: Output: default@stats_src_n0
+PREHOOK: query: insert overwrite table stats_src_n0 select * from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_src
-POSTHOOK: query: insert overwrite table stats_src select * from src
+PREHOOK: Output: default@stats_src_n0
+POSTHOOK: query: insert overwrite table stats_src_n0 select * from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_src
-POSTHOOK: Lineage: stats_src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: analyze table stats_src compute statistics
+POSTHOOK: Output: default@stats_src_n0
+POSTHOOK: Lineage: stats_src_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_src_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table stats_src_n0 compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@stats_src
-PREHOOK: Output: default@stats_src
-POSTHOOK: query: analyze table stats_src compute statistics
+PREHOOK: Input: default@stats_src_n0
+PREHOOK: Output: default@stats_src_n0
+POSTHOOK: query: analyze table stats_src_n0 compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@stats_src
-POSTHOOK: Output: default@stats_src
-PREHOOK: query: desc formatted stats_src
+POSTHOOK: Input: default@stats_src_n0
+POSTHOOK: Output: default@stats_src_n0
+PREHOOK: query: desc formatted stats_src_n0
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_src
-POSTHOOK: query: desc formatted stats_src
+PREHOOK: Input: default@stats_src_n0
+POSTHOOK: query: desc formatted stats_src_n0
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_src
+POSTHOOK: Input: default@stats_src_n0
# col_name data_type comment
key string default
value string default
@@ -58,74 +58,74 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: create table stats_part like srcpart
+PREHOOK: query: create table stats_part_n0 like srcpart
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_part
-POSTHOOK: query: create table stats_part like srcpart
+PREHOOK: Output: default@stats_part_n0
+POSTHOOK: query: create table stats_part_n0 like srcpart
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_part
-PREHOOK: query: insert overwrite table stats_part partition (ds='2010-04-08', hr = '11') select key, value from src
+POSTHOOK: Output: default@stats_part_n0
+PREHOOK: query: insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '11') select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=11
-POSTHOOK: query: insert overwrite table stats_part partition (ds='2010-04-08', hr = '11') select key, value from src
+PREHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=11
+POSTHOOK: query: insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '11') select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=11
-POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table stats_part partition (ds='2010-04-08', hr = '12') select key, value from src
+POSTHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=11
+POSTHOOK: Lineage: stats_part_n0 PARTITION(ds=2010-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_part_n0 PARTITION(ds=2010-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '12') select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=12
-POSTHOOK: query: insert overwrite table stats_part partition (ds='2010-04-08', hr = '12') select key, value from src
+PREHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=12
+POSTHOOK: query: insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '12') select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=12
-POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: analyze table stats_part partition(ds='2010-04-08', hr='11') compute statistics
+POSTHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=12
+POSTHOOK: Lineage: stats_part_n0 PARTITION(ds=2010-04-08,hr=12).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_part_n0 PARTITION(ds=2010-04-08,hr=12).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table stats_part_n0 partition(ds='2010-04-08', hr='11') compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@stats_part
-PREHOOK: Input: default@stats_part@ds=2010-04-08/hr=11
-PREHOOK: Output: default@stats_part
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=11
-POSTHOOK: query: analyze table stats_part partition(ds='2010-04-08', hr='11') compute statistics
+PREHOOK: Input: default@stats_part_n0
+PREHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=11
+PREHOOK: Output: default@stats_part_n0
+PREHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=11
+POSTHOOK: query: analyze table stats_part_n0 partition(ds='2010-04-08', hr='11') compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@stats_part
-POSTHOOK: Input: default@stats_part@ds=2010-04-08/hr=11
-POSTHOOK: Output: default@stats_part
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=11
-PREHOOK: query: analyze table stats_part partition(ds='2010-04-08', hr='12') compute statistics
+POSTHOOK: Input: default@stats_part_n0
+POSTHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=11
+POSTHOOK: Output: default@stats_part_n0
+POSTHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=11
+PREHOOK: query: analyze table stats_part_n0 partition(ds='2010-04-08', hr='12') compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@stats_part
-PREHOOK: Input: default@stats_part@ds=2010-04-08/hr=12
-PREHOOK: Output: default@stats_part
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=12
-POSTHOOK: query: analyze table stats_part partition(ds='2010-04-08', hr='12') compute statistics
+PREHOOK: Input: default@stats_part_n0
+PREHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=12
+PREHOOK: Output: default@stats_part_n0
+PREHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=12
+POSTHOOK: query: analyze table stats_part_n0 partition(ds='2010-04-08', hr='12') compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@stats_part
-POSTHOOK: Input: default@stats_part@ds=2010-04-08/hr=12
-POSTHOOK: Output: default@stats_part
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=12
-PREHOOK: query: insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src
+POSTHOOK: Input: default@stats_part_n0
+POSTHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=12
+POSTHOOK: Output: default@stats_part_n0
+POSTHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=12
+PREHOOK: query: insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '13') select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=13
-POSTHOOK: query: insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src
+PREHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=13
+POSTHOOK: query: insert overwrite table stats_part_n0 partition (ds='2010-04-08', hr = '13') select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=13
-POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=13).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=13).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted stats_part
+POSTHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=13
+POSTHOOK: Lineage: stats_part_n0 PARTITION(ds=2010-04-08,hr=13).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_part_n0 PARTITION(ds=2010-04-08,hr=13).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted stats_part_n0
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_part
-POSTHOOK: query: desc formatted stats_part
+PREHOOK: Input: default@stats_part_n0
+POSTHOOK: query: desc formatted stats_part_n0
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part_n0
# col_name data_type comment
key string default
value string default
@@ -160,12 +160,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr = '11')
+PREHOOK: query: desc formatted stats_part_n0 partition (ds='2010-04-08', hr = '11')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_part
-POSTHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr = '11')
+PREHOOK: Input: default@stats_part_n0
+POSTHOOK: query: desc formatted stats_part_n0 partition (ds='2010-04-08', hr = '11')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part_n0
# col_name data_type comment
key string default
value string default
@@ -178,7 +178,7 @@ hr string
# Detailed Partition Information
Partition Value: [2010-04-08, 11]
Database: default
-Table: stats_part
+Table: stats_part_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -198,12 +198,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr = '12')
+PREHOOK: query: desc formatted stats_part_n0 partition (ds='2010-04-08', hr = '12')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_part
-POSTHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr = '12')
+PREHOOK: Input: default@stats_part_n0
+POSTHOOK: query: desc formatted stats_part_n0 partition (ds='2010-04-08', hr = '12')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part_n0
# col_name data_type comment
key string default
value string default
@@ -216,7 +216,7 @@ hr string
# Detailed Partition Information
Partition Value: [2010-04-08, 12]
Database: default
-Table: stats_part
+Table: stats_part_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -236,32 +236,32 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: analyze table stats_part partition(ds, hr) compute statistics
+PREHOOK: query: analyze table stats_part_n0 partition(ds, hr) compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@stats_part
-PREHOOK: Input: default@stats_part@ds=2010-04-08/hr=11
-PREHOOK: Input: default@stats_part@ds=2010-04-08/hr=12
-PREHOOK: Input: default@stats_part@ds=2010-04-08/hr=13
-PREHOOK: Output: default@stats_part
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=11
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=12
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=13
-POSTHOOK: query: analyze table stats_part partition(ds, hr) compute statistics
+PREHOOK: Input: default@stats_part_n0
+PREHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=11
+PREHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=12
+PREHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=13
+PREHOOK: Output: default@stats_part_n0
+PREHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=11
+PREHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=12
+PREHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=13
+POSTHOOK: query: analyze table stats_part_n0 partition(ds, hr) compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@stats_part
-POSTHOOK: Input: default@stats_part@ds=2010-04-08/hr=11
-POSTHOOK: Input: default@stats_part@ds=2010-04-08/hr=12
-POSTHOOK: Input: default@stats_part@ds=2010-04-08/hr=13
-POSTHOOK: Output: default@stats_part
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=11
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=12
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=13
-PREHOOK: query: desc formatted stats_part
+POSTHOOK: Input: default@stats_part_n0
+POSTHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=11
+POSTHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=12
+POSTHOOK: Input: default@stats_part_n0@ds=2010-04-08/hr=13
+POSTHOOK: Output: default@stats_part_n0
+POSTHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=11
+POSTHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=12
+POSTHOOK: Output: default@stats_part_n0@ds=2010-04-08/hr=13
+PREHOOK: query: desc formatted stats_part_n0
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_part
-POSTHOOK: query: desc formatted stats_part
+PREHOOK: Input: default@stats_part_n0
+POSTHOOK: query: desc formatted stats_part_n0
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part_n0
# col_name data_type comment
key string default
value string default
@@ -296,19 +296,19 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: drop table stats_src
+PREHOOK: query: drop table stats_src_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_src
-PREHOOK: Output: default@stats_src
-POSTHOOK: query: drop table stats_src
+PREHOOK: Input: default@stats_src_n0
+PREHOOK: Output: default@stats_src_n0
+POSTHOOK: query: drop table stats_src_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_src
-POSTHOOK: Output: default@stats_src
-PREHOOK: query: drop table stats_part
+POSTHOOK: Input: default@stats_src_n0
+POSTHOOK: Output: default@stats_src_n0
+PREHOOK: query: drop table stats_part_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_part
-PREHOOK: Output: default@stats_part
-POSTHOOK: query: drop table stats_part
+PREHOOK: Input: default@stats_part_n0
+PREHOOK: Output: default@stats_part_n0
+POSTHOOK: query: drop table stats_part_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_part
-POSTHOOK: Output: default@stats_part
+POSTHOOK: Input: default@stats_part_n0
+POSTHOOK: Output: default@stats_part_n0
diff --git a/ql/src/test/results/clientpositive/stats18.q.out b/ql/src/test/results/clientpositive/stats18.q.out
index b947f62650..309ad4d3d8 100644
--- a/ql/src/test/results/clientpositive/stats18.q.out
+++ b/ql/src/test/results/clientpositive/stats18.q.out
@@ -1,27 +1,27 @@
-PREHOOK: query: create table stats_part like srcpart
+PREHOOK: query: create table stats_part_n1 like srcpart
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_part
-POSTHOOK: query: create table stats_part like srcpart
+PREHOOK: Output: default@stats_part_n1
+POSTHOOK: query: create table stats_part_n1 like srcpart
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_part
-PREHOOK: query: insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src
+POSTHOOK: Output: default@stats_part_n1
+PREHOOK: query: insert overwrite table stats_part_n1 partition (ds='2010-04-08', hr = '13') select key, value from src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=13
-POSTHOOK: query: insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src
+PREHOOK: Output: default@stats_part_n1@ds=2010-04-08/hr=13
+POSTHOOK: query: insert overwrite table stats_part_n1 partition (ds='2010-04-08', hr = '13') select key, value from src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=13
-POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=13).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=13).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr='13')
+POSTHOOK: Output: default@stats_part_n1@ds=2010-04-08/hr=13
+POSTHOOK: Lineage: stats_part_n1 PARTITION(ds=2010-04-08,hr=13).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_part_n1 PARTITION(ds=2010-04-08,hr=13).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted stats_part_n1 partition (ds='2010-04-08', hr='13')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_part
-POSTHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr='13')
+PREHOOK: Input: default@stats_part_n1
+POSTHOOK: query: desc formatted stats_part_n1 partition (ds='2010-04-08', hr='13')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part_n1
# col_name data_type comment
key string default
value string default
@@ -34,7 +34,7 @@ hr string
# Detailed Partition Information
Partition Value: [2010-04-08, 13]
Database: default
-Table: stats_part
+Table: stats_part_n1
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
@@ -54,20 +54,20 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE stats_part partition (ds='2010-04-08', hr='13')
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE stats_part_n1 partition (ds='2010-04-08', hr='13')
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@stats_part@ds=2010-04-08/hr=13
-POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE stats_part partition (ds='2010-04-08', hr='13')
+PREHOOK: Output: default@stats_part_n1@ds=2010-04-08/hr=13
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE stats_part_n1 partition (ds='2010-04-08', hr='13')
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=13
-PREHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr='13')
+POSTHOOK: Output: default@stats_part_n1@ds=2010-04-08/hr=13
+PREHOOK: query: desc formatted stats_part_n1 partition (ds='2010-04-08', hr='13')
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_part
-POSTHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr='13')
+PREHOOK: Input: default@stats_part_n1
+POSTHOOK: query: desc formatted stats_part_n1 partition (ds='2010-04-08', hr='13')
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part_n1
# col_name data_type comment
key string default
value string default
@@ -80,7 +80,7 @@ hr string
# Detailed Partition Information
Partition Value: [2010-04-08, 13]
Database: default
-Table: stats_part
+Table: stats_part_n1
#### A masked pattern was here ####
Partition Parameters:
numFiles 2
@@ -103,11 +103,11 @@ PREHOOK: query: drop table stats_src
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table stats_src
POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table stats_part
+PREHOOK: query: drop table stats_part_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_part
-PREHOOK: Output: default@stats_part
-POSTHOOK: query: drop table stats_part
+PREHOOK: Input: default@stats_part_n1
+PREHOOK: Output: default@stats_part_n1
+POSTHOOK: query: drop table stats_part_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_part
-POSTHOOK: Output: default@stats_part
+POSTHOOK: Input: default@stats_part_n1
+POSTHOOK: Output: default@stats_part_n1
diff --git a/ql/src/test/results/clientpositive/stats3.q.out b/ql/src/test/results/clientpositive/stats3.q.out
index 2c2801d327..d0c6054657 100644
--- a/ql/src/test/results/clientpositive/stats3.q.out
+++ b/ql/src/test/results/clientpositive/stats3.q.out
@@ -1,24 +1,24 @@
-PREHOOK: query: drop table hive_test_src
+PREHOOK: query: drop table hive_test_src_n3
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table hive_test_src
+POSTHOOK: query: drop table hive_test_src_n3
POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table hive_test_dst
+PREHOOK: query: drop table hive_test_dst_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table hive_test_dst
+POSTHOOK: query: drop table hive_test_dst_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table hive_test_src ( col1 string ) stored as textfile
+PREHOOK: query: create table hive_test_src_n3 ( col1 string ) stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@hive_test_src
-POSTHOOK: query: create table hive_test_src ( col1 string ) stored as textfile
+PREHOOK: Output: default@hive_test_src_n3
+POSTHOOK: query: create table hive_test_src_n3 ( col1 string ) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@hive_test_src
+POSTHOOK: Output: default@hive_test_src_n3
PREHOOK: query: explain extended
-load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n3
PREHOOK: type: LOAD
POSTHOOK: query: explain extended
-load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n3
POSTHOOK: type: LOAD
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -42,36 +42,36 @@ STAGE PLANS:
columns.comments
columns.types string
#### A masked pattern was here ####
- name default.hive_test_src
+ name default.hive_test_src_n3
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct hive_test_src { string col1}
+ serialization.ddl struct hive_test_src_n3 { string col1}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 0
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.hive_test_src
+ name: default.hive_test_src_n3
Stage: Stage-1
Stats Work
Basic Stats Work:
-PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src
+PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n3
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@hive_test_src
-POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src
+PREHOOK: Output: default@hive_test_src_n3
+POSTHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n3
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@hive_test_src
-PREHOOK: query: desc formatted hive_test_src
+POSTHOOK: Output: default@hive_test_src_n3
+PREHOOK: query: desc formatted hive_test_src_n3
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@hive_test_src
-POSTHOOK: query: desc formatted hive_test_src
+PREHOOK: Input: default@hive_test_src_n3
+POSTHOOK: query: desc formatted hive_test_src_n3
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@hive_test_src
+POSTHOOK: Input: default@hive_test_src_n3
# col_name data_type comment
col1 string
@@ -99,32 +99,32 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile
+PREHOOK: query: create table hive_test_dst_n0 ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@hive_test_dst
-POSTHOOK: query: create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile
+PREHOOK: Output: default@hive_test_dst_n0
+POSTHOOK: query: create table hive_test_dst_n0 ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@hive_test_dst
-PREHOOK: query: insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src
+POSTHOOK: Output: default@hive_test_dst_n0
+PREHOOK: query: insert overwrite table hive_test_dst_n0 partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src_n3
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_src
-PREHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
-POSTHOOK: query: insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src
+PREHOOK: Input: default@hive_test_src_n3
+PREHOOK: Output: default@hive_test_dst_n0@pcol1=test_part/pcol2=test_Part
+POSTHOOK: query: insert overwrite table hive_test_dst_n0 partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src_n3
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_src
-POSTHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
-POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
-PREHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part'
+POSTHOOK: Input: default@hive_test_src_n3
+POSTHOOK: Output: default@hive_test_dst_n0@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Lineage: hive_test_dst_n0 PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src_n3)hive_test_src_n3.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_Part'
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_dst
-PREHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+PREHOOK: Input: default@hive_test_dst_n0
+PREHOOK: Input: default@hive_test_dst_n0@pcol1=test_part/pcol2=test_Part
#### A masked pattern was here ####
-POSTHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part'
+POSTHOOK: query: select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_Part'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_dst
-POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Input: default@hive_test_dst_n0
+POSTHOOK: Input: default@hive_test_dst_n0@pcol1=test_part/pcol2=test_Part
#### A masked pattern was here ####
1 test_part test_Part
2 test_part test_Part
@@ -132,50 +132,50 @@ POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
4 test_part test_Part
5 test_part test_Part
6 test_part test_Part
-PREHOOK: query: select count(1) from hive_test_dst
+PREHOOK: query: select count(1) from hive_test_dst_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_dst
+PREHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from hive_test_dst
+POSTHOOK: query: select count(1) from hive_test_dst_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_dst
+POSTHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
6
-PREHOOK: query: insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src
+PREHOOK: query: insert overwrite table hive_test_dst_n0 partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src_n3
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_src
-PREHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
-POSTHOOK: query: insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src
+PREHOOK: Input: default@hive_test_src_n3
+PREHOOK: Output: default@hive_test_dst_n0@pcol1=test_part/pcol2=test_Part
+POSTHOOK: query: insert overwrite table hive_test_dst_n0 partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src_n3
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_src
-POSTHOOK: Output: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
-POSTHOOK: Lineage: hive_test_dst PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src)hive_test_src.FieldSchema(name:col1, type:string, comment:null), ]
-PREHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'
+POSTHOOK: Input: default@hive_test_src_n3
+POSTHOOK: Output: default@hive_test_dst_n0@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Lineage: hive_test_dst_n0 PARTITION(pcol1=test_part,pcol2=test_Part).col1 SIMPLE [(hive_test_src_n3)hive_test_src_n3.FieldSchema(name:col1, type:string, comment:null), ]
+PREHOOK: query: select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_part'
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_dst
+PREHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'
+POSTHOOK: query: select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_part'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_dst
+POSTHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
-PREHOOK: query: select count(1) from hive_test_dst
+PREHOOK: query: select count(1) from hive_test_dst_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_dst
+PREHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from hive_test_dst
+POSTHOOK: query: select count(1) from hive_test_dst_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_dst
+POSTHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
6
-PREHOOK: query: select * from hive_test_dst where pcol1='test_part'
+PREHOOK: query: select * from hive_test_dst_n0 where pcol1='test_part'
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_dst
-PREHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+PREHOOK: Input: default@hive_test_dst_n0
+PREHOOK: Input: default@hive_test_dst_n0@pcol1=test_part/pcol2=test_Part
#### A masked pattern was here ####
-POSTHOOK: query: select * from hive_test_dst where pcol1='test_part'
+POSTHOOK: query: select * from hive_test_dst_n0 where pcol1='test_part'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_dst
-POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
+POSTHOOK: Input: default@hive_test_dst_n0
+POSTHOOK: Input: default@hive_test_dst_n0@pcol1=test_part/pcol2=test_Part
#### A masked pattern was here ####
1 test_part test_Part
2 test_part test_Part
@@ -183,28 +183,28 @@ POSTHOOK: Input: default@hive_test_dst@pcol1=test_part/pcol2=test_Part
4 test_part test_Part
5 test_part test_Part
6 test_part test_Part
-PREHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'
+PREHOOK: query: select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_part'
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_dst
+PREHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from hive_test_dst where pcol1='test_part' and pcol2='test_part'
+POSTHOOK: query: select * from hive_test_dst_n0 where pcol1='test_part' and pcol2='test_part'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_dst
+POSTHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
-PREHOOK: query: select * from hive_test_dst where pcol1='test_Part'
+PREHOOK: query: select * from hive_test_dst_n0 where pcol1='test_Part'
PREHOOK: type: QUERY
-PREHOOK: Input: default@hive_test_dst
+PREHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from hive_test_dst where pcol1='test_Part'
+POSTHOOK: query: select * from hive_test_dst_n0 where pcol1='test_Part'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@hive_test_dst
+POSTHOOK: Input: default@hive_test_dst_n0
#### A masked pattern was here ####
-PREHOOK: query: describe formatted hive_test_dst
+PREHOOK: query: describe formatted hive_test_dst_n0
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@hive_test_dst
-POSTHOOK: query: describe formatted hive_test_dst
+PREHOOK: Input: default@hive_test_dst_n0
+POSTHOOK: query: describe formatted hive_test_dst_n0
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@hive_test_dst
+POSTHOOK: Input: default@hive_test_dst_n0
# col_name data_type comment
col1 string
@@ -239,19 +239,19 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: drop table hive_test_src
+PREHOOK: query: drop table hive_test_src_n3
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@hive_test_src
-PREHOOK: Output: default@hive_test_src
-POSTHOOK: query: drop table hive_test_src
+PREHOOK: Input: default@hive_test_src_n3
+PREHOOK: Output: default@hive_test_src_n3
+POSTHOOK: query: drop table hive_test_src_n3
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@hive_test_src
-POSTHOOK: Output: default@hive_test_src
-PREHOOK: query: drop table hive_test_dst
+POSTHOOK: Input: default@hive_test_src_n3
+POSTHOOK: Output: default@hive_test_src_n3
+PREHOOK: query: drop table hive_test_dst_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@hive_test_dst
-PREHOOK: Output: default@hive_test_dst
-POSTHOOK: query: drop table hive_test_dst
+PREHOOK: Input: default@hive_test_dst_n0
+PREHOOK: Output: default@hive_test_dst_n0
+POSTHOOK: query: drop table hive_test_dst_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@hive_test_dst
-POSTHOOK: Output: default@hive_test_dst
+POSTHOOK: Input: default@hive_test_dst_n0
+POSTHOOK: Output: default@hive_test_dst_n0
diff --git a/ql/src/test/results/clientpositive/stats6.q.out b/ql/src/test/results/clientpositive/stats6.q.out
index 704bff8a63..77be16cb13 100644
--- a/ql/src/test/results/clientpositive/stats6.q.out
+++ b/ql/src/test/results/clientpositive/stats6.q.out
@@ -1,68 +1,68 @@
-PREHOOK: query: create table analyze_srcpart like srcpart
+PREHOOK: query: create table analyze_srcpart_n2 like srcpart
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@analyze_srcpart
-POSTHOOK: query: create table analyze_srcpart like srcpart
+PREHOOK: Output: default@analyze_srcpart_n2
+POSTHOOK: query: create table analyze_srcpart_n2 like srcpart
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@analyze_srcpart
-PREHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null
+POSTHOOK: Output: default@analyze_srcpart_n2
+PREHOOK: query: insert overwrite table analyze_srcpart_n2 partition (ds, hr) select * from srcpart where ds is not null
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@analyze_srcpart
-POSTHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null
+PREHOOK: Output: default@analyze_srcpart_n2
+POSTHOOK: query: insert overwrite table analyze_srcpart_n2 partition (ds, hr) select * from srcpart where ds is not null
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
+POSTHOOK: Output: default@analyze_srcpart_n2@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@analyze_srcpart_n2@ds=2008-04-09/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n2@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: analyze_srcpart_n2 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n2 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n2 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n2 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n2 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n2 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n2 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n2 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=11) compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@analyze_srcpart
-PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
-PREHOOK: Output: default@analyze_srcpart
-PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
+PREHOOK: Input: default@analyze_srcpart_n2
+PREHOOK: Input: default@analyze_srcpart_n2@ds=2008-04-08/hr=11
+PREHOOK: Output: default@analyze_srcpart_n2
+PREHOOK: Output: default@analyze_srcpart_n2@ds=2008-04-08/hr=11
+POSTHOOK: query: analyze table analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=11) compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@analyze_srcpart
-POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@analyze_srcpart
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
+POSTHOOK: Input: default@analyze_srcpart_n2
+POSTHOOK: Input: default@analyze_srcpart_n2@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n2
+POSTHOOK: Output: default@analyze_srcpart_n2@ds=2008-04-08/hr=11
+PREHOOK: query: analyze table analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=12) compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@analyze_srcpart
-PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@analyze_srcpart
-PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
+PREHOOK: Input: default@analyze_srcpart_n2
+PREHOOK: Input: default@analyze_srcpart_n2@ds=2008-04-08/hr=12
+PREHOOK: Output: default@analyze_srcpart_n2
+PREHOOK: Output: default@analyze_srcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: query: analyze table analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=12) compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@analyze_srcpart
-POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@analyze_srcpart
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: Input: default@analyze_srcpart_n2
+POSTHOOK: Input: default@analyze_srcpart_n2@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@analyze_srcpart_n2
+POSTHOOK: Output: default@analyze_srcpart_n2@ds=2008-04-08/hr=12
+PREHOOK: query: describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=11)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: Input: default@analyze_srcpart_n2
+POSTHOOK: query: describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=11)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n2
# col_name data_type comment
key string default
value string default
@@ -75,7 +75,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-08, 11]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n2
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
@@ -95,12 +95,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
+PREHOOK: query: describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=12)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
+PREHOOK: Input: default@analyze_srcpart_n2
+POSTHOOK: query: describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-08',hr=12)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n2
# col_name data_type comment
key string default
value string default
@@ -113,7 +113,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-08, 12]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n2
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
@@ -133,12 +133,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11)
+PREHOOK: query: describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-09',hr=11)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11)
+PREHOOK: Input: default@analyze_srcpart_n2
+POSTHOOK: query: describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-09',hr=11)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n2
# col_name data_type comment
key string default
value string default
@@ -151,7 +151,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-09, 11]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n2
#### A masked pattern was here ####
Partition Parameters:
numFiles 1
@@ -168,12 +168,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12)
+PREHOOK: query: describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-09',hr=12)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12)
+PREHOOK: Input: default@analyze_srcpart_n2
+POSTHOOK: query: describe formatted analyze_srcpart_n2 PARTITION(ds='2008-04-09',hr=12)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n2
# col_name data_type comment
key string default
value string default
@@ -186,7 +186,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-09, 12]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n2
#### A masked pattern was here ####
Partition Parameters:
numFiles 1
@@ -203,12 +203,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: describe formatted analyze_srcpart
+PREHOOK: query: describe formatted analyze_srcpart_n2
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: describe formatted analyze_srcpart
+PREHOOK: Input: default@analyze_srcpart_n2
+POSTHOOK: query: describe formatted analyze_srcpart_n2
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n2
# col_name data_type comment
key string default
value string default
diff --git a/ql/src/test/results/clientpositive/stats7.q.out b/ql/src/test/results/clientpositive/stats7.q.out
index 626e5f0644..403c6455d2 100644
--- a/ql/src/test/results/clientpositive/stats7.q.out
+++ b/ql/src/test/results/clientpositive/stats7.q.out
@@ -1,41 +1,41 @@
-PREHOOK: query: create table analyze_srcpart like srcpart
+PREHOOK: query: create table analyze_srcpart_n4 like srcpart
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@analyze_srcpart
-POSTHOOK: query: create table analyze_srcpart like srcpart
+PREHOOK: Output: default@analyze_srcpart_n4
+POSTHOOK: query: create table analyze_srcpart_n4 like srcpart
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@analyze_srcpart
-PREHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null
+POSTHOOK: Output: default@analyze_srcpart_n4
+PREHOOK: query: insert overwrite table analyze_srcpart_n4 partition (ds, hr) select * from srcpart where ds is not null
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@analyze_srcpart
-POSTHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null
+PREHOOK: Output: default@analyze_srcpart_n4
+POSTHOOK: query: insert overwrite table analyze_srcpart_n4 partition (ds, hr) select * from srcpart where ds is not null
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+POSTHOOK: Output: default@analyze_srcpart_n4@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n4@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@analyze_srcpart_n4@ds=2008-04-09/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n4@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: analyze_srcpart_n4 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n4 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n4 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n4 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n4 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n4 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n4 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_n4 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain analyze table analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr) compute statistics
PREHOOK: type: QUERY
-POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+POSTHOOK: query: explain analyze table analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr) compute statistics
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -46,35 +46,35 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: analyze_srcpart
+ alias: analyze_srcpart_n4
Statistics: Num rows: 392 Data size: 232480 Basic stats: COMPLETE Column stats: NONE
Stage: Stage-1
Stats Work
Basic Stats Work:
-PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+PREHOOK: query: analyze table analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr) compute statistics
PREHOOK: type: QUERY
-PREHOOK: Input: default@analyze_srcpart
-PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@analyze_srcpart
-PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
+PREHOOK: Input: default@analyze_srcpart_n4
+PREHOOK: Input: default@analyze_srcpart_n4@ds=2008-04-08/hr=11
+PREHOOK: Input: default@analyze_srcpart_n4@ds=2008-04-08/hr=12
+PREHOOK: Output: default@analyze_srcpart_n4
+PREHOOK: Output: default@analyze_srcpart_n4@ds=2008-04-08/hr=11
+PREHOOK: Output: default@analyze_srcpart_n4@ds=2008-04-08/hr=12
+POSTHOOK: query: analyze table analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr) compute statistics
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@analyze_srcpart
-POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@analyze_srcpart
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
-PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: Input: default@analyze_srcpart_n4
+POSTHOOK: Input: default@analyze_srcpart_n4@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@analyze_srcpart_n4@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@analyze_srcpart_n4
+POSTHOOK: Output: default@analyze_srcpart_n4@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_n4@ds=2008-04-08/hr=12
+PREHOOK: query: describe formatted analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr=11)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: Input: default@analyze_srcpart_n4
+POSTHOOK: query: describe formatted analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr=11)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n4
# col_name data_type comment
key string default
value string default
@@ -87,7 +87,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-08, 11]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n4
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
@@ -107,12 +107,12 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
+PREHOOK: query: describe formatted analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr=12)
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@analyze_srcpart
-POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
+PREHOOK: Input: default@analyze_srcpart_n4
+POSTHOOK: query: describe formatted analyze_srcpart_n4 PARTITION(ds='2008-04-08',hr=12)
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@analyze_srcpart
+POSTHOOK: Input: default@analyze_srcpart_n4
# col_name data_type comment
key string default
value string default
@@ -125,7 +125,7 @@ hr string
# Detailed Partition Information
Partition Value: [2008-04-08, 12]
Database: default
-Table: analyze_srcpart
+Table: analyze_srcpart_n4
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE
{\"BASIC_STATS\":\"true\"} @@ -145,12 +145,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart +PREHOOK: query: describe formatted analyze_srcpart_n4 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart +PREHOOK: Input: default@analyze_srcpart_n4 +POSTHOOK: query: describe formatted analyze_srcpart_n4 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n4 # col_name data_type comment key string default value string default diff --git a/ql/src/test/results/clientpositive/stats8.q.out b/ql/src/test/results/clientpositive/stats8.q.out index da92f96f6a..e68b1d1d4e 100644 --- a/ql/src/test/results/clientpositive/stats8.q.out +++ b/ql/src/test/results/clientpositive/stats8.q.out @@ -1,41 +1,41 @@ -PREHOOK: query: create table analyze_srcpart like srcpart +PREHOOK: query: create table analyze_srcpart_n1 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@analyze_srcpart -POSTHOOK: query: create table analyze_srcpart like srcpart +PREHOOK: Output: default@analyze_srcpart_n1 +POSTHOOK: query: create table analyze_srcpart_n1 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@analyze_srcpart -PREHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null +POSTHOOK: Output: default@analyze_srcpart_n1 +PREHOOK: query: insert overwrite table analyze_srcpart_n1 partition (ds, hr) select * from srcpart where ds is not null PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@analyze_srcpart -POSTHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null +PREHOOK: Output: default@analyze_srcpart_n1 +POSTHOOK: query: insert overwrite table analyze_srcpart_n1 partition (ds, hr) select * from srcpart where ds is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, 
comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: analyze_srcpart_n1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n1 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n1 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n1 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n1 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) compute statistics PREHOOK: type: QUERY -POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics +POSTHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) compute statistics POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -46,31 +46,31 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: analyze_srcpart + alias: analyze_srcpart_n1 Statistics: Num rows: 392 Data size: 232480 Basic stats: COMPLETE Column stats: NONE Stage: Stage-1 Stats Work Basic Stats Work: -PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics +PREHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@analyze_srcpart -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@analyze_srcpart -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 -POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics +PREHOOK: Input: default@analyze_srcpart_n1 +PREHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +PREHOOK: Output: default@analyze_srcpart_n1 +PREHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +POSTHOOK: query: analyze table 
analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@analyze_srcpart -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@analyze_srcpart -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n1 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +PREHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -83,7 +83,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -103,12 +103,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart +PREHOOK: query: describe formatted analyze_srcpart_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -142,9 +142,9 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics +PREHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) compute statistics PREHOOK: type: QUERY -POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics +POSTHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) compute statistics POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -155,31 +155,31 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: analyze_srcpart + alias: analyze_srcpart_n1 Statistics: Num rows: 500 Data size: 5312 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats Work Basic Stats Work: -PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics +PREHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@analyze_srcpart -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@analyze_srcpart -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics +PREHOOK: Input: default@analyze_srcpart_n1 +PREHOOK: Input: 
default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +PREHOOK: Output: default@analyze_srcpart_n1 +PREHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +POSTHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@analyze_srcpart -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@analyze_srcpart -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12) +POSTHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@analyze_srcpart_n1 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +PREHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12) +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -192,7 +192,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 12] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -212,9 +212,9 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics +PREHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) compute statistics PREHOOK: type: QUERY -POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics +POSTHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) compute statistics POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -225,31 +225,31 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: analyze_srcpart + alias: analyze_srcpart_n1 Statistics: Num rows: 1000 Data size: 10624 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats Work Basic Stats Work: -PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics +PREHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@analyze_srcpart -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11 -PREHOOK: Output: default@analyze_srcpart -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11 -POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics +PREHOOK: Input: default@analyze_srcpart_n1 +PREHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +PREHOOK: Output: default@analyze_srcpart_n1 +PREHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +POSTHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@analyze_srcpart -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@analyze_srcpart -POSTHOOK: Output: 
default@analyze_srcpart@ds=2008-04-09/hr=11 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11) +POSTHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n1 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +PREHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11) +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -262,7 +262,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-09, 11] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -282,9 +282,9 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics +PREHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) compute statistics PREHOOK: type: QUERY -POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics +POSTHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) compute statistics POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -295,31 +295,31 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: analyze_srcpart + alias: analyze_srcpart_n1 Statistics: Num rows: 1500 Data size: 15936 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats Work Basic Stats Work: -PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics +PREHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@analyze_srcpart -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@analyze_srcpart -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 -POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics +PREHOOK: Input: default@analyze_srcpart_n1 +PREHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +PREHOOK: Output: default@analyze_srcpart_n1 +PREHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +POSTHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@analyze_srcpart -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@analyze_srcpart -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12) +POSTHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@analyze_srcpart_n1 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +PREHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) PREHOOK: type: 
DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12) +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -332,7 +332,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-09, 12] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -352,9 +352,9 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds, hr) compute statistics +PREHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds, hr) compute statistics PREHOOK: type: QUERY -POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds, hr) compute statistics +POSTHOOK: query: explain analyze table analyze_srcpart_n1 PARTITION(ds, hr) compute statistics POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -365,43 +365,43 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: analyze_srcpart + alias: analyze_srcpart_n1 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Stage: Stage-1 Stats Work Basic Stats Work: -PREHOOK: query: analyze table analyze_srcpart PARTITION(ds, hr) compute statistics +PREHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds, hr) compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@analyze_srcpart -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@analyze_srcpart -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11 -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 -POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds, hr) compute statistics +PREHOOK: Input: default@analyze_srcpart_n1 +PREHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +PREHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +PREHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +PREHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +PREHOOK: Output: default@analyze_srcpart_n1 +PREHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +PREHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +PREHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +PREHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +POSTHOOK: query: analyze table analyze_srcpart_n1 PARTITION(ds, hr) compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@analyze_srcpart -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@analyze_srcpart -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: 
default@analyze_srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@analyze_srcpart_n1 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n1@ds=2008-04-09/hr=12 +PREHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -414,7 +414,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -434,12 +434,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12) +PREHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12) +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-08',hr=12) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -452,7 +452,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 12] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -472,12 +472,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11) +PREHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11) +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=11) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -490,7 +490,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-09, 11] 
Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -510,12 +510,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12) +PREHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12) +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 PARTITION(ds='2008-04-09',hr=12) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default @@ -528,7 +528,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-09, 12] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n1 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -548,12 +548,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart +PREHOOK: query: describe formatted analyze_srcpart_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart +PREHOOK: Input: default@analyze_srcpart_n1 +POSTHOOK: query: describe formatted analyze_srcpart_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n1 # col_name data_type comment key string default value string default diff --git a/ql/src/test/results/clientpositive/stats_aggregator_error_1.q.out b/ql/src/test/results/clientpositive/stats_aggregator_error_1.q.out index fec20f03aa..2195922c61 100644 --- a/ql/src/test/results/clientpositive/stats_aggregator_error_1.q.out +++ b/ql/src/test/results/clientpositive/stats_aggregator_error_1.q.out @@ -1,86 +1,86 @@ -PREHOOK: query: create table tmptable(key string, value string) +PREHOOK: query: create table tmptable_n6(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string, value string) +PREHOOK: Output: default@tmptable_n6 +POSTHOOK: query: create table tmptable_n6(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable -PREHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src +POSTHOOK: Output: default@tmptable_n6 +PREHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmptable +PREHOOK: Output: default@tmptable_n6 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type custom cannot be connected to -POSTHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src +POSTHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmptable.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), 
] -PREHOOK: query: select count(1) from tmptable +POSTHOOK: Output: default@tmptable_n6 +POSTHOOK: Lineage: tmptable_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(1) from tmptable_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n6 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from tmptable +POSTHOOK: query: select count(1) from tmptable_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n6 #### A masked pattern was here #### 500 -PREHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src +PREHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmptable -POSTHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src +PREHOOK: Output: default@tmptable_n6 +POSTHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmptable.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select count(1) from tmptable +POSTHOOK: Output: default@tmptable_n6 +POSTHOOK: Lineage: tmptable_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(1) from tmptable_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n6 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from tmptable +POSTHOOK: query: select count(1) from tmptable_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n6 #### A masked pattern was here #### 500 -PREHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src +PREHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmptable -POSTHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src +PREHOOK: Output: default@tmptable_n6 +POSTHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmptable.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select count(1) from tmptable +POSTHOOK: Output: default@tmptable_n6 +POSTHOOK: Lineage: tmptable_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(1) from tmptable_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n6 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from tmptable +POSTHOOK: query: select count(1) from tmptable_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n6 #### A masked pattern was here #### 500 
-PREHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src +PREHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmptable +PREHOOK: Output: default@tmptable_n6 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30000]: StatsPublisher cannot be obtained. There was a error to retrieve the StatsPublisher, and retrying might help. If you dont want the query to fail because accurate statistics could not be collected, set hive.stats.reliable=false -POSTHOOK: query: INSERT OVERWRITE TABLE tmptable select * from src +POSTHOOK: query: INSERT OVERWRITE TABLE tmptable_n6 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmptable.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select count(1) from tmptable +POSTHOOK: Output: default@tmptable_n6 +POSTHOOK: Lineage: tmptable_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select count(1) from tmptable_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n6 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from tmptable +POSTHOOK: query: select count(1) from tmptable_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n6 #### A masked pattern was here #### 500 diff --git a/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out b/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out index 185df2d0c7..79ba0243ba 100644 --- a/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out +++ b/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out @@ -1,14 +1,14 @@ -PREHOOK: query: create table tmptable(key string) partitioned by (part string) +PREHOOK: query: create table tmptable_n7(key string) partitioned by (part string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string) partitioned by (part string) +PREHOOK: Output: default@tmptable_n7 +POSTHOOK: query: create table tmptable_n7(key string) partitioned by (part string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable -PREHOOK: query: explain insert overwrite table tmptable partition (part) select key, value from src where key = 'no_such_value' +POSTHOOK: Output: default@tmptable_n7 +PREHOOK: query: explain insert overwrite table tmptable_n7 partition (part) select key, value from src where key = 'no_such_value' PREHOOK: type: QUERY -POSTHOOK: query: explain insert overwrite table tmptable partition (part) select key, value from src where key = 'no_such_value' +POSTHOOK: query: explain insert overwrite table tmptable_n7 partition (part) select key, value from src where key = 'no_such_value' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -41,7 +41,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: 
default.tmptable_n7 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, part @@ -96,7 +96,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n7 Stage: Stage-2 Stats Work @@ -104,7 +104,7 @@ STAGE PLANS: Column Stats Desc: Columns: key Column Types: string - Table: default.tmptable + Table: default.tmptable_n7 Stage: Stage-3 Map Reduce @@ -116,7 +116,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n7 Stage: Stage-5 Map Reduce @@ -128,7 +128,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n7 Stage: Stage-6 Move Operator @@ -136,10 +136,10 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table tmptable partition (part) select key, value from src where key = 'no_such_value' +PREHOOK: query: insert overwrite table tmptable_n7 partition (part) select key, value from src where key = 'no_such_value' PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmptable -POSTHOOK: query: insert overwrite table tmptable partition (part) select key, value from src where key = 'no_such_value' +PREHOOK: Output: default@tmptable_n7 +POSTHOOK: query: insert overwrite table tmptable_n7 partition (part) select key, value from src where key = 'no_such_value' POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/stats_empty_partition.q.out b/ql/src/test/results/clientpositive/stats_empty_partition.q.out index b679334c07..da759da2ae 100644 --- a/ql/src/test/results/clientpositive/stats_empty_partition.q.out +++ b/ql/src/test/results/clientpositive/stats_empty_partition.q.out @@ -1,27 +1,27 @@ -PREHOOK: query: create table tmptable(key string, value string) partitioned by (part string) +PREHOOK: query: create table tmptable_n11(key string, value string) partitioned by (part string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string, value string) partitioned by (part string) +PREHOOK: Output: default@tmptable_n11 +POSTHOOK: query: create table tmptable_n11(key string, value string) partitioned by (part string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable -PREHOOK: query: insert overwrite table tmptable partition (part = '1') select * from src where key = 'no_such_value' +POSTHOOK: Output: default@tmptable_n11 +PREHOOK: query: insert overwrite table tmptable_n11 partition (part = '1') select * from src where key = 'no_such_value' PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmptable@part=1 -POSTHOOK: query: insert overwrite table tmptable partition (part = '1') select * from src where key = 'no_such_value' +PREHOOK: Output: default@tmptable_n11@part=1 +POSTHOOK: query: insert overwrite table tmptable_n11 partition (part = '1') select * from src where key = 'no_such_value' 
POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmptable@part=1 -POSTHOOK: Lineage: tmptable PARTITION(part=1).key SIMPLE [] -POSTHOOK: Lineage: tmptable PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted tmptable partition (part = '1') +POSTHOOK: Output: default@tmptable_n11@part=1 +POSTHOOK: Lineage: tmptable_n11 PARTITION(part=1).key SIMPLE [] +POSTHOOK: Lineage: tmptable_n11 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted tmptable_n11 partition (part = '1') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tmptable -POSTHOOK: query: describe formatted tmptable partition (part = '1') +PREHOOK: Input: default@tmptable_n11 +POSTHOOK: query: describe formatted tmptable_n11 partition (part = '1') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n11 # col_name data_type comment key string value string @@ -33,7 +33,7 @@ part string # Detailed Partition Information Partition Value: [1] Database: default -Table: tmptable +Table: tmptable_n11 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} diff --git a/ql/src/test/results/clientpositive/stats_empty_partition2.q.out b/ql/src/test/results/clientpositive/stats_empty_partition2.q.out index 04bf3a114d..a14731ca7f 100644 --- a/ql/src/test/results/clientpositive/stats_empty_partition2.q.out +++ b/ql/src/test/results/clientpositive/stats_empty_partition2.q.out @@ -1,76 +1,76 @@ -PREHOOK: query: drop table if exists p1 +PREHOOK: query: drop table if exists p1_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists p1 +POSTHOOK: query: drop table if exists p1_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table if exists t +PREHOOK: query: drop table if exists t_n32 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists t +POSTHOOK: query: drop table if exists t_n32 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table t (a int) +PREHOOK: query: create table t_n32 (a int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (a int) +PREHOOK: Output: default@t_n32 +POSTHOOK: query: create table t_n32 (a int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1) +POSTHOOK: Output: default@t_n32 +PREHOOK: query: insert into t_n32 values (1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1) +PREHOOK: Output: default@t_n32 +POSTHOOK: query: insert into t_n32 values (1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -PREHOOK: query: create table p1 (a int) partitioned by (p int) +POSTHOOK: Output: default@t_n32 +POSTHOOK: Lineage: t_n32.a SCRIPT [] +PREHOOK: query: create table p1_n0 (a int) partitioned by (p int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@p1 -POSTHOOK: query: create table p1 (a int) partitioned by (p int) +PREHOOK: Output: default@p1_n0 +POSTHOOK: query: create table p1_n0 (a int) partitioned by (p int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@p1 -PREHOOK: 
query: insert into p1 partition (p=1) values (1) +POSTHOOK: Output: default@p1_n0 +PREHOOK: query: insert into p1_n0 partition (p=1) values (1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@p1@p=1 -POSTHOOK: query: insert into p1 partition (p=1) values (1) +PREHOOK: Output: default@p1_n0@p=1 +POSTHOOK: query: insert into p1_n0 partition (p=1) values (1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@p1@p=1 -POSTHOOK: Lineage: p1 PARTITION(p=1).a SCRIPT [] -PREHOOK: query: insert into p1 partition (p=2) values (1) +POSTHOOK: Output: default@p1_n0@p=1 +POSTHOOK: Lineage: p1_n0 PARTITION(p=1).a SCRIPT [] +PREHOOK: query: insert into p1_n0 partition (p=2) values (1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@p1@p=2 -POSTHOOK: query: insert into p1 partition (p=2) values (1) +PREHOOK: Output: default@p1_n0@p=2 +POSTHOOK: query: insert into p1_n0 partition (p=2) values (1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@p1@p=2 -POSTHOOK: Lineage: p1 PARTITION(p=2).a SCRIPT [] -PREHOOK: query: truncate table p1 +POSTHOOK: Output: default@p1_n0@p=2 +POSTHOOK: Lineage: p1_n0 PARTITION(p=2).a SCRIPT [] +PREHOOK: query: truncate table p1_n0 PREHOOK: type: TRUNCATETABLE -PREHOOK: Output: default@p1@p=1 -PREHOOK: Output: default@p1@p=2 -POSTHOOK: query: truncate table p1 +PREHOOK: Output: default@p1_n0@p=1 +PREHOOK: Output: default@p1_n0@p=2 +POSTHOOK: query: truncate table p1_n0 POSTHOOK: type: TRUNCATETABLE -POSTHOOK: Output: default@p1@p=1 -POSTHOOK: Output: default@p1@p=2 -PREHOOK: query: insert into p1 partition (p=1) values (1) +POSTHOOK: Output: default@p1_n0@p=1 +POSTHOOK: Output: default@p1_n0@p=2 +PREHOOK: query: insert into p1_n0 partition (p=1) values (1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@p1@p=1 -POSTHOOK: query: insert into p1 partition (p=1) values (1) +PREHOOK: Output: default@p1_n0@p=1 +POSTHOOK: query: insert into p1_n0 partition (p=1) values (1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@p1@p=1 -POSTHOOK: Lineage: p1 PARTITION(p=1).a SCRIPT [] +POSTHOOK: Output: default@p1_n0@p=1 +POSTHOOK: Lineage: p1_n0 PARTITION(p=1).a SCRIPT [] PREHOOK: query: explain -select * from p1 join t on (t.a=p1.a) +select * from p1_n0 join t_n32 on (t_n32.a=p1_n0.a) PREHOOK: type: QUERY POSTHOOK: query: explain -select * from p1 join t on (t.a=p1.a) +select * from p1_n0 join t_n32 on (t_n32.a=p1_n0.a) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -81,7 +81,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: p1 + alias: p1_n0 Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: a is not null (type: boolean) @@ -97,7 +97,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: int) TableScan - alias: t + alias: t_n32 Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: a is not null (type: boolean) @@ -134,12 +134,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: describe formatted p1 +PREHOOK: query: describe formatted p1_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@p1 -POSTHOOK: query: describe formatted p1 +PREHOOK: Input: default@p1_n0 +POSTHOOK: query: describe formatted p1_n0 
POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@p1 +POSTHOOK: Input: default@p1_n0 # col_name data_type comment a int diff --git a/ql/src/test/results/clientpositive/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/stats_noscan_1.q.out index c8fa18cb22..972b09d505 100644 --- a/ql/src/test/results/clientpositive/stats_noscan_1.q.out +++ b/ql/src/test/results/clientpositive/stats_noscan_1.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table analyze_srcpart like srcpart +PREHOOK: query: create table analyze_srcpart_n0 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@analyze_srcpart -POSTHOOK: query: create table analyze_srcpart like srcpart +PREHOOK: Output: default@analyze_srcpart_n0 +POSTHOOK: query: create table analyze_srcpart_n0 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@analyze_srcpart -PREHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null +POSTHOOK: Output: default@analyze_srcpart_n0 +PREHOOK: query: insert overwrite table analyze_srcpart_n0 partition (ds, hr) select * from srcpart where ds is not null PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@analyze_srcpart -POSTHOOK: query: insert overwrite table analyze_srcpart partition (ds, hr) select * from srcpart where ds is not null +PREHOOK: Output: default@analyze_srcpart_n0 +POSTHOOK: query: insert overwrite table analyze_srcpart_n0 partition (ds, hr) select * from srcpart where ds is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: 
Output: default@analyze_srcpart_n0@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n0@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@analyze_srcpart_n0@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n0@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: analyze_srcpart_n0 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n0 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n0 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n0 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n0 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n0 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n0 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: analyze_srcpart_n0 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan +analyze table analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11) compute statistics noscan PREHOOK: type: QUERY POSTHOOK: query: explain -analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan +analyze table analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11) compute statistics noscan POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -47,36 +47,36 @@ STAGE PLANS: Stats Work Basic Stats Work: -PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan +PREHOOK: query: analyze table analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11) compute statistics noscan PREHOOK: type: QUERY -PREHOOK: Input: default@analyze_srcpart -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@analyze_srcpart -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 -POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan +PREHOOK: Input: default@analyze_srcpart_n0 +PREHOOK: Input: default@analyze_srcpart_n0@ds=2008-04-08/hr=11 +PREHOOK: Output: default@analyze_srcpart_n0 +PREHOOK: Output: default@analyze_srcpart_n0@ds=2008-04-08/hr=11 +POSTHOOK: query: analyze table analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11) compute statistics noscan POSTHOOK: type: QUERY -POSTHOOK: Input: default@analyze_srcpart -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@analyze_srcpart -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 -PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics noscan +POSTHOOK: Input: default@analyze_srcpart_n0 +POSTHOOK: Input: default@analyze_srcpart_n0@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@analyze_srcpart_n0 +POSTHOOK: Output: default@analyze_srcpart_n0@ds=2008-04-08/hr=11 +PREHOOK: query: analyze table analyze_srcpart_n0 
PARTITION(ds='2008-04-08',hr=12) compute statistics noscan PREHOOK: type: QUERY -PREHOOK: Input: default@analyze_srcpart -PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@analyze_srcpart -PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics noscan +PREHOOK: Input: default@analyze_srcpart_n0 +PREHOOK: Input: default@analyze_srcpart_n0@ds=2008-04-08/hr=12 +PREHOOK: Output: default@analyze_srcpart_n0 +PREHOOK: Output: default@analyze_srcpart_n0@ds=2008-04-08/hr=12 +POSTHOOK: query: analyze table analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=12) compute statistics noscan POSTHOOK: type: QUERY -POSTHOOK: Input: default@analyze_srcpart -POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@analyze_srcpart -POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: Input: default@analyze_srcpart_n0 +POSTHOOK: Input: default@analyze_srcpart_n0@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@analyze_srcpart_n0 +POSTHOOK: Output: default@analyze_srcpart_n0@ds=2008-04-08/hr=12 +PREHOOK: query: describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +PREHOOK: Input: default@analyze_srcpart_n0 +POSTHOOK: query: describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n0 # col_name data_type comment key string default value string default @@ -89,7 +89,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -107,12 +107,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12) +PREHOOK: query: describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=12) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=12) +PREHOOK: Input: default@analyze_srcpart_n0 +POSTHOOK: query: describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-08',hr=12) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n0 # col_name data_type comment key string default value string default @@ -125,7 +125,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 12] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n0 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -143,12 +143,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11) +PREHOOK: query: describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-09',hr=11) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=11) +PREHOOK: Input: 
default@analyze_srcpart_n0 +POSTHOOK: query: describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-09',hr=11) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n0 # col_name data_type comment key string default value string default @@ -161,7 +161,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-09, 11] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n0 #### A masked pattern was here #### Partition Parameters: numFiles 1 @@ -178,12 +178,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12) +PREHOOK: query: describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-09',hr=12) PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-09',hr=12) +PREHOOK: Input: default@analyze_srcpart_n0 +POSTHOOK: query: describe formatted analyze_srcpart_n0 PARTITION(ds='2008-04-09',hr=12) POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n0 # col_name data_type comment key string default value string default @@ -196,7 +196,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-09, 12] Database: default -Table: analyze_srcpart +Table: analyze_srcpart_n0 #### A masked pattern was here #### Partition Parameters: numFiles 1 @@ -213,12 +213,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted analyze_srcpart +PREHOOK: query: describe formatted analyze_srcpart_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: describe formatted analyze_srcpart +PREHOOK: Input: default@analyze_srcpart_n0 +POSTHOOK: query: describe formatted analyze_srcpart_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n0 # col_name data_type comment key string default value string default @@ -252,14 +252,14 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table analyze_srcpart +PREHOOK: query: drop table analyze_srcpart_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@analyze_srcpart -PREHOOK: Output: default@analyze_srcpart -POSTHOOK: query: drop table analyze_srcpart +PREHOOK: Input: default@analyze_srcpart_n0 +PREHOOK: Output: default@analyze_srcpart_n0 +POSTHOOK: query: drop table analyze_srcpart_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@analyze_srcpart -POSTHOOK: Output: default@analyze_srcpart +POSTHOOK: Input: default@analyze_srcpart_n0 +POSTHOOK: Output: default@analyze_srcpart_n0 PREHOOK: query: create table analyze_srcpart_partial like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/statsfs.q.out b/ql/src/test/results/clientpositive/statsfs.q.out index aa9786be32..eb1c33ea97 100644 --- a/ql/src/test/results/clientpositive/statsfs.q.out +++ b/ql/src/test/results/clientpositive/statsfs.q.out @@ -1,51 +1,51 @@ -PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string) +PREHOOK: query: create table t1_n41 (key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (key string, value string) partitioned by 
(ds string) +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: create table t1_n41 (key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1 partition (ds = '2010') +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1_n41 partition (ds = '2010') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1 partition (ds = '2010') +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1_n41 partition (ds = '2010') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@ds=2010 -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1 partition (ds = '2011') +POSTHOOK: Output: default@t1_n41 +POSTHOOK: Output: default@t1_n41@ds=2010 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1_n41 partition (ds = '2011') PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1 partition (ds = '2011') +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1_n41 partition (ds = '2011') POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@ds=2011 -PREHOOK: query: analyze table t1 partition (ds) compute statistics +POSTHOOK: Output: default@t1_n41 +POSTHOOK: Output: default@t1_n41@ds=2011 +PREHOOK: query: analyze table t1_n41 partition (ds) compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t1@ds=2010 -PREHOOK: Input: default@t1@ds=2011 -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t1@ds=2010 -PREHOOK: Output: default@t1@ds=2011 -POSTHOOK: query: analyze table t1 partition (ds) compute statistics +PREHOOK: Input: default@t1_n41 +PREHOOK: Input: default@t1_n41@ds=2010 +PREHOOK: Input: default@t1_n41@ds=2011 +PREHOOK: Output: default@t1_n41 +PREHOOK: Output: default@t1_n41@ds=2010 +PREHOOK: Output: default@t1_n41@ds=2011 +POSTHOOK: query: analyze table t1_n41 partition (ds) compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t1@ds=2010 -POSTHOOK: Input: default@t1@ds=2011 -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t1@ds=2010 -POSTHOOK: Output: default@t1@ds=2011 -PREHOOK: query: describe formatted t1 partition (ds='2010') +POSTHOOK: Input: default@t1_n41 +POSTHOOK: Input: default@t1_n41@ds=2010 +POSTHOOK: Input: default@t1_n41@ds=2011 +POSTHOOK: Output: default@t1_n41 +POSTHOOK: Output: default@t1_n41@ds=2010 +POSTHOOK: Output: default@t1_n41@ds=2011 +PREHOOK: query: describe formatted t1_n41 partition (ds='2010') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe formatted t1 partition (ds='2010') +PREHOOK: Input: default@t1_n41 +POSTHOOK: query: describe formatted t1_n41 partition (ds='2010') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n41 # col_name data_type comment key string value string @@ -57,7 +57,7 @@ ds string # Detailed Partition Information Partition Value: [2010] Database: default -Table: t1 
+Table: t1_n41 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -77,12 +77,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted t1 partition (ds='2011') +PREHOOK: query: describe formatted t1_n41 partition (ds='2011') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe formatted t1 partition (ds='2011') +PREHOOK: Input: default@t1_n41 +POSTHOOK: query: describe formatted t1_n41 partition (ds='2011') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n41 # col_name data_type comment key string value string @@ -94,7 +94,7 @@ ds string # Detailed Partition Information Partition Value: [2011] Database: default -Table: t1 +Table: t1_n41 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} @@ -114,48 +114,48 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n41 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n41 +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: drop table t1_n41 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string) +POSTHOOK: Input: default@t1_n41 +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: create table t1_n41 (key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (key string, value string) partitioned by (ds string) +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: create table t1_n41 (key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: insert into table t1 partition (ds='2010') select * from src +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: insert into table t1_n41 partition (ds='2010') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@t1@ds=2010 -POSTHOOK: query: insert into table t1 partition (ds='2010') select * from src +PREHOOK: Output: default@t1_n41@ds=2010 +POSTHOOK: query: insert into table t1_n41 partition (ds='2010') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1@ds=2010 -POSTHOOK: Lineage: t1 PARTITION(ds=2010).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert into table t1 partition (ds='2011') select * from src +POSTHOOK: Output: default@t1_n41@ds=2010 +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2010).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert into table t1_n41 partition (ds='2011') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@t1@ds=2011 -POSTHOOK: query: insert into table t1 partition (ds='2011') select * from src +PREHOOK: Output: default@t1_n41@ds=2011 +POSTHOOK: query: insert into table 
t1_n41 partition (ds='2011') select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1@ds=2011 -POSTHOOK: Lineage: t1 PARTITION(ds=2011).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted t1 partition (ds='2010') +POSTHOOK: Output: default@t1_n41@ds=2011 +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2011).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted t1_n41 partition (ds='2010') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe formatted t1 partition (ds='2010') +PREHOOK: Input: default@t1_n41 +POSTHOOK: query: describe formatted t1_n41 partition (ds='2010') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n41 # col_name data_type comment key string value string @@ -167,7 +167,7 @@ ds string # Detailed Partition Information Partition Value: [2010] Database: default -Table: t1 +Table: t1_n41 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -187,12 +187,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted t1 partition (ds='2011') +PREHOOK: query: describe formatted t1_n41 partition (ds='2011') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe formatted t1 partition (ds='2011') +PREHOOK: Input: default@t1_n41 +POSTHOOK: query: describe formatted t1_n41 partition (ds='2011') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n41 # col_name data_type comment key string value string @@ -204,7 +204,7 @@ ds string # Detailed Partition Information Partition Value: [2011] Database: default -Table: t1 +Table: t1_n41 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -224,44 +224,44 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n41 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n41 +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: drop table t1_n41 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t1 (key string, value string) +POSTHOOK: Input: default@t1_n41 +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: create table t1_n41 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (key string, value string) +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: create table t1_n41 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1 +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1_n41 PREHOOK: type: LOAD #### A masked 
pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1 +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t1_n41 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: analyze table t1 compute statistics +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: analyze table t1_n41 compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: analyze table t1 compute statistics +PREHOOK: Input: default@t1_n41 +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: analyze table t1_n41 compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: describe formatted t1 +POSTHOOK: Input: default@t1_n41 +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: describe formatted t1_n41 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe formatted t1 +PREHOOK: Input: default@t1_n41 +POSTHOOK: query: describe formatted t1_n41 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n41 # col_name data_type comment key string value string @@ -291,38 +291,38 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n41 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n41 +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: drop table t1_n41 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t1 (key string, value string) +POSTHOOK: Input: default@t1_n41 +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: create table t1_n41 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (key string, value string) +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: create table t1_n41 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: insert into table t1 select * from src +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: insert into table t1_n41 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@t1 -POSTHOOK: query: insert into table t1 select * from src +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: insert into table t1_n41 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted t1 +POSTHOOK: Output: default@t1_n41 +POSTHOOK: Lineage: t1_n41.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted t1_n41 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe formatted t1 +PREHOOK: Input: default@t1_n41 +POSTHOOK: query: describe formatted t1_n41 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n41 # col_name data_type comment 
key string value string @@ -352,55 +352,55 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n41 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n41 +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: drop table t1_n41 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string, hr string) +POSTHOOK: Input: default@t1_n41 +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: create table t1_n41 (key string, value string) partitioned by (ds string, hr string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (key string, value string) partitioned by (ds string, hr string) +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: create table t1_n41 (key string, value string) partitioned by (ds string, hr string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: insert into table t1 partition (ds,hr) select * from srcpart +POSTHOOK: Output: default@t1_n41 +PREHOOK: query: insert into table t1_n41 partition (ds,hr) select * from srcpart PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@t1 -POSTHOOK: query: insert into table t1 partition (ds,hr) select * from srcpart +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: insert into table t1_n41 partition (ds,hr) select * from srcpart POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@t1@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@t1@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@t1@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@t1@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: t1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: t1 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe formatted t1 partition (ds='2008-04-08',hr='11') +POSTHOOK: Output: 
default@t1_n41@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@t1_n41@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@t1_n41@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@t1_n41@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: t1_n41 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe formatted t1_n41 partition (ds='2008-04-08',hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe formatted t1 partition (ds='2008-04-08',hr='11') +PREHOOK: Input: default@t1_n41 +POSTHOOK: query: describe formatted t1_n41 partition (ds='2008-04-08',hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n41 # col_name data_type comment key string value string @@ -413,7 +413,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: t1 +Table: t1_n41 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -433,12 +433,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: describe formatted t1 partition (ds='2008-04-09',hr='12') +PREHOOK: query: describe formatted t1_n41 partition (ds='2008-04-09',hr='12') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@t1 -POSTHOOK: query: describe formatted t1 partition (ds='2008-04-09',hr='12') +PREHOOK: Input: default@t1_n41 +POSTHOOK: query: describe formatted t1_n41 partition (ds='2008-04-09',hr='12') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n41 # col_name data_type comment key string value string @@ -451,7 +451,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-09, 12] Database: default -Table: t1 +Table: t1_n41 #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}} @@ -471,11 +471,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n41 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n41 +PREHOOK: Output: default@t1_n41 +POSTHOOK: query: drop table t1_n41 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n41 
+POSTHOOK: Output: default@t1_n41 diff --git a/ql/src/test/results/clientpositive/struct_in_view.q.out b/ql/src/test/results/clientpositive/struct_in_view.q.out index bea35fcce5..9a09350e1e 100644 --- a/ql/src/test/results/clientpositive/struct_in_view.q.out +++ b/ql/src/test/results/clientpositive/struct_in_view.q.out @@ -56,25 +56,25 @@ POSTHOOK: query: drop table testreserved POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@testreserved POSTHOOK: Output: default@testreserved -PREHOOK: query: create table s (default struct<src: struct<`end`: struct<key: string>, id: string>, id: string>) +PREHOOK: query: create table s_n1 (default struct<src: struct<`end`: struct<key: string>, id: string>, id: string>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@s -POSTHOOK: query: create table s (default struct<src: struct<`end`: struct<key: string>, id: string>, id: string>) +PREHOOK: Output: default@s_n1 +POSTHOOK: query: create table s_n1 (default struct<src: struct<`end`: struct<key: string>, id: string>, id: string>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@s -PREHOOK: query: create view vs1 as select default.src.`end`.key from s +POSTHOOK: Output: default@s_n1 +PREHOOK: query: create view vs1 as select default.src.`end`.key from s_n1 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@s +PREHOOK: Input: default@s_n1 PREHOOK: Output: database:default PREHOOK: Output: default@vs1 -POSTHOOK: query: create view vs1 as select default.src.`end`.key from s +POSTHOOK: query: create view vs1 as select default.src.`end`.key from s_n1 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@s +POSTHOOK: Input: default@s_n1 POSTHOOK: Output: database:default POSTHOOK: Output: default@vs1 -POSTHOOK: Lineage: vs1.key EXPRESSION [(s)s.FieldSchema(name:default, type:struct<src:struct<end:struct<key:string>,id:string>,id:string>, comment:null), ] +POSTHOOK: Lineage: vs1.key EXPRESSION [(s_n1)s_n1.FieldSchema(name:default, type:struct<src:struct<end:struct<key:string>,id:string>,id:string>, comment:null), ] PREHOOK: query: describe extended vs1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@vs1 @@ -84,17 +84,17 @@ POSTHOOK: Input: default@vs1 key string #### A masked pattern was here #### -PREHOOK: query: create view vs2 as select default.src.`end` from s +PREHOOK: query: create view vs2 as select default.src.`end` from s_n1 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@s +PREHOOK: Input: default@s_n1 PREHOOK: Output: database:default PREHOOK: Output: default@vs2 -POSTHOOK: query: create view vs2 as select default.src.`end` from s +POSTHOOK: query: create view vs2 as select default.src.`end` from s_n1 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@s +POSTHOOK: Input: default@s_n1 POSTHOOK: Output: database:default POSTHOOK: Output: default@vs2 -POSTHOOK: Lineage: vs2.end EXPRESSION [(s)s.FieldSchema(name:default, type:struct<src:struct<end:struct<key:string>,id:string>,id:string>, comment:null), ] +POSTHOOK: Lineage: vs2.end EXPRESSION [(s_n1)s_n1.FieldSchema(name:default, type:struct<src:struct<end:struct<key:string>,id:string>,id:string>, comment:null), ] PREHOOK: query: describe extended vs2 PREHOOK: type: DESCTABLE PREHOOK: Input: default@vs2 @@ -120,92 +120,92 @@ POSTHOOK: query: drop view vs2 POSTHOOK: type: DROPVIEW POSTHOOK: Input: default@vs2 POSTHOOK: Output: default@vs2 -PREHOOK: query: create view v as select named_struct('key', 1).key from src limit 1 +PREHOOK: query: create view v_n3 as select named_struct('key', 1).key from src limit 1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query:
create view v_n3 as select named_struct('key', 1).key from src limit 1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.key EXPRESSION [] -PREHOOK: query: desc extended v +POSTHOOK: Output: default@v_n3 +POSTHOOK: Lineage: v_n3.key EXPRESSION [] +PREHOOK: query: desc extended v_n3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc extended v +PREHOOK: Input: default@v_n3 +POSTHOOK: query: desc extended v_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n3 key int #### A masked pattern was here #### -PREHOOK: query: select * from v +PREHOOK: query: select * from v_n3 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from v +POSTHOOK: query: select * from v_n3 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n3 #### A masked pattern was here #### 1 -PREHOOK: query: select * from v +PREHOOK: query: select * from v_n3 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from v +POSTHOOK: query: select * from v_n3 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n3 #### A masked pattern was here #### 1 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n3 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n3 +PREHOOK: Output: default@v_n3 +POSTHOOK: query: drop view v_n3 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as select named_struct('end', 1).`end` from src limit 1 +POSTHOOK: Input: default@v_n3 +POSTHOOK: Output: default@v_n3 +PREHOOK: query: create view v_n3 as select named_struct('end', 1).`end` from src limit 1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select named_struct('end', 1).`end` from src limit 1 +PREHOOK: Output: default@v_n3 +POSTHOOK: query: create view v_n3 as select named_struct('end', 1).`end` from src limit 1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.end EXPRESSION [] -PREHOOK: query: desc extended v +POSTHOOK: Output: default@v_n3 +POSTHOOK: Lineage: v_n3.end EXPRESSION [] +PREHOOK: query: desc extended v_n3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc extended v +PREHOOK: Input: default@v_n3 +POSTHOOK: query: desc extended v_n3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n3 end int #### A masked pattern was here #### -PREHOOK: query: select * from v +PREHOOK: query: select * from v_n3 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from v +POSTHOOK: query: select * from v_n3 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n3 #### A masked pattern was here #### 1 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n3 PREHOOK: type: DROPVIEW -PREHOOK: Input: 
default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n3 +PREHOOK: Output: default@v_n3 +POSTHOOK: query: drop view v_n3 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v +POSTHOOK: Input: default@v_n3 +POSTHOOK: Output: default@v_n3 diff --git a/ql/src/test/results/clientpositive/structin.q.out b/ql/src/test/results/clientpositive/structin.q.out index 252b4acc4a..8b2de4b8fc 100644 --- a/ql/src/test/results/clientpositive/structin.q.out +++ b/ql/src/test/results/clientpositive/structin.q.out @@ -1,12 +1,12 @@ -PREHOOK: query: create table t11 (`id` string, `lineid` string) +PREHOOK: query: create table t11_n1 (`id` string, `lineid` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t11 -POSTHOOK: query: create table t11 (`id` string, `lineid` string) +PREHOOK: Output: default@t11_n1 +POSTHOOK: query: create table t11_n1 (`id` string, `lineid` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t11 -PREHOOK: query: explain select * from t11 where struct(`id`, `lineid`) +POSTHOOK: Output: default@t11_n1 +PREHOOK: query: explain select * from t11_n1 where struct(`id`, `lineid`) IN ( struct('1234-1111-0074578664','3'), struct('1234-1111-0074578695','1'), @@ -19,7 +19,7 @@ struct('1234-1111-0074019610','1'), struct('1234-1111-0074022106','1') ) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t11 where struct(`id`, `lineid`) +POSTHOOK: query: explain select * from t11_n1 where struct(`id`, `lineid`) IN ( struct('1234-1111-0074578664','3'), struct('1234-1111-0074578695','1'), @@ -41,7 +41,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t11 + alias: t11_n1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (struct(id,lineid)) IN (const struct('1234-1111-0074578664','3'), const struct('1234-1111-0074578695','1'), const struct('1234-1111-0074580704','1'), const struct('1234-1111-0074581619','2'), const struct('1234-1111-0074582745','1'), const struct('1234-1111-0074586625','1'), const struct('1234-1111-0074019112','1'), const struct('1234-1111-0074019610','1'), const struct('1234-1111-0074022106','1')) (type: boolean) @@ -65,13 +65,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain select * from t11 where struct(`id`, `lineid`) +PREHOOK: query: explain select * from t11_n1 where struct(`id`, `lineid`) IN ( struct('1234-1111-0074578664','3'), struct('1234-1111-0074578695',1) ) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t11 where struct(`id`, `lineid`) +POSTHOOK: query: explain select * from t11_n1 where struct(`id`, `lineid`) IN ( struct('1234-1111-0074578664','3'), struct('1234-1111-0074578695',1) @@ -86,7 +86,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t11 + alias: t11_n1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (struct(id,lineid)) IN (const struct('1234-1111-0074578664','3'), const struct('1234-1111-0074578695',1)) (type: boolean) diff --git a/ql/src/test/results/clientpositive/subquery_exists.q.out b/ql/src/test/results/clientpositive/subquery_exists.q.out index 263afd984e..3c053c0fc6 100644 --- a/ql/src/test/results/clientpositive/subquery_exists.q.out +++ b/ql/src/test/results/clientpositive/subquery_exists.q.out @@ -113,7 +113,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: create view cv1 as 
+PREHOOK: query: create view cv1_n1 as select * from src b where exists @@ -123,8 +123,8 @@ where exists PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@cv1 -POSTHOOK: query: create view cv1 as +PREHOOK: Output: default@cv1_n1 +POSTHOOK: query: create view cv1_n1 as select * from src b where exists @@ -134,17 +134,17 @@ where exists POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@cv1 -POSTHOOK: Lineage: cv1.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: cv1.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from cv1 +POSTHOOK: Output: default@cv1_n1 +POSTHOOK: Lineage: cv1_n1.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: cv1_n1.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from cv1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@cv1 +PREHOOK: Input: default@cv1_n1 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from cv1 +POSTHOOK: query: select * from cv1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@cv1 +POSTHOOK: Input: default@cv1_n1 POSTHOOK: Input: default@src #### A masked pattern was here #### 90 val_90 @@ -900,36 +900,36 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: create table t(i int) +PREHOOK: query: create table t_n12(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t(i int) +PREHOOK: Output: default@t_n12 +POSTHOOK: query: create table t_n12(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values(1) +POSTHOOK: Output: default@t_n12 +PREHOOK: query: insert into t_n12 values(1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values(1) +PREHOOK: Output: default@t_n12 +POSTHOOK: query: insert into t_n12 values(1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.i SCRIPT [] -PREHOOK: query: insert into t values(0) +POSTHOOK: Output: default@t_n12 +POSTHOOK: Lineage: t_n12.i SCRIPT [] +PREHOOK: query: insert into t_n12 values(0) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values(0) +PREHOOK: Output: default@t_n12 +POSTHOOK: query: insert into t_n12 values(0) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.i SCRIPT [] +POSTHOOK: Output: default@t_n12 +POSTHOOK: Lineage: t_n12.i SCRIPT [] Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: explain select * from t where exists (select count(*) from src where 1=2) +PREHOOK: query: explain select * from t_n12 where exists (select count(*) from src where 1=2) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t where exists (select count(*) from src where 1=2) +POSTHOOK: query: explain select * from t_n12 where exists (select count(*) from src where 1=2) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -977,7 +977,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - 
alias: t + alias: t_n12 Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: i (type: int) @@ -1015,68 +1015,68 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: select * from t where exists (select count(*) from src where 1=2) +PREHOOK: query: select * from t_n12 where exists (select count(*) from src where 1=2) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n12 #### A masked pattern was here #### -POSTHOOK: query: select * from t where exists (select count(*) from src where 1=2) +POSTHOOK: query: select * from t_n12 where exists (select count(*) from src where 1=2) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n12 #### A masked pattern was here #### 0 1 -PREHOOK: query: drop table t +PREHOOK: query: drop table t_n12 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t -PREHOOK: Output: default@t -POSTHOOK: query: drop table t +PREHOOK: Input: default@t_n12 +PREHOOK: Output: default@t_n12 +POSTHOOK: query: drop table t_n12 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t -POSTHOOK: Output: default@t -PREHOOK: query: drop table if exists tx1 +POSTHOOK: Input: default@t_n12 +POSTHOOK: Output: default@t_n12 +PREHOOK: query: drop table if exists tx1_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists tx1 +POSTHOOK: query: drop table if exists tx1_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tx1 (a integer,b integer) +PREHOOK: query: create table tx1_n0 (a integer,b integer) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tx1 -POSTHOOK: query: create table tx1 (a integer,b integer) +PREHOOK: Output: default@tx1_n0 +POSTHOOK: query: create table tx1_n0 (a integer,b integer) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tx1 -PREHOOK: query: insert into tx1 values (1, 1), +POSTHOOK: Output: default@tx1_n0 +PREHOOK: query: insert into tx1_n0 values (1, 1), (1, 2), (1, 3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@tx1 -POSTHOOK: query: insert into tx1 values (1, 1), +PREHOOK: Output: default@tx1_n0 +POSTHOOK: query: insert into tx1_n0 values (1, 1), (1, 2), (1, 3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@tx1 -POSTHOOK: Lineage: tx1.a SCRIPT [] -POSTHOOK: Lineage: tx1.b SCRIPT [] -PREHOOK: query: select count(*) as result,3 as expected from tx1 u - where exists (select * from tx1 v where u.a=v.a and u.b <> v.b) +POSTHOOK: Output: default@tx1_n0 +POSTHOOK: Lineage: tx1_n0.a SCRIPT [] +POSTHOOK: Lineage: tx1_n0.b SCRIPT [] +PREHOOK: query: select count(*) as result,3 as expected from tx1_n0 u + where exists (select * from tx1_n0 v where u.a=v.a and u.b <> v.b) PREHOOK: type: QUERY -PREHOOK: Input: default@tx1 +PREHOOK: Input: default@tx1_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) as result,3 as expected from tx1 u - where exists (select * from tx1 v where u.a=v.a and u.b <> v.b) +POSTHOOK: query: select count(*) as result,3 as expected from tx1_n0 u + where exists (select * from tx1_n0 v where u.a=v.a and u.b <> v.b) POSTHOOK: type: QUERY -POSTHOOK: Input: default@tx1 +POSTHOOK: Input: default@tx1_n0 #### A masked pattern was here #### 3 3 -PREHOOK: query: explain select count(*) as result,3 as 
expected from tx1 u - where exists (select * from tx1 v where u.a=v.a and u.b <> v.b) +PREHOOK: query: explain select count(*) as result,3 as expected from tx1_n0 u + where exists (select * from tx1_n0 v where u.a=v.a and u.b <> v.b) PREHOOK: type: QUERY -POSTHOOK: query: explain select count(*) as result,3 as expected from tx1 u - where exists (select * from tx1 v where u.a=v.a and u.b <> v.b) +POSTHOOK: query: explain select count(*) as result,3 as expected from tx1_n0 u + where exists (select * from tx1_n0 v where u.a=v.a and u.b <> v.b) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1181,53 +1181,53 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: drop table tx1 +PREHOOK: query: drop table tx1_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tx1 -PREHOOK: Output: default@tx1 -POSTHOOK: query: drop table tx1 +PREHOOK: Input: default@tx1_n0 +PREHOOK: Output: default@tx1_n0 +POSTHOOK: query: drop table tx1_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tx1 -POSTHOOK: Output: default@tx1 -PREHOOK: query: create table t1(i int, j int) +POSTHOOK: Input: default@tx1_n0 +POSTHOOK: Output: default@tx1_n0 +PREHOOK: query: create table t1_n68(i int, j int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1(i int, j int) +PREHOOK: Output: default@t1_n68 +POSTHOOK: query: create table t1_n68(i int, j int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: insert into t1 values(4,1) +POSTHOOK: Output: default@t1_n68 +PREHOOK: query: insert into t1_n68 values(4,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t1 -POSTHOOK: query: insert into t1 values(4,1) +PREHOOK: Output: default@t1_n68 +POSTHOOK: query: insert into t1_n68 values(4,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.i SCRIPT [] -POSTHOOK: Lineage: t1.j SCRIPT [] -PREHOOK: query: create table t2(i int, j int) +POSTHOOK: Output: default@t1_n68 +POSTHOOK: Lineage: t1_n68.i SCRIPT [] +POSTHOOK: Lineage: t1_n68.j SCRIPT [] +PREHOOK: query: create table t2_n41(i int, j int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2(i int, j int) +PREHOOK: Output: default@t2_n41 +POSTHOOK: query: create table t2_n41(i int, j int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: insert into t2 values(4,2),(4,3),(4,5) +POSTHOOK: Output: default@t2_n41 +PREHOOK: query: insert into t2_n41 values(4,2),(4,3),(4,5) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t2 -POSTHOOK: query: insert into t2 values(4,2),(4,3),(4,5) +PREHOOK: Output: default@t2_n41 +POSTHOOK: query: insert into t2_n41 values(4,2),(4,3),(4,5) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.i SCRIPT [] -POSTHOOK: Lineage: t2.j SCRIPT [] -PREHOOK: query: explain select * from t1 where t1.i in (select t2.i from t2 where t2.j <> t1.j) +POSTHOOK: Output: default@t2_n41 +POSTHOOK: Lineage: t2_n41.i SCRIPT [] +POSTHOOK: Lineage: t2_n41.j SCRIPT [] +PREHOOK: query: explain select * from t1_n68 where t1_n68.i in (select t2_n41.i from t2_n41 where t2_n41.j <> t1_n68.j) PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where t1.i in (select t2.i 
from t2 where t2.j <> t1.j) +POSTHOOK: query: explain select * from t1_n68 where t1_n68.i in (select t2_n41.i from t2_n41 where t2_n41.j <> t1_n68.j) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1238,7 +1238,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n68 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: i is not null (type: boolean) @@ -1254,7 +1254,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int) TableScan - alias: t2 + alias: t2_n41 Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (i is not null and j is not null) (type: boolean) @@ -1302,30 +1302,30 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from t1 where t1.i in (select t2.i from t2 where t2.j <> t1.j) +PREHOOK: query: select * from t1_n68 where t1_n68.i in (select t2_n41.i from t2_n41 where t2_n41.j <> t1_n68.j) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n68 +PREHOOK: Input: default@t2_n41 #### A masked pattern was here #### -POSTHOOK: query: select * from t1 where t1.i in (select t2.i from t2 where t2.j <> t1.j) +POSTHOOK: query: select * from t1_n68 where t1_n68.i in (select t2_n41.i from t2_n41 where t2_n41.j <> t1_n68.j) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n68 +POSTHOOK: Input: default@t2_n41 #### A masked pattern was here #### 4 1 -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n68 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n68 +PREHOOK: Output: default@t1_n68 +POSTHOOK: query: drop table t1_n68 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 -PREHOOK: query: drop table t2 +POSTHOOK: Input: default@t1_n68 +POSTHOOK: Output: default@t1_n68 +PREHOOK: query: drop table t2_n41 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t2 -PREHOOK: Output: default@t2 -POSTHOOK: query: drop table t2 +PREHOOK: Input: default@t2_n41 +PREHOOK: Output: default@t2_n41 +POSTHOOK: query: drop table t2_n41 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t2 -POSTHOOK: Output: default@t2 +POSTHOOK: Input: default@t2_n41 +POSTHOOK: Output: default@t2_n41 diff --git a/ql/src/test/results/clientpositive/subquery_exists_having.q.out b/ql/src/test/results/clientpositive/subquery_exists_having.q.out index 6463fc2fa6..e9253aef7c 100644 --- a/ql/src/test/results/clientpositive/subquery_exists_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_exists_having.q.out @@ -285,7 +285,7 @@ POSTHOOK: Input: default@src 96 1 97 2 98 2 -PREHOOK: query: create view cv1 as +PREHOOK: query: create view cv1_n6 as select b.key, count(*) as c from src b group by b.key @@ -297,8 +297,8 @@ having exists PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@cv1 -POSTHOOK: query: create view cv1 as +PREHOOK: Output: default@cv1_n6 +POSTHOOK: query: create view cv1_n6 as select b.key, count(*) as c from src b group by b.key @@ -310,17 +310,17 @@ having exists POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@cv1 -POSTHOOK: Lineage: cv1.c EXPRESSION [(src)b.null, ] -POSTHOOK: Lineage: 
cv1.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: select * from cv1 +POSTHOOK: Output: default@cv1_n6 +POSTHOOK: Lineage: cv1_n6.c EXPRESSION [(src)b.null, ] +POSTHOOK: Lineage: cv1_n6.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: select * from cv1_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@cv1 +PREHOOK: Input: default@cv1_n6 PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: select * from cv1 +POSTHOOK: query: select * from cv1_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@cv1 +POSTHOOK: Input: default@cv1_n6 POSTHOOK: Input: default@src #### A masked pattern was here #### 90 3 diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.out index ff408b6528..724851e31b 100644 --- a/ql/src/test/results/clientpositive/subquery_notin_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.out @@ -793,45 +793,45 @@ POSTHOOK: Input: default@part #### A masked pattern was here #### Manufacturer#1 1173.15 Manufacturer#2 1690.68 -PREHOOK: query: CREATE TABLE t1 (c1 INT, c2 CHAR(100)) +PREHOOK: query: CREATE TABLE t1_n51 (c1 INT, c2 CHAR(100)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: CREATE TABLE t1 (c1 INT, c2 CHAR(100)) +PREHOOK: Output: default@t1_n51 +POSTHOOK: query: CREATE TABLE t1_n51 (c1 INT, c2 CHAR(100)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: INSERT INTO t1 VALUES (null,null), (1,''), (2,'abcde'), (100,'abcdefghij') +POSTHOOK: Output: default@t1_n51 +PREHOOK: query: INSERT INTO t1_n51 VALUES (null,null), (1,''), (2,'abcde'), (100,'abcdefghij') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t1 -POSTHOOK: query: INSERT INTO t1 VALUES (null,null), (1,''), (2,'abcde'), (100,'abcdefghij') +PREHOOK: Output: default@t1_n51 +POSTHOOK: query: INSERT INTO t1_n51 VALUES (null,null), (1,''), (2,'abcde'), (100,'abcdefghij') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.c1 SCRIPT [] -POSTHOOK: Lineage: t1.c2 SCRIPT [] -PREHOOK: query: CREATE TABLE t2 (c1 INT) +POSTHOOK: Output: default@t1_n51 +POSTHOOK: Lineage: t1_n51.c1 SCRIPT [] +POSTHOOK: Lineage: t1_n51.c2 SCRIPT [] +PREHOOK: query: CREATE TABLE t2_n28 (c1 INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: CREATE TABLE t2 (c1 INT) +PREHOOK: Output: default@t2_n28 +POSTHOOK: query: CREATE TABLE t2_n28 (c1 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: INSERT INTO t2 VALUES (null), (2), (100) +POSTHOOK: Output: default@t2_n28 +PREHOOK: query: INSERT INTO t2_n28 VALUES (null), (2), (100) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t2 -POSTHOOK: query: INSERT INTO t2 VALUES (null), (2), (100) +PREHOOK: Output: default@t2_n28 +POSTHOOK: query: INSERT INTO t2_n28 VALUES (null), (2), (100) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.c1 SCRIPT [] +POSTHOOK: Output: default@t2_n28 +POSTHOOK: Lineage: t2_n28.c1 SCRIPT [] Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product 
-PREHOOK: query: explain SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2) +PREHOOK: query: explain SELECT c1 FROM t1_n51 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n28) PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2) +POSTHOOK: query: explain SELECT c1 FROM t1_n51 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n28) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -846,7 +846,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n51 Statistics: Num rows: 4 Data size: 313 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c1 (type: int) @@ -950,7 +950,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n28 Statistics: Num rows: 3 Data size: 6 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c1 (type: int) @@ -983,7 +983,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n28 Statistics: Num rows: 3 Data size: 6 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c1 (type: int) @@ -1024,19 +1024,19 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2) +PREHOOK: query: SELECT c1 FROM t1_n51 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n28) PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n51 +PREHOOK: Input: default@t2_n28 #### A masked pattern was here #### -POSTHOOK: query: SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2) +POSTHOOK: query: SELECT c1 FROM t1_n51 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n28) POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n51 +POSTHOOK: Input: default@t2_n28 #### A masked pattern was here #### -PREHOOK: query: explain SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2 where t1.c1=t2.c1) +PREHOOK: query: explain SELECT c1 FROM t1_n51 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n28 where t1_n51.c1=t2_n28.c1) PREHOOK: type: QUERY -POSTHOOK: query: explain SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2 where t1.c1=t2.c1) +POSTHOOK: query: explain SELECT c1 FROM t1_n51 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n28 where t1_n51.c1=t2_n28.c1) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1052,7 +1052,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n51 Statistics: Num rows: 4 Data size: 313 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c1 (type: int) @@ -1135,7 +1135,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n28 Statistics: Num rows: 3 Data size: 6 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: c1 is not null (type: boolean) @@ -1171,7 +1171,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n28 Statistics: Num rows: 3 Data size: 6 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: c1 is not null (type: boolean) @@ -1240,7 +1240,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n51 Statistics: Num rows: 4 Data size: 313 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: c1 (type: int) @@ -1276,31 +1276,31 @@ STAGE PLANS: Processor Tree: 
ListSink
-PREHOOK: query: SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2 where t1.c1=t2.c1)
+PREHOOK: query: SELECT c1 FROM t1_n51 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n28 where t1_n51.c1=t2_n28.c1)
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n51
+PREHOOK: Input: default@t2_n28
#### A masked pattern was here ####
-POSTHOOK: query: SELECT c1 FROM t1 group by c1 having c1 NOT IN (SELECT c1 FROM t2 where t1.c1=t2.c1)
+POSTHOOK: query: SELECT c1 FROM t1_n51 group by c1 having c1 NOT IN (SELECT c1 FROM t2_n28 where t1_n51.c1=t2_n28.c1)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n51
+POSTHOOK: Input: default@t2_n28
#### A masked pattern was here ####
NULL
1
-PREHOOK: query: DROP TABLE t1
+PREHOOK: query: DROP TABLE t1_n51
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: DROP TABLE t1
+PREHOOK: Input: default@t1_n51
+PREHOOK: Output: default@t1_n51
+POSTHOOK: query: DROP TABLE t1_n51
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-PREHOOK: query: DROP TABLE t2
+POSTHOOK: Input: default@t1_n51
+POSTHOOK: Output: default@t1_n51
+PREHOOK: query: DROP TABLE t2_n28
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@t2
-POSTHOOK: query: DROP TABLE t2
+PREHOOK: Input: default@t2_n28
+PREHOOK: Output: default@t2_n28
+POSTHOOK: query: DROP TABLE t2_n28
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@t2
+POSTHOOK: Input: default@t2_n28
+POSTHOOK: Output: default@t2_n28
diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
index 76350b1448..ee40b0f1a2 100644
--- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
+++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
@@ -1,12 +1,12 @@
-PREHOOK: query: create table src11 (key1 string, value1 string)
+PREHOOK: query: create table src11_n0 (key1 string, value1 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@src11
-POSTHOOK: query: create table src11 (key1 string, value1 string)
+PREHOOK: Output: default@src11_n0
+POSTHOOK: query: create table src11_n0 (key1 string, value1 string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src11
-PREHOOK: query: create table part2(
+POSTHOOK: Output: default@src11_n0
+PREHOOK: query: create table part2_n2(
p2_partkey INT,
p2_name STRING,
p2_mfgr STRING,
@@ -19,8 +19,8 @@ PREHOOK: query: create table part2(
)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@part2
-POSTHOOK: query: create table part2(
+PREHOOK: Output: default@part2_n2
+POSTHOOK: query: create table part2_n2(
p2_partkey INT,
p2_name STRING,
p2_mfgr STRING,
@@ -33,10 +33,10 @@ POSTHOOK: query: create table part2(
)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part2
-PREHOOK: query: explain select * from src11 where src11.key1 in (select key from src where src11.value1 = value and key > '9')
+POSTHOOK: Output: default@part2_n2
+PREHOOK: query: explain select * from src11_n0 where src11_n0.key1 in (select key from src where src11_n0.value1 = value and key > '9')
PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from src11 where src11.key1 in (select key from src where src11.value1 = value and key > '9')
+POSTHOOK: query: explain select * from src11_n0 where src11_n0.key1 in (select key from src where src11_n0.value1 = value and key > '9')
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -47,7 +47,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: src11
+ alias: src11_n0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: ((key1 > '9') and value1 is not null) (type: boolean)
diff --git a/ql/src/test/results/clientpositive/tablevalues.q.out b/ql/src/test/results/clientpositive/tablevalues.q.out
index e6ad856f0a..74fda005d5 100644
--- a/ql/src/test/results/clientpositive/tablevalues.q.out
+++ b/ql/src/test/results/clientpositive/tablevalues.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE mytbl AS
+PREHOOK: query: CREATE TABLE mytbl_n1 AS
SELECT key, value
FROM src
ORDER BY key
@@ -6,8 +6,8 @@ LIMIT 5
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@mytbl
-POSTHOOK: query: CREATE TABLE mytbl AS
+PREHOOK: Output: default@mytbl_n1
+POSTHOOK: query: CREATE TABLE mytbl_n1 AS
SELECT key, value
FROM src
ORDER BY key
@@ -15,16 +15,16 @@ LIMIT 5
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@mytbl
-POSTHOOK: Lineage: mytbl.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: mytbl.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@mytbl_n1
+POSTHOOK: Lineage: mytbl_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: mytbl_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: EXPLAIN
-INSERT INTO mytbl(key,value)
+INSERT INTO mytbl_n1(key,value)
SELECT a,b as c
FROM
TABLE(VALUES(1,2),(3,4)) AS vc(a,b)
WHERE b = 9
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
-INSERT INTO mytbl(key,value)
+INSERT INTO mytbl_n1(key,value)
SELECT a,b as c
FROM
TABLE(VALUES(1,2),(3,4)) AS vc(a,b)
WHERE b = 9
POSTHOOK: type: QUERY
@@ -67,7 +67,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.mytbl
+ name: default.mytbl_n1
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: key, value
@@ -112,7 +112,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.mytbl
+ name: default.mytbl_n1
Stage: Stage-2
Stats Work
@@ -120,7 +120,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: string, string
- Table: default.mytbl
+ Table: default.mytbl_n1
Stage: Stage-3
Map Reduce
@@ -132,7 +132,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.mytbl
+ name: default.mytbl_n1
Stage: Stage-5
Map Reduce
@@ -144,7 +144,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.mytbl
+ name: default.mytbl_n1
Stage: Stage-6
Move Operator
@@ -152,31 +152,31 @@ STAGE PLANS:
hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: INSERT INTO mytbl(key,value)
+PREHOOK: query: INSERT INTO mytbl_n1(key,value)
SELECT a,b as c
FROM
TABLE(VALUES(1,2),(3,4)) AS vc(a,b)
WHERE b = 9
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@mytbl
-POSTHOOK: query: INSERT INTO mytbl(key,value)
+PREHOOK: Output: default@mytbl_n1
+POSTHOOK: query: INSERT INTO mytbl_n1(key,value)
SELECT a,b as c
FROM
TABLE(VALUES(1,2),(3,4)) AS vc(a,b)
WHERE b = 9
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@mytbl
-POSTHOOK: Lineage: mytbl.key SCRIPT []
-POSTHOOK: Lineage: mytbl.value EXPRESSION []
+POSTHOOK: Output: default@mytbl_n1
+POSTHOOK: Lineage: mytbl_n1.key SCRIPT []
+POSTHOOK: Lineage: mytbl_n1.value EXPRESSION []
PREHOOK: query: EXPLAIN
SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t
+ (SELECT key, value FROM mytbl_n1) t
LATERAL VIEW INLINE(array(struct('A', 10, t.key),struct('B', 20, t.key))) tf AS col1, col2, col3
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t
+ (SELECT key, value FROM mytbl_n1) t
LATERAL VIEW INLINE(array(struct('A', 10, t.key),struct('B', 20, t.key))) tf AS col1, col2, col3
POSTHOOK: type: QUERY
@@ -189,7 +189,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: mytbl
+ alias: mytbl_n1
Statistics: Num rows: 5 Data size: 41 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: array(struct(key,value,'A',10,key),struct(key,value,'B',20,key)) (type: array>)
@@ -218,19 +218,19 @@ STAGE PLANS:
PREHOOK: query: SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t
+ (SELECT key, value FROM mytbl_n1) t
LATERAL VIEW INLINE(array(struct('A', 10, t.key),struct('B', 20, t.key))) tf AS col1, col2, col3
PREHOOK: type: QUERY
-PREHOOK: Input: default@mytbl
+PREHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t
+ (SELECT key, value FROM mytbl_n1) t
LATERAL VIEW INLINE(array(struct('A', 10, t.key),struct('B', 20, t.key))) tf AS col1, col2, col3
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@mytbl
+POSTHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
A 10 0
B 20 0
@@ -361,13 +361,13 @@ PREHOOK: query: EXPLAIN
SELECT tf.col1, tf.col2, tf.col3
FROM
TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3),
- (SELECT key, value FROM mytbl) t
+ (SELECT key, value FROM mytbl_n1) t
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT tf.col1, tf.col2, tf.col3
FROM
TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3),
- (SELECT key, value FROM mytbl) t
+ (SELECT key, value FROM mytbl_n1) t
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -393,7 +393,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 2560 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: col1 (type: string), col2 (type: int), col3 (type: int)
TableScan
- alias: mytbl
+ alias: mytbl_n1
Statistics: Num rows: 5 Data size: 41 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -427,18 +427,18 @@ Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAP
PREHOOK: query: SELECT tf.col1, tf.col2, tf.col3
FROM
TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3),
- (SELECT key, value FROM mytbl) t
+ (SELECT key, value FROM mytbl_n1) t
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Input: default@mytbl
+PREHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT tf.col1, tf.col2, tf.col3
FROM
TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3),
- (SELECT key, value FROM mytbl) t
+ (SELECT key, value FROM mytbl_n1) t
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Input: default@mytbl
+POSTHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
B 20 30
B 20 30
@@ -546,13 +546,13 @@ Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAP
PREHOOK: query: EXPLAIN
SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -564,7 +564,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: mytbl
+ alias: mytbl_n1
Statistics: Num rows: 5 Data size: 41 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
@@ -616,19 +616,19 @@ STAGE PLANS:
Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
PREHOOK: query: SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Input: default@mytbl
+PREHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
TABLE(VALUES('A', 10, 30),('B', 20, 30)) AS tf(col1, col2, col3)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Input: default@mytbl
+POSTHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
B 20 30
A 10 30
@@ -643,13 +643,13 @@ A 10 30
PREHOOK: query: EXPLAIN
SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -661,7 +661,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: mytbl
+ alias: mytbl_n1
Statistics: Num rows: 5 Data size: 41 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: array(struct(key,value,'A',10,key),struct(key,value,'B',20,key)) (type: array>)
@@ -690,17 +690,17 @@ STAGE PLANS:
PREHOOK: query: SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3)
PREHOOK: type: QUERY
-PREHOOK: Input: default@mytbl
+PREHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT tf.col1, tf.col2, tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@mytbl
+POSTHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
A 10 0
B 20 0
@@ -715,13 +715,13 @@ B 20 100
PREHOOK: query: EXPLAIN
SELECT t.key
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT t.key
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -733,7 +733,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: mytbl
+ alias: mytbl_n1
Statistics: Num rows: 5 Data size: 41 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: array(struct(key,value,'A',10,key),struct(key,value,'B',20,key)) (type: array>)
@@ -762,17 +762,17 @@ STAGE PLANS:
PREHOOK: query: SELECT t.key
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf
PREHOOK: type: QUERY
-PREHOOK: Input: default@mytbl
+PREHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT t.key
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@mytbl
+POSTHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
0
0
@@ -787,13 +787,13 @@ POSTHOOK: Input: default@mytbl
PREHOOK: query: EXPLAIN
SELECT tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -805,7 +805,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: mytbl
+ alias: mytbl_n1
Statistics: Num rows: 5 Data size: 41 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: array(struct(key,value,'A',10,key),struct(key,value,'B',20,key)) (type: array>)
@@ -834,17 +834,17 @@ STAGE PLANS:
PREHOOK: query: SELECT tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3)
PREHOOK: type: QUERY
-PREHOOK: Input: default@mytbl
+PREHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT tf.col3
FROM
- (SELECT key, value FROM mytbl) t,
+ (SELECT key, value FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.key),('B', 20, t.key)) AS tf(col1, col2, col3)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@mytbl
+POSTHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
0
0
@@ -859,13 +859,13 @@ POSTHOOK: Input: default@mytbl
PREHOOK: query: EXPLAIN
SELECT tf.col3
FROM
- (SELECT row_number() over (order by key desc) as r FROM mytbl) t,
+ (SELECT row_number() over (order by key desc) as r FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.r),('B', 20, t.r)) AS tf(col1, col2, col3)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT tf.col3
FROM
- (SELECT row_number() over (order by key desc) as r FROM mytbl) t,
+ (SELECT row_number() over (order by key desc) as r FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.r),('B', 20, t.r)) AS tf(col1, col2, col3)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -877,7 +877,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: mytbl
+ alias: mytbl_n1
Statistics: Num rows: 5 Data size: 41 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: 0 (type: int), key (type: string)
@@ -937,17 +937,17 @@ STAGE PLANS:
PREHOOK: query: SELECT tf.col3
FROM
- (SELECT row_number() over (order by key desc) as r FROM mytbl) t,
+ (SELECT row_number() over (order by key desc) as r FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.r),('B', 20, t.r)) AS tf(col1, col2, col3)
PREHOOK: type: QUERY
-PREHOOK: Input: default@mytbl
+PREHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT tf.col3
FROM
- (SELECT row_number() over (order by key desc) as r FROM mytbl) t,
+ (SELECT row_number() over (order by key desc) as r FROM mytbl_n1) t,
LATERAL TABLE(VALUES('A', 10, t.r),('B', 20, t.r)) AS tf(col1, col2, col3)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@mytbl
+POSTHOOK: Input: default@mytbl_n1
#### A masked pattern was here ####
1
1
diff --git a/ql/src/test/results/clientpositive/temp_table_gb1.q.out b/ql/src/test/results/clientpositive/temp_table_gb1.q.out
index 1cef5964f9..7c9db8a0df 100644
--- a/ql/src/test/results/clientpositive/temp_table_gb1.q.out
+++ b/ql/src/test/results/clientpositive/temp_table_gb1.q.out
@@ -1,11 +1,11 @@
-PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest_g2_n0(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@dest_g2
-POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest_g2_n0
+POSTHOOK: query: CREATE TABLE dest_g2_n0(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest_g2
+POSTHOOK: Output: default@dest_g2_n0
PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
@@ -17,25 +17,25 @@ POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
POSTHOOK: Output: default@src_temp
PREHOOK: query: FROM src_temp
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1)
+INSERT OVERWRITE TABLE dest_g2_n0 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1)
PREHOOK: type: QUERY
PREHOOK: Input: default@src_temp
-PREHOOK: Output: default@dest_g2
+PREHOOK: Output: default@dest_g2_n0
POSTHOOK: query: FROM src_temp
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1)
+INSERT OVERWRITE TABLE dest_g2_n0 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src_temp
-POSTHOOK: Output: default@dest_g2
-POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), (src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT dest_g2.* FROM dest_g2
+POSTHOOK: Output: default@dest_g2_n0
+POSTHOOK: Lineage: dest_g2_n0.c1 EXPRESSION [(src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest_g2_n0.c2 EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), (src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest_g2_n0.key EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: SELECT dest_g2_n0.* FROM dest_g2_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@dest_g2
+PREHOOK: Input: default@dest_g2_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest_g2.* FROM dest_g2
+POSTHOOK: query: SELECT dest_g2_n0.* FROM dest_g2_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest_g2
+POSTHOOK: Input: default@dest_g2_n0
#### A masked pattern was here ####
0 1 00.0
1 71 116414.0
@@ -47,14 +47,14 @@ POSTHOOK: Input: default@dest_g2
7 6 7735.0
8 8 8762.0
9 7 91047.0
-PREHOOK: query: DROP TABLE dest_g2
+PREHOOK: query: DROP TABLE dest_g2_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dest_g2
-PREHOOK: Output: default@dest_g2
-POSTHOOK: query: DROP TABLE dest_g2
+PREHOOK: Input: default@dest_g2_n0
+PREHOOK: Output: default@dest_g2_n0
+POSTHOOK: query: DROP TABLE dest_g2_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dest_g2
-POSTHOOK: Output: default@dest_g2
+POSTHOOK: Input: default@dest_g2_n0
+POSTHOOK: Output: default@dest_g2_n0
PREHOOK: query: DROP TABLE src_temp
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@src_temp
diff --git a/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out b/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out
index e9120cfd0c..89d2100d49 100644
--- a/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out
+++ b/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out
@@ -628,54 +628,54 @@ Manufacturer#5 1241.29
Manufacturer#5 1424.0900000000001
Manufacturer#5 1515.25
Manufacturer#5 1534.532
-PREHOOK: query: create table t1 (a1 int, b1 string)
+PREHOOK: query: create table t1_n50 (a1 int, b1 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 (a1 int, b1 string)
+PREHOOK: Output: default@t1_n50
+POSTHOOK: query: create table t1_n50 (a1 int, b1 string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table t2 (a1 int, b1 string)
+POSTHOOK: Output: default@t1_n50
+PREHOOK: query: create table t2_n27 (a1 int, b1 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2 (a1 int, b1 string)
+PREHOOK: Output: default@t2_n27
+POSTHOOK: query: create table t2_n27 (a1 int, b1 string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-PREHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select *
+POSTHOOK: Output: default@t2_n27
+PREHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1_n50 select * insert overwrite table t2_n27 select *
PREHOOK: type: QUERY
PREHOOK: Input: default@over10k
-PREHOOK: Output: default@t1
-PREHOOK: Output: default@t2
-POSTHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select *
+PREHOOK: Output: default@t1_n50
+PREHOOK: Output: default@t2_n27
+POSTHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1_n50 select * insert overwrite table t2_n27 select *
POSTHOOK: type: QUERY
POSTHOOK: Input: default@over10k
-POSTHOOK: Output: default@t1
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t1.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: t1.b1 SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: t2.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: t2.b1 SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
-PREHOOK: query: select * from t1 limit 3
+POSTHOOK: Output: default@t1_n50
+POSTHOOK: Output: default@t2_n27
+POSTHOOK: Lineage: t1_n50.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: t1_n50.b1 SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: t2_n27.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: t2_n27.b1 SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
+PREHOOK: query: select * from t1_n50 limit 3
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n50
#### A masked pattern was here ####
-POSTHOOK: query: select * from t1 limit 3
+POSTHOOK: query: select * from t1_n50 limit 3
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n50
#### A masked pattern was here ####
65542 rachel thompson
131088 oscar brown
262258 wendy steinbeck
-PREHOOK: query: select * from t2 limit 3
+PREHOOK: query: select * from t2_n27 limit 3
PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2_n27
#### A masked pattern was here ####
-POSTHOOK: query: select * from t2 limit 3
+POSTHOOK: query: select * from t2_n27 limit 3
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2_n27
#### A masked pattern was here ####
65542 rachel thompson
131088 oscar brown
diff --git a/ql/src/test/results/clientpositive/testSetQueryString.q.out b/ql/src/test/results/clientpositive/testSetQueryString.q.out
index bd0f070f36..cfd21d58ea 100644
--- a/ql/src/test/results/clientpositive/testSetQueryString.q.out
+++ b/ql/src/test/results/clientpositive/testSetQueryString.q.out
@@ -1,28 +1,28 @@
-PREHOOK: query: create table t1 (c1 int)
+PREHOOK: query: create table t1_n56 (c1 int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 (c1 int)
+PREHOOK: Output: default@t1_n56
+POSTHOOK: query: create table t1_n56 (c1 int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: insert into t1 values (1)
+POSTHOOK: Output: default@t1_n56
+PREHOOK: query: insert into t1_n56 values (1)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t1
-POSTHOOK: query: insert into t1 values (1)
+PREHOOK: Output: default@t1_n56
+POSTHOOK: query: insert into t1_n56 values (1)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.c1 SCRIPT []
-PREHOOK: query: select * from t1
+POSTHOOK: Output: default@t1_n56
+POSTHOOK: Lineage: t1_n56.c1 SCRIPT []
+PREHOOK: query: select * from t1_n56
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n56
#### A masked pattern was here ####
-POSTHOOK: query: select * from t1
+POSTHOOK: query: select * from t1_n56
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n56
#### A masked pattern was here ####
1
hive.query.string=
-select * from t1
+select * from t1_n56
diff --git a/ql/src/test/results/clientpositive/timestamp_udf.q.out b/ql/src/test/results/clientpositive/timestamp_udf.q.out
index 47f84cbfd7..452f366d41 100644
--- a/ql/src/test/results/clientpositive/timestamp_udf.q.out
+++ b/ql/src/test/results/clientpositive/timestamp_udf.q.out
@@ -1,19 +1,19 @@
-PREHOOK: query: drop table timestamp_udf
+PREHOOK: query: drop table timestamp_udf_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table timestamp_udf
+POSTHOOK: query: drop table timestamp_udf_n0
POSTHOOK: type: DROPTABLE
PREHOOK: query: drop table timestamp_udf_string
PREHOOK: type: DROPTABLE
POSTHOOK: query: drop table timestamp_udf_string
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table timestamp_udf (t timestamp)
+PREHOOK: query: create table timestamp_udf_n0 (t timestamp)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@timestamp_udf
-POSTHOOK: query: create table timestamp_udf (t timestamp)
+PREHOOK: Output: default@timestamp_udf_n0
+POSTHOOK: query: create table timestamp_udf_n0 (t timestamp)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@timestamp_udf
+POSTHOOK: Output: default@timestamp_udf_n0
PREHOOK: query: create table timestamp_udf_string (t string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -23,124 +23,124 @@ POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@timestamp_udf_string
PREHOOK: query: from (select * from src tablesample (1 rows)) s
- insert overwrite table timestamp_udf
+ insert overwrite table timestamp_udf_n0
select '2011-05-06 07:08:09.1234567'
insert overwrite table timestamp_udf_string
select '2011-05-06 07:08:09.1234567'
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@timestamp_udf
+PREHOOK: Output: default@timestamp_udf_n0
PREHOOK: Output: default@timestamp_udf_string
POSTHOOK: query: from (select * from src tablesample (1 rows)) s
- insert overwrite table timestamp_udf
+ insert overwrite table timestamp_udf_n0
select '2011-05-06 07:08:09.1234567'
insert overwrite table timestamp_udf_string
select '2011-05-06 07:08:09.1234567'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@timestamp_udf
+POSTHOOK: Output: default@timestamp_udf_n0
POSTHOOK: Output: default@timestamp_udf_string
-POSTHOOK: Lineage: timestamp_udf.t EXPRESSION []
+POSTHOOK: Lineage: timestamp_udf_n0.t EXPRESSION []
POSTHOOK: Lineage: timestamp_udf_string.t SIMPLE []
PREHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t),
weekofyear(t), hour(t), minute(t), second(t), to_date(t)
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t),
weekofyear(t), hour(t), minute(t), second(t), to_date(t)
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
1304690889 2011 5 6 6 18 7 8 9 2011-05-06
PREHOOK: query: select date_add(t, 5), date_sub(t, 10)
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select date_add(t, 5), date_sub(t, 10)
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
2011-05-11 2011-04-26
PREHOOK: query: select datediff(t, t), datediff(t, '2002-03-21'), datediff('2002-03-21', t)
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select datediff(t, t), datediff(t, '2002-03-21'), datediff('2002-03-21', t)
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
0 3333 -3333
PREHOOK: query: select from_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select from_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
2011-05-06 02:08:09.1234567
PREHOOK: query: select to_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select to_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
2011-05-06 12:08:09.1234567
PREHOOK: query: select t, from_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select t, from_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
2011-05-06 07:08:09.1234567 2011-05-06 02:08:09.1234567
PREHOOK: query: select t, from_utc_timestamp(t, 'America/Chicago'), t, from_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select t, from_utc_timestamp(t, 'America/Chicago'), t, from_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
2011-05-06 07:08:09.1234567 2011-05-06 02:08:09.1234567 2011-05-06 07:08:09.1234567 2011-05-06 02:08:09.1234567
PREHOOK: query: select t, to_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select t, to_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
2011-05-06 07:08:09.1234567 2011-05-06 12:08:09.1234567
PREHOOK: query: select t, to_utc_timestamp(t, 'America/Chicago'), t, to_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
POSTHOOK: query: select t, to_utc_timestamp(t, 'America/Chicago'), t, to_utc_timestamp(t, 'America/Chicago')
- from timestamp_udf
+ from timestamp_udf_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
#### A masked pattern was here ####
2011-05-06 07:08:09.1234567 2011-05-06 12:08:09.1234567 2011-05-06 07:08:09.1234567 2011-05-06 12:08:09.1234567
PREHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t),
@@ -198,14 +198,14 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@timestamp_udf_string
#### A masked pattern was here ####
2011-05-06 12:08:09.1234567
-PREHOOK: query: drop table timestamp_udf
+PREHOOK: query: drop table timestamp_udf_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@timestamp_udf
-PREHOOK: Output: default@timestamp_udf
-POSTHOOK: query: drop table timestamp_udf
+PREHOOK: Input: default@timestamp_udf_n0
+PREHOOK: Output: default@timestamp_udf_n0
+POSTHOOK: query: drop table timestamp_udf_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@timestamp_udf
-POSTHOOK: Output: default@timestamp_udf
+POSTHOOK: Input: default@timestamp_udf_n0
+POSTHOOK: Output: default@timestamp_udf_n0
PREHOOK: query: drop table timestamp_udf_string
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@timestamp_udf_string
diff --git a/ql/src/test/results/clientpositive/timestamptz_3.q.out b/ql/src/test/results/clientpositive/timestamptz_3.q.out
index 196c584730..3b5ea2a546 100644
--- a/ql/src/test/results/clientpositive/timestamptz_3.q.out
+++ b/ql/src/test/results/clientpositive/timestamptz_3.q.out
@@ -1,57 +1,57 @@
-PREHOOK: query: drop table tstz1
+PREHOOK: query: drop table tstz1_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table tstz1
+POSTHOOK: query: drop table tstz1_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table tstz1(t timestamp with local time zone)
+PREHOOK: query: create table tstz1_n1(t timestamp with local time zone)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@tstz1
-POSTHOOK: query: create table tstz1(t timestamp with local time zone)
+PREHOOK: Output: default@tstz1_n1
+POSTHOOK: query: create table tstz1_n1(t timestamp with local time zone)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tstz1
-PREHOOK: query: insert overwrite table tstz1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone)
+POSTHOOK: Output: default@tstz1_n1
+PREHOOK: query: insert overwrite table tstz1_n1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@tstz1
-POSTHOOK: query: insert overwrite table tstz1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone)
+PREHOOK: Output: default@tstz1_n1
+POSTHOOK: query: insert overwrite table tstz1_n1 select cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@tstz1
-POSTHOOK: Lineage: tstz1.t SIMPLE []
-PREHOOK: query: select cast(t as timestamp) from tstz1
+POSTHOOK: Output: default@tstz1_n1
+POSTHOOK: Lineage: tstz1_n1.t SIMPLE []
+PREHOOK: query: select cast(t as timestamp) from tstz1_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstz1
+PREHOOK: Input: default@tstz1_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cast(t as timestamp) from tstz1
+POSTHOOK: query: select cast(t as timestamp) from tstz1_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstz1
+POSTHOOK: Input: default@tstz1_n1
#### A masked pattern was here ####
2016-01-03 12:26:34
-PREHOOK: query: select cast(to_epoch_milli(t) as timestamp) from tstz1
+PREHOOK: query: select cast(to_epoch_milli(t) as timestamp) from tstz1_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstz1
+PREHOOK: Input: default@tstz1_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cast(to_epoch_milli(t) as timestamp) from tstz1
+POSTHOOK: query: select cast(to_epoch_milli(t) as timestamp) from tstz1_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstz1
+POSTHOOK: Input: default@tstz1_n1
#### A masked pattern was here ####
2016-01-03 12:26:34
-PREHOOK: query: select cast(t as timestamp) from tstz1
+PREHOOK: query: select cast(t as timestamp) from tstz1_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstz1
+PREHOOK: Input: default@tstz1_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cast(t as timestamp) from tstz1
+POSTHOOK: query: select cast(t as timestamp) from tstz1_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstz1
+POSTHOOK: Input: default@tstz1_n1
#### A masked pattern was here ####
2016-01-03 20:26:34
-PREHOOK: query: select cast(to_epoch_milli(t) as timestamp) from tstz1
+PREHOOK: query: select cast(to_epoch_milli(t) as timestamp) from tstz1_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@tstz1
+PREHOOK: Input: default@tstz1_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cast(to_epoch_milli(t) as timestamp) from tstz1
+POSTHOOK: query: select cast(to_epoch_milli(t) as timestamp) from tstz1_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tstz1
+POSTHOOK: Input: default@tstz1_n1
#### A masked pattern was here ####
2016-01-03 12:26:34
diff --git a/ql/src/test/results/clientpositive/truncate_column.q.out b/ql/src/test/results/clientpositive/truncate_column.q.out
index cc580728ac..b83bc31dcb 100644
--- a/ql/src/test/results/clientpositive/truncate_column.q.out
+++ b/ql/src/test/results/clientpositive/truncate_column.q.out
@@ -1,29 +1,29 @@
-PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING)
+PREHOOK: query: CREATE TABLE test_tab_n1 (key STRING, value STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING)
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: CREATE TABLE test_tab_n1 (key STRING, value STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS RCFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows)
+POSTHOOK: Output: default@test_tab_n1
+PREHOOK: query: INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows)
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_tab
-POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESC FORMATTED test_tab
+POSTHOOK: Output: default@test_tab_n1
+POSTHOOK: Lineage: test_tab_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_tab_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESC FORMATTED test_tab_n1
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: DESC FORMATTED test_tab
+PREHOOK: Input: default@test_tab_n1
+POSTHOOK: query: DESC FORMATTED test_tab_n1
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
# col_name data_type comment
key string
value string
@@ -53,13 +53,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: SELECT * FROM test_tab ORDER BY value
+PREHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
+PREHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab ORDER BY value
+POSTHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
165 val_165
238 val_238
@@ -71,20 +71,20 @@ POSTHOOK: Input: default@test_tab
484 val_484
86 val_86
98 val_98
-PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key)
+PREHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key)
PREHOOK: type: TRUNCATETABLE
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key)
+PREHOOK: Input: default@test_tab_n1
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key)
POSTHOOK: type: TRUNCATETABLE
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: DESC FORMATTED test_tab
+POSTHOOK: Input: default@test_tab_n1
+POSTHOOK: Output: default@test_tab_n1
+PREHOOK: query: DESC FORMATTED test_tab_n1
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: DESC FORMATTED test_tab
+PREHOOK: Input: default@test_tab_n1
+POSTHOOK: query: DESC FORMATTED test_tab_n1
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
# col_name data_type comment
key string
value string
@@ -113,13 +113,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: SELECT * FROM test_tab ORDER BY value
+PREHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
+PREHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab ORDER BY value
+POSTHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
NULL val_165
NULL val_238
@@ -131,30 +131,30 @@ NULL val_409
NULL val_484
NULL val_86
NULL val_98
-PREHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows)
+PREHOOK: query: INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows)
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_tab
-POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value)
+POSTHOOK: Output: default@test_tab_n1
+POSTHOOK: Lineage: test_tab_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_tab_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key, value)
PREHOOK: type: TRUNCATETABLE
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value)
+PREHOOK: Input: default@test_tab_n1
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key, value)
POSTHOOK: type: TRUNCATETABLE
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: DESC FORMATTED test_tab
+POSTHOOK: Input: default@test_tab_n1
+POSTHOOK: Output: default@test_tab_n1
+PREHOOK: query: DESC FORMATTED test_tab_n1
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: DESC FORMATTED test_tab
+PREHOOK: Input: default@test_tab_n1
+POSTHOOK: query: DESC FORMATTED test_tab_n1
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
# col_name data_type comment
key string
value string
@@ -183,13 +183,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: SELECT * FROM test_tab ORDER BY value
+PREHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
+PREHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab ORDER BY value
+POSTHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
NULL NULL
NULL NULL
@@ -201,20 +201,20 @@ NULL NULL
NULL NULL
NULL NULL
NULL NULL
-PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value)
+PREHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key, value)
PREHOOK: type: TRUNCATETABLE
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value)
+PREHOOK: Input: default@test_tab_n1
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key, value)
POSTHOOK: type: TRUNCATETABLE
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: DESC FORMATTED test_tab
+POSTHOOK: Input: default@test_tab_n1
+POSTHOOK: Output: default@test_tab_n1
+PREHOOK: query: DESC FORMATTED test_tab_n1
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: DESC FORMATTED test_tab
+PREHOOK: Input: default@test_tab_n1
+POSTHOOK: query: DESC FORMATTED test_tab_n1
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
# col_name data_type comment
key string
value string
@@ -243,13 +243,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: SELECT * FROM test_tab ORDER BY value
+PREHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
+PREHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab ORDER BY value
+POSTHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
NULL NULL
NULL NULL
@@ -261,30 +261,30 @@ NULL NULL
NULL NULL
NULL NULL
NULL NULL
-PREHOOK: query: ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: query: ALTER TABLE test_tab_n1 SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
PREHOOK: type: ALTERTABLE_SERIALIZER
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
+PREHOOK: Input: default@test_tab_n1
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: ALTER TABLE test_tab_n1 SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'
POSTHOOK: type: ALTERTABLE_SERIALIZER
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows)
+POSTHOOK: Input: default@test_tab_n1
+POSTHOOK: Output: default@test_tab_n1
+PREHOOK: query: INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows)
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_tab_n1 SELECT * FROM src tablesample (10 rows)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_tab
-POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESC FORMATTED test_tab
+POSTHOOK: Output: default@test_tab_n1
+POSTHOOK: Lineage: test_tab_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_tab_n1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DESC FORMATTED test_tab_n1
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: DESC FORMATTED test_tab
+PREHOOK: Input: default@test_tab_n1
+POSTHOOK: query: DESC FORMATTED test_tab_n1
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
# col_name data_type comment
key string
value string
@@ -315,13 +315,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: SELECT * FROM test_tab ORDER BY value
+PREHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
+PREHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab ORDER BY value
+POSTHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
165 val_165
238 val_238
@@ -333,20 +333,20 @@ POSTHOOK: Input: default@test_tab
484 val_484
86 val_86
98 val_98
-PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key)
+PREHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key)
PREHOOK: type: TRUNCATETABLE
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key)
+PREHOOK: Input: default@test_tab_n1
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key)
POSTHOOK: type: TRUNCATETABLE
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: DESC FORMATTED test_tab
+POSTHOOK: Input: default@test_tab_n1
+POSTHOOK: Output: default@test_tab_n1
+PREHOOK: query: DESC FORMATTED test_tab_n1
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: DESC FORMATTED test_tab
+PREHOOK: Input: default@test_tab_n1
+POSTHOOK: query: DESC FORMATTED test_tab_n1
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
# col_name data_type comment
key string
value string
@@ -376,13 +376,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: SELECT * FROM test_tab ORDER BY value
+PREHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
+PREHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab ORDER BY value
+POSTHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
NULL val_165
NULL val_238
@@ -394,20 +394,20 @@ NULL val_409
NULL val_484
NULL val_86
NULL val_98
-PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value)
+PREHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key, value)
PREHOOK: type: TRUNCATETABLE
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value)
+PREHOOK: Input: default@test_tab_n1
+PREHOOK: Output: default@test_tab_n1
+POSTHOOK: query: TRUNCATE TABLE test_tab_n1 COLUMNS (key, value)
POSTHOOK: type: TRUNCATETABLE
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: DESC FORMATTED test_tab
+POSTHOOK: Input: default@test_tab_n1
+POSTHOOK: Output: default@test_tab_n1
+PREHOOK: query: DESC FORMATTED test_tab_n1
PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@test_tab
-POSTHOOK: query: DESC FORMATTED test_tab
+PREHOOK: Input: default@test_tab_n1
+POSTHOOK: query: DESC FORMATTED test_tab_n1
POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
# col_name data_type comment
key string
value string
@@ -437,13 +437,13 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: SELECT * FROM test_tab ORDER BY value
+PREHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
+PREHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab ORDER BY value
+POSTHOOK: query: SELECT * FROM test_tab_n1 ORDER BY value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
+POSTHOOK: Input: default@test_tab_n1
#### A masked pattern was here ####
NULL NULL
NULL NULL
diff --git a/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out b/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out
index c9807b3235..bc355544a3 100644
--- a/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out
+++ b/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out
@@ -1,57 +1,57 @@
-PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE
+PREHOOK: query: CREATE TABLE test_tab_n3 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE
+PREHOOK: Output: default@test_tab_n3
+POSTHOOK: query: CREATE TABLE test_tab_n3 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: ALTER TABLE test_tab
+POSTHOOK: Output: default@test_tab_n3
+PREHOOK: query: ALTER TABLE test_tab_n3
SKEWED BY (key) ON ("484") STORED AS DIRECTORIES
PREHOOK: type: ALTERTABLE_SKEWED
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab
-POSTHOOK: query: ALTER TABLE test_tab
+PREHOOK: Input: default@test_tab_n3
+PREHOOK: Output: default@test_tab_n3
+POSTHOOK: query: ALTER TABLE test_tab_n3
SKEWED BY (key) ON ("484") STORED AS DIRECTORIES
POSTHOOK: type: ALTERTABLE_SKEWED
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab
-PREHOOK: query: INSERT OVERWRITE TABLE test_tab PARTITION (part = '1') SELECT * FROM src
+POSTHOOK: Input: default@test_tab_n3
+POSTHOOK: Output: default@test_tab_n3
+PREHOOK: query: INSERT OVERWRITE TABLE test_tab_n3 PARTITION (part = '1') SELECT * FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@test_tab@part=1
-POSTHOOK: query: INSERT OVERWRITE TABLE test_tab PARTITION (part = '1') SELECT * FROM src
+PREHOOK: Output: default@test_tab_n3@part=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_tab_n3 PARTITION (part = '1') SELECT * FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@test_tab@part=1
-POSTHOOK: Lineage: test_tab PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: test_tab PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM test_tab WHERE part = '1' AND key = '0'
+POSTHOOK: Output: default@test_tab_n3@part=1
+POSTHOOK: Lineage: test_tab_n3 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_tab_n3 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0'
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
-PREHOOK: Input: default@test_tab@part=1
+PREHOOK: Input: default@test_tab_n3
+PREHOOK: Input: default@test_tab_n3@part=1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab WHERE part = '1' AND key = '0'
+POSTHOOK: query: SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Input: default@test_tab@part=1
+POSTHOOK: Input: default@test_tab_n3
+POSTHOOK: Input: default@test_tab_n3@part=1
#### A masked pattern was here ####
0 val_0 1
0 val_0 1
0 val_0 1
-PREHOOK: query: TRUNCATE TABLE test_tab PARTITION (part ='1') COLUMNS (value)
+PREHOOK: query: TRUNCATE TABLE test_tab_n3 PARTITION (part ='1') COLUMNS (value)
PREHOOK: type: TRUNCATETABLE
-PREHOOK: Input: default@test_tab
-PREHOOK: Output: default@test_tab@part=1
-POSTHOOK: query: TRUNCATE TABLE test_tab PARTITION (part ='1') COLUMNS (value)
+PREHOOK: Input: default@test_tab_n3
+PREHOOK: Output: default@test_tab_n3@part=1
+POSTHOOK: query: TRUNCATE TABLE test_tab_n3 PARTITION (part ='1') COLUMNS (value)
POSTHOOK: type: TRUNCATETABLE
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Output: default@test_tab@part=1
-PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '484'
+POSTHOOK: Input: default@test_tab_n3
+POSTHOOK: Output: default@test_tab_n3@part=1
+PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '484'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '484'
+POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '484'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -73,13 +73,13 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.test_tab
+ name default.test_tab_n3
numFiles 2
numRows 500
partition_columns part
partition_columns.types string
rawDataSize 4812
- serialization.ddl struct test_tab { string key, string value}
+ serialization.ddl struct test_tab_n3 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
totalSize 1761
@@ -96,19 +96,19 @@ STAGE PLANS:
columns.comments
columns.types string:string
#### A masked pattern was here ####
- name default.test_tab
+ name default.test_tab_n3
partition_columns part
partition_columns.types string
- serialization.ddl struct test_tab { string key, string value}
+ serialization.ddl struct test_tab_n3 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- name: default.test_tab
- name: default.test_tab
+ name: default.test_tab_n3
+ name: default.test_tab_n3
Processor Tree:
TableScan
- alias: test_tab
+ alias: test_tab_n3
Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -121,20 +121,20 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
ListSink
-PREHOOK: query: SELECT * FROM test_tab WHERE part = '1' AND key = '484'
+PREHOOK: query: SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '484'
PREHOOK: type: QUERY
-PREHOOK: Input: default@test_tab
-PREHOOK: Input: default@test_tab@part=1
+PREHOOK: Input: default@test_tab_n3
+PREHOOK: Input: default@test_tab_n3@part=1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM test_tab WHERE part = '1' AND key = '484'
+POSTHOOK: query: SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '484'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_tab
-POSTHOOK: Input: default@test_tab@part=1
+POSTHOOK: Input: default@test_tab_n3
+POSTHOOK: Input: default@test_tab_n3@part=1 #### A masked pattern was here #### 484 NULL 1 -PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '0' +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '0' +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -156,13 +156,13 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.test_tab + name default.test_tab_n3 numFiles 2 numRows 500 partition_columns part partition_columns.types string rawDataSize 4812 - serialization.ddl struct test_tab { string key, string value} + serialization.ddl struct test_tab_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe totalSize 1761 @@ -179,19 +179,19 @@ STAGE PLANS: columns.comments columns.types string:string #### A masked pattern was here #### - name default.test_tab + name default.test_tab_n3 partition_columns part partition_columns.types string - serialization.ddl struct test_tab { string key, string value} + serialization.ddl struct test_tab_n3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.test_tab - name: default.test_tab + name: default.test_tab_n3 + name: default.test_tab_n3 Processor Tree: TableScan - alias: test_tab + alias: test_tab_n3 Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -204,15 +204,15 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: SELECT * FROM test_tab WHERE part = '1' AND key = '0' +PREHOOK: query: SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0' PREHOOK: type: QUERY -PREHOOK: Input: default@test_tab -PREHOOK: Input: default@test_tab@part=1 +PREHOOK: Input: default@test_tab_n3 +PREHOOK: Input: default@test_tab_n3@part=1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM test_tab WHERE part = '1' AND key = '0' +POSTHOOK: query: SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0' POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_tab -POSTHOOK: Input: default@test_tab@part=1 +POSTHOOK: Input: default@test_tab_n3 +POSTHOOK: Input: default@test_tab_n3@part=1 #### A masked pattern was here #### 0 NULL 1 0 NULL 1 diff --git a/ql/src/test/results/clientpositive/truncate_column_merge.q.out b/ql/src/test/results/clientpositive/truncate_column_merge.q.out index 8ff740de52..01d2d1089c 100644 --- a/ql/src/test/results/clientpositive/truncate_column_merge.q.out +++ b/ql/src/test/results/clientpositive/truncate_column_merge.q.out @@ -1,63 +1,63 @@ -PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab_n2 (key STRING, value STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_tab -POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +PREHOOK: Output: default@test_tab_n2 +POSTHOOK: query: CREATE TABLE test_tab_n2 (key STRING, value STRING) STORED AS RCFILE 
POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_tab -PREHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (5 rows) +POSTHOOK: Output: default@test_tab_n2 +PREHOOK: query: INSERT OVERWRITE TABLE test_tab_n2 SELECT * FROM src tablesample (5 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_tab -POSTHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (5 rows) +PREHOOK: Output: default@test_tab_n2 +POSTHOOK: query: INSERT OVERWRITE TABLE test_tab_n2 SELECT * FROM src tablesample (5 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_tab -POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: INSERT INTO TABLE test_tab SELECT * FROM src tablesample (5 rows) +POSTHOOK: Output: default@test_tab_n2 +POSTHOOK: Lineage: test_tab_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_tab_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: INSERT INTO TABLE test_tab_n2 SELECT * FROM src tablesample (5 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_tab -POSTHOOK: query: INSERT INTO TABLE test_tab SELECT * FROM src tablesample (5 rows) +PREHOOK: Output: default@test_tab_n2 +POSTHOOK: query: INSERT INTO TABLE test_tab_n2 SELECT * FROM src tablesample (5 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_tab -POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab +POSTHOOK: Output: default@test_tab_n2 +POSTHOOK: Lineage: test_tab_n2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_tab_n2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test_tab +PREHOOK: Input: default@test_tab_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab +POSTHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_tab +POSTHOOK: Input: default@test_tab_n2 #### A masked pattern was here #### 2 -PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key) +PREHOOK: query: TRUNCATE TABLE test_tab_n2 COLUMNS (key) PREHOOK: type: TRUNCATETABLE -PREHOOK: Input: default@test_tab -PREHOOK: Output: default@test_tab -POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key) +PREHOOK: Input: default@test_tab_n2 +PREHOOK: Output: default@test_tab_n2 +POSTHOOK: query: TRUNCATE TABLE test_tab_n2 COLUMNS (key) POSTHOOK: type: TRUNCATETABLE -POSTHOOK: Input: default@test_tab -POSTHOOK: Output: default@test_tab -PREHOOK: query: ALTER TABLE test_tab CONCATENATE +POSTHOOK: Input: default@test_tab_n2 +POSTHOOK: Output: default@test_tab_n2 +PREHOOK: query: ALTER TABLE test_tab_n2 CONCATENATE PREHOOK: type: ALTER_TABLE_MERGE -PREHOOK: Input: default@test_tab -PREHOOK: Output: default@test_tab -POSTHOOK: 
query: ALTER TABLE test_tab CONCATENATE +PREHOOK: Input: default@test_tab_n2 +PREHOOK: Output: default@test_tab_n2 +POSTHOOK: query: ALTER TABLE test_tab_n2 CONCATENATE POSTHOOK: type: ALTER_TABLE_MERGE -POSTHOOK: Input: default@test_tab -POSTHOOK: Output: default@test_tab -PREHOOK: query: SELECT * FROM test_tab ORDER BY value +POSTHOOK: Input: default@test_tab_n2 +POSTHOOK: Output: default@test_tab_n2 +PREHOOK: query: SELECT * FROM test_tab_n2 ORDER BY value PREHOOK: type: QUERY -PREHOOK: Input: default@test_tab +PREHOOK: Input: default@test_tab_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM test_tab ORDER BY value +POSTHOOK: query: SELECT * FROM test_tab_n2 ORDER BY value POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_tab +POSTHOOK: Input: default@test_tab_n2 #### A masked pattern was here #### NULL val_165 NULL val_165 @@ -69,12 +69,12 @@ NULL val_311 NULL val_311 NULL val_86 NULL val_86 -PREHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab +PREHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test_tab +PREHOOK: Input: default@test_tab_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab +POSTHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_tab +POSTHOOK: Input: default@test_tab_n2 #### A masked pattern was here #### 1 diff --git a/ql/src/test/results/clientpositive/tunable_ndv.q.out b/ql/src/test/results/clientpositive/tunable_ndv.q.out index 551591e60d..40f8c5119b 100644 --- a/ql/src/test/results/clientpositive/tunable_ndv.q.out +++ b/ql/src/test/results/clientpositive/tunable_ndv.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table if not exists ext_loc ( +PREHOOK: query: create table if not exists ext_loc_n2 ( state string, locid int, zip int, @@ -6,8 +6,8 @@ PREHOOK: query: create table if not exists ext_loc ( ) row format delimited fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@ext_loc -POSTHOOK: query: create table if not exists ext_loc ( +PREHOOK: Output: default@ext_loc_n2 +POSTHOOK: query: create table if not exists ext_loc_n2 ( state string, locid int, zip int, @@ -15,70 +15,70 @@ POSTHOOK: query: create table if not exists ext_loc ( ) row format delimited fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@ext_loc -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc +POSTHOOK: Output: default@ext_loc_n2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@ext_loc -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc +PREHOOK: Output: default@ext_loc_n2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@ext_loc -PREHOOK: query: create table if not exists loc_orc_1d ( +POSTHOOK: Output: default@ext_loc_n2 +PREHOOK: query: create table if not exists loc_orc_1d_n2 ( state string, locid int, zip int ) partitioned by(year string) stored as orc PREHOOK: type: 
CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@loc_orc_1d -POSTHOOK: query: create table if not exists loc_orc_1d ( +PREHOOK: Output: default@loc_orc_1d_n2 +POSTHOOK: query: create table if not exists loc_orc_1d_n2 ( state string, locid int, zip int ) partitioned by(year string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@loc_orc_1d -PREHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc +POSTHOOK: Output: default@loc_orc_1d_n2 +PREHOOK: query: insert overwrite table loc_orc_1d_n2 partition(year) select * from ext_loc_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@ext_loc -PREHOOK: Output: default@loc_orc_1d -POSTHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc +PREHOOK: Input: default@ext_loc_n2 +PREHOOK: Output: default@loc_orc_1d_n2 +POSTHOOK: query: insert overwrite table loc_orc_1d_n2 partition(year) select * from ext_loc_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@ext_loc -POSTHOOK: Output: default@loc_orc_1d@year=2000 -POSTHOOK: Output: default@loc_orc_1d@year=2001 -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] -PREHOOK: query: analyze table loc_orc_1d compute statistics for columns state,locid +POSTHOOK: Input: default@ext_loc_n2 +POSTHOOK: Output: default@loc_orc_1d_n2@year=2000 +POSTHOOK: Output: default@loc_orc_1d_n2@year=2001 +POSTHOOK: Lineage: loc_orc_1d_n2 PARTITION(year=2000).locid SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n2 PARTITION(year=2000).state SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n2 PARTITION(year=2000).zip SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:zip, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n2 PARTITION(year=2001).locid SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n2 PARTITION(year=2001).state SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d_n2 PARTITION(year=2001).zip SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:zip, type:int, comment:null), ] +PREHOOK: query: analyze table loc_orc_1d_n2 compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_1d -PREHOOK: Input: default@loc_orc_1d@year=2000 -PREHOOK: Input: default@loc_orc_1d@year=2001 -PREHOOK: Output: default@loc_orc_1d -PREHOOK: Output: default@loc_orc_1d@year=2000 -PREHOOK: Output: default@loc_orc_1d@year=2001 +PREHOOK: Input: default@loc_orc_1d_n2 +PREHOOK: Input: default@loc_orc_1d_n2@year=2000 +PREHOOK: Input: default@loc_orc_1d_n2@year=2001 +PREHOOK: 
Output: default@loc_orc_1d_n2 +PREHOOK: Output: default@loc_orc_1d_n2@year=2000 +PREHOOK: Output: default@loc_orc_1d_n2@year=2001 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_1d compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_1d_n2 compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_1d -POSTHOOK: Input: default@loc_orc_1d@year=2000 -POSTHOOK: Input: default@loc_orc_1d@year=2001 -POSTHOOK: Output: default@loc_orc_1d -POSTHOOK: Output: default@loc_orc_1d@year=2000 -POSTHOOK: Output: default@loc_orc_1d@year=2001 +POSTHOOK: Input: default@loc_orc_1d_n2 +POSTHOOK: Input: default@loc_orc_1d_n2@year=2000 +POSTHOOK: Input: default@loc_orc_1d_n2@year=2001 +POSTHOOK: Output: default@loc_orc_1d_n2 +POSTHOOK: Output: default@loc_orc_1d_n2@year=2000 +POSTHOOK: Output: default@loc_orc_1d_n2@year=2001 #### A masked pattern was here #### -PREHOOK: query: describe formatted loc_orc_1d partition(year=2000) locid +PREHOOK: query: describe formatted loc_orc_1d_n2 partition(year=2000) locid PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_1d -POSTHOOK: query: describe formatted loc_orc_1d partition(year=2000) locid +PREHOOK: Input: default@loc_orc_1d_n2 +POSTHOOK: query: describe formatted loc_orc_1d_n2 partition(year=2000) locid POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d_n2 col_name locid data_type int min 1 @@ -91,12 +91,12 @@ num_trues num_falses bitVector HL comment from deserializer -PREHOOK: query: describe formatted loc_orc_1d partition(year=2001) locid +PREHOOK: query: describe formatted loc_orc_1d_n2 partition(year=2001) locid PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_1d -POSTHOOK: query: describe formatted loc_orc_1d partition(year=2001) locid +PREHOOK: Input: default@loc_orc_1d_n2 +POSTHOOK: query: describe formatted loc_orc_1d_n2 partition(year=2001) locid POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d_n2 col_name locid data_type int min 1 @@ -109,12 +109,12 @@ num_trues num_falses bitVector HL comment from deserializer -PREHOOK: query: describe formatted loc_orc_1d locid +PREHOOK: query: describe formatted loc_orc_1d_n2 locid PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_1d -POSTHOOK: query: describe formatted loc_orc_1d locid +PREHOOK: Input: default@loc_orc_1d_n2 +POSTHOOK: query: describe formatted loc_orc_1d_n2 locid POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d_n2 col_name locid data_type int min 1 @@ -128,12 +128,12 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"locid\":\"true\"}} -PREHOOK: query: describe formatted loc_orc_1d locid +PREHOOK: query: describe formatted loc_orc_1d_n2 locid PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_1d -POSTHOOK: query: describe formatted loc_orc_1d locid +PREHOOK: Input: default@loc_orc_1d_n2 +POSTHOOK: query: describe formatted loc_orc_1d_n2 locid POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d_n2 col_name locid data_type int min 1 @@ -147,12 +147,12 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"locid\":\"true\"}} -PREHOOK: query: describe formatted loc_orc_1d locid +PREHOOK: query: describe formatted loc_orc_1d_n2 locid PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_1d 
-POSTHOOK: query: describe formatted loc_orc_1d locid +PREHOOK: Input: default@loc_orc_1d_n2 +POSTHOOK: query: describe formatted loc_orc_1d_n2 locid POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d_n2 col_name locid data_type int min 1 @@ -166,101 +166,101 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"locid\":\"true\"}} -PREHOOK: query: create table if not exists loc_orc_2d ( +PREHOOK: query: create table if not exists loc_orc_2d_n2 ( state string, locid int ) partitioned by(zip int, year string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@loc_orc_2d -POSTHOOK: query: create table if not exists loc_orc_2d ( +PREHOOK: Output: default@loc_orc_2d_n2 +POSTHOOK: query: create table if not exists loc_orc_2d_n2 ( state string, locid int ) partitioned by(zip int, year string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@loc_orc_2d -PREHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc +POSTHOOK: Output: default@loc_orc_2d_n2 +PREHOOK: query: insert overwrite table loc_orc_2d_n2 partition(zip, year) select * from ext_loc_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@ext_loc -PREHOOK: Output: default@loc_orc_2d -POSTHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc +PREHOOK: Input: default@ext_loc_n2 +PREHOOK: Output: default@loc_orc_2d_n2 +POSTHOOK: query: insert overwrite table loc_orc_2d_n2 partition(zip, year) select * from ext_loc_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@ext_loc -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2000 -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001 -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2000 -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2001 -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] -POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] -PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2000') compute statistics for columns state,locid +POSTHOOK: Input: default@ext_loc_n2 +POSTHOOK: Output: default@loc_orc_2d_n2@zip=94086/year=2000 +POSTHOOK: Output: default@loc_orc_2d_n2@zip=94086/year=2001 +POSTHOOK: Output: default@loc_orc_2d_n2@zip=94087/year=2000 +POSTHOOK: Output: default@loc_orc_2d_n2@zip=94087/year=2001 +POSTHOOK: Lineage: loc_orc_2d_n2 
PARTITION(zip=94086,year=2000).locid SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n2 PARTITION(zip=94086,year=2000).state SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n2 PARTITION(zip=94086,year=2001).locid SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n2 PARTITION(zip=94086,year=2001).state SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n2 PARTITION(zip=94087,year=2000).locid SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n2 PARTITION(zip=94087,year=2000).state SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n2 PARTITION(zip=94087,year=2001).locid SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d_n2 PARTITION(zip=94087,year=2001).state SIMPLE [(ext_loc_n2)ext_loc_n2.FieldSchema(name:state, type:string, comment:null), ] +PREHOOK: query: analyze table loc_orc_2d_n2 partition(zip=94086, year='2000') compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_2d -PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2000 -PREHOOK: Output: default@loc_orc_2d -PREHOOK: Output: default@loc_orc_2d@zip=94086/year=2000 +PREHOOK: Input: default@loc_orc_2d_n2 +PREHOOK: Input: default@loc_orc_2d_n2@zip=94086/year=2000 +PREHOOK: Output: default@loc_orc_2d_n2 +PREHOOK: Output: default@loc_orc_2d_n2@zip=94086/year=2000 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2000') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_2d_n2 partition(zip=94086, year='2000') compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_2d -POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2000 -POSTHOOK: Output: default@loc_orc_2d -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2000 +POSTHOOK: Input: default@loc_orc_2d_n2 +POSTHOOK: Input: default@loc_orc_2d_n2@zip=94086/year=2000 +POSTHOOK: Output: default@loc_orc_2d_n2 +POSTHOOK: Output: default@loc_orc_2d_n2@zip=94086/year=2000 #### A masked pattern was here #### -PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid +PREHOOK: query: analyze table loc_orc_2d_n2 partition(zip=94087, year='2000') compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_2d -PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2000 -PREHOOK: Output: default@loc_orc_2d -PREHOOK: Output: default@loc_orc_2d@zip=94087/year=2000 +PREHOOK: Input: default@loc_orc_2d_n2 +PREHOOK: Input: default@loc_orc_2d_n2@zip=94087/year=2000 +PREHOOK: Output: default@loc_orc_2d_n2 +PREHOOK: Output: default@loc_orc_2d_n2@zip=94087/year=2000 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_2d_n2 partition(zip=94087, year='2000') compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_2d -POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2000 -POSTHOOK: Output: 
default@loc_orc_2d -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2000 +POSTHOOK: Input: default@loc_orc_2d_n2 +POSTHOOK: Input: default@loc_orc_2d_n2@zip=94087/year=2000 +POSTHOOK: Output: default@loc_orc_2d_n2 +POSTHOOK: Output: default@loc_orc_2d_n2@zip=94087/year=2000 #### A masked pattern was here #### -PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid +PREHOOK: query: analyze table loc_orc_2d_n2 partition(zip=94086, year='2001') compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_2d -PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2001 -PREHOOK: Output: default@loc_orc_2d -PREHOOK: Output: default@loc_orc_2d@zip=94086/year=2001 +PREHOOK: Input: default@loc_orc_2d_n2 +PREHOOK: Input: default@loc_orc_2d_n2@zip=94086/year=2001 +PREHOOK: Output: default@loc_orc_2d_n2 +PREHOOK: Output: default@loc_orc_2d_n2@zip=94086/year=2001 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_2d_n2 partition(zip=94086, year='2001') compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_2d -POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2001 -POSTHOOK: Output: default@loc_orc_2d -POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001 +POSTHOOK: Input: default@loc_orc_2d_n2 +POSTHOOK: Input: default@loc_orc_2d_n2@zip=94086/year=2001 +POSTHOOK: Output: default@loc_orc_2d_n2 +POSTHOOK: Output: default@loc_orc_2d_n2@zip=94086/year=2001 #### A masked pattern was here #### -PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid +PREHOOK: query: analyze table loc_orc_2d_n2 partition(zip=94087, year='2001') compute statistics for columns state,locid PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@loc_orc_2d -PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2001 -PREHOOK: Output: default@loc_orc_2d -PREHOOK: Output: default@loc_orc_2d@zip=94087/year=2001 +PREHOOK: Input: default@loc_orc_2d_n2 +PREHOOK: Input: default@loc_orc_2d_n2@zip=94087/year=2001 +PREHOOK: Output: default@loc_orc_2d_n2 +PREHOOK: Output: default@loc_orc_2d_n2@zip=94087/year=2001 #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc_2d_n2 partition(zip=94087, year='2001') compute statistics for columns state,locid POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@loc_orc_2d -POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2001 -POSTHOOK: Output: default@loc_orc_2d -POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2001 +POSTHOOK: Input: default@loc_orc_2d_n2 +POSTHOOK: Input: default@loc_orc_2d_n2@zip=94087/year=2001 +POSTHOOK: Output: default@loc_orc_2d_n2 +POSTHOOK: Output: default@loc_orc_2d_n2@zip=94087/year=2001 #### A masked pattern was here #### -PREHOOK: query: describe formatted loc_orc_2d locid +PREHOOK: query: describe formatted loc_orc_2d_n2 locid PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_2d -POSTHOOK: query: describe formatted loc_orc_2d locid +PREHOOK: Input: default@loc_orc_2d_n2 +POSTHOOK: query: describe formatted loc_orc_2d_n2 locid POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d_n2 col_name locid data_type int min 1 
@@ -274,12 +274,12 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"locid\":\"true\"}} -PREHOOK: query: describe formatted loc_orc_2d locid +PREHOOK: query: describe formatted loc_orc_2d_n2 locid PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_2d -POSTHOOK: query: describe formatted loc_orc_2d locid +PREHOOK: Input: default@loc_orc_2d_n2 +POSTHOOK: query: describe formatted loc_orc_2d_n2 locid POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d_n2 col_name locid data_type int min 1 @@ -293,12 +293,12 @@ num_falses bitVector HL comment from deserializer COLUMN_STATS_ACCURATE {\"COLUMN_STATS\":{\"locid\":\"true\"}} -PREHOOK: query: describe formatted loc_orc_2d locid +PREHOOK: query: describe formatted loc_orc_2d_n2 locid PREHOOK: type: DESCTABLE -PREHOOK: Input: default@loc_orc_2d -POSTHOOK: query: describe formatted loc_orc_2d locid +PREHOOK: Input: default@loc_orc_2d_n2 +POSTHOOK: query: describe formatted loc_orc_2d_n2 locid POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d_n2 col_name locid data_type int min 1 diff --git a/ql/src/test/results/clientpositive/type_change_test_int.q.out b/ql/src/test/results/clientpositive/type_change_test_int.q.out index cd4bcfd5a6..1a9b49a8af 100644 --- a/ql/src/test/results/clientpositive/type_change_test_int.q.out +++ b/ql/src/test/results/clientpositive/type_change_test_int.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table if exists testAltCol +PREHOOK: query: drop table if exists testAltCol_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltCol +POSTHOOK: query: drop table if exists testAltCol_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltCol +PREHOOK: query: create table testAltCol_n1 (cId TINYINT, cBigInt BIGINT, cInt INT, @@ -10,8 +10,8 @@ PREHOOK: query: create table testAltCol cTinyint TINYINT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testAltCol -POSTHOOK: query: create table testAltCol +PREHOOK: Output: default@testAltCol_n1 +POSTHOOK: query: create table testAltCol_n1 (cId TINYINT, cBigInt BIGINT, cInt INT, @@ -19,8 +19,8 @@ POSTHOOK: query: create table testAltCol cTinyint TINYINT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltCol -PREHOOK: query: insert into testAltCol values +POSTHOOK: Output: default@testAltCol_n1 +PREHOOK: query: insert into testAltCol_n1 values (1, 1234567890123456789, 1234567890, @@ -28,8 +28,8 @@ PREHOOK: query: insert into testAltCol values 123) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@testaltcol -POSTHOOK: query: insert into testAltCol values +PREHOOK: Output: default@testaltcol_n1 +POSTHOOK: query: insert into testAltCol_n1 values (1, 1234567890123456789, 1234567890, @@ -37,13 +37,13 @@ POSTHOOK: query: insert into testAltCol values 123) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@testaltcol -POSTHOOK: Lineage: testaltcol.cbigint SCRIPT [] -POSTHOOK: Lineage: testaltcol.cid SCRIPT [] -POSTHOOK: Lineage: testaltcol.cint SCRIPT [] -POSTHOOK: Lineage: testaltcol.csmallint SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctinyint SCRIPT [] -PREHOOK: query: insert into testAltCol values +POSTHOOK: Output: default@testaltcol_n1 +POSTHOOK: Lineage: testaltcol_n1.cbigint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.cid SCRIPT [] +POSTHOOK: 
Lineage: testaltcol_n1.cint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.csmallint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.ctinyint SCRIPT [] +PREHOOK: query: insert into testAltCol_n1 values (2, 1, 2, @@ -51,8 +51,8 @@ PREHOOK: query: insert into testAltCol values 4) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@testaltcol -POSTHOOK: query: insert into testAltCol values +PREHOOK: Output: default@testaltcol_n1 +POSTHOOK: query: insert into testAltCol_n1 values (2, 1, 2, @@ -60,13 +60,13 @@ POSTHOOK: query: insert into testAltCol values 4) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@testaltcol -POSTHOOK: Lineage: testaltcol.cbigint SCRIPT [] -POSTHOOK: Lineage: testaltcol.cid SCRIPT [] -POSTHOOK: Lineage: testaltcol.cint SCRIPT [] -POSTHOOK: Lineage: testaltcol.csmallint SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctinyint SCRIPT [] -PREHOOK: query: insert into testAltCol values +POSTHOOK: Output: default@testaltcol_n1 +POSTHOOK: Lineage: testaltcol_n1.cbigint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.cid SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.cint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.csmallint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.ctinyint SCRIPT [] +PREHOOK: query: insert into testAltCol_n1 values (3, 1234567890123456789, 1234567890, @@ -74,8 +74,8 @@ PREHOOK: query: insert into testAltCol values 123) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@testaltcol -POSTHOOK: query: insert into testAltCol values +PREHOOK: Output: default@testaltcol_n1 +POSTHOOK: query: insert into testAltCol_n1 values (3, 1234567890123456789, 1234567890, @@ -83,13 +83,13 @@ POSTHOOK: query: insert into testAltCol values 123) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@testaltcol -POSTHOOK: Lineage: testaltcol.cbigint SCRIPT [] -POSTHOOK: Lineage: testaltcol.cid SCRIPT [] -POSTHOOK: Lineage: testaltcol.cint SCRIPT [] -POSTHOOK: Lineage: testaltcol.csmallint SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctinyint SCRIPT [] -PREHOOK: query: insert into testAltCol values +POSTHOOK: Output: default@testaltcol_n1 +POSTHOOK: Lineage: testaltcol_n1.cbigint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.cid SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.cint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.csmallint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.ctinyint SCRIPT [] +PREHOOK: query: insert into testAltCol_n1 values (4, -1234567890123456789, -1234567890, @@ -97,8 +97,8 @@ PREHOOK: query: insert into testAltCol values -123) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@testaltcol -POSTHOOK: query: insert into testAltCol values +PREHOOK: Output: default@testaltcol_n1 +POSTHOOK: query: insert into testAltCol_n1 values (4, -1234567890123456789, -1234567890, @@ -106,2237 +106,2237 @@ POSTHOOK: query: insert into testAltCol values -123) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@testaltcol -POSTHOOK: Lineage: testaltcol.cbigint SCRIPT [] -POSTHOOK: Lineage: testaltcol.cid SCRIPT [] -POSTHOOK: Lineage: testaltcol.cint SCRIPT [] -POSTHOOK: Lineage: testaltcol.csmallint SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctinyint SCRIPT [] -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol order by cId +POSTHOOK: Output: default@testaltcol_n1 +POSTHOOK: Lineage: testaltcol_n1.cbigint SCRIPT [] +POSTHOOK: Lineage: 
testaltcol_n1.cid SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.cint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.csmallint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n1.ctinyint SCRIPT [] +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol_n1 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n1 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol_n1 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n1 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: drop table if exists testAltColT +PREHOOK: query: drop table if exists testAltColT_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltColT +POSTHOOK: query: drop table if exists testAltColT_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltColT stored as textfile as select * from testAltCol +PREHOOK: query: create table testAltColT_n1 stored as textfile as select * from testAltCol_n1 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n1 PREHOOK: Output: database:default -PREHOOK: Output: default@testAltColT -POSTHOOK: query: create table testAltColT stored as textfile as select * from testAltCol +PREHOOK: Output: default@testAltColT_n1 +POSTHOOK: query: create table testAltColT_n1 stored as textfile as select * from testAltCol_n1 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n1 POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltColT -POSTHOOK: Lineage: testaltcolt.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: testaltcolt.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ] -POSTHOOK: Lineage: testaltcolt.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: testaltcolt.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: testaltcolt.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: Output: default@testAltColT_n1 +POSTHOOK: Lineage: testaltcolt_n1.cbigint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n1.cid SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cid, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n1.cint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n1.csmallint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n1.ctinyint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: 
default@testaltcolt_n1 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n1 +PREHOOK: Output: default@testaltcolt_n1 +POSTHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n1 +POSTHOOK: Output: default@testaltcolt_n1 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n1 +PREHOOK: Output: default@testaltcolt_n1 +POSTHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n1 +POSTHOOK: Output: default@testaltcolt_n1 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### 1 NULL 1234567890 12345 
123 2 1 2 3 4 3 NULL 1234567890 12345 123 4 NULL -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n1 +PREHOOK: Output: default@testaltcolt_n1 +POSTHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n1 +POSTHOOK: Output: default@testaltcolt_n1 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### 1 NULL NULL 12345 123 2 1 2 3 4 3 NULL NULL 12345 123 4 NULL NULL -12345 -123 -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n1 +PREHOOK: Output: default@testaltcolt_n1 +POSTHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n1 +POSTHOOK: Output: default@testaltcolt_n1 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n1 #### A masked pattern was here #### 1 NULL NULL NULL 123 2 1 2 3 4 3 NULL NULL NULL 123 4 NULL NULL NULL -123 -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n1 replace columns (cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n1 +PREHOOK: Output: 
default@testaltcolt_n1
+POSTHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolt
-POSTHOOK: Output: default@testaltcolt
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: Input: default@testaltcolt_n1
+POSTHOOK: Output: default@testaltcolt_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolt
+PREHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolt
+POSTHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
1 1.23456794E18 1.23456794E9 12345.0 123.0
2 1.0 2.0 3.0 4.0
3 1.23456794E18 1.23456794E9 12345.0 123.0
4 -1.23456794E18 -1.23456794E9 -12345.0 -123.0
-PREHOOK: query: alter table testAltColT replace columns
+PREHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolt
-PREHOOK: Output: default@testaltcolt
-POSTHOOK: query: alter table testAltColT replace columns
+PREHOOK: Input: default@testaltcolt_n1
+PREHOOK: Output: default@testaltcolt_n1
+POSTHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolt
-POSTHOOK: Output: default@testaltcolt
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: Input: default@testaltcolt_n1
+POSTHOOK: Output: default@testaltcolt_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolt
+PREHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolt
+POSTHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
1 1.23456789012345677E18 1.23456789E9 12345.0 123.0
2 1.0 2.0 3.0 4.0
3 1.23456789012345677E18 1.23456789E9 12345.0 123.0
4 -1.23456789012345677E18 -1.23456789E9 -12345.0 -123.0
-PREHOOK: query: alter table testAltColT replace columns
+PREHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolt
-PREHOOK: Output: default@testaltcolt
-POSTHOOK: query: alter table testAltColT replace columns
+PREHOOK: Input: default@testaltcolt_n1
+PREHOOK: Output: default@testaltcolt_n1
+POSTHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolt
-POSTHOOK: Output: default@testaltcolt
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: Input: default@testaltcolt_n1
+POSTHOOK: Output: default@testaltcolt_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolt
+PREHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolt
+POSTHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
1 1234567890123456789.00 1234567890.00 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 1234567890123456789.00 1234567890.00 12345.00 123.00
4 -1234567890123456789.00 -1234567890.00 -12345.00 -123.00
-PREHOOK: query: alter table testAltColT replace columns
+PREHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolt
-PREHOOK: Output: default@testaltcolt
-POSTHOOK: query: alter table testAltColT replace columns
+PREHOOK: Input: default@testaltcolt_n1
+PREHOOK: Output: default@testaltcolt_n1
+POSTHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolt
-POSTHOOK: Output: default@testaltcolt
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: Input: default@testaltcolt_n1
+POSTHOOK: Output: default@testaltcolt_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolt
+PREHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolt
+POSTHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
1 NULL 1234567890.00 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 NULL 1234567890.00 12345.00 123.00
4 NULL -1234567890.00 -12345.00 -123.00
-PREHOOK: query: alter table testAltColT replace columns
+PREHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolt
-PREHOOK: Output: default@testaltcolt
-POSTHOOK: query: alter table testAltColT replace columns
+PREHOOK: Input: default@testaltcolt_n1
+PREHOOK: Output: default@testaltcolt_n1
+POSTHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolt
-POSTHOOK: Output: default@testaltcolt
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: Input: default@testaltcolt_n1
+POSTHOOK: Output: default@testaltcolt_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolt
+PREHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolt
+POSTHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
1 NULL NULL 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 NULL NULL 12345.00 123.00
4 NULL NULL -12345.00 -123.00
-PREHOOK: query: alter table testAltColT replace columns
+PREHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolt
-PREHOOK: Output: default@testaltcolt
-POSTHOOK: query: alter table testAltColT replace columns
+PREHOOK: Input: default@testaltcolt_n1
+PREHOOK: Output: default@testaltcolt_n1
+POSTHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolt
-POSTHOOK: Output: default@testaltcolt
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: Input: default@testaltcolt_n1
+POSTHOOK: Output: default@testaltcolt_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolt
+PREHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolt
+POSTHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
1 NULL NULL NULL 123.00
2 1.00 2.00 3.00 4.00
3 NULL NULL NULL 123.00
4 NULL NULL NULL -123.00
-PREHOOK: query: alter table testAltColT replace columns
+PREHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolt
-PREHOOK: Output: default@testaltcolt
-POSTHOOK: query: alter table testAltColT replace columns
+PREHOOK: Input: default@testaltcolt_n1
+PREHOOK: Output: default@testaltcolt_n1
+POSTHOOK: query: alter table testAltColT_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolt
-POSTHOOK: Output: default@testaltcolt
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: Input: default@testaltcolt_n1
+POSTHOOK: Output: default@testaltcolt_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolt
+PREHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolt
+POSTHOOK: Input: default@testaltcolt_n1
#### A masked pattern was here ####
1 NULL NULL NULL NULL
2 1.00 2.00 3.00 4.00
3 NULL NULL NULL NULL
4 NULL NULL NULL NULL
-PREHOOK: query: drop table if exists testAltColT
+PREHOOK: query: drop table if exists testAltColT_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@testaltcolt
-PREHOOK: Output: default@testaltcolt
-POSTHOOK: query: drop table if exists testAltColT
+PREHOOK: Input: default@testaltcolt_n1
+PREHOOK: Output: default@testaltcolt_n1
+POSTHOOK: query: drop table if exists testAltColT_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@testaltcolt
-POSTHOOK: Output: default@testaltcolt
-PREHOOK: query: drop table if exists testAltColSF
+POSTHOOK: Input: default@testaltcolt_n1
+POSTHOOK: Output: default@testaltcolt_n1
+PREHOOK: query: drop table if exists testAltColSF_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists testAltColSF
+POSTHOOK: query: drop table if exists testAltColSF_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table testAltColSF stored as sequencefile as select * from testAltCol
+PREHOOK: query: create table testAltColSF_n1 stored as sequencefile as select * from testAltCol_n1
PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@testaltcol
+PREHOOK: Input: default@testaltcol_n1
PREHOOK: Output: database:default
-PREHOOK: Output: default@testAltColSF
-POSTHOOK: query: create table testAltColSF stored as sequencefile as select * from testAltCol
+PREHOOK: Output: default@testAltColSF_n1
+POSTHOOK: query: create table testAltColSF_n1 stored as sequencefile as select * from testAltCol_n1
POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@testaltcol
+POSTHOOK: Input: default@testaltcol_n1
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@testAltColSF
-POSTHOOK: Lineage: testaltcolsf.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: testaltcolsf.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: testaltcolsf.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: testaltcolsf.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: testaltcolsf.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Output: default@testAltColSF_n1
+POSTHOOK: Lineage: testaltcolsf_n1.cbigint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: testaltcolsf_n1.cid SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cid, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: testaltcolsf_n1.cint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: testaltcolsf_n1.csmallint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: testaltcolsf_n1.ctinyint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 1234567890123456789 1234567890 12345 123
2 1 2 3 4
3 1234567890123456789 1234567890 12345 123
4 -1234567890123456789 -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 1234567890123456789 1234567890 12345 123
2 1 2 3 4
3 1234567890123456789 1234567890 12345 123
4 -1234567890123456789 -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 NULL 1234567890 12345 123
2 1 2 3 4
3 NULL 1234567890 12345 123
4 NULL -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 NULL NULL 12345 123
2 1 2 3 4
3 NULL NULL 12345 123
4 NULL NULL -12345 -123
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 NULL NULL NULL 123
2 1 2 3 4
3 NULL NULL NULL 123
4 NULL NULL NULL -123
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 1.23456794E18 1.23456794E9 12345.0 123.0
2 1.0 2.0 3.0 4.0
3 1.23456794E18 1.23456794E9 12345.0 123.0
4 -1.23456794E18 -1.23456794E9 -12345.0 -123.0
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 1.23456789012345677E18 1.23456789E9 12345.0 123.0
2 1.0 2.0 3.0 4.0
3 1.23456789012345677E18 1.23456789E9 12345.0 123.0
4 -1.23456789012345677E18 -1.23456789E9 -12345.0 -123.0
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 1234567890123456789.00 1234567890.00 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 1234567890123456789.00 1234567890.00 12345.00 123.00
4 -1234567890123456789.00 -1234567890.00 -12345.00 -123.00
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 NULL 1234567890.00 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 NULL 1234567890.00 12345.00 123.00
4 NULL -1234567890.00 -12345.00 -123.00
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 NULL NULL 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 NULL NULL 12345.00 123.00
4 NULL NULL -12345.00 -123.00
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 NULL NULL NULL 123.00
2 1.00 2.00 3.00 4.00
3 NULL NULL NULL 123.00
4 NULL NULL NULL -123.00
-PREHOOK: query: alter table testAltColSF replace columns
+PREHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: alter table testAltColSF replace columns
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: alter table testAltColSF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolsf
+PREHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolsf
+POSTHOOK: Input: default@testaltcolsf_n1
#### A masked pattern was here ####
1 NULL NULL NULL NULL
2 1.00 2.00 3.00 4.00
3 NULL NULL NULL NULL
4 NULL NULL NULL NULL
-PREHOOK: query: drop table if exists testAltColSF
+PREHOOK: query: drop table if exists testAltColSF_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@testaltcolsf
-PREHOOK: Output: default@testaltcolsf
-POSTHOOK: query: drop table if exists testAltColSF
+PREHOOK: Input: default@testaltcolsf_n1
+PREHOOK: Output: default@testaltcolsf_n1
+POSTHOOK: query: drop table if exists testAltColSF_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@testaltcolsf
-POSTHOOK: Output: default@testaltcolsf
-PREHOOK: query: drop table if exists testAltColRCF
+POSTHOOK: Input: default@testaltcolsf_n1
+POSTHOOK: Output: default@testaltcolsf_n1
+PREHOOK: query: drop table if exists testAltColRCF_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists testAltColRCF
+POSTHOOK: query: drop table if exists testAltColRCF_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table testAltColRCF stored as rcfile as select * from testAltCol
+PREHOOK: query: create table testAltColRCF_n1 stored as rcfile as select * from testAltCol_n1
PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@testaltcol
+PREHOOK: Input: default@testaltcol_n1
PREHOOK: Output: database:default
-PREHOOK: Output: default@testAltColRCF
-POSTHOOK: query: create table testAltColRCF stored as rcfile as select * from testAltCol
+PREHOOK: Output: default@testAltColRCF_n1
+POSTHOOK: query: create table testAltColRCF_n1 stored as rcfile as select * from testAltCol_n1
POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@testaltcol
+POSTHOOK: Input: default@testaltcol_n1
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@testAltColRCF
-POSTHOOK: Lineage: testaltcolrcf.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: testaltcolrcf.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: testaltcolrcf.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: testaltcolrcf.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: testaltcolrcf.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Output: default@testAltColRCF_n1
+POSTHOOK: Lineage: testaltcolrcf_n1.cbigint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: testaltcolrcf_n1.cid SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cid, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: testaltcolrcf_n1.cint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: testaltcolrcf_n1.csmallint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: testaltcolrcf_n1.ctinyint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 1234567890123456789 1234567890 12345 123
2 1 2 3 4
3 1234567890123456789 1234567890 12345 123
4 -1234567890123456789 -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 1234567890123456789 1234567890 12345 123
2 1 2 3 4
3 1234567890123456789 1234567890 12345 123
4 -1234567890123456789 -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 NULL 1234567890 12345 123
2 1 2 3 4
3 NULL 1234567890 12345 123
4 NULL -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 NULL NULL 12345 123
2 1 2 3 4
3 NULL NULL 12345 123
4 NULL NULL -12345 -123
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 NULL NULL NULL 123
2 1 2 3 4
3 NULL NULL NULL 123
4 NULL NULL NULL -123
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 1.23456794E18 1.23456794E9 12345.0 123.0
2 1.0 2.0 3.0 4.0
3 1.23456794E18 1.23456794E9 12345.0 123.0
4 -1.23456794E18 -1.23456794E9 -12345.0 -123.0
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 1.23456789012345677E18 1.23456789E9 12345.0 123.0
2 1.0 2.0 3.0 4.0
3 1.23456789012345677E18 1.23456789E9 12345.0 123.0
4 -1.23456789012345677E18 -1.23456789E9 -12345.0 -123.0
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 1234567890123456789.00 1234567890.00 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 1234567890123456789.00 1234567890.00 12345.00 123.00
4 -1234567890123456789.00 -1234567890.00 -12345.00 -123.00
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 NULL 1234567890.00 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 NULL 1234567890.00 12345.00 123.00
4 NULL -1234567890.00 -12345.00 -123.00
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 NULL NULL 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 NULL NULL 12345.00 123.00
4 NULL NULL -12345.00 -123.00
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 NULL NULL NULL 123.00
2 1.00 2.00 3.00 4.00
3 NULL NULL NULL 123.00
4 NULL NULL NULL -123.00
-PREHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: alter table testAltColRCF replace columns
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: alter table testAltColRCF_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolrcf
+PREHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolrcf
+POSTHOOK: Input: default@testaltcolrcf_n1
#### A masked pattern was here ####
1 NULL NULL NULL NULL
2 1.00 2.00 3.00 4.00
3 NULL NULL NULL NULL
4 NULL NULL NULL NULL
-PREHOOK: query: drop table if exists testAltColRCF
+PREHOOK: query: drop table if exists testAltColRCF_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@testaltcolrcf
-PREHOOK: Output: default@testaltcolrcf
-POSTHOOK: query: drop table if exists testAltColRCF
+PREHOOK: Input: default@testaltcolrcf_n1
+PREHOOK: Output: default@testaltcolrcf_n1
+POSTHOOK: query: drop table if exists testAltColRCF_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@testaltcolrcf
-POSTHOOK: Output: default@testaltcolrcf
-PREHOOK: query: drop table if exists testAltColORC
+POSTHOOK: Input: default@testaltcolrcf_n1
+POSTHOOK: Output: default@testaltcolrcf_n1
+PREHOOK: query: drop table if exists testAltColORC_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists testAltColORC
+POSTHOOK: query: drop table if exists testAltColORC_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table testAltColORC stored as orc as select * from testAltCol
+PREHOOK: query: create table testAltColORC_n1 stored as orc as select * from testAltCol_n1
PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@testaltcol
+PREHOOK: Input: default@testaltcol_n1
PREHOOK: Output: database:default
-PREHOOK: Output: default@testAltColORC
-POSTHOOK: query: create table testAltColORC stored as orc as select * from testAltCol
+PREHOOK: Output: default@testAltColORC_n1
+POSTHOOK: query: create table testAltColORC_n1 stored as orc as select * from testAltCol_n1
POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@testaltcol
+POSTHOOK: Input: default@testaltcol_n1
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@testAltColORC
-POSTHOOK: Lineage: testaltcolorc.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: testaltcolorc.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: testaltcolorc.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: testaltcolorc.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: testaltcolorc.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Output: default@testAltColORC_n1
+POSTHOOK: Lineage: testaltcolorc_n1.cbigint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: testaltcolorc_n1.cid SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cid, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: testaltcolorc_n1.cint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: testaltcolorc_n1.csmallint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: testaltcolorc_n1.ctinyint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 1234567890123456789 1234567890 12345 123
2 1 2 3 4
3 1234567890123456789 1234567890 12345 123
4 -1234567890123456789 -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 1234567890123456789 1234567890 12345 123
2 1 2 3 4
3 1234567890123456789 1234567890 12345 123
4 -1234567890123456789 -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 NULL 1234567890 12345 123
2 1 2 3 4
3 NULL 1234567890 12345 123
4 NULL -1234567890 -12345 -123
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 NULL NULL 12345 123
2 1 2 3 4
3 NULL NULL 12345 123
4 NULL NULL -12345 -123
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 NULL NULL NULL 123
2 1 2 3 4
3 NULL NULL NULL 123
4 NULL NULL NULL -123
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 1.23456794E18 1.23456794E9 12345.0 123.0
2 1.0 2.0 3.0 4.0
3 1.23456794E18 1.23456794E9 12345.0 123.0
4 -1.23456794E18 -1.23456794E9 -12345.0 -123.0
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE)
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 1.23456789012345677E18 1.23456789E9 12345.0 123.0
2 1.0 2.0 3.0 4.0
3 1.23456789012345677E18 1.23456789E9 12345.0 123.0
4 -1.23456789012345677E18 -1.23456789E9 -12345.0 -123.0
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 1234567890123456789.00 1234567890.00 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 1234567890123456789.00 1234567890.00 12345.00 123.00
4 -1234567890123456789.00 -1234567890.00 -12345.00 -123.00
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 NULL 1234567890.00 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 NULL 1234567890.00 12345.00 123.00
4 NULL -1234567890.00 -12345.00 -123.00
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 NULL NULL 12345.00 123.00
2 1.00 2.00 3.00 4.00
3 NULL NULL 12345.00 123.00
4 NULL NULL -12345.00 -123.00
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2))
POSTHOOK: type: ALTERTABLE_REPLACECOLS
-POSTHOOK: Input: default@testaltcolorc
-POSTHOOK: Output: default@testaltcolorc
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: Input: default@testaltcolorc_n1
+POSTHOOK: Output: default@testaltcolorc_n1
+PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
PREHOOK: type: QUERY
-PREHOOK: Input: default@testaltcolorc
+PREHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
-POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId
+POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@testaltcolorc
+POSTHOOK: Input: default@testaltcolorc_n1
#### A masked pattern was here ####
1 NULL NULL NULL 123.00
2 1.00 2.00 3.00 4.00
3 NULL NULL NULL 123.00
4 NULL NULL NULL -123.00
-PREHOOK: query: alter table testAltColORC replace columns
+PREHOOK: query: alter table testAltColORC_n1 replace columns
(cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2))
PREHOOK: type: ALTERTABLE_REPLACECOLS
-PREHOOK: Input: default@testaltcolorc
-PREHOOK: Output: default@testaltcolorc
-POSTHOOK: query: alter table testAltColORC replace columns
+PREHOOK: Input: default@testaltcolorc_n1
+PREHOOK: Output: default@testaltcolorc_n1
+POSTHOOK: query:
alter table testAltColORC_n1 replace columns (cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolorc -POSTHOOK: Output: default@testaltcolorc -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +POSTHOOK: Input: default@testaltcolorc_n1 +POSTHOOK: Output: default@testaltcolorc_n1 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n1 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n1 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n1 #### A masked pattern was here #### 1 NULL NULL NULL NULL 2 1.00 2.00 3.00 4.00 3 NULL NULL NULL NULL 4 NULL NULL NULL NULL -PREHOOK: query: drop table if exists testAltColORC +PREHOOK: query: drop table if exists testAltColORC_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testaltcolorc -PREHOOK: Output: default@testaltcolorc -POSTHOOK: query: drop table if exists testAltColORC +PREHOOK: Input: default@testaltcolorc_n1 +PREHOOK: Output: default@testaltcolorc_n1 +POSTHOOK: query: drop table if exists testAltColORC_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testaltcolorc -POSTHOOK: Output: default@testaltcolorc -PREHOOK: query: drop table if exists testAltColPDE +POSTHOOK: Input: default@testaltcolorc_n1 +POSTHOOK: Output: default@testaltcolorc_n1 +PREHOOK: query: drop table if exists testAltColPDE_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltColPDE +POSTHOOK: query: drop table if exists testAltColPDE_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltColPDE stored as parquet as select * from testAltCol +PREHOOK: query: create table testAltColPDE_n0 stored as parquet as select * from testAltCol_n1 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n1 PREHOOK: Output: database:default -PREHOOK: Output: default@testAltColPDE -POSTHOOK: query: create table testAltColPDE stored as parquet as select * from testAltCol +PREHOOK: Output: default@testAltColPDE_n0 +POSTHOOK: query: create table testAltColPDE_n0 stored as parquet as select * from testAltCol_n1 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n1 POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltColPDE -POSTHOOK: Lineage: testaltcolpde.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: testaltcolpde.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ] -POSTHOOK: Lineage: testaltcolpde.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: testaltcolpde.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: testaltcolpde.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Output: default@testAltColPDE_n0 
+POSTHOOK: Lineage: testaltcolpde_n0.cbigint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: testaltcolpde_n0.cid SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cid, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolpde_n0.cint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: testaltcolpde_n0.csmallint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: testaltcolpde_n0.ctinyint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, 
cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 NULL 1234567890 12345 123 2 1 2 3 4 3 NULL 1234567890 12345 123 4 NULL -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 NULL NULL 12345 123 2 1 2 3 4 3 NULL NULL 12345 123 4 NULL NULL -12345 -123 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, 
cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 NULL NULL NULL 123 2 1 2 3 4 3 NULL NULL NULL 123 4 NULL NULL NULL -123 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 1.23456794E18 1.23456794E9 12345.0 123.0 2 1.0 2.0 3.0 4.0 3 1.23456794E18 1.23456794E9 12345.0 123.0 4 -1.23456794E18 -1.23456794E9 -12345.0 -123.0 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from 
testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 1.23456789012345677E18 1.234567936E9 12345.0 123.0 2 1.0 2.0 3.0 4.0 3 1.23456789012345677E18 1.234567936E9 12345.0 123.0 4 -1.23456789012345677E18 -1.234567936E9 -12345.0 -123.0 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 1234567890123456789.00 1234567890.00 12345.00 123.00 2 1.00 2.00 3.00 4.00 3 1234567890123456789.00 1234567890.00 12345.00 123.00 4 -1234567890123456789.00 -1234567890.00 -12345.00 -123.00 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: 
default@testaltcolpde_n0 #### A masked pattern was here #### 1 NULL 1234567890.00 12345.00 123.00 2 1.00 2.00 3.00 4.00 3 NULL 1234567890.00 12345.00 123.00 4 NULL -1234567890.00 -12345.00 -123.00 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 NULL NULL 12345.00 123.00 2 1.00 2.00 3.00 4.00 3 NULL NULL 12345.00 123.00 4 NULL NULL -12345.00 -123.00 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 NULL NULL NULL 123.00 2 1.00 2.00 3.00 4.00 3 NULL NULL NULL 123.00 4 NULL NULL NULL -123.00 -PREHOOK: query: alter table testAltColPDE replace columns +PREHOOK: query: alter table 
testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: alter table testAltColPDE replace columns +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: alter table testAltColPDE_n0 replace columns (cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpde +PREHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDE_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpde +POSTHOOK: Input: default@testaltcolpde_n0 #### A masked pattern was here #### 1 NULL NULL NULL NULL 2 1.00 2.00 3.00 4.00 3 NULL NULL NULL NULL 4 NULL NULL NULL NULL -PREHOOK: query: drop table if exists testAltColPDE +PREHOOK: query: drop table if exists testAltColPDE_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testaltcolpde -PREHOOK: Output: default@testaltcolpde -POSTHOOK: query: drop table if exists testAltColPDE +PREHOOK: Input: default@testaltcolpde_n0 +PREHOOK: Output: default@testaltcolpde_n0 +POSTHOOK: query: drop table if exists testAltColPDE_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testaltcolpde -POSTHOOK: Output: default@testaltcolpde -PREHOOK: query: drop table if exists testAltColPDD +POSTHOOK: Input: default@testaltcolpde_n0 +POSTHOOK: Output: default@testaltcolpde_n0 +PREHOOK: query: drop table if exists testAltColPDD_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltColPDD +POSTHOOK: query: drop table if exists testAltColPDD_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltColPDD stored as parquet tblproperties ("parquet.enable.dictionary"="false") as -select * from testAltCol +PREHOOK: query: create table testAltColPDD_n0 stored as parquet tblproperties ("parquet.enable.dictionary"="false") as +select * from testAltCol_n1 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n1 PREHOOK: Output: database:default -PREHOOK: Output: default@testAltColPDD -POSTHOOK: query: create table testAltColPDD stored as parquet tblproperties ("parquet.enable.dictionary"="false") as -select * from testAltCol +PREHOOK: Output: default@testAltColPDD_n0 +POSTHOOK: query: create table testAltColPDD_n0 stored as parquet tblproperties ("parquet.enable.dictionary"="false") as +select * from testAltCol_n1 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n1 POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltColPDD -POSTHOOK: Lineage: testaltcolpdd.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: 
testaltcolpdd.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ] -POSTHOOK: Lineage: testaltcolpdd.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: testaltcolpdd.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: testaltcolpdd.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Output: default@testAltColPDD_n0 +POSTHOOK: Lineage: testaltcolpdd_n0.cbigint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: testaltcolpdd_n0.cid SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cid, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolpdd_n0.cint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: testaltcolpdd_n0.csmallint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: testaltcolpdd_n0.ctinyint SIMPLE [(testaltcol_n1)testaltcol_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt BIGINT, cInt BIGINT, cSmallInt BIGINT, cTinyint BIGINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 
1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt INT, cInt INT, cSmallInt INT, cTinyint INT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 NULL 1234567890 12345 123 2 1 2 3 4 3 NULL 1234567890 12345 123 4 NULL -1234567890 -12345 -123 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt SMALLINT, cInt SMALLINT, cSmallInt SMALLINT, cTinyint SMALLINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 NULL NULL 12345 123 2 1 2 3 4 3 NULL NULL 12345 123 4 NULL NULL -12345 -123 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd 
-POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt TINYINT, cInt TINYINT, cSmallInt TINYINT, cTinyint TINYINT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 NULL NULL NULL 123 2 1 2 3 4 3 NULL NULL NULL 123 4 NULL NULL NULL -123 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt FLOAT, cInt FLOAT, cSmallInt FLOAT, cTinyint FLOAT) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 1.23456794E18 1.23456794E9 12345.0 123.0 2 1.0 2.0 3.0 4.0 3 1.23456794E18 1.23456794E9 12345.0 123.0 4 -1.23456794E18 -1.23456794E9 -12345.0 -123.0 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DOUBLE, cInt DOUBLE, cSmallInt DOUBLE, cTinyint DOUBLE) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd 
-POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 1.23456789012345677E18 1.234567936E9 12345.0 123.0 2 1.0 2.0 3.0 4.0 3 1.23456789012345677E18 1.234567936E9 12345.0 123.0 4 -1.23456789012345677E18 -1.234567936E9 -12345.0 -123.0 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(22,2), cInt DECIMAL(22,2), cSmallInt DECIMAL(22,2), cTinyint DECIMAL(22,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 1234567890123456789.00 1234567890.00 12345.00 123.00 2 1.00 2.00 3.00 4.00 3 1234567890123456789.00 1234567890.00 12345.00 123.00 4 -1234567890123456789.00 -1234567890.00 -12345.00 -123.00 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(13,2), cInt DECIMAL(13,2), cSmallInt DECIMAL(13,2), cTinyint DECIMAL(13,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from 
testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 NULL 1234567890.00 12345.00 123.00 2 1.00 2.00 3.00 4.00 3 NULL 1234567890.00 12345.00 123.00 4 NULL -1234567890.00 -12345.00 -123.00 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(8,2), cInt DECIMAL(8,2), cSmallInt DECIMAL(8,2), cTinyint DECIMAL(8,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 NULL NULL 12345.00 123.00 2 1.00 2.00 3.00 4.00 3 NULL NULL 12345.00 123.00 4 NULL NULL -12345.00 -123.00 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(6,2), cInt DECIMAL(6,2), cSmallInt DECIMAL(6,2), cTinyint DECIMAL(6,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: 
default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 NULL NULL NULL 123.00 2 1.00 2.00 3.00 4.00 3 NULL NULL NULL 123.00 4 NULL NULL NULL -123.00 -PREHOOK: query: alter table testAltColPDD replace columns +PREHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: alter table testAltColPDD replace columns +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: alter table testAltColPDD_n0 replace columns (cId TINYINT, cBigInt DECIMAL(3,2), cInt DECIMAL(3,2), cSmallInt DECIMAL(3,2), cTinyint DECIMAL(3,2)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolpdd +PREHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColPDD_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 #### A masked pattern was here #### 1 NULL NULL NULL NULL 2 1.00 2.00 3.00 4.00 3 NULL NULL NULL NULL 4 NULL NULL NULL NULL -PREHOOK: query: drop table if exists testAltColPDD +PREHOOK: query: drop table if exists testAltColPDD_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testaltcolpdd -PREHOOK: Output: default@testaltcolpdd -POSTHOOK: query: drop table if exists testAltColPDD +PREHOOK: Input: default@testaltcolpdd_n0 +PREHOOK: Output: default@testaltcolpdd_n0 +POSTHOOK: query: drop table if exists testAltColPDD_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testaltcolpdd -POSTHOOK: Output: default@testaltcolpdd +POSTHOOK: Input: default@testaltcolpdd_n0 +POSTHOOK: Output: default@testaltcolpdd_n0 diff --git a/ql/src/test/results/clientpositive/type_widening.q.out b/ql/src/test/results/clientpositive/type_widening.q.out index 3c0feb8458..92de4409a3 100644 --- a/ql/src/test/results/clientpositive/type_widening.q.out +++ b/ql/src/test/results/clientpositive/type_widening.q.out @@ -1096,17 +1096,17 @@ POSTHOOK: Input: default@src 9223372036854775807 9223372036854775807 9223372036854775807 -PREHOOK: query: create table t1(a tinyint, b smallint) +PREHOOK: query: create table t1_n40(a tinyint, b smallint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1(a tinyint, b smallint) +PREHOOK: Output: default@t1_n40 +POSTHOOK: query: create table t1_n40(a tinyint, b smallint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: explain select * from t1 where a > 2 +POSTHOOK: Output: default@t1_n40 +PREHOOK: query: explain select * from t1_n40 where a > 2 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where a > 2 +POSTHOOK: query: explain select * from t1_n40 where a > 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1117,7 +1117,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: t1 + alias: t1_n40 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (a > 2Y) (type: boolean) @@ -1128,9 +1128,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: explain select * from t1 where b < 2 +PREHOOK: query: explain select * from t1_n40 where b < 2 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where b < 2 +POSTHOOK: query: explain select * from t1_n40 where b < 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1141,7 +1141,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: t1 + alias: t1_n40 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (b < 2S) (type: boolean) @@ -1152,9 +1152,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: explain select * from t1 where a < 200 +PREHOOK: query: explain select * from t1_n40 where a < 200 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where a < 200 +POSTHOOK: query: explain select * from t1_n40 where a < 200 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1165,7 +1165,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: t1 + alias: t1_n40 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (UDFToInteger(a) < 200) (type: boolean) @@ -1176,9 +1176,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: explain select * from t1 where b > 40000 +PREHOOK: query: explain select * from t1_n40 where b > 40000 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from t1 where b > 40000 +POSTHOOK: query: explain select * from t1_n40 where b > 40000 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1189,7 +1189,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: t1 + alias: t1_n40 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (UDFToInteger(b) > 40000) (type: boolean) @@ -1200,11 +1200,11 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: drop table t1 +PREHOOK: query: drop table t1_n40 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t1 -PREHOOK: Output: default@t1 -POSTHOOK: query: drop table t1 +PREHOOK: Input: default@t1_n40 +PREHOOK: Output: default@t1_n40 +POSTHOOK: query: drop table t1_n40 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t1 -POSTHOOK: Output: default@t1 +POSTHOOK: Input: default@t1_n40 +POSTHOOK: Output: default@t1_n40 diff --git a/ql/src/test/results/clientpositive/typechangetest.q.out b/ql/src/test/results/clientpositive/typechangetest.q.out index 2c56b78839..bbb53a09cd 100644 --- a/ql/src/test/results/clientpositive/typechangetest.q.out +++ b/ql/src/test/results/clientpositive/typechangetest.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table if 
exists testAltCol +PREHOOK: query: drop table if exists testAltCol_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltCol +POSTHOOK: query: drop table if exists testAltCol_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltCol +PREHOOK: query: create table testAltCol_n0 (cId TINYINT, cTimeStamp TIMESTAMP, cDecimal DECIMAL(38,18), @@ -15,8 +15,8 @@ PREHOOK: query: create table testAltCol cBoolean BOOLEAN) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@testAltCol -POSTHOOK: query: create table testAltCol +PREHOOK: Output: default@testAltCol_n0 +POSTHOOK: query: create table testAltCol_n0 (cId TINYINT, cTimeStamp TIMESTAMP, cDecimal DECIMAL(38,18), @@ -29,8 +29,8 @@ POSTHOOK: query: create table testAltCol cBoolean BOOLEAN) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltCol -PREHOOK: query: insert into testAltCol values +POSTHOOK: Output: default@testAltCol_n0 +PREHOOK: query: insert into testAltCol_n0 values (1, '2017-11-07 09:02:49.999999999', 12345678901234567890.123456789012345678, @@ -43,8 +43,8 @@ PREHOOK: query: insert into testAltCol values TRUE) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@testaltcol -POSTHOOK: query: insert into testAltCol values +PREHOOK: Output: default@testaltcol_n0 +POSTHOOK: query: insert into testAltCol_n0 values (1, '2017-11-07 09:02:49.999999999', 12345678901234567890.123456789012345678, @@ -57,18 +57,18 @@ POSTHOOK: query: insert into testAltCol values TRUE) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@testaltcol -POSTHOOK: Lineage: testaltcol.cbigint SCRIPT [] -POSTHOOK: Lineage: testaltcol.cboolean SCRIPT [] -POSTHOOK: Lineage: testaltcol.cdecimal SCRIPT [] -POSTHOOK: Lineage: testaltcol.cdouble SCRIPT [] -POSTHOOK: Lineage: testaltcol.cfloat SCRIPT [] -POSTHOOK: Lineage: testaltcol.cid SCRIPT [] -POSTHOOK: Lineage: testaltcol.cint SCRIPT [] -POSTHOOK: Lineage: testaltcol.csmallint SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctimestamp SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctinyint SCRIPT [] -PREHOOK: query: insert into testAltCol values +POSTHOOK: Output: default@testaltcol_n0 +POSTHOOK: Lineage: testaltcol_n0.cbigint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cboolean SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cdecimal SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cdouble SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cfloat SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cid SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.csmallint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.ctimestamp SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.ctinyint SCRIPT [] +PREHOOK: query: insert into testAltCol_n0 values (2, '1400-01-01 01:01:01.000000001', 1.1, @@ -81,8 +81,8 @@ PREHOOK: query: insert into testAltCol values FALSE) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@testaltcol -POSTHOOK: query: insert into testAltCol values +PREHOOK: Output: default@testaltcol_n0 +POSTHOOK: query: insert into testAltCol_n0 values (2, '1400-01-01 01:01:01.000000001', 1.1, @@ -95,18 +95,18 @@ POSTHOOK: query: insert into testAltCol values FALSE) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@testaltcol -POSTHOOK: Lineage: testaltcol.cbigint SCRIPT [] -POSTHOOK: Lineage: testaltcol.cboolean SCRIPT [] -POSTHOOK: Lineage: 
testaltcol.cdecimal SCRIPT [] -POSTHOOK: Lineage: testaltcol.cdouble SCRIPT [] -POSTHOOK: Lineage: testaltcol.cfloat SCRIPT [] -POSTHOOK: Lineage: testaltcol.cid SCRIPT [] -POSTHOOK: Lineage: testaltcol.cint SCRIPT [] -POSTHOOK: Lineage: testaltcol.csmallint SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctimestamp SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctinyint SCRIPT [] -PREHOOK: query: insert into testAltCol values +POSTHOOK: Output: default@testaltcol_n0 +POSTHOOK: Lineage: testaltcol_n0.cbigint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cboolean SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cdecimal SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cdouble SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cfloat SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cid SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.csmallint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.ctimestamp SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.ctinyint SCRIPT [] +PREHOOK: query: insert into testAltCol_n0 values (3, '1400-01-01 01:01:01.000000001', 10.1, @@ -119,8 +119,8 @@ PREHOOK: query: insert into testAltCol values TRUE) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@testaltcol -POSTHOOK: query: insert into testAltCol values +PREHOOK: Output: default@testaltcol_n0 +POSTHOOK: query: insert into testAltCol_n0 values (3, '1400-01-01 01:01:01.000000001', 10.1, @@ -133,18 +133,18 @@ POSTHOOK: query: insert into testAltCol values TRUE) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@testaltcol -POSTHOOK: Lineage: testaltcol.cbigint SCRIPT [] -POSTHOOK: Lineage: testaltcol.cboolean SCRIPT [] -POSTHOOK: Lineage: testaltcol.cdecimal SCRIPT [] -POSTHOOK: Lineage: testaltcol.cdouble SCRIPT [] -POSTHOOK: Lineage: testaltcol.cfloat SCRIPT [] -POSTHOOK: Lineage: testaltcol.cid SCRIPT [] -POSTHOOK: Lineage: testaltcol.cint SCRIPT [] -POSTHOOK: Lineage: testaltcol.csmallint SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctimestamp SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctinyint SCRIPT [] -PREHOOK: query: insert into testAltCol values +POSTHOOK: Output: default@testaltcol_n0 +POSTHOOK: Lineage: testaltcol_n0.cbigint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cboolean SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cdecimal SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cdouble SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cfloat SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cid SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.csmallint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.ctimestamp SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.ctinyint SCRIPT [] +PREHOOK: query: insert into testAltCol_n0 values (4, '1400-01-01 01:01:01.000000001', -10.1, @@ -157,8 +157,8 @@ PREHOOK: query: insert into testAltCol values FALSE) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@testaltcol -POSTHOOK: query: insert into testAltCol values +PREHOOK: Output: default@testaltcol_n0 +POSTHOOK: query: insert into testAltCol_n0 values (4, '1400-01-01 01:01:01.000000001', -10.1, @@ -171,138 +171,138 @@ POSTHOOK: query: insert into testAltCol values FALSE) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@testaltcol -POSTHOOK: Lineage: testaltcol.cbigint SCRIPT [] -POSTHOOK: Lineage: testaltcol.cboolean SCRIPT [] -POSTHOOK: Lineage: testaltcol.cdecimal SCRIPT [] -POSTHOOK: Lineage: testaltcol.cdouble SCRIPT [] -POSTHOOK: 
Lineage: testaltcol.cfloat SCRIPT [] -POSTHOOK: Lineage: testaltcol.cid SCRIPT [] -POSTHOOK: Lineage: testaltcol.cint SCRIPT [] -POSTHOOK: Lineage: testaltcol.csmallint SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctimestamp SCRIPT [] -POSTHOOK: Lineage: testaltcol.ctinyint SCRIPT [] -PREHOOK: query: select cId, cTimeStamp from testAltCol order by cId +POSTHOOK: Output: default@testaltcol_n0 +POSTHOOK: Lineage: testaltcol_n0.cbigint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cboolean SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cdecimal SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cdouble SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cfloat SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cid SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.cint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.csmallint SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.ctimestamp SCRIPT [] +POSTHOOK: Lineage: testaltcol_n0.ctinyint SCRIPT [] +PREHOOK: query: select cId, cTimeStamp from testAltCol_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltCol order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltCol_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltCol order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltCol_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltCol order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltCol_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltCol_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltCol order by cId +PREHOOK: query: select cId, cBoolean from testAltCol_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltCol order by cId +POSTHOOK: query: select cId, cBoolean from testAltCol_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcol 
+POSTHOOK: Input: default@testaltcol_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: drop table if exists testAltColT +PREHOOK: query: drop table if exists testAltColT_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltColT +POSTHOOK: query: drop table if exists testAltColT_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltColT stored as textfile as select * from testAltCol +PREHOOK: query: create table testAltColT_n0 stored as textfile as select * from testAltCol_n0 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 PREHOOK: Output: database:default -PREHOOK: Output: default@testAltColT -POSTHOOK: query: create table testAltColT stored as textfile as select * from testAltCol +PREHOOK: Output: default@testAltColT_n0 +POSTHOOK: query: create table testAltColT_n0 stored as textfile as select * from testAltCol_n0 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltColT -POSTHOOK: Lineage: testaltcolt.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: testaltcolt.cboolean SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cboolean, type:boolean, comment:null), ] -POSTHOOK: Lineage: testaltcolt.cdecimal SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: testaltcolt.cdouble SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: testaltcolt.cfloat SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: testaltcolt.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ] -POSTHOOK: Lineage: testaltcolt.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: testaltcolt.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: testaltcolt.ctimestamp SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] -POSTHOOK: Lineage: testaltcolt.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: Output: default@testAltColT_n0 +POSTHOOK: Lineage: testaltcolt_n0.cbigint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.cboolean SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cboolean, type:boolean, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.cdecimal SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.cdouble SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.cfloat SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.cid SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cid, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.cint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.csmallint SIMPLE 
[(testaltcol_n0)testaltcol_n0.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.ctimestamp SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] +POSTHOOK: Lineage: testaltcolt_n0.ctinyint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColT order by cId +PREHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColT order by cId +POSTHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -314,9 +314,9 @@ PREHOOK: query: alter table testAltColT replace columns cTinyint STRING, cBoolean STRING) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: 
query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n0 +PREHOOK: Output: default@testaltcolt_n0 +POSTHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -328,57 +328,57 @@ POSTHOOK: query: alter table testAltColT replace columns cTinyint STRING, cBoolean STRING) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n0 +POSTHOOK: Output: default@testaltcolt_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColT order by cId +PREHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColT order by cId +POSTHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), 
cDecimal VARCHAR(100), @@ -390,9 +390,9 @@ PREHOOK: query: alter table testAltColT replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n0 +PREHOOK: Output: default@testaltcolt_n0 +POSTHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -404,57 +404,57 @@ POSTHOOK: query: alter table testAltColT replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n0 +POSTHOOK: Output: default@testaltcolt_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColT order by cId +PREHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColT order by cId +POSTHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: 
Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -466,9 +466,9 @@ PREHOOK: query: alter table testAltColT replace columns cTinyint CHAR(100), cBoolean CHAR(100)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n0 +PREHOOK: Output: default@testaltcolt_n0 +POSTHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -480,57 +480,57 @@ POSTHOOK: query: alter table testAltColT replace columns cTinyint CHAR(100), cBoolean CHAR(100)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n0 +POSTHOOK: Output: default@testaltcolt_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColT order by cId +PREHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: 
default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColT order by cId +POSTHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -542,9 +542,9 @@ PREHOOK: query: alter table testAltColT replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n0 +PREHOOK: Output: default@testaltcolt_n0 +POSTHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -556,57 +556,57 @@ POSTHOOK: query: alter table testAltColT replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n0 +POSTHOOK: Output: default@testaltcolt_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 2017 2 1400 3 1400 4 1400 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 1234 1.79 3.4E 2 1.10 2.2 3.3 3 10.1 20.2 30.3 4 -10. -20. -30. 
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 1234 1234 1234 123 2 1 2 3 4 3 1234 1234 1234 123 4 -123 -123 -123 -123 -PREHOOK: query: select cId, cBoolean from testAltColT order by cId +PREHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColT order by cId +POSTHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 true 2 fals 3 true 4 fals -PREHOOK: query: alter table testAltColT replace columns +PREHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -618,9 +618,9 @@ PREHOOK: query: alter table testAltColT replace columns cTinyint CHAR(4), cBoolean CHAR(4)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: alter table testAltColT replace columns +PREHOOK: Input: default@testaltcolt_n0 +PREHOOK: Output: default@testaltcolt_n0 +POSTHOOK: query: alter table testAltColT_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -632,137 +632,137 @@ POSTHOOK: query: alter table testAltColT replace columns cTinyint CHAR(4), cBoolean CHAR(4)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: Input: default@testaltcolt_n0 +POSTHOOK: Output: default@testaltcolt_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColT order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 2017 2 1400 3 1400 4 1400 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 1234 1.79 3.4E 2 1.10 2.2 3.3 3 10.1 20.2 30.3 4 -10. -20. -30. 
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 1234 1234 1234 123 2 1 2 3 4 3 1234 1234 1234 123 4 -123 -123 -123 -123 -PREHOOK: query: select cId, cBoolean from testAltColT order by cId +PREHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolt +PREHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColT order by cId +POSTHOOK: query: select cId, cBoolean from testAltColT_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolt +POSTHOOK: Input: default@testaltcolt_n0 #### A masked pattern was here #### 1 true 2 fals 3 true 4 fals -PREHOOK: query: drop table if exists testAltColT +PREHOOK: query: drop table if exists testAltColT_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testaltcolt -PREHOOK: Output: default@testaltcolt -POSTHOOK: query: drop table if exists testAltColT +PREHOOK: Input: default@testaltcolt_n0 +PREHOOK: Output: default@testaltcolt_n0 +POSTHOOK: query: drop table if exists testAltColT_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testaltcolt -POSTHOOK: Output: default@testaltcolt -PREHOOK: query: drop table if exists testAltColSF +POSTHOOK: Input: default@testaltcolt_n0 +POSTHOOK: Output: default@testaltcolt_n0 +PREHOOK: query: drop table if exists testAltColSF_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltColSF +POSTHOOK: query: drop table if exists testAltColSF_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltColSF stored as sequencefile as select * from testAltCol +PREHOOK: query: create table testAltColSF_n0 stored as sequencefile as select * from testAltCol_n0 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 PREHOOK: Output: database:default -PREHOOK: Output: default@testAltColSF -POSTHOOK: query: create table testAltColSF stored as sequencefile as select * from testAltCol +PREHOOK: Output: default@testAltColSF_n0 +POSTHOOK: query: create table testAltColSF_n0 stored as sequencefile as select * from testAltCol_n0 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltColSF -POSTHOOK: Lineage: testaltcolsf.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: testaltcolsf.cboolean SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cboolean, type:boolean, comment:null), ] -POSTHOOK: Lineage: testaltcolsf.cdecimal SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: testaltcolsf.cdouble SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: testaltcolsf.cfloat SIMPLE 
[(testaltcol)testaltcol.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: testaltcolsf.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ] -POSTHOOK: Lineage: testaltcolsf.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: testaltcolsf.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: testaltcolsf.ctimestamp SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] -POSTHOOK: Lineage: testaltcolsf.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: Output: default@testAltColSF_n0 +POSTHOOK: Lineage: testaltcolsf_n0.cbigint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.cboolean SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cboolean, type:boolean, comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.cdecimal SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.cdouble SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.cfloat SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.cid SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cid, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.cint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.csmallint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.ctimestamp SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] +POSTHOOK: Lineage: testaltcolsf_n0.ctinyint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 
3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColSF order by cId +PREHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColSF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColSF replace columns +PREHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -774,9 +774,9 @@ PREHOOK: query: alter table testAltColSF replace columns cTinyint STRING, cBoolean STRING) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolsf -PREHOOK: Output: default@testaltcolsf -POSTHOOK: query: alter table testAltColSF replace columns +PREHOOK: Input: default@testaltcolsf_n0 +PREHOOK: Output: default@testaltcolsf_n0 +POSTHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -788,57 +788,57 @@ POSTHOOK: query: alter table testAltColSF replace columns cTinyint STRING, cBoolean STRING) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolsf -POSTHOOK: Output: default@testaltcolsf -PREHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: Input: default@testaltcolsf_n0 +POSTHOOK: Output: default@testaltcolsf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +POSTHOOK: query: select 
cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColSF order by cId +PREHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColSF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColSF replace columns +PREHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -850,9 +850,9 @@ PREHOOK: query: alter table testAltColSF replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolsf -PREHOOK: Output: default@testaltcolsf -POSTHOOK: query: alter table testAltColSF replace columns +PREHOOK: Input: default@testaltcolsf_n0 +PREHOOK: Output: default@testaltcolsf_n0 +POSTHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -864,57 +864,57 @@ POSTHOOK: query: alter table testAltColSF replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolsf -POSTHOOK: Output: default@testaltcolsf -PREHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: Input: default@testaltcolsf_n0 +POSTHOOK: Output: default@testaltcolsf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId 
+PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColSF order by cId +PREHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColSF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColSF replace columns +PREHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -926,9 +926,9 @@ PREHOOK: query: alter table testAltColSF replace columns cTinyint CHAR(100), cBoolean CHAR(100)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolsf -PREHOOK: Output: default@testaltcolsf -POSTHOOK: query: alter table testAltColSF replace columns +PREHOOK: Input: default@testaltcolsf_n0 +PREHOOK: Output: default@testaltcolsf_n0 +POSTHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -940,57 +940,57 @@ POSTHOOK: query: alter table testAltColSF replace columns cTinyint CHAR(100), cBoolean CHAR(100)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolsf -POSTHOOK: Output: default@testaltcolsf -PREHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: Input: default@testaltcolsf_n0 +POSTHOOK: Output: default@testaltcolsf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColSF order by cId +PREHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColSF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColSF replace columns +PREHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -1002,9 +1002,9 @@ PREHOOK: query: alter table testAltColSF replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolsf -PREHOOK: Output: default@testaltcolsf -POSTHOOK: query: alter table testAltColSF replace columns +PREHOOK: Input: default@testaltcolsf_n0 +PREHOOK: Output: default@testaltcolsf_n0 +POSTHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -1016,57 +1016,57 @@ POSTHOOK: query: alter table testAltColSF replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolsf -POSTHOOK: Output: default@testaltcolsf -PREHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: Input: default@testaltcolsf_n0 +POSTHOOK: Output: default@testaltcolsf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by 
cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 2017 2 1400 3 1400 4 1400 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 1234 1.79 3.4E 2 1.10 2.2 3.3 3 10.1 20.2 30.3 4 -10. -20. -30. -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 1234 1234 1234 123 2 1 2 3 4 3 1234 1234 1234 123 4 -123 -123 -123 -123 -PREHOOK: query: select cId, cBoolean from testAltColSF order by cId +PREHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColSF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 true 2 fals 3 true 4 fals -PREHOOK: query: alter table testAltColSF replace columns +PREHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -1078,9 +1078,9 @@ PREHOOK: query: alter table testAltColSF replace columns cTinyint CHAR(4), cBoolean CHAR(4)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolsf -PREHOOK: Output: default@testaltcolsf -POSTHOOK: query: alter table testAltColSF replace columns +PREHOOK: Input: default@testaltcolsf_n0 +PREHOOK: Output: default@testaltcolsf_n0 +POSTHOOK: query: alter table testAltColSF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -1092,137 +1092,137 @@ POSTHOOK: query: alter table testAltColSF replace columns cTinyint CHAR(4), cBoolean CHAR(4)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolsf -POSTHOOK: Output: default@testaltcolsf -PREHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: Input: default@testaltcolsf_n0 +POSTHOOK: Output: default@testaltcolsf_n0 +PREHOOK: query: select cId, 
cTimeStamp from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColSF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 2017 2 1400 3 1400 4 1400 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 1234 1.79 3.4E 2 1.10 2.2 3.3 3 10.1 20.2 30.3 4 -10. -20. -30. -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 1234 1234 1234 123 2 1 2 3 4 3 1234 1234 1234 123 4 -123 -123 -123 -123 -PREHOOK: query: select cId, cBoolean from testAltColSF order by cId +PREHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolsf +PREHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColSF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColSF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolsf +POSTHOOK: Input: default@testaltcolsf_n0 #### A masked pattern was here #### 1 true 2 fals 3 true 4 fals -PREHOOK: query: drop table if exists testAltColSF +PREHOOK: query: drop table if exists testAltColSF_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testaltcolsf -PREHOOK: Output: default@testaltcolsf -POSTHOOK: query: drop table if exists testAltColSF +PREHOOK: Input: default@testaltcolsf_n0 +PREHOOK: Output: default@testaltcolsf_n0 +POSTHOOK: query: drop table if exists testAltColSF_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testaltcolsf -POSTHOOK: Output: default@testaltcolsf -PREHOOK: query: drop table if exists testAltColORC +POSTHOOK: Input: default@testaltcolsf_n0 +POSTHOOK: Output: default@testaltcolsf_n0 +PREHOOK: query: drop table if exists testAltColORC_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltColORC +POSTHOOK: query: drop table if exists testAltColORC_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltColORC stored as orc as select * from testAltCol +PREHOOK: query: create table testAltColORC_n0 stored as orc as select * from testAltCol_n0 PREHOOK: 
type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 PREHOOK: Output: database:default -PREHOOK: Output: default@testAltColORC -POSTHOOK: query: create table testAltColORC stored as orc as select * from testAltCol +PREHOOK: Output: default@testAltColORC_n0 +POSTHOOK: query: create table testAltColORC_n0 stored as orc as select * from testAltCol_n0 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltColORC -POSTHOOK: Lineage: testaltcolorc.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: testaltcolorc.cboolean SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cboolean, type:boolean, comment:null), ] -POSTHOOK: Lineage: testaltcolorc.cdecimal SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: testaltcolorc.cdouble SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: testaltcolorc.cfloat SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: testaltcolorc.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ] -POSTHOOK: Lineage: testaltcolorc.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: testaltcolorc.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: testaltcolorc.ctimestamp SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] -POSTHOOK: Lineage: testaltcolorc.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: Output: default@testAltColORC_n0 +POSTHOOK: Lineage: testaltcolorc_n0.cbigint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.cboolean SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cboolean, type:boolean, comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.cdecimal SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.cdouble SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.cfloat SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.cid SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cid, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.cint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.csmallint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.ctimestamp SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] +POSTHOOK: Lineage: testaltcolorc_n0.ctinyint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: 
default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:00.000000001 3 1400-01-01 01:01:00.000000001 4 1400-01-01 01:01:00.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColORC order by cId +PREHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColORC order by cId +POSTHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColORC replace columns +PREHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -1234,9 +1234,9 @@ PREHOOK: query: alter table testAltColORC replace columns cTinyint STRING, cBoolean STRING) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolorc -PREHOOK: Output: default@testaltcolorc -POSTHOOK: query: alter table testAltColORC replace columns +PREHOOK: Input: default@testaltcolorc_n0 +PREHOOK: Output: default@testaltcolorc_n0 +POSTHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -1248,57 +1248,57 @@ POSTHOOK: query: alter table testAltColORC replace columns cTinyint STRING, cBoolean STRING) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolorc -POSTHOOK: 
Output: default@testaltcolorc -PREHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: Input: default@testaltcolorc_n0 +POSTHOOK: Output: default@testaltcolorc_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:00.000000001 3 1400-01-01 01:01:00.000000001 4 1400-01-01 01:01:00.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.1 2.2 3.3 3 10.1 20.2 30.3 4 -10.1 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColORC order by cId +PREHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColORC order by cId +POSTHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 TRUE 2 FALSE 3 TRUE 4 FALSE -PREHOOK: query: alter table testAltColORC replace columns +PREHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -1310,9 +1310,9 @@ PREHOOK: query: alter table testAltColORC replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolorc -PREHOOK: Output: default@testaltcolorc -POSTHOOK: query: alter table testAltColORC replace columns +PREHOOK: Input: default@testaltcolorc_n0 +PREHOOK: Output: default@testaltcolorc_n0 
+POSTHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -1324,57 +1324,57 @@ POSTHOOK: query: alter table testAltColORC replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolorc -POSTHOOK: Output: default@testaltcolorc -PREHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: Input: default@testaltcolorc_n0 +POSTHOOK: Output: default@testaltcolorc_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:00.000000001 3 1400-01-01 01:01:00.000000001 4 1400-01-01 01:01:00.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.1 2.2 3.3 3 10.1 20.2 30.3 4 -10.1 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColORC order by cId +PREHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColORC order by cId +POSTHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 TRUE 2 FALSE 3 TRUE 4 FALSE -PREHOOK: query: alter table testAltColORC replace columns +PREHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -1386,9 +1386,9 @@ PREHOOK: query: alter table 
testAltColORC replace columns cTinyint CHAR(100), cBoolean CHAR(100)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolorc -PREHOOK: Output: default@testaltcolorc -POSTHOOK: query: alter table testAltColORC replace columns +PREHOOK: Input: default@testaltcolorc_n0 +PREHOOK: Output: default@testaltcolorc_n0 +POSTHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -1400,57 +1400,57 @@ POSTHOOK: query: alter table testAltColORC replace columns cTinyint CHAR(100), cBoolean CHAR(100)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolorc -POSTHOOK: Output: default@testaltcolorc -PREHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: Input: default@testaltcolorc_n0 +POSTHOOK: Output: default@testaltcolorc_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:00.000000001 3 1400-01-01 01:01:00.000000001 4 1400-01-01 01:01:00.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.1 2.2 3.3 3 10.1 20.2 30.3 4 -10.1 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColORC order by cId +PREHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColORC order by cId +POSTHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: 
default@testaltcolorc_n0 #### A masked pattern was here #### 1 TRUE 2 FALSE 3 TRUE 4 FALSE -PREHOOK: query: alter table testAltColORC replace columns +PREHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -1462,9 +1462,9 @@ PREHOOK: query: alter table testAltColORC replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolorc -PREHOOK: Output: default@testaltcolorc -POSTHOOK: query: alter table testAltColORC replace columns +PREHOOK: Input: default@testaltcolorc_n0 +PREHOOK: Output: default@testaltcolorc_n0 +POSTHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -1476,57 +1476,57 @@ POSTHOOK: query: alter table testAltColORC replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolorc -POSTHOOK: Output: default@testaltcolorc -PREHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: Input: default@testaltcolorc_n0 +POSTHOOK: Output: default@testaltcolorc_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 2017 2 1400 3 1400 4 1400 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 1234 1.79 3.4E 2 1.1 2.2 3.3 3 10.1 20.2 30.3 4 -10. -20. -30. 
-PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 1234 1234 1234 123 2 1 2 3 4 3 1234 1234 1234 123 4 -123 -123 -123 -123 -PREHOOK: query: select cId, cBoolean from testAltColORC order by cId +PREHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColORC order by cId +POSTHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 TRUE 2 FALS 3 TRUE 4 FALS -PREHOOK: query: alter table testAltColORC replace columns +PREHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -1538,9 +1538,9 @@ PREHOOK: query: alter table testAltColORC replace columns cTinyint CHAR(4), cBoolean CHAR(4)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolorc -PREHOOK: Output: default@testaltcolorc -POSTHOOK: query: alter table testAltColORC replace columns +PREHOOK: Input: default@testaltcolorc_n0 +PREHOOK: Output: default@testaltcolorc_n0 +POSTHOOK: query: alter table testAltColORC_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -1552,137 +1552,137 @@ POSTHOOK: query: alter table testAltColORC replace columns cTinyint CHAR(4), cBoolean CHAR(4)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolorc -POSTHOOK: Output: default@testaltcolorc -PREHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: Input: default@testaltcolorc_n0 +POSTHOOK: Output: default@testaltcolorc_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColORC order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 2017 2 1400 3 1400 4 1400 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked 
pattern was here #### 1 1234 1.79 3.4E 2 1.1 2.2 3.3 3 10.1 20.2 30.3 4 -10. -20. -30. -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 1234 1234 1234 123 2 1 2 3 4 3 1234 1234 1234 123 4 -123 -123 -123 -123 -PREHOOK: query: select cId, cBoolean from testAltColORC order by cId +PREHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolorc +PREHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColORC order by cId +POSTHOOK: query: select cId, cBoolean from testAltColORC_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolorc +POSTHOOK: Input: default@testaltcolorc_n0 #### A masked pattern was here #### 1 TRUE 2 FALS 3 TRUE 4 FALS -PREHOOK: query: drop table if exists testAltColORC +PREHOOK: query: drop table if exists testAltColORC_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testaltcolorc -PREHOOK: Output: default@testaltcolorc -POSTHOOK: query: drop table if exists testAltColORC +PREHOOK: Input: default@testaltcolorc_n0 +PREHOOK: Output: default@testaltcolorc_n0 +POSTHOOK: query: drop table if exists testAltColORC_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testaltcolorc -POSTHOOK: Output: default@testaltcolorc -PREHOOK: query: drop table if exists testAltColRCF +POSTHOOK: Input: default@testaltcolorc_n0 +POSTHOOK: Output: default@testaltcolorc_n0 +PREHOOK: query: drop table if exists testAltColRCF_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists testAltColRCF +POSTHOOK: query: drop table if exists testAltColRCF_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltColRCF stored as rcfile as select * from testAltCol +PREHOOK: query: create table testAltColRCF_n0 stored as rcfile as select * from testAltCol_n0 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 PREHOOK: Output: database:default -PREHOOK: Output: default@testAltColRCF -POSTHOOK: query: create table testAltColRCF stored as rcfile as select * from testAltCol +PREHOOK: Output: default@testAltColRCF_n0 +POSTHOOK: query: create table testAltColRCF_n0 stored as rcfile as select * from testAltCol_n0 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 POSTHOOK: Output: database:default -POSTHOOK: Output: default@testAltColRCF -POSTHOOK: Lineage: testaltcolrcf.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.cboolean SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cboolean, type:boolean, comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.cdecimal SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.cdouble SIMPLE 
[(testaltcol)testaltcol.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.cfloat SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.ctimestamp SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] -POSTHOOK: Lineage: testaltcolrcf.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: Output: default@testAltColRCF_n0 +POSTHOOK: Lineage: testaltcolrcf_n0.cbigint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.cboolean SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cboolean, type:boolean, comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.cdecimal SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.cdouble SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.cfloat SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.cid SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cid, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.cint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.csmallint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.ctimestamp SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] +POSTHOOK: Lineage: testaltcolrcf_n0.ctinyint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf 
+POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColRCF order by cId +PREHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColRCF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColRCF replace columns +PREHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -1694,9 +1694,9 @@ PREHOOK: query: alter table testAltColRCF replace columns cTinyint STRING, cBoolean STRING) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolrcf -PREHOOK: Output: default@testaltcolrcf -POSTHOOK: query: alter table testAltColRCF replace columns +PREHOOK: Input: default@testaltcolrcf_n0 +PREHOOK: Output: default@testaltcolrcf_n0 +POSTHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp STRING, cDecimal STRING, @@ -1708,57 +1708,57 @@ POSTHOOK: query: alter table testAltColRCF replace columns cTinyint STRING, cBoolean STRING) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolrcf -POSTHOOK: Output: default@testaltcolrcf -PREHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: Input: default@testaltcolrcf_n0 +POSTHOOK: Output: default@testaltcolrcf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: 
default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColRCF order by cId +PREHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColRCF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColRCF replace columns +PREHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -1770,9 +1770,9 @@ PREHOOK: query: alter table testAltColRCF replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolrcf -PREHOOK: Output: default@testaltcolrcf -POSTHOOK: query: alter table testAltColRCF replace columns +PREHOOK: Input: default@testaltcolrcf_n0 +PREHOOK: Output: default@testaltcolrcf_n0 +POSTHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(100), cDecimal VARCHAR(100), @@ -1784,57 +1784,57 @@ POSTHOOK: query: alter table testAltColRCF replace columns cTinyint VARCHAR(100), cBoolean VARCHAR(100)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolrcf -POSTHOOK: Output: default@testaltcolrcf -PREHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: Input: default@testaltcolrcf_n0 +POSTHOOK: Output: default@testaltcolrcf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: 
default@testaltcolrcf_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColRCF order by cId +PREHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColRCF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColRCF replace columns +PREHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -1846,9 +1846,9 @@ PREHOOK: query: alter table testAltColRCF replace columns cTinyint CHAR(100), cBoolean CHAR(100)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolrcf -PREHOOK: Output: default@testaltcolrcf -POSTHOOK: query: alter table testAltColRCF replace columns +PREHOOK: Input: default@testaltcolrcf_n0 +PREHOOK: Output: default@testaltcolrcf_n0 +POSTHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(100), cDecimal CHAR(100), @@ -1860,57 +1860,57 @@ POSTHOOK: query: alter table testAltColRCF replace columns cTinyint CHAR(100), cBoolean CHAR(100)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolrcf -POSTHOOK: Output: default@testaltcolrcf -PREHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: Input: default@testaltcolrcf_n0 +POSTHOOK: Output: default@testaltcolrcf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId 
PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 2017-11-07 09:02:49.999999999 2 1400-01-01 01:01:01.000000001 3 1400-01-01 01:01:01.000000001 4 1400-01-01 01:01:01.000000001 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 12345678901234567890.123456789012345678 1.79E308 3.4E38 2 1.100000000000000000 2.2 3.3 3 10.100000000000000000 20.2 30.3 4 -10.100000000000000000 -20.2 -30.3 -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 1234567890123456789 1234567890 12345 123 2 1 2 3 4 3 1234567890123456789 1234567890 12345 123 4 -1234567890123456789 -1234567890 -12345 -123 -PREHOOK: query: select cId, cBoolean from testAltColRCF order by cId +PREHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColRCF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 true 2 false 3 true 4 false -PREHOOK: query: alter table testAltColRCF replace columns +PREHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -1922,9 +1922,9 @@ PREHOOK: query: alter table testAltColRCF replace columns cTinyint VARCHAR(4), cBoolean VARCHAR(4)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolrcf -PREHOOK: Output: default@testaltcolrcf -POSTHOOK: query: alter table testAltColRCF replace columns +PREHOOK: Input: default@testaltcolrcf_n0 +PREHOOK: Output: default@testaltcolrcf_n0 +POSTHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp VARCHAR(4), cDecimal VARCHAR(4), @@ -1936,57 +1936,57 @@ POSTHOOK: query: alter table testAltColRCF replace columns cTinyint VARCHAR(4), 
cBoolean VARCHAR(4)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolrcf -POSTHOOK: Output: default@testaltcolrcf -PREHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: Input: default@testaltcolrcf_n0 +POSTHOOK: Output: default@testaltcolrcf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 2017 2 1400 3 1400 4 1400 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 1234 1.79 3.4E 2 1.10 2.2 3.3 3 10.1 20.2 30.3 4 -10. -20. -30. -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 1234 1234 1234 123 2 1 2 3 4 3 1234 1234 1234 123 4 -123 -123 -123 -123 -PREHOOK: query: select cId, cBoolean from testAltColRCF order by cId +PREHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColRCF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 true 2 fals 3 true 4 fals -PREHOOK: query: alter table testAltColRCF replace columns +PREHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -1998,9 +1998,9 @@ PREHOOK: query: alter table testAltColRCF replace columns cTinyint CHAR(4), cBoolean CHAR(4)) PREHOOK: type: ALTERTABLE_REPLACECOLS -PREHOOK: Input: default@testaltcolrcf -PREHOOK: Output: default@testaltcolrcf -POSTHOOK: query: alter table testAltColRCF replace columns +PREHOOK: Input: default@testaltcolrcf_n0 +PREHOOK: Output: default@testaltcolrcf_n0 +POSTHOOK: query: alter table testAltColRCF_n0 replace columns (cId TINYINT, cTimeStamp CHAR(4), cDecimal CHAR(4), @@ -2012,88 
+2012,88 @@ POSTHOOK: query: alter table testAltColRCF replace columns cTinyint CHAR(4), cBoolean CHAR(4)) POSTHOOK: type: ALTERTABLE_REPLACECOLS -POSTHOOK: Input: default@testaltcolrcf -POSTHOOK: Output: default@testaltcolrcf -PREHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: Input: default@testaltcolrcf_n0 +POSTHOOK: Output: default@testaltcolrcf_n0 +PREHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cTimeStamp from testAltColRCF order by cId +POSTHOOK: query: select cId, cTimeStamp from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 2017 2 1400 3 1400 4 1400 -PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +PREHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF order by cId +POSTHOOK: query: select cId, cDecimal, cDouble, cFloat from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 1234 1.79 3.4E 2 1.10 2.2 3.3 3 10.1 20.2 30.3 4 -10. -20. -30. -PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +PREHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF order by cId +POSTHOOK: query: select cId, cBigInt, cInt, cSmallInt, cTinyint from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 1234 1234 1234 123 2 1 2 3 4 3 1234 1234 1234 123 4 -123 -123 -123 -123 -PREHOOK: query: select cId, cBoolean from testAltColRCF order by cId +PREHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId PREHOOK: type: QUERY -PREHOOK: Input: default@testaltcolrcf +PREHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### -POSTHOOK: query: select cId, cBoolean from testAltColRCF order by cId +POSTHOOK: query: select cId, cBoolean from testAltColRCF_n0 order by cId POSTHOOK: type: QUERY -POSTHOOK: Input: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 #### A masked pattern was here #### 1 true 2 fals 3 true 4 fals -PREHOOK: query: drop table if exists testAltColRCF +PREHOOK: query: drop table if exists testAltColRCF_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testaltcolrcf -PREHOOK: Output: default@testaltcolrcf -POSTHOOK: query: drop table if exists testAltColRCF +PREHOOK: Input: default@testaltcolrcf_n0 +PREHOOK: Output: default@testaltcolrcf_n0 +POSTHOOK: query: drop table if exists testAltColRCF_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testaltcolrcf -POSTHOOK: Output: default@testaltcolrcf +POSTHOOK: Input: default@testaltcolrcf_n0 +POSTHOOK: Output: 
default@testaltcolrcf_n0 PREHOOK: query: drop table if exists testAltColP PREHOOK: type: DROPTABLE POSTHOOK: query: drop table if exists testAltColP POSTHOOK: type: DROPTABLE -PREHOOK: query: create table testAltColP stored as parquet as select * from testAltCol +PREHOOK: query: create table testAltColP stored as parquet as select * from testAltCol_n0 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@testaltcol +PREHOOK: Input: default@testaltcol_n0 PREHOOK: Output: database:default PREHOOK: Output: default@testAltColP -POSTHOOK: query: create table testAltColP stored as parquet as select * from testAltCol +POSTHOOK: query: create table testAltColP stored as parquet as select * from testAltCol_n0 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 POSTHOOK: Output: database:default POSTHOOK: Output: default@testAltColP -POSTHOOK: Lineage: testaltcolp.cbigint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: testaltcolp.cboolean SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cboolean, type:boolean, comment:null), ] -POSTHOOK: Lineage: testaltcolp.cdecimal SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: testaltcolp.cdouble SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: testaltcolp.cfloat SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: testaltcolp.cid SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cid, type:tinyint, comment:null), ] -POSTHOOK: Lineage: testaltcolp.cint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: testaltcolp.csmallint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: testaltcolp.ctimestamp SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] -POSTHOOK: Lineage: testaltcolp.ctinyint SIMPLE [(testaltcol)testaltcol.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolp.cbigint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: testaltcolp.cboolean SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cboolean, type:boolean, comment:null), ] +POSTHOOK: Lineage: testaltcolp.cdecimal SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdecimal, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: testaltcolp.cdouble SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: testaltcolp.cfloat SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: testaltcolp.cid SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cid, type:tinyint, comment:null), ] +POSTHOOK: Lineage: testaltcolp.cint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: testaltcolp.csmallint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: testaltcolp.ctimestamp SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctimestamp, type:timestamp, comment:null), ] +POSTHOOK: Lineage: testaltcolp.ctinyint SIMPLE [(testaltcol_n0)testaltcol_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] PREHOOK: query: select 
cId, cTimeStamp from testAltColP order by cId PREHOOK: type: QUERY PREHOOK: Input: default@testaltcolp @@ -2530,11 +2530,11 @@ POSTHOOK: query: drop table if exists testAltColP POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@testaltcolp POSTHOOK: Output: default@testaltcolp -PREHOOK: query: drop table if exists testAltCol +PREHOOK: query: drop table if exists testAltCol_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@testaltcol -PREHOOK: Output: default@testaltcol -POSTHOOK: query: drop table if exists testAltCol +PREHOOK: Input: default@testaltcol_n0 +PREHOOK: Output: default@testaltcol_n0 +POSTHOOK: query: drop table if exists testAltCol_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@testaltcol -POSTHOOK: Output: default@testaltcol +POSTHOOK: Input: default@testaltcol_n0 +POSTHOOK: Output: default@testaltcol_n0 diff --git a/ql/src/test/results/clientpositive/udaf_binarysetfunctions.q.out b/ql/src/test/results/clientpositive/udaf_binarysetfunctions.q.out index 4dfeedaf72..bac37661fb 100644 --- a/ql/src/test/results/clientpositive/udaf_binarysetfunctions.q.out +++ b/ql/src/test/results/clientpositive/udaf_binarysetfunctions.q.out @@ -1,370 +1,370 @@ -PREHOOK: query: drop table t +PREHOOK: query: drop table t_n21 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table t +POSTHOOK: query: drop table t_n21 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table t (id int,px int,y decimal,x decimal) +PREHOOK: query: create table t_n21 (id int,px int,y decimal,x decimal) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (id int,px int,y decimal,x decimal) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: create table t_n21 (id int,px int,y decimal,x decimal) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (101,1,1,1) +POSTHOOK: Output: default@t_n21 +PREHOOK: query: insert into t_n21 values (101,1,1,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (101,1,1,1) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (101,1,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (201,2,1,1) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (201,2,1,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (201,2,1,1) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (201,2,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (301,3,1,1) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (301,3,1,1) PREHOOK: type: QUERY PREHOOK: Input: 
_dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (301,3,1,1) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (301,3,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (401,4,1,11) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (401,4,1,11) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (401,4,1,11) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (401,4,1,11) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (501,5,1,null) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (501,5,1,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (501,5,1,null) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (501,5,1,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x EXPRESSION [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (601,6,null,1) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x EXPRESSION [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (601,6,null,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (601,6,null,1) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (601,6,null,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y EXPRESSION [] -PREHOOK: query: insert into t values (701,6,null,null) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y EXPRESSION [] +PREHOOK: query: insert into t_n21 values (701,6,null,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (701,6,null,null) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (701,6,null,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x EXPRESSION [] -POSTHOOK: Lineage: t.y EXPRESSION [] -PREHOOK: query: 
insert into t values (102,1,2,2) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x EXPRESSION [] +POSTHOOK: Lineage: t_n21.y EXPRESSION [] +PREHOOK: query: insert into t_n21 values (102,1,2,2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (102,1,2,2) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (102,1,2,2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (202,2,1,2) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (202,2,1,2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (202,2,1,2) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (202,2,1,2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (302,3,2,1) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (302,3,2,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (302,3,2,1) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (302,3,2,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (402,4,2,12) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (402,4,2,12) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (402,4,2,12) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (402,4,2,12) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (502,5,2,null) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (502,5,2,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (502,5,2,null) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values 
(502,5,2,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x EXPRESSION [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (602,6,null,2) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x EXPRESSION [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (602,6,null,2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (602,6,null,2) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (602,6,null,2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y EXPRESSION [] -PREHOOK: query: insert into t values (702,6,null,null) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y EXPRESSION [] +PREHOOK: query: insert into t_n21 values (702,6,null,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (702,6,null,null) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (702,6,null,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x EXPRESSION [] -POSTHOOK: Lineage: t.y EXPRESSION [] -PREHOOK: query: insert into t values (103,1,3,3) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x EXPRESSION [] +POSTHOOK: Lineage: t_n21.y EXPRESSION [] +PREHOOK: query: insert into t_n21 values (103,1,3,3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (103,1,3,3) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (103,1,3,3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (203,2,1,3) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (203,2,1,3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (203,2,1,3) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (203,2,1,3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (303,3,3,1) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] 
+POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (303,3,3,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (303,3,3,1) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (303,3,3,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (403,4,3,13) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (403,4,3,13) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (403,4,3,13) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (403,4,3,13) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (503,5,3,null) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (503,5,3,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (503,5,3,null) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (503,5,3,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x EXPRESSION [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (603,6,null,3) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x EXPRESSION [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (603,6,null,3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (603,6,null,3) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (603,6,null,3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y EXPRESSION [] -PREHOOK: query: insert into t values (703,6,null,null) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y EXPRESSION [] +PREHOOK: query: insert into t_n21 values (703,6,null,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (703,6,null,null) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (703,6,null,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT 
[] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x EXPRESSION [] -POSTHOOK: Lineage: t.y EXPRESSION [] -PREHOOK: query: insert into t values (104,1,4,4) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x EXPRESSION [] +POSTHOOK: Lineage: t_n21.y EXPRESSION [] +PREHOOK: query: insert into t_n21 values (104,1,4,4) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (104,1,4,4) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (104,1,4,4) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (204,2,1,4) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (204,2,1,4) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (204,2,1,4) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (204,2,1,4) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (304,3,4,1) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (304,3,4,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (304,3,4,1) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (304,3,4,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (404,4,4,14) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (404,4,4,14) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (404,4,4,14) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (404,4,4,14) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (504,5,4,null) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (504,5,4,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t 
-POSTHOOK: query: insert into t values (504,5,4,null) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (504,5,4,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x EXPRESSION [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (604,6,null,4) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x EXPRESSION [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] +PREHOOK: query: insert into t_n21 values (604,6,null,4) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (604,6,null,4) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (604,6,null,4) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y EXPRESSION [] -PREHOOK: query: insert into t values (704,6,null,null) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y EXPRESSION [] +PREHOOK: query: insert into t_n21 values (704,6,null,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (704,6,null,null) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (704,6,null,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x EXPRESSION [] -POSTHOOK: Lineage: t.y EXPRESSION [] -PREHOOK: query: insert into t values (800,7,1,1) +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x EXPRESSION [] +POSTHOOK: Lineage: t_n21.y EXPRESSION [] +PREHOOK: query: insert into t_n21 values (800,7,1,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (800,7,1,1) +PREHOOK: Output: default@t_n21 +POSTHOOK: query: insert into t_n21 values (800,7,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] +POSTHOOK: Output: default@t_n21 +POSTHOOK: Lineage: t_n21.id SCRIPT [] +POSTHOOK: Lineage: t_n21.px SCRIPT [] +POSTHOOK: Lineage: t_n21.x SCRIPT [] +POSTHOOK: Lineage: t_n21.y SCRIPT [] PREHOOK: query: explain select px,var_pop(x),var_pop(y),corr(y,x),covar_samp(y,x),covar_pop(y,x),regr_count(y,x),regr_slope(y,x), regr_intercept(y,x), regr_r2(y,x), regr_sxx(y,x), regr_syy(y,x), regr_sxy(y,x), regr_avgx(y,x), regr_avgy(y,x), regr_count(y,x) - from t group by px order by px + from t_n21 group by px order by px PREHOOK: type: QUERY POSTHOOK: query: explain select px,var_pop(x),var_pop(y),corr(y,x),covar_samp(y,x),covar_pop(y,x),regr_count(y,x),regr_slope(y,x), regr_intercept(y,x), regr_r2(y,x), regr_sxx(y,x), regr_syy(y,x), regr_sxy(y,x), regr_avgx(y,x), regr_avgy(y,x), regr_count(y,x) - from t group by px order by px + 
from t_n21 group by px order by px POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -376,7 +376,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t + alias: t_n21 Statistics: Num rows: 29 Data size: 281 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: px (type: int), x (type: decimal(10,0)), y (type: decimal(10,0)), UDFToDouble(x) (type: double), (UDFToDouble(x) * UDFToDouble(x)) (type: double), UDFToDouble(y) (type: double), (UDFToDouble(y) * UDFToDouble(y)) (type: double) @@ -441,7 +441,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select px, +PREHOOK: query: select px, round( var_pop(x),5), round( var_pop(y),5), round( corr(y,x),5), @@ -457,11 +457,11 @@ PREHOOK: query: select px, round( regr_avgx(y,x),5), round( regr_avgy(y,x),5), round( regr_count(y,x),5) - from t group by px order by px + from t_n21 group by px order by px PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n21 #### A masked pattern was here #### -POSTHOOK: query: select px, +POSTHOOK: query: select px, round( var_pop(x),5), round( var_pop(y),5), round( corr(y,x),5), @@ -477,9 +477,9 @@ POSTHOOK: query: select px, round( regr_avgx(y,x),5), round( regr_avgy(y,x),5), round( regr_count(y,x),5) - from t group by px order by px + from t_n21 group by px order by px POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n21 #### A masked pattern was here #### 1 1.25 1.25 1.0 1.66667 1.25 4 1.0 0.0 1.0 5.0 5.0 5.0 2.50000 2.50000 4 2 1.25 0.0 NULL 0.0 0.0 4 0.0 1.0 1.0 5.0 0.0 0.0 2.50000 1.00000 4 @@ -488,13 +488,13 @@ POSTHOOK: Input: default@t 5 NULL 1.25 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL 0 6 1.25 NULL NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL 0 7 0.0 0.0 NULL NULL 0.0 1 NULL NULL NULL 0.0 0.0 0.0 1.00000 1.00000 1 -PREHOOK: query: select id,regr_count(y,x) over (partition by px) from t order by id +PREHOOK: query: select id,regr_count(y,x) over (partition by px) from t_n21 order by id PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n21 #### A masked pattern was here #### -POSTHOOK: query: select id,regr_count(y,x) over (partition by px) from t order by id +POSTHOOK: query: select id,regr_count(y,x) over (partition by px) from t_n21 order by id POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n21 #### A masked pattern was here #### 101 4 102 4 diff --git a/ql/src/test/results/clientpositive/udaf_binarysetfunctions_no_cbo.q.out b/ql/src/test/results/clientpositive/udaf_binarysetfunctions_no_cbo.q.out index f59a1ffc3b..2fb18c46f5 100644 --- a/ql/src/test/results/clientpositive/udaf_binarysetfunctions_no_cbo.q.out +++ b/ql/src/test/results/clientpositive/udaf_binarysetfunctions_no_cbo.q.out @@ -1,370 +1,370 @@ -PREHOOK: query: drop table t +PREHOOK: query: drop table t_n6 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table t +POSTHOOK: query: drop table t_n6 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table t (id int,px int,y decimal,x decimal) +PREHOOK: query: create table t_n6 (id int,px int,y decimal,x decimal) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (id int,px int,y decimal,x decimal) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: create table t_n6 (id int,px int,y decimal,x decimal) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (101,1,1,1) 
+POSTHOOK: Output: default@t_n6 +PREHOOK: query: insert into t_n6 values (101,1,1,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (101,1,1,1) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (101,1,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (201,2,1,1) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (201,2,1,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (201,2,1,1) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (201,2,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (301,3,1,1) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (301,3,1,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (301,3,1,1) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (301,3,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (401,4,1,11) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (401,4,1,11) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (401,4,1,11) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (401,4,1,11) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (501,5,1,null) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (501,5,1,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (501,5,1,null) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (501,5,1,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: 
t.y SCRIPT [] -PREHOOK: query: insert into t values (601,6,null,1) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (601,6,null,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (601,6,null,1) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (601,6,null,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (701,6,null,null) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (701,6,null,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (701,6,null,null) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (701,6,null,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (102,1,2,2) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (102,1,2,2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (102,1,2,2) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (102,1,2,2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (202,2,1,2) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (202,2,1,2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (202,2,1,2) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (202,2,1,2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (302,3,2,1) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (302,3,2,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (302,3,2,1) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values 
(302,3,2,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (402,4,2,12) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (402,4,2,12) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (402,4,2,12) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (402,4,2,12) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (502,5,2,null) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (502,5,2,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (502,5,2,null) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (502,5,2,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (602,6,null,2) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (602,6,null,2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (602,6,null,2) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (602,6,null,2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (702,6,null,null) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (702,6,null,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (702,6,null,null) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (702,6,null,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (103,1,3,3) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert 
into t_n6 values (103,1,3,3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (103,1,3,3) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (103,1,3,3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (203,2,1,3) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (203,2,1,3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (203,2,1,3) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (203,2,1,3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (303,3,3,1) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (303,3,3,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (303,3,3,1) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (303,3,3,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (403,4,3,13) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (403,4,3,13) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (403,4,3,13) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (403,4,3,13) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (503,5,3,null) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (503,5,3,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (503,5,3,null) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (503,5,3,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values 
(603,6,null,3) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (603,6,null,3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (603,6,null,3) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (603,6,null,3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (703,6,null,null) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (703,6,null,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (703,6,null,null) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (703,6,null,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (104,1,4,4) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (104,1,4,4) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (104,1,4,4) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (104,1,4,4) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (204,2,1,4) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (204,2,1,4) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (204,2,1,4) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (204,2,1,4) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (304,3,4,1) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (304,3,4,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (304,3,4,1) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (304,3,4,1) POSTHOOK: type: QUERY POSTHOOK: Input: 
_dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (404,4,4,14) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (404,4,4,14) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (404,4,4,14) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (404,4,4,14) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (504,5,4,null) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (504,5,4,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (504,5,4,null) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (504,5,4,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (604,6,null,4) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (604,6,null,4) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (604,6,null,4) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (604,6,null,4) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (704,6,null,null) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (704,6,null,null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (704,6,null,null) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (704,6,null,null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -PREHOOK: query: insert into t values (800,7,1,1) +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] +PREHOOK: query: insert into t_n6 values (800,7,1,1) PREHOOK: type: QUERY 
PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (800,7,1,1) +PREHOOK: Output: default@t_n6 +POSTHOOK: query: insert into t_n6 values (800,7,1,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.id SCRIPT [] -POSTHOOK: Lineage: t.px SCRIPT [] -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] +POSTHOOK: Output: default@t_n6 +POSTHOOK: Lineage: t_n6.id SCRIPT [] +POSTHOOK: Lineage: t_n6.px SCRIPT [] +POSTHOOK: Lineage: t_n6.x SCRIPT [] +POSTHOOK: Lineage: t_n6.y SCRIPT [] PREHOOK: query: explain select px,var_pop(x),var_pop(y),corr(y,x),covar_samp(y,x),covar_pop(y,x),regr_count(y,x),regr_slope(y,x), regr_intercept(y,x), regr_r2(y,x), regr_sxx(y,x), regr_syy(y,x), regr_sxy(y,x), regr_avgx(y,x), regr_avgy(y,x), regr_count(y,x) - from t group by px order by px + from t_n6 group by px order by px PREHOOK: type: QUERY POSTHOOK: query: explain select px,var_pop(x),var_pop(y),corr(y,x),covar_samp(y,x),covar_pop(y,x),regr_count(y,x),regr_slope(y,x), regr_intercept(y,x), regr_r2(y,x), regr_sxx(y,x), regr_syy(y,x), regr_sxy(y,x), regr_avgx(y,x), regr_avgy(y,x), regr_count(y,x) - from t group by px order by px + from t_n6 group by px order by px POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -376,7 +376,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t + alias: t_n6 Statistics: Num rows: 29 Data size: 281 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: px (type: int), y (type: decimal(10,0)), x (type: decimal(10,0)) @@ -453,9 +453,9 @@ PREHOOK: query: select px, round( regr_avgx(y,x),5), round( regr_avgy(y,x),5), round( regr_count(y,x),5) - from t group by px order by px + from t_n6 group by px order by px PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n6 #### A masked pattern was here #### POSTHOOK: query: select px, round( var_pop(x),5), @@ -473,9 +473,9 @@ POSTHOOK: query: select px, round( regr_avgx(y,x),5), round( regr_avgy(y,x),5), round( regr_count(y,x),5) - from t group by px order by px + from t_n6 group by px order by px POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n6 #### A masked pattern was here #### 1 1.25 1.25 1.0 1.66667 1.25 4 1.0 0.0 1.0 5.0 5.0 5.0 2.50000 2.50000 4 2 1.25 0.0 NULL 0.0 0.0 4 0.0 1.0 1.0 5.0 0.0 0.0 2.50000 1.00000 4 @@ -484,13 +484,13 @@ POSTHOOK: Input: default@t 5 NULL 1.25 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL 0 6 1.25 NULL NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL 0 7 0.0 0.0 NULL NULL 0.0 1 NULL NULL NULL 0.0 0.0 0.0 1.00000 1.00000 1 -PREHOOK: query: select id,regr_count(y,x) over (partition by px) from t order by id +PREHOOK: query: select id,regr_count(y,x) over (partition by px) from t_n6 order by id PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n6 #### A masked pattern was here #### -POSTHOOK: query: select id,regr_count(y,x) over (partition by px) from t order by id +POSTHOOK: query: select id,regr_count(y,x) over (partition by px) from t_n6 order by id POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n6 #### A masked pattern was here #### 101 4 102 4 diff --git a/ql/src/test/results/clientpositive/udaf_context_ngrams.q.out b/ql/src/test/results/clientpositive/udaf_context_ngrams.q.out index 91a37930b6..843039e95b 100644 --- a/ql/src/test/results/clientpositive/udaf_context_ngrams.q.out +++ 
b/ql/src/test/results/clientpositive/udaf_context_ngrams.q.out @@ -1,69 +1,69 @@ -PREHOOK: query: CREATE TABLE kafka (contents STRING) +PREHOOK: query: CREATE TABLE kafka_n0 (contents STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@kafka -POSTHOOK: query: CREATE TABLE kafka (contents STRING) +PREHOOK: Output: default@kafka_n0 +POSTHOOK: query: CREATE TABLE kafka_n0 (contents STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@kafka -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka +POSTHOOK: Output: default@kafka_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@kafka -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka +PREHOOK: Output: default@kafka_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@kafka -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 1000).estfrequency FROM kafka +POSTHOOK: Output: default@kafka_n0 +PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 1000).estfrequency FROM kafka_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@kafka +PREHOOK: Input: default@kafka_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 1000).estfrequency FROM kafka +POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 1000).estfrequency FROM kafka_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka +POSTHOOK: Input: default@kafka_n0 #### A masked pattern was here #### [267.0,171.0,164.0,119.0,108.0,106.0,106.0,82.0,79.0,67.0,67.0,46.0,45.0,42.0,42.0,40.0,39.0,37.0,37.0,36.0,34.0,32.0,32.0,30.0,30.0,29.0,28.0,28.0,28.0,28.0,26.0,25.0,24.0,23.0,23.0,22.0,22.0,21.0,20.0,19.0,18.0,18.0,18.0,17.0,17.0,17.0,16.0,16.0,16.0,16.0,15.0,15.0,14.0,14.0,14.0,13.0,13.0,13.0,13.0,13.0,13.0,13.0,12.0,12.0,12.0,12.0,12.0,12.0,12.0,11.0,11.0,11.0,11.0,11.0,11.0,11.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,9.0,8.0] -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("he",null), 100, 1000) FROM kafka +PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("he",null), 100, 1000) FROM kafka_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@kafka +PREHOOK: Input: default@kafka_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("he",null), 100, 1000) FROM kafka +POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("he",null), 100, 1000) FROM kafka_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka +POSTHOOK: Input: default@kafka_n0 #### A masked pattern was here #### 
[{"ngram":["was"],"estfrequency":17.0},{"ngram":["had"],"estfrequency":16.0},{"ngram":["thought"],"estfrequency":13.0},{"ngram":["could"],"estfrequency":9.0},{"ngram":["would"],"estfrequency":7.0},{"ngram":["lay"],"estfrequency":5.0},{"ngram":["did"],"estfrequency":4.0},{"ngram":["felt"],"estfrequency":4.0},{"ngram":["looked"],"estfrequency":4.0},{"ngram":["s"],"estfrequency":4.0},{"ngram":["wanted"],"estfrequency":4.0},{"ngram":["finally"],"estfrequency":3.0},{"ngram":["lifted"],"estfrequency":3.0},{"ngram":["must"],"estfrequency":3.0},{"ngram":["needed"],"estfrequency":3.0},{"ngram":["slid"],"estfrequency":3.0},{"ngram":["told"],"estfrequency":3.0},{"ngram":["tried"],"estfrequency":3.0},{"ngram":["also"],"estfrequency":2.0},{"ngram":["always"],"estfrequency":2.0},{"ngram":["began"],"estfrequency":2.0},{"ngram":["didn't"],"estfrequency":2.0},{"ngram":["do"],"estfrequency":2.0},{"ngram":["drew"],"estfrequency":2.0},{"ngram":["found"],"estfrequency":2.0},{"ngram":["is"],"estfrequency":2.0},{"ngram":["let"],"estfrequency":2.0},{"ngram":["made"],"estfrequency":2.0},{"ngram":["really"],"estfrequency":2.0},{"ngram":["reported"],"estfrequency":2.0},{"ngram":["threw"],"estfrequency":2.0},{"ngram":["touched"],"estfrequency":2.0},{"ngram":["wouldn't"],"estfrequency":2.0},{"ngram":["allowed"],"estfrequency":1.0},{"ngram":["almost"],"estfrequency":1.0},{"ngram":["became"],"estfrequency":1.0},{"ngram":["called"],"estfrequency":1.0},{"ngram":["caught"],"estfrequency":1.0},{"ngram":["chose"],"estfrequency":1.0},{"ngram":["confined"],"estfrequency":1.0},{"ngram":["cut"],"estfrequency":1.0},{"ngram":["denied"],"estfrequency":1.0},{"ngram":["directed"],"estfrequency":1.0},{"ngram":["discovered"],"estfrequency":1.0},{"ngram":["failed"],"estfrequency":1.0},{"ngram":["have"],"estfrequency":1.0},{"ngram":["heard"],"estfrequency":1.0},{"ngram":["hit"],"estfrequency":1.0},{"ngram":["hoped"],"estfrequency":1.0},{"ngram":["intended"],"estfrequency":1.0},{"ngram":["maintained"],"estfrequency":1.0},{"ngram":["managed"],"estfrequency":1.0},{"ngram":["never"],"estfrequency":1.0},{"ngram":["preferred"],"estfrequency":1.0},{"ngram":["remembered"],"estfrequency":1.0},{"ngram":["retracted"],"estfrequency":1.0},{"ngram":["said"],"estfrequency":1.0},{"ngram":["sits"],"estfrequency":1.0},{"ngram":["slowly"],"estfrequency":1.0},{"ngram":["stood"],"estfrequency":1.0},{"ngram":["swung"],"estfrequency":1.0},{"ngram":["turned"],"estfrequency":1.0},{"ngram":["urged"],"estfrequency":1.0},{"ngram":["were"],"estfrequency":1.0},{"ngram":["will"],"estfrequency":1.0}] -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka +PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@kafka +PREHOOK: Input: default@kafka_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka +POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,"salesmen"), 100, 1000) FROM kafka_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka +POSTHOOK: Input: default@kafka_n0 #### A masked pattern was here #### [{"ngram":["travelling"],"estfrequency":3.0}] -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("what","i",null), 100, 1000) FROM kafka +PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("what","i",null), 100, 1000) FROM kafka_n0 
PREHOOK: type: QUERY -PREHOOK: Input: default@kafka +PREHOOK: Input: default@kafka_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("what","i",null), 100, 1000) FROM kafka +POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array("what","i",null), 100, 1000) FROM kafka_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka +POSTHOOK: Input: default@kafka_n0 #### A masked pattern was here #### [{"ngram":["think"],"estfrequency":3.0},{"ngram":["feel"],"estfrequency":2.0}] -PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,null), 100, 1000).estfrequency FROM kafka +PREHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,null), 100, 1000).estfrequency FROM kafka_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@kafka +PREHOOK: Input: default@kafka_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,null), 100, 1000).estfrequency FROM kafka +POSTHOOK: query: SELECT context_ngrams(sentences(lower(contents)), array(null,null), 100, 1000).estfrequency FROM kafka_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@kafka +POSTHOOK: Input: default@kafka_n0 #### A masked pattern was here #### [23.0,20.0,18.0,17.0,17.0,16.0,16.0,16.0,16.0,15.0,14.0,13.0,12.0,12.0,12.0,11.0,11.0,11.0,10.0,10.0,10.0,10.0,10.0,10.0,9.0,9.0,9.0,8.0,8.0,8.0,8.0,7.0,7.0,7.0,7.0,7.0,7.0,7.0,7.0,6.0,6.0,6.0,6.0,6.0,6.0,6.0,6.0,6.0,6.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0,4.0] -PREHOOK: query: DROP TABLE kafka +PREHOOK: query: DROP TABLE kafka_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@kafka -PREHOOK: Output: default@kafka -POSTHOOK: query: DROP TABLE kafka +PREHOOK: Input: default@kafka_n0 +PREHOOK: Output: default@kafka_n0 +POSTHOOK: query: DROP TABLE kafka_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@kafka -POSTHOOK: Output: default@kafka +POSTHOOK: Input: default@kafka_n0 +POSTHOOK: Output: default@kafka_n0 diff --git a/ql/src/test/results/clientpositive/udaf_corr.q.out b/ql/src/test/results/clientpositive/udaf_corr.q.out index cd00e3f77d..44809c5d23 100644 --- a/ql/src/test/results/clientpositive/udaf_corr.q.out +++ b/ql/src/test/results/clientpositive/udaf_corr.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: DROP TABLE covar_tab +PREHOOK: query: DROP TABLE covar_tab_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE covar_tab +POSTHOOK: query: DROP TABLE covar_tab_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE covar_tab (a INT, b INT, c INT) +PREHOOK: query: CREATE TABLE covar_tab_n0 (a INT, b INT, c INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@covar_tab -POSTHOOK: query: CREATE TABLE covar_tab (a INT, b INT, c INT) +PREHOOK: Output: default@covar_tab_n0 +POSTHOOK: query: CREATE TABLE covar_tab_n0 (a INT, b INT, c INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@covar_tab +POSTHOOK: Output: default@covar_tab_n0 PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE -INTO TABLE covar_tab +INTO TABLE covar_tab_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: 
default@covar_tab +PREHOOK: Output: default@covar_tab_n0 POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE -INTO TABLE covar_tab +INTO TABLE covar_tab_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@covar_tab +POSTHOOK: Output: default@covar_tab_n0 PREHOOK: query: DESCRIBE FUNCTION corr PREHOOK: type: DESCFUNCTION POSTHOOK: query: DESCRIBE FUNCTION corr @@ -48,40 +48,40 @@ COVAR_POP is the population covariance, and STDDEV_POP is the population standard deviation. Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCorrelation Function type:BUILTIN -PREHOOK: query: SELECT corr(b, c) FROM covar_tab WHERE a < 1 +PREHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 1 PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT corr(b, c) FROM covar_tab WHERE a < 1 +POSTHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### NULL -PREHOOK: query: SELECT corr(b, c) FROM covar_tab WHERE a < 3 +PREHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 3 PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT corr(b, c) FROM covar_tab WHERE a < 3 +POSTHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a < 3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### NULL -PREHOOK: query: SELECT corr(b, c) FROM covar_tab WHERE a = 3 +PREHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a = 3 PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT corr(b, c) FROM covar_tab WHERE a = 3 +POSTHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 WHERE a = 3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### NULL -PREHOOK: query: SELECT a, corr(b, c) FROM covar_tab GROUP BY a ORDER BY a +PREHOOK: query: SELECT a, corr(b, c) FROM covar_tab_n0 GROUP BY a ORDER BY a PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, corr(b, c) FROM covar_tab GROUP BY a ORDER BY a +POSTHOOK: query: SELECT a, corr(b, c) FROM covar_tab_n0 GROUP BY a ORDER BY a POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### 1 NULL 2 NULL @@ -89,20 +89,20 @@ POSTHOOK: Input: default@covar_tab 4 NULL 5 NULL 6 NULL -PREHOOK: query: SELECT corr(b, c) FROM covar_tab +PREHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT corr(b, c) FROM covar_tab +POSTHOOK: query: SELECT corr(b, c) FROM covar_tab_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n0 #### A masked pattern was here #### 0.6633880657639326 -PREHOOK: query: DROP TABLE covar_tab +PREHOOK: query: DROP TABLE covar_tab_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@covar_tab -PREHOOK: Output: default@covar_tab -POSTHOOK: query: 
DROP TABLE covar_tab +PREHOOK: Input: default@covar_tab_n0 +PREHOOK: Output: default@covar_tab_n0 +POSTHOOK: query: DROP TABLE covar_tab_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@covar_tab -POSTHOOK: Output: default@covar_tab +POSTHOOK: Input: default@covar_tab_n0 +POSTHOOK: Output: default@covar_tab_n0 diff --git a/ql/src/test/results/clientpositive/udaf_covar_samp.q.out b/ql/src/test/results/clientpositive/udaf_covar_samp.q.out index 1f56c0c10f..ce9d0574bb 100644 --- a/ql/src/test/results/clientpositive/udaf_covar_samp.q.out +++ b/ql/src/test/results/clientpositive/udaf_covar_samp.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: DROP TABLE covar_tab +PREHOOK: query: DROP TABLE covar_tab_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE covar_tab +POSTHOOK: query: DROP TABLE covar_tab_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE covar_tab (a INT, b INT, c INT) +PREHOOK: query: CREATE TABLE covar_tab_n1 (a INT, b INT, c INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@covar_tab -POSTHOOK: query: CREATE TABLE covar_tab (a INT, b INT, c INT) +PREHOOK: Output: default@covar_tab_n1 +POSTHOOK: query: CREATE TABLE covar_tab_n1 (a INT, b INT, c INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@covar_tab +POSTHOOK: Output: default@covar_tab_n1 PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE -INTO TABLE covar_tab +INTO TABLE covar_tab_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@covar_tab +PREHOOK: Output: default@covar_tab_n1 POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE -INTO TABLE covar_tab +INTO TABLE covar_tab_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@covar_tab +POSTHOOK: Output: default@covar_tab_n1 PREHOOK: query: DESCRIBE FUNCTION covar_samp PREHOOK: type: DESCFUNCTION POSTHOOK: query: DESCRIBE FUNCTION covar_samp @@ -43,40 +43,40 @@ Otherwise, it computes the following: where neither x nor y is null. 
Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCovarianceSample Function type:BUILTIN -PREHOOK: query: SELECT covar_samp(b, c) FROM covar_tab WHERE a < 1 +PREHOOK: query: SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a < 1 PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT covar_samp(b, c) FROM covar_tab WHERE a < 1 +POSTHOOK: query: SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a < 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### NULL -PREHOOK: query: SELECT covar_samp(b, c) FROM covar_tab WHERE a < 3 +PREHOOK: query: SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a < 3 PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT covar_samp(b, c) FROM covar_tab WHERE a < 3 +POSTHOOK: query: SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a < 3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### NULL -PREHOOK: query: SELECT covar_samp(b, c) FROM covar_tab WHERE a = 3 +PREHOOK: query: SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a = 3 PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT covar_samp(b, c) FROM covar_tab WHERE a = 3 +POSTHOOK: query: SELECT covar_samp(b, c) FROM covar_tab_n1 WHERE a = 3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### NULL -PREHOOK: query: SELECT a, covar_samp(b, c) FROM covar_tab GROUP BY a ORDER BY a +PREHOOK: query: SELECT a, covar_samp(b, c) FROM covar_tab_n1 GROUP BY a ORDER BY a PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT a, covar_samp(b, c) FROM covar_tab GROUP BY a ORDER BY a +POSTHOOK: query: SELECT a, covar_samp(b, c) FROM covar_tab_n1 GROUP BY a ORDER BY a POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### 1 NULL 2 NULL @@ -84,20 +84,20 @@ POSTHOOK: Input: default@covar_tab 4 NULL 5 NULL 6 NULL -PREHOOK: query: SELECT ROUND(covar_samp(b, c), 5) FROM covar_tab +PREHOOK: query: SELECT ROUND(covar_samp(b, c), 5) FROM covar_tab_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@covar_tab +PREHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT ROUND(covar_samp(b, c), 5) FROM covar_tab +POSTHOOK: query: SELECT ROUND(covar_samp(b, c), 5) FROM covar_tab_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@covar_tab +POSTHOOK: Input: default@covar_tab_n1 #### A masked pattern was here #### 4.83333 -PREHOOK: query: DROP TABLE covar_tab +PREHOOK: query: DROP TABLE covar_tab_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@covar_tab -PREHOOK: Output: default@covar_tab -POSTHOOK: query: DROP TABLE covar_tab +PREHOOK: Input: default@covar_tab_n1 +PREHOOK: Output: default@covar_tab_n1 +POSTHOOK: query: DROP TABLE covar_tab_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@covar_tab -POSTHOOK: Output: default@covar_tab +POSTHOOK: Input: default@covar_tab_n1 +POSTHOOK: Output: default@covar_tab_n1 diff --git 
a/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out b/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out index 54cb131d15..bc18686086 100644 --- a/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out +++ b/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out @@ -1,504 +1,504 @@ -PREHOOK: query: CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_n0 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket -POSTHOOK: query: CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: Output: default@bucket_n0 +POSTHOOK: query: CREATE TABLE bucket_n0 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket +POSTHOOK: Output: default@bucket_n0 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket +PREHOOK: Output: default@bucket_n0 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket +POSTHOOK: Output: default@bucket_n0 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket +PREHOOK: Output: default@bucket_n0 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket +POSTHOOK: Output: default@bucket_n0 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket +PREHOOK: Output: default@bucket_n0 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket +POSTHOOK: Output: default@bucket_n0 +PREHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_n0 PREHOOK: type: LOAD #### A masked pattern was here #### 
-PREHOOK: Output: default@bucket -POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket +PREHOOK: Output: default@bucket_n0 +POSTHOOK: query: load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@bucket -PREHOOK: query: create table t1 (result double) +POSTHOOK: Output: default@bucket_n0 +PREHOOK: query: create table t1_n20 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1 (result double) +PREHOOK: Output: default@t1_n20 +POSTHOOK: query: create table t1_n20 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: create table t2 (result double) +POSTHOOK: Output: default@t1_n20 +PREHOOK: query: create table t2_n11 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2 (result double) +PREHOOK: Output: default@t2_n11 +POSTHOOK: query: create table t2_n11 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t2 -PREHOOK: query: create table t3 (result double) +POSTHOOK: Output: default@t2_n11 +PREHOOK: query: create table t3_n3 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t3 -POSTHOOK: query: create table t3 (result double) +PREHOOK: Output: default@t3_n3 +POSTHOOK: query: create table t3_n3 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t3 -PREHOOK: query: create table t4 (result double) +POSTHOOK: Output: default@t3_n3 +PREHOOK: query: create table t4_n3 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t4 -POSTHOOK: query: create table t4 (result double) +PREHOOK: Output: default@t4_n3 +POSTHOOK: query: create table t4_n3 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t4 -PREHOOK: query: create table t5 (result double) +POSTHOOK: Output: default@t4_n3 +PREHOOK: query: create table t5_n1 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t5 -POSTHOOK: query: create table t5 (result double) +PREHOOK: Output: default@t5_n1 +POSTHOOK: query: create table t5_n1 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t5 -PREHOOK: query: create table t6 (result double) +POSTHOOK: Output: default@t5_n1 +PREHOOK: query: create table t6_n1 (result double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t6 -POSTHOOK: query: create table t6 (result double) +PREHOOK: Output: default@t6_n1 +POSTHOOK: query: create table t6_n1 (result double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t6 -PREHOOK: query: create table t7 (result array) +POSTHOOK: Output: default@t6_n1 +PREHOOK: query: create table t7_n2 (result array) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t7 -POSTHOOK: query: create table t7 (result array) +PREHOOK: Output: default@t7_n2 +POSTHOOK: query: create table t7_n2 (result array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: 
default@t7 -PREHOOK: query: create table t8 (result array) +POSTHOOK: Output: default@t7_n2 +PREHOOK: query: create table t8_n1 (result array) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t8 -POSTHOOK: query: create table t8 (result array) +PREHOOK: Output: default@t8_n1 +POSTHOOK: query: create table t8_n1 (result array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t8 -PREHOOK: query: create table t9 (result array) +POSTHOOK: Output: default@t8_n1 +PREHOOK: query: create table t9_n0 (result array) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t9 -POSTHOOK: query: create table t9 (result array) +PREHOOK: Output: default@t9_n0 +POSTHOOK: query: create table t9_n0 (result array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t9 -PREHOOK: query: create table t10 (result array) +POSTHOOK: Output: default@t9_n0 +PREHOOK: query: create table t10_n0 (result array) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t10 -POSTHOOK: query: create table t10 (result array) +PREHOOK: Output: default@t10_n0 +POSTHOOK: query: create table t10_n0 (result array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t10 -PREHOOK: query: create table t11 (result array) +POSTHOOK: Output: default@t10_n0 +PREHOOK: query: create table t11_n0 (result array) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t11 -POSTHOOK: query: create table t11 (result array) +PREHOOK: Output: default@t11_n0 +POSTHOOK: query: create table t11_n0 (result array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t11 -PREHOOK: query: create table t12 (result array) +POSTHOOK: Output: default@t11_n0 +PREHOOK: query: create table t12_n0 (result array) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t12 -POSTHOOK: query: create table t12 (result array) +PREHOOK: Output: default@t12_n0 +POSTHOOK: query: create table t12_n0 (result array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t12 -PREHOOK: query: FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +POSTHOOK: Output: default@t12_n0 +PREHOOK: query: FROM bucket_n0 +insert overwrite table t1_n20 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n11 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) +insert overwrite table t4_n3 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n1 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n1 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 
100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n1 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t10_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) PREHOOK: type: QUERY -PREHOOK: Input: default@bucket -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t10 -PREHOOK: Output: default@t11 -PREHOOK: Output: default@t12 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -PREHOOK: Output: default@t4 -PREHOOK: Output: default@t5 -PREHOOK: Output: default@t6 -PREHOOK: Output: default@t7 -PREHOOK: Output: default@t8 -PREHOOK: Output: default@t9 -POSTHOOK: query: FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +PREHOOK: Input: default@bucket_n0 +PREHOOK: Output: default@t10_n0 +PREHOOK: Output: default@t11_n0 +PREHOOK: Output: default@t12_n0 +PREHOOK: Output: default@t1_n20 +PREHOOK: Output: default@t2_n11 +PREHOOK: Output: default@t3_n3 +PREHOOK: Output: default@t4_n3 +PREHOOK: Output: default@t5_n1 +PREHOOK: Output: default@t6_n1 +PREHOOK: Output: default@t7_n2 +PREHOOK: Output: default@t8_n1 +PREHOOK: Output: default@t9_n0 +POSTHOOK: query: FROM bucket_n0 +insert overwrite table t1_n20 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n11 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) +insert overwrite table t4_n3 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n1 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n1 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n1 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) 
-insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t10_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t10 -POSTHOOK: Output: default@t11 -POSTHOOK: Output: default@t12 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t3 -POSTHOOK: Output: default@t4 -POSTHOOK: Output: default@t5 -POSTHOOK: Output: default@t6 -POSTHOOK: Output: default@t7 -POSTHOOK: Output: default@t8 -POSTHOOK: Output: default@t9 -POSTHOOK: Lineage: t1.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t10.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t11.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t12.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t2.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t3.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t4.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t5.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t6.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t7.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t8.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t9.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -PREHOOK: query: select * from t1 +POSTHOOK: Input: default@bucket_n0 +POSTHOOK: Output: default@t10_n0 +POSTHOOK: Output: default@t11_n0 +POSTHOOK: Output: default@t12_n0 +POSTHOOK: Output: default@t1_n20 +POSTHOOK: Output: default@t2_n11 +POSTHOOK: Output: default@t3_n3 +POSTHOOK: Output: default@t4_n3 +POSTHOOK: Output: default@t5_n1 +POSTHOOK: Output: default@t6_n1 +POSTHOOK: Output: default@t7_n2 +POSTHOOK: Output: default@t8_n1 +POSTHOOK: Output: default@t9_n0 +POSTHOOK: Lineage: t10_n0.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t11_n0.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t12_n0.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t1_n20.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t2_n11.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t3_n3.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: 
t4_n3.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t5_n1.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t6_n1.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t7_n2.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t8_n1.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t9_n0.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +PREHOOK: query: select * from t1_n20 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n20 #### A masked pattern was here #### -POSTHOOK: query: select * from t1 +POSTHOOK: query: select * from t1_n20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n20 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t2 +PREHOOK: query: select * from t2_n11 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2_n11 #### A masked pattern was here #### -POSTHOOK: query: select * from t2 +POSTHOOK: query: select * from t2_n11 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2_n11 #### A masked pattern was here #### 254.08333333333334 -PREHOOK: query: select * from t3 +PREHOOK: query: select * from t3_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t3_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t3 +POSTHOOK: query: select * from t3_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t3_n3 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t4 +PREHOOK: query: select * from t4_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@t4 +PREHOOK: Input: default@t4_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t4 +POSTHOOK: query: select * from t4_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4_n3 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t5 +PREHOOK: query: select * from t5_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@t5 +PREHOOK: Input: default@t5_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from t5 +POSTHOOK: query: select * from t5_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t5 +POSTHOOK: Input: default@t5_n1 #### A masked pattern was here #### 254.08333333333334 -PREHOOK: query: select * from t6 +PREHOOK: query: select * from t6_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@t6 +PREHOOK: Input: default@t6_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from t6 +POSTHOOK: query: select * from t6_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t6 +POSTHOOK: Input: default@t6_n1 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t7 +PREHOOK: query: select * from t7_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@t7 +PREHOOK: Input: default@t7_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from t7 +POSTHOOK: query: select * from t7_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t7 +POSTHOOK: Input: default@t7_n2 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t8 +PREHOOK: query: select * from t8_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@t8 +PREHOOK: Input: default@t8_n1 
#### A masked pattern was here #### -POSTHOOK: query: select * from t8 +POSTHOOK: query: select * from t8_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t8 +POSTHOOK: Input: default@t8_n1 #### A masked pattern was here #### [23.355555555555558,254.08333333333334,477.0625,488.38271604938274] -PREHOOK: query: select * from t9 +PREHOOK: query: select * from t9_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@t9 +PREHOOK: Input: default@t9_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from t9 +POSTHOOK: query: select * from t9_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t9 +POSTHOOK: Input: default@t9_n0 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t10 +PREHOOK: query: select * from t10_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@t10 +PREHOOK: Input: default@t10_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from t10 +POSTHOOK: query: select * from t10_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t10 +POSTHOOK: Input: default@t10_n0 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t11 +PREHOOK: query: select * from t11_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@t11 +PREHOOK: Input: default@t11_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from t11 +POSTHOOK: query: select * from t11_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t11 +POSTHOOK: Input: default@t11_n0 #### A masked pattern was here #### [23.355555555555558,254.08333333333334,477.0625,488.38271604938274] -PREHOOK: query: select * from t12 +PREHOOK: query: select * from t12_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@t12 +PREHOOK: Input: default@t12_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from t12 +POSTHOOK: query: select * from t12_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t12 +POSTHOOK: Input: default@t12_n0 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +PREHOOK: query: FROM bucket_n0 +insert overwrite table t1_n20 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n11 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) +insert overwrite table t4_n3 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n1 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n1 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n1 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert 
overwrite table t9_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t10_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) PREHOOK: type: QUERY -PREHOOK: Input: default@bucket -PREHOOK: Output: default@t1 -PREHOOK: Output: default@t10 -PREHOOK: Output: default@t11 -PREHOOK: Output: default@t12 -PREHOOK: Output: default@t2 -PREHOOK: Output: default@t3 -PREHOOK: Output: default@t4 -PREHOOK: Output: default@t5 -PREHOOK: Output: default@t6 -PREHOOK: Output: default@t7 -PREHOOK: Output: default@t8 -PREHOOK: Output: default@t9 -POSTHOOK: query: FROM bucket -insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) -insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) -insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) +PREHOOK: Input: default@bucket_n0 +PREHOOK: Output: default@t10_n0 +PREHOOK: Output: default@t11_n0 +PREHOOK: Output: default@t12_n0 +PREHOOK: Output: default@t1_n20 +PREHOOK: Output: default@t2_n11 +PREHOOK: Output: default@t3_n3 +PREHOOK: Output: default@t4_n3 +PREHOOK: Output: default@t5_n1 +PREHOOK: Output: default@t6_n1 +PREHOOK: Output: default@t7_n2 +PREHOOK: Output: default@t8_n1 +PREHOOK: Output: default@t9_n0 +POSTHOOK: query: FROM bucket_n0 +insert overwrite table t1_n20 SELECT percentile_approx(cast(key AS double), 0.5) +insert overwrite table t2_n11 SELECT percentile_approx(cast(key AS double), 0.5, 100) +insert overwrite table t3_n3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) -insert overwrite table t4 SELECT percentile_approx(cast(key AS int), 0.5) -insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100) -insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000) +insert overwrite table t4_n3 SELECT percentile_approx(cast(key AS int), 0.5) +insert overwrite table t5_n1 SELECT percentile_approx(cast(key AS int), 0.5, 100) +insert overwrite table t6_n1 SELECT percentile_approx(cast(key AS int), 0.5, 1000) -insert overwrite table t7 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) -insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table t7_n2 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98)) +insert overwrite table t8_n1 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t9_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000) -insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) -insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) -insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) +insert overwrite table 
t10_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98)) +insert overwrite table t11_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100) +insert overwrite table t12_n0 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000) POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket -POSTHOOK: Output: default@t1 -POSTHOOK: Output: default@t10 -POSTHOOK: Output: default@t11 -POSTHOOK: Output: default@t12 -POSTHOOK: Output: default@t2 -POSTHOOK: Output: default@t3 -POSTHOOK: Output: default@t4 -POSTHOOK: Output: default@t5 -POSTHOOK: Output: default@t6 -POSTHOOK: Output: default@t7 -POSTHOOK: Output: default@t8 -POSTHOOK: Output: default@t9 -POSTHOOK: Lineage: t1.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t10.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t11.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t12.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t2.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t3.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t4.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t5.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t6.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t7.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t8.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -POSTHOOK: Lineage: t9.result EXPRESSION [(bucket)bucket.FieldSchema(name:key, type:double, comment:null), ] -PREHOOK: query: select * from t1 +POSTHOOK: Input: default@bucket_n0 +POSTHOOK: Output: default@t10_n0 +POSTHOOK: Output: default@t11_n0 +POSTHOOK: Output: default@t12_n0 +POSTHOOK: Output: default@t1_n20 +POSTHOOK: Output: default@t2_n11 +POSTHOOK: Output: default@t3_n3 +POSTHOOK: Output: default@t4_n3 +POSTHOOK: Output: default@t5_n1 +POSTHOOK: Output: default@t6_n1 +POSTHOOK: Output: default@t7_n2 +POSTHOOK: Output: default@t8_n1 +POSTHOOK: Output: default@t9_n0 +POSTHOOK: Lineage: t10_n0.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t11_n0.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t12_n0.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t1_n20.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t2_n11.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t3_n3.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t4_n3.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t5_n1.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t6_n1.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), 
] +POSTHOOK: Lineage: t7_n2.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t8_n1.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +POSTHOOK: Lineage: t9_n0.result EXPRESSION [(bucket_n0)bucket_n0.FieldSchema(name:key, type:double, comment:null), ] +PREHOOK: query: select * from t1_n20 PREHOOK: type: QUERY -PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1_n20 #### A masked pattern was here #### -POSTHOOK: query: select * from t1 +POSTHOOK: query: select * from t1_n20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1_n20 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t2 +PREHOOK: query: select * from t2_n11 PREHOOK: type: QUERY -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2_n11 #### A masked pattern was here #### -POSTHOOK: query: select * from t2 +POSTHOOK: query: select * from t2_n11 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2_n11 #### A masked pattern was here #### 254.08333333333334 -PREHOOK: query: select * from t3 +PREHOOK: query: select * from t3_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t3_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t3 +POSTHOOK: query: select * from t3_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t3_n3 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t4 +PREHOOK: query: select * from t4_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@t4 +PREHOOK: Input: default@t4_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from t4 +POSTHOOK: query: select * from t4_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t4 +POSTHOOK: Input: default@t4_n3 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t5 +PREHOOK: query: select * from t5_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@t5 +PREHOOK: Input: default@t5_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from t5 +POSTHOOK: query: select * from t5_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t5 +POSTHOOK: Input: default@t5_n1 #### A masked pattern was here #### 254.08333333333334 -PREHOOK: query: select * from t6 +PREHOOK: query: select * from t6_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@t6 +PREHOOK: Input: default@t6_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from t6 +POSTHOOK: query: select * from t6_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t6 +POSTHOOK: Input: default@t6_n1 #### A masked pattern was here #### 255.5 -PREHOOK: query: select * from t7 +PREHOOK: query: select * from t7_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@t7 +PREHOOK: Input: default@t7_n2 #### A masked pattern was here #### -POSTHOOK: query: select * from t7 +POSTHOOK: query: select * from t7_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t7 +POSTHOOK: Input: default@t7_n2 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t8 +PREHOOK: query: select * from t8_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@t8 +PREHOOK: Input: default@t8_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from t8 +POSTHOOK: query: select * from t8_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t8 +POSTHOOK: Input: default@t8_n1 #### A masked pattern was here #### [23.355555555555558,254.08333333333334,477.0625,488.38271604938274] -PREHOOK: query: select * from t9 
+PREHOOK: query: select * from t9_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@t9 +PREHOOK: Input: default@t9_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from t9 +POSTHOOK: query: select * from t9_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t9 +POSTHOOK: Input: default@t9_n0 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t10 +PREHOOK: query: select * from t10_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@t10 +PREHOOK: Input: default@t10_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from t10 +POSTHOOK: query: select * from t10_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t10 +POSTHOOK: Input: default@t10_n0 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: select * from t11 +PREHOOK: query: select * from t11_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@t11 +PREHOOK: Input: default@t11_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from t11 +POSTHOOK: query: select * from t11_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t11 +POSTHOOK: Input: default@t11_n0 #### A masked pattern was here #### [23.355555555555558,254.08333333333334,477.0625,488.38271604938274] -PREHOOK: query: select * from t12 +PREHOOK: query: select * from t12_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@t12 +PREHOOK: Input: default@t12_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from t12 +POSTHOOK: query: select * from t12_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t12 +POSTHOOK: Input: default@t12_n0 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] PREHOOK: query: explain -select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket +select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket_n0 PREHOOK: type: QUERY POSTHOOK: query: explain -select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket +select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -509,7 +509,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: bucket + alias: bucket_n0 Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: CASE WHEN ((key < 100.0D)) THEN (NaND) ELSE (key) END (type: double) @@ -544,20 +544,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket +PREHOOK: query: select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@bucket +PREHOOK: Input: default@bucket_n0 #### A masked pattern was here #### -POSTHOOK: query: select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket +POSTHOOK: query: select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) between 340.5 and 343.0 from bucket_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket +POSTHOOK: Input: default@bucket_n0 #### A masked pattern was here #### true PREHOOK: query: explain -select percentile_approx(key, 0.5) from bucket +select percentile_approx(key, 0.5) from bucket_n0 PREHOOK: type: QUERY POSTHOOK: 
query: explain -select percentile_approx(key, 0.5) from bucket +select percentile_approx(key, 0.5) from bucket_n0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -568,7 +568,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: bucket + alias: bucket_n0 Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: double) @@ -603,21 +603,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket +PREHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@bucket +PREHOOK: Input: default@bucket_n0 #### A masked pattern was here #### -POSTHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket +POSTHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket +POSTHOOK: Input: default@bucket_n0 #### A masked pattern was here #### true -PREHOOK: query: select percentile_approx(key, array(0.50, 0.70, 0.90, 0.95, 0.99)) from bucket where key > 10000 +PREHOOK: query: select percentile_approx(key, array(0.50, 0.70, 0.90, 0.95, 0.99)) from bucket_n0 where key > 10000 PREHOOK: type: QUERY -PREHOOK: Input: default@bucket +PREHOOK: Input: default@bucket_n0 #### A masked pattern was here #### -POSTHOOK: query: select percentile_approx(key, array(0.50, 0.70, 0.90, 0.95, 0.99)) from bucket where key > 10000 +POSTHOOK: query: select percentile_approx(key, array(0.50, 0.70, 0.90, 0.95, 0.99)) from bucket_n0 where key > 10000 POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket +POSTHOOK: Input: default@bucket_n0 #### A masked pattern was here #### NULL diff --git a/ql/src/test/results/clientpositive/udf1.q.out b/ql/src/test/results/clientpositive/udf1.q.out index 885de90288..5d0a7fa758 100644 --- a/ql/src/test/results/clientpositive/udf1.q.out +++ b/ql/src/test/results/clientpositive/udf1.q.out @@ -1,21 +1,21 @@ -PREHOOK: query: CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, +PREHOOK: query: CREATE TABLE dest1_n1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING, c14 STRING, c15 STRING, c16 STRING, c17 STRING, c18 STRING, c19 STRING, c20 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, +PREHOOK: Output: default@dest1_n1 +POSTHOOK: query: CREATE TABLE dest1_n1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING, c14 STRING, c15 STRING, c16 STRING, c17 STRING, c18 STRING, c19 STRING, c20 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n1 PREHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', +FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a', '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*', REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 
'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'), @@ -24,7 +24,7 @@ FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab WHERE src.key = 86 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', +FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a', '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*', REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'), @@ -63,7 +63,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n1 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: string), _col18 (type: string), _col19 (type: string) outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20 @@ -108,7 +108,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n1 Stage: Stage-2 Stats Work @@ -116,7 +116,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20 Column Types: string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string - Table: default.dest1 + Table: default.dest1_n1 Stage: Stage-3 Map Reduce @@ -128,7 +128,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n1 Stage: Stage-5 Map Reduce @@ -140,7 +140,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n1 Stage: Stage-6 Move Operator @@ -148,7 +148,7 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', +PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_', '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a', '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*', REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'), @@ -157,8 +157,8 @@ PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' WHERE 
 src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_',
+PREHOOK: Output: default@dest1_n1
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_',
 '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a',
 '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*',
 REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'),
@@ -167,33 +167,33 @@ POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'a' LIKE '%a%', 'b
 WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c10 EXPRESSION []
-POSTHOOK: Lineage: dest1.c11 EXPRESSION []
-POSTHOOK: Lineage: dest1.c12 EXPRESSION []
-POSTHOOK: Lineage: dest1.c13 EXPRESSION []
-POSTHOOK: Lineage: dest1.c14 SIMPLE []
-POSTHOOK: Lineage: dest1.c15 SIMPLE []
-POSTHOOK: Lineage: dest1.c16 SIMPLE []
-POSTHOOK: Lineage: dest1.c17 SIMPLE []
-POSTHOOK: Lineage: dest1.c18 SIMPLE []
-POSTHOOK: Lineage: dest1.c19 SIMPLE []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION []
-POSTHOOK: Lineage: dest1.c20 EXPRESSION []
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-POSTHOOK: Lineage: dest1.c4 EXPRESSION []
-POSTHOOK: Lineage: dest1.c5 EXPRESSION []
-POSTHOOK: Lineage: dest1.c6 EXPRESSION []
-POSTHOOK: Lineage: dest1.c7 EXPRESSION []
-POSTHOOK: Lineage: dest1.c8 EXPRESSION []
-POSTHOOK: Lineage: dest1.c9 EXPRESSION []
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n1
+POSTHOOK: Lineage: dest1_n1.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c10 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c11 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c12 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c13 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c14 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c15 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c16 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c17 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c18 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c19 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c2 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c20 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c3 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c4 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c5 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c6 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c7 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c8 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c9 EXPRESSION []
+PREHOOK: query: SELECT dest1_n1.* FROM dest1_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n1.* FROM dest1_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n1
 #### A masked pattern was here ####
 TRUE FALSE TRUE TRUE TRUE FALSE FALSE FALSE TRUE TRUE FALSE TRUE TRUE acc abc abb hive hadoop AaAbAcA FALSE
diff --git a/ql/src/test/results/clientpositive/udf2.q.out b/ql/src/test/results/clientpositive/udf2.q.out
index a3337e13a8..56e9e8c1d8 100644
--- a/ql/src/test/results/clientpositive/udf2.q.out
+++ b/ql/src/test/results/clientpositive/udf2.q.out
@@ -1,25 +1,25 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n48(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n48
+POSTHOOK: query: CREATE TABLE dest1_n48(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n48
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n48 SELECT ' abc ' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n48
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n48 SELECT ' abc ' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Output: default@dest1_n48
+POSTHOOK: Lineage: dest1_n48.c1 SIMPLE []
 PREHOOK: query: EXPLAIN
-SELECT '|', trim(dest1.c1), '|', rtrim(dest1.c1), '|', ltrim(dest1.c1), '|' FROM dest1
+SELECT '|', trim(dest1_n48.c1), '|', rtrim(dest1_n48.c1), '|', ltrim(dest1_n48.c1), '|' FROM dest1_n48
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT '|', trim(dest1.c1), '|', rtrim(dest1.c1), '|', ltrim(dest1.c1), '|' FROM dest1
+SELECT '|', trim(dest1_n48.c1), '|', rtrim(dest1_n48.c1), '|', ltrim(dest1_n48.c1), '|' FROM dest1_n48
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -30,7 +30,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
-alias: dest1
+alias: dest1_n48
 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: '|' (type: string), trim(c1) (type: string), '|' (type: string), rtrim(c1) (type: string), '|' (type: string), ltrim(c1) (type: string), '|' (type: string)
@@ -51,12 +51,12 @@ STAGE PLANS:
 Processor Tree:
 ListSink
-PREHOOK: query: SELECT '|', trim(dest1.c1), '|', rtrim(dest1.c1), '|', ltrim(dest1.c1), '|' FROM dest1
+PREHOOK: query: SELECT '|', trim(dest1_n48.c1), '|', rtrim(dest1_n48.c1), '|', ltrim(dest1_n48.c1), '|' FROM dest1_n48
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n48
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT '|', trim(dest1.c1), '|', rtrim(dest1.c1), '|', ltrim(dest1.c1), '|' FROM dest1
+POSTHOOK: query: SELECT '|', trim(dest1_n48.c1), '|', rtrim(dest1_n48.c1), '|', ltrim(dest1_n48.c1), '|' FROM dest1_n48
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n48
 #### A masked pattern was here ####
 | abc | abc | abc |
diff --git a/ql/src/test/results/clientpositive/udf3.q.out b/ql/src/test/results/clientpositive/udf3.q.out
index 06d7340f5f..47c1d8de34 100644
--- a/ql/src/test/results/clientpositive/udf3.q.out
+++ b/ql/src/test/results/clientpositive/udf3.q.out
@@ -1,17 +1,17 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n87(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n87
+POSTHOOK: query: CREATE TABLE dest1_n87(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n87
 PREHOOK: query: EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)),
+FROM src INSERT OVERWRITE TABLE dest1_n87 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)),
 min(CAST('' AS INT)), max(CAST('' AS INT))
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-FROM src INSERT OVERWRITE TABLE dest1 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)),
+FROM src INSERT OVERWRITE TABLE dest1_n87 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)),
 min(CAST('' AS INT)), max(CAST('' AS INT))
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -55,7 +55,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n87
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string)
 outputColumnNames: c1, c2, c3, c4, c5
@@ -85,7 +85,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n87
 Stage: Stage-2
 Stats Work
@@ -93,29 +93,29 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: c1, c2, c3, c4, c5
 Column Types: string, string, string, string, string
-Table: default.dest1
+Table: default.dest1_n87
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)),
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n87 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)),
 min(CAST('' AS INT)), max(CAST('' AS INT))
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)),
+PREHOOK: Output: default@dest1_n87
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n87 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)),
 min(CAST('' AS INT)), max(CAST('' AS INT))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION []
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-POSTHOOK: Lineage: dest1.c4 EXPRESSION []
-POSTHOOK: Lineage: dest1.c5 EXPRESSION []
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n87
+POSTHOOK: Lineage: dest1_n87.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1_n87.c2 EXPRESSION []
+POSTHOOK: Lineage: dest1_n87.c3 EXPRESSION []
+POSTHOOK: Lineage: dest1_n87.c4 EXPRESSION []
+POSTHOOK: Lineage: dest1_n87.c5 EXPRESSION []
+PREHOOK: query: SELECT dest1_n87.* FROM dest1_n87
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n87
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n87.* FROM dest1_n87
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n87
 #### A masked pattern was here ####
 0 NULL NULL NULL NULL
diff --git a/ql/src/test/results/clientpositive/udf4.q.out b/ql/src/test/results/clientpositive/udf4.q.out
index 64687c68cb..48e97e10c4 100644
--- a/ql/src/test/results/clientpositive/udf4.q.out
+++ b/ql/src/test/results/clientpositive/udf4.q.out
@@ -1,20 +1,20 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n125(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n125
+POSTHOOK: query: CREATE TABLE dest1_n125(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n125
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n125 SELECT ' abc ' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n125
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n125 SELECT ' abc ' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Output: default@dest1_n125
+POSTHOOK: Lineage: dest1_n125.c1 SIMPLE []
 PREHOOK: query: EXPLAIN
 SELECT round(1.0), round(1.5), round(-1.5), floor(1.0), floor(1.5), floor(-1.5), sqrt(1.0), sqrt(-1.0), sqrt(0.0), ceil(1.0),
 ceil(1.5), ceil(-1.5), ceiling(1.0), rand(3), +3, -3, 1++2, 1+-2,
@@ -38,7 +38,7 @@ CAST(1 AS SMALLINT) ^ CAST(3 AS SMALLINT),
 1 ^ 3,
 CAST(1 AS BIGINT) ^ CAST(3 AS BIGINT)
-FROM dest1
+FROM dest1_n125
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT round(1.0), round(1.5), round(-1.5), floor(1.0), floor(1.5), floor(-1.5), sqrt(1.0), sqrt(-1.0), sqrt(0.0), ceil(1.0),
 ceil(1.5), ceil(-1.5), ceiling(1.0), rand(3), +3, -3, 1++2, 1+-2,
@@ -63,7 +63,7 @@ CAST(1 AS SMALLINT) ^ CAST(3 AS SMALLINT),
 1 ^ 3,
 CAST(1 AS BIGINT) ^ CAST(3 AS BIGINT)
-FROM dest1
+FROM dest1_n125
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -74,7 +74,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
-alias: dest1
+alias: dest1_n125
 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: 1 (type: decimal(1,0)), 2 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1 (type: decimal(2,0)), 1 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1.0D (type: double), null (type: double), 0.0D (type: double), 1 (type: decimal(2,0)), 2 (type: decimal(2,0)), -1 (type: decimal(2,0)), 1 (type: decimal(2,0)), rand(3) (type: double), 3 (type: int), -3 (type: int), 3 (type: int), -1 (type: int), -2 (type: int), -2Y (type: tinyint), -2S (type: smallint), -2L (type: bigint), 0Y (type: tinyint), 0S (type: smallint), 0 (type: int), 0L (type: bigint), 3Y (type: tinyint), 3S (type: smallint), 3 (type: int), 3L (type: bigint), 2Y (type: tinyint), 2S (type: smallint), 2 (type: int), 2L (type: bigint)
@@ -116,9 +116,9 @@ CAST(1 AS SMALLINT) ^ CAST(3 AS SMALLINT),
 1 ^ 3,
 CAST(1 AS BIGINT) ^ CAST(3 AS BIGINT)
-FROM dest1
+FROM dest1_n125
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n125
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT round(1.0), round(1.5), round(-1.5), floor(1.0), floor(1.5), floor(-1.5), sqrt(1.0), sqrt(-1.0), sqrt(0.0), ceil(1.0),
 ceil(1.5), ceil(-1.5), ceiling(1.0), rand(3), +3, -3, 1++2, 1+-2,
 ~1,
@@ -141,8 +141,8 @@ CAST(1 AS SMALLINT) ^ CAST(3 AS SMALLINT),
 1 ^ 3,
 CAST(1 AS BIGINT) ^ CAST(3 AS BIGINT)
-FROM dest1
+FROM dest1_n125
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n125
 #### A masked pattern was here ####
 1 2 -2 1 1 -2 1.0 NULL 0.0 1 2 -1 1 0.731057369148862 3 -3 3 -1 -2 -2 -2 -2 0 0 0 0 3 3 3 3 2 2 2 2
diff --git a/ql/src/test/results/clientpositive/udf5.q.out b/ql/src/test/results/clientpositive/udf5.q.out
index d96620aeb1..63887b24b5 100644
--- a/ql/src/test/results/clientpositive/udf5.q.out
+++ b/ql/src/test/results/clientpositive/udf5.q.out
@@ -1,25 +1,25 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n13(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n13
+POSTHOOK: query: CREATE TABLE dest1_n13(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n13
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n13 SELECT ' abc ' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n13
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n13 SELECT ' abc ' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Output: default@dest1_n13
+POSTHOOK: Lineage: dest1_n13.c1 SIMPLE []
 PREHOOK: query: EXPLAIN
-SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1
+SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1_n13
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1
+SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1_n13
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -30,7 +30,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n13
 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: '2008-11-11 15:32:20' (type: string), DATE'2008-11-11' (type: date), 1 (type: int), 11 (type: int), 2008 (type: int), 1 (type: int), 11 (type: int), 2008 (type: int)
@@ -38,20 +38,20 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 183 Basic stats: COMPLETE Column stats: COMPLETE
 ListSink
-PREHOOK: query: SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1
+PREHOOK: query: SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1_n13
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n13
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1
+POSTHOOK: query: SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1_n13
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n13
 #### A masked pattern was here ####
 2008-11-11 15:32:20 2008-11-11 1 11 2008 1 11 2008
 PREHOOK: query: EXPLAIN
-SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1
+SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1_n13
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1
+SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1_n13
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -62,7 +62,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n13
 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: '01/13/10 11:57:40' (type: string), '2010-01-13 11:57:40' (type: string)
@@ -70,12 +70,12 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
 ListSink
-PREHOOK: query: SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1
+PREHOOK: query: SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1_n13
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n13
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1
+POSTHOOK: query: SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1_n13
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n13
 #### A masked pattern was here ####
 01/13/10 11:57:40 2010-01-13 11:57:40
diff --git a/ql/src/test/results/clientpositive/udf6.q.out b/ql/src/test/results/clientpositive/udf6.q.out
index 69e0130dd3..a69b667df8 100644
--- a/ql/src/test/results/clientpositive/udf6.q.out
+++ b/ql/src/test/results/clientpositive/udf6.q.out
@@ -1,25 +1,25 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n51(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n51
+POSTHOOK: query: CREATE TABLE dest1_n51(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n51
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n51 SELECT ' abc ' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n51
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n51 SELECT ' abc ' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Output: default@dest1_n51
+POSTHOOK: Lineage: dest1_n51.c1 SIMPLE []
 PREHOOK: query: EXPLAIN
-SELECT IF(TRUE, 1, 2) FROM dest1
+SELECT IF(TRUE, 1, 2) FROM dest1_n51
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT IF(TRUE, 1, 2) FROM dest1
+SELECT IF(TRUE, 1, 2) FROM dest1_n51
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -30,7 +30,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n51
 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: 1 (type: int)
@@ -38,13 +38,13 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
 ListSink
-PREHOOK: query: SELECT IF(TRUE, 1, 2) FROM dest1
+PREHOOK: query: SELECT IF(TRUE, 1, 2) FROM dest1_n51
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n51
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT IF(TRUE, 1, 2) FROM dest1
+POSTHOOK: query: SELECT IF(TRUE, 1, 2) FROM dest1_n51
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n51
 #### A masked pattern was here ####
 1
 PREHOOK: query: EXPLAIN
@@ -53,7 +53,7 @@ SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE, "a", "b"),
 IF(FALSE, CAST(127 AS TINYINT), CAST(126 AS TINYINT)),
 IF(FALSE, CAST(127 AS SMALLINT), CAST(128 AS SMALLINT)),
 CAST(128 AS INT), CAST(1.0 AS DOUBLE),
- CAST('128' AS STRING) FROM dest1
+ CAST('128' AS STRING) FROM dest1_n51
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE, "a", "b"),
@@ -61,7 +61,7 @@ SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE, "a", "b"),
 IF(FALSE, CAST(127 AS TINYINT), CAST(126 AS TINYINT)),
 IF(FALSE, CAST(127 AS SMALLINT), CAST(128 AS SMALLINT)),
 CAST(128 AS INT), CAST(1.0 AS DOUBLE),
- CAST('128' AS STRING) FROM dest1
+ CAST('128' AS STRING) FROM dest1_n51
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -72,7 +72,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n51
 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: 1 (type: int), 2 (type: int), 2 (type: int), 'a' (type: string), 0.1 (type: decimal(1,1)), 2L (type: bigint), 126Y (type: tinyint), 128S (type: smallint), 128 (type: int), 1.0D (type: double), '128' (type: string)
@@ -85,17 +85,17 @@ PREHOOK: query: SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE,
 IF(FALSE, CAST(127 AS TINYINT), CAST(126 AS TINYINT)),
 IF(FALSE, CAST(127 AS SMALLINT), CAST(128 AS SMALLINT)),
 CAST(128 AS INT), CAST(1.0 AS DOUBLE),
- CAST('128' AS STRING) FROM dest1
+ CAST('128' AS STRING) FROM dest1_n51
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n51
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE, "a", "b"),
 IF(TRUE, 0.1, 0.2), IF(FALSE, CAST(1 AS BIGINT), CAST(2 AS BIGINT)),
 IF(FALSE, CAST(127 AS TINYINT), CAST(126 AS TINYINT)),
 IF(FALSE, CAST(127 AS SMALLINT), CAST(128 AS SMALLINT)),
 CAST(128 AS INT), CAST(1.0 AS DOUBLE),
- CAST('128' AS STRING) FROM dest1
+ CAST('128' AS STRING) FROM dest1_n51
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n51
 #### A masked pattern was here ####
 1 2 2 a 0.1 2 126 128 128 1.0 128
diff --git a/ql/src/test/results/clientpositive/udf7.q.out b/ql/src/test/results/clientpositive/udf7.q.out
index 909556947a..6c2f8a433d 100644
--- a/ql/src/test/results/clientpositive/udf7.q.out
+++ b/ql/src/test/results/clientpositive/udf7.q.out
@@ -1,20 +1,20 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n93(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n93
+POSTHOOK: query: CREATE TABLE dest1_n93(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n93
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n93 SELECT ' abc ' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n93
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n93 SELECT ' abc ' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Output: default@dest1_n93
+POSTHOOK: Lineage: dest1_n93.c1 SIMPLE []
 PREHOOK: query: EXPLAIN
 SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
 LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1),
@@ -23,7 +23,7 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
 POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), POWER(-1, 0.5), POWER(-1, 2),
 POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
- POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
+ POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n93
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
@@ -33,7 +33,7 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
 POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), POWER(-1, 0.5), POWER(-1, 2),
 POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
- POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
+ POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n93
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -44,7 +44,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n93
 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: 1.098612288668D (type: double), null (type: double), null (type: double), 1.098612288668D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), 0.47712125472D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), null (type: double), -1.0D (type: double), 7.389056098931D (type: double), 8.0D (type: double), 8.0D (type: double), 0.125D (type: double), 8.0D (type: double), 2.0D (type: double), NaND (type: double), 1.0D (type: double), 1.0D (type: double), 8.0D (type: double), 8.0D (type: double)
@@ -59,9 +59,9 @@ PREHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), L
 POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), POWER(-1, 0.5), POWER(-1, 2),
 POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
- POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
+ POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n93
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n93
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
 LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1),
@@ -70,8 +70,8 @@ POSTHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12),
 POW(2,3), POWER(2,3), POWER(2,-3), POWER(0.5, -3), POWER(4, 0.5), POWER(-1, 0.5), POWER(-1, 2),
 POWER(CAST (1 AS DECIMAL), CAST (0 AS INT)), POWER(CAST (2 AS DECIMAL), CAST (3 AS INT)),
- POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
+ POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1_n93
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n93
 #### A masked pattern was here ####
 1.098612288668 NULL NULL 1.098612288668 NULL NULL 1.584962500721 NULL NULL 0.47712125472 NULL NULL 1.584962500721 NULL NULL NULL -1.0 7.389056098931 8.0 8.0 0.125 8.0 2.0 NaN 1.0 1.0 8.0 8.0
diff --git a/ql/src/test/results/clientpositive/udf8.q.out b/ql/src/test/results/clientpositive/udf8.q.out
index fbb113ca76..fbd4507744 100644
--- a/ql/src/test/results/clientpositive/udf8.q.out
+++ b/ql/src/test/results/clientpositive/udf8.q.out
@@ -1,34 +1,34 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n47(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n47
+POSTHOOK: query: CREATE TABLE dest1_n47(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT '' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n47
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n47 SELECT '' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT '' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n47
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n47 SELECT '' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT '1' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n47
+POSTHOOK: Lineage: dest1_n47.c1 SIMPLE []
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n47 SELECT '1' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT '1' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n47
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n47 SELECT '1' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Output: default@dest1_n47
+POSTHOOK: Lineage: dest1_n47.c1 SIMPLE []
 PREHOOK: query: EXPLAIN
-SELECT avg(c1), sum(c1), count(c1) FROM dest1
+SELECT avg(c1), sum(c1), count(c1) FROM dest1_n47
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT avg(c1), sum(c1), count(c1) FROM dest1
+SELECT avg(c1), sum(c1), count(c1) FROM dest1_n47
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -39,7 +39,7 @@ STAGE PLANS:
 Map Reduce
 Map Operator Tree:
 TableScan
-alias: dest1
+alias: dest1_n47
 Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: c1 (type: string)
@@ -78,12 +78,12 @@ STAGE PLANS:
 Processor Tree:
 ListSink
-PREHOOK: query: SELECT avg(c1), sum(c1), count(c1) FROM dest1
+PREHOOK: query: SELECT avg(c1), sum(c1), count(c1) FROM dest1_n47
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n47
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT avg(c1), sum(c1), count(c1) FROM dest1
+POSTHOOK: query: SELECT avg(c1), sum(c1), count(c1) FROM dest1_n47
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n47
 #### A masked pattern was here ####
 1.0 1.0 1
diff --git a/ql/src/test/results/clientpositive/udf_10_trims.q.out b/ql/src/test/results/clientpositive/udf_10_trims.q.out
index 5a53eb1aca..6aea7730a6 100644
--- a/ql/src/test/results/clientpositive/udf_10_trims.q.out
+++ b/ql/src/test/results/clientpositive/udf_10_trims.q.out
@@ -1,19 +1,19 @@
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n5(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n5
+POSTHOOK: query: CREATE TABLE dest1_n5(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n5
 PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n5
 SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( ' abc '))))))))))
 FROM src
 WHERE src.key = 86
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n5
 SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( ' abc '))))))))))
 FROM src
 WHERE src.key = 86
@@ -49,7 +49,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n5
 Select Operator
 expressions: _col0 (type: string)
 outputColumnNames: c1
@@ -94,7 +94,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n5
 Stage: Stage-2
 Stats Work
@@ -102,7 +102,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: c1
 Column Types: string
-Table: default.dest1
+Table: default.dest1_n5
 Stage: Stage-3
 Map Reduce
@@ -114,7 +114,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n5
 Stage: Stage-5
 Map Reduce
@@ -126,7 +126,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n5
 Stage: Stage-6
 Move Operator
@@ -134,18 +134,18 @@ STAGE PLANS:
 hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: query: INSERT OVERWRITE TABLE dest1_n5
 SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( ' abc '))))))))))
 FROM src
 WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT OVERWRITE TABLE dest1
+PREHOOK: Output: default@dest1_n5
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n5
 SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( ' abc '))))))))))
 FROM src
 WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Output: default@dest1_n5
+POSTHOOK: Lineage: dest1_n5.c1 SIMPLE []
diff --git a/ql/src/test/results/clientpositive/udf_character_length.q.out b/ql/src/test/results/clientpositive/udf_character_length.q.out
index 6bd2fe72ec..4ac560adaa 100644
--- a/ql/src/test/results/clientpositive/udf_character_length.q.out
+++ b/ql/src/test/results/clientpositive/udf_character_length.q.out
@@ -30,17 +30,17 @@ Example:
 5
 Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFCharacterLength
 Function type:BUILTIN
-PREHOOK: query: CREATE TABLE dest1(len INT)
+PREHOOK: query: CREATE TABLE dest1_n81(len INT)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(len INT)
+PREHOOK: Output: default@dest1_n81
+POSTHOOK: query: CREATE TABLE dest1_n81(len INT)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value)
+POSTHOOK: Output: default@dest1_n81
+PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n81 SELECT character_length(src1.value)
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value)
+POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n81 SELECT character_length(src1.value)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-1 is a root stage
@@ -70,7 +70,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n81
 Select Operator
 expressions: _col0 (type: int)
 outputColumnNames: len
@@ -115,7 +115,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n81
 Stage: Stage-2
 Stats Work
@@ -123,7 +123,7 @@ STAGE PLANS:
 Column Stats Desc:
 Columns: len
 Column Types: int
-Table: default.dest1
+Table: default.dest1_n81
 Stage: Stage-3
 Map Reduce
@@ -135,7 +135,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n81
 Stage: Stage-5
 Map Reduce
@@ -147,7 +147,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.dest1
+name: default.dest1_n81
 Stage: Stage-6
 Move Operator
@@ -155,22 +155,22 @@ STAGE PLANS:
 hdfs directory: true
 #### A masked pattern was here ####
-PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value)
+PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n81 SELECT character_length(src1.value)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value)
+PREHOOK: Output: default@dest1_n81
+POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n81 SELECT character_length(src1.value)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n81
+POSTHOOK: Lineage: dest1_n81.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n81.* FROM dest1_n81
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n81
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n81.* FROM dest1_n81
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n81
 #### A masked pattern was here ####
 7
 0
@@ -197,42 +197,42 @@ POSTHOOK: Input: default@dest1
 0
 0
 0
-PREHOOK: query: DROP TABLE dest1
+PREHOOK: query: DROP TABLE dest1_n81
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dest1
-PREHOOK: Output: default@dest1
-POSTHOOK: query: DROP TABLE dest1
+PREHOOK: Input: default@dest1_n81
+PREHOOK: Output: default@dest1_n81
+POSTHOOK: query: DROP TABLE dest1_n81
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dest1
-POSTHOOK: Output: default@dest1
-PREHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE
+POSTHOOK: Input: default@dest1_n81
+POSTHOOK: Output: default@dest1_n81
+PREHOOK: query: CREATE TABLE dest1_n81(name STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n81
+POSTHOOK: query: CREATE TABLE dest1_n81(name STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1
+POSTHOOK: Output: default@dest1_n81
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n81
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@dest1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1
+PREHOOK: Output: default@dest1_n81
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n81
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@dest1
-PREHOOK: query: INSERT INTO dest1 VALUES(NULL)
+POSTHOOK: Output: default@dest1_n81
+PREHOOK: query: INSERT INTO dest1_n81 VALUES(NULL)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@dest1
-POSTHOOK: query: INSERT INTO dest1 VALUES(NULL)
+PREHOOK: Output: default@dest1_n81
+POSTHOOK: query: INSERT INTO dest1_n81 VALUES(NULL)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.name EXPRESSION []
-PREHOOK: query: EXPLAIN SELECT character_length(dest1.name) FROM dest1
+POSTHOOK: Output: default@dest1_n81
+POSTHOOK: Lineage: dest1_n81.name EXPRESSION []
+PREHOOK: query: EXPLAIN SELECT character_length(dest1_n81.name) FROM dest1_n81
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT character_length(dest1.name) FROM dest1
+POSTHOOK: query: EXPLAIN SELECT character_length(dest1_n81.name) FROM dest1_n81
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -243,7 +243,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n81
 Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: character_length(name) (type: int)
@@ -251,19 +251,19 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: NONE
 ListSink
-PREHOOK: query: SELECT character_length(dest1.name) FROM dest1
+PREHOOK: query: SELECT character_length(dest1_n81.name) FROM dest1_n81
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n81
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT character_length(dest1.name) FROM dest1
+POSTHOOK: query: SELECT character_length(dest1_n81.name) FROM dest1_n81
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n81
 #### A masked pattern was here ####
 NULL
 2
-PREHOOK: query: EXPLAIN SELECT char_length(dest1.name) FROM dest1
+PREHOOK: query: EXPLAIN SELECT char_length(dest1_n81.name) FROM dest1_n81
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT char_length(dest1.name) FROM dest1
+POSTHOOK: query: EXPLAIN SELECT char_length(dest1_n81.name) FROM dest1_n81
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -274,7 +274,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n81
 Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: character_length(name) (type: int)
@@ -282,21 +282,21 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: NONE
 ListSink
-PREHOOK: query: SELECT char_length(dest1.name) FROM dest1
+PREHOOK: query: SELECT char_length(dest1_n81.name) FROM dest1_n81
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n81
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT char_length(dest1.name) FROM dest1
+POSTHOOK: query: SELECT char_length(dest1_n81.name) FROM dest1_n81
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n81
 #### A masked pattern was here ####
 NULL
 2
-PREHOOK: query: DROP TABLE dest1
+PREHOOK: query: DROP TABLE dest1_n81
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@dest1
-PREHOOK: Output: default@dest1
-POSTHOOK: query: DROP TABLE dest1
+PREHOOK: Input: default@dest1_n81
+PREHOOK: Output: default@dest1_n81
+POSTHOOK: query: DROP TABLE dest1_n81
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@dest1
-POSTHOOK: Output: default@dest1
+POSTHOOK: Input: default@dest1_n81
+POSTHOOK: Output: default@dest1_n81
diff --git a/ql/src/test/results/clientpositive/udf_concat_insert1.q.out b/ql/src/test/results/clientpositive/udf_concat_insert1.q.out
index b74644b928..00c12d5a6f 100644
--- a/ql/src/test/results/clientpositive/udf_concat_insert1.q.out
+++ b/ql/src/test/results/clientpositive/udf_concat_insert1.q.out
@@ -1,30 +1,30 @@
-PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n115(key INT, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n115
+POSTHOOK: query: CREATE TABLE dest1_n115(key INT, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n115
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', concat(src.key) WHERE src.key < 100 group by src.key
+INSERT OVERWRITE TABLE dest1_n115 SELECT '1234', concat(src.key) WHERE src.key < 100 group by src.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n115
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', concat(src.key) WHERE src.key < 100 group by src.key
+INSERT OVERWRITE TABLE dest1_n115 SELECT '1234', concat(src.key) WHERE src.key < 100 group by src.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key EXPRESSION []
-POSTHOOK: Lineage: dest1.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n115
+POSTHOOK: Lineage: dest1_n115.key EXPRESSION []
+POSTHOOK: Lineage: dest1_n115.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n115.* FROM dest1_n115
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n115
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n115.* FROM dest1_n115
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n115
 #### A masked pattern was here ####
 1234 0
 1234 10
diff --git a/ql/src/test/results/clientpositive/udf_concat_insert2.q.out b/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
index d68bd768c4..e1556a1be2 100644
--- a/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
+++ b/ql/src/test/results/clientpositive/udf_concat_insert2.q.out
@@ -1,30 +1,30 @@
-PREHOOK: query: CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n90(key STRING, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n90
+POSTHOOK: query: CREATE TABLE dest1_n90(key STRING, value STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest1_n90
 PREHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT concat('1234', 'abc', 'extra argument'), src.value WHERE src.key < 100
+INSERT OVERWRITE TABLE dest1_n90 SELECT concat('1234', 'abc', 'extra argument'), src.value WHERE src.key < 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest1_n90
 POSTHOOK: query: FROM src
-INSERT OVERWRITE TABLE dest1 SELECT concat('1234', 'abc', 'extra argument'), src.value WHERE src.key < 100
+INSERT OVERWRITE TABLE dest1_n90 SELECT concat('1234', 'abc', 'extra argument'), src.value WHERE src.key < 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SIMPLE []
-POSTHOOK: Lineage: dest1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: Output: default@dest1_n90
+POSTHOOK: Lineage: dest1_n90.key SIMPLE []
+POSTHOOK: Lineage: dest1_n90.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1_n90.* FROM dest1_n90
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n90
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: query: SELECT dest1_n90.* FROM dest1_n90
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n90
 #### A masked pattern was here ####
 1234abcextra argument val_86
 1234abcextra argument val_27
diff --git a/ql/src/test/results/clientpositive/udf_concat_ws.q.out b/ql/src/test/results/clientpositive/udf_concat_ws.q.out
index 2c50b1ef34..10889cab4e 100644
--- a/ql/src/test/results/clientpositive/udf_concat_ws.q.out
+++ b/ql/src/test/results/clientpositive/udf_concat_ws.q.out
@@ -13,36 +13,36 @@ Example:
 'www.facebook.com'
 Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFConcatWS
 Function type:BUILTIN
-PREHOOK: query: CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING)
+PREHOOK: query: CREATE TABLE dest1_n7(c1 STRING, c2 STRING, c3 STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING)
+PREHOOK: Output: default@dest1_n7
+POSTHOOK: query: CREATE TABLE dest1_n7(c1 STRING, c2 STRING, c3 STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'abc', 'xyz', '8675309' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n7
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n7 SELECT 'abc', 'xyz', '8675309' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 'abc', 'xyz', '8675309' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n7
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n7 SELECT 'abc', 'xyz', '8675309' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 SIMPLE []
-POSTHOOK: Lineage: dest1.c2 SIMPLE []
-POSTHOOK: Lineage: dest1.c3 SIMPLE []
+POSTHOOK: Output: default@dest1_n7
+POSTHOOK: Lineage: dest1_n7.c1 SIMPLE []
+POSTHOOK: Lineage: dest1_n7.c2 SIMPLE []
+POSTHOOK: Lineage: dest1_n7.c3 SIMPLE []
 PREHOOK: query: EXPLAIN
-SELECT concat_ws(dest1.c1, dest1.c2, dest1.c3),
-       concat_ws(',', dest1.c1, dest1.c2, dest1.c3),
-       concat_ws(NULL, dest1.c1, dest1.c2, dest1.c3),
-       concat_ws('**', dest1.c1, NULL, dest1.c3) FROM dest1
+SELECT concat_ws(dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws(',', dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws(NULL, dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws('**', dest1_n7.c1, NULL, dest1_n7.c3) FROM dest1_n7
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
-SELECT concat_ws(dest1.c1, dest1.c2, dest1.c3),
-       concat_ws(',', dest1.c1, dest1.c2, dest1.c3),
-       concat_ws(NULL, dest1.c1, dest1.c2, dest1.c3),
-       concat_ws('**', dest1.c1, NULL, dest1.c3) FROM dest1
+SELECT concat_ws(dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws(',', dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws(NULL, dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws('**', dest1_n7.c1, NULL, dest1_n7.c3) FROM dest1_n7
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -53,7 +53,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n7
 Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: concat_ws(c1, c2, c3) (type: string), concat_ws(',', c1, c2, c3) (type: string), concat_ws(null, c1, c2, c3) (type: string), concat_ws('**', c1, null, c3) (type: string)
@@ -61,19 +61,19 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
 ListSink
-PREHOOK: query: SELECT concat_ws(dest1.c1, dest1.c2, dest1.c3),
-       concat_ws(',', dest1.c1, dest1.c2, dest1.c3),
-       concat_ws(NULL, dest1.c1, dest1.c2, dest1.c3),
-       concat_ws('**', dest1.c1, NULL, dest1.c3) FROM dest1
+PREHOOK: query: SELECT concat_ws(dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws(',', dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws(NULL, dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws('**', dest1_n7.c1, NULL, dest1_n7.c3) FROM dest1_n7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n7
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT concat_ws(dest1.c1, dest1.c2, dest1.c3),
-       concat_ws(',', dest1.c1, dest1.c2, dest1.c3),
-       concat_ws(NULL, dest1.c1, dest1.c2, dest1.c3),
-       concat_ws('**', dest1.c1, NULL, dest1.c3) FROM dest1
+POSTHOOK: query: SELECT concat_ws(dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws(',', dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws(NULL, dest1_n7.c1, dest1_n7.c2, dest1_n7.c3),
+       concat_ws('**', dest1_n7.c1, NULL, dest1_n7.c3) FROM dest1_n7
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n7
 #### A masked pattern was here ####
 xyzabc8675309 abc,xyz,8675309 NULL abc**8675309
 PREHOOK: query: EXPLAIN
@@ -83,7 +83,7 @@ SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
 concat_ws('_', array('www', 'face'), array('book', 'com', '1234')),
 concat_ws('**', 'www', array('face'), array('book', 'com', '1234')),
 concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')),
-       concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows)
+       concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n7 tablesample (1 rows)
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN
 SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
@@ -92,7 +92,7 @@ SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
 concat_ws('_', array('www', 'face'), array('book', 'com', '1234')),
 concat_ws('**', 'www', array('face'), array('book', 'com', '1234')),
 concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')),
-       concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows)
+       concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n7 tablesample (1 rows)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
 Stage-0 is a root stage
@@ -103,7 +103,7 @@ STAGE PLANS:
 limit: -1
 Processor Tree:
 TableScan
-alias: dest1
+alias: dest1_n7
 Row Limit Per Split: 1
 Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
@@ -118,9 +118,9 @@ PREHOOK: query: SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234
 concat_ws('_', array('www', 'face'), array('book', 'com', '1234')),
 concat_ws('**', 'www', array('face'), array('book', 'com', '1234')),
 concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')),
-       concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows)
+       concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n7 tablesample (1 rows)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n7
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
 concat_ws('-', 'www', array('face', 'book', 'com'), '1234'),
@@ -128,9 +128,9 @@ POSTHOOK: query: SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '123
 concat_ws('_', array('www', 'face'), array('book', 'com', '1234')),
 concat_ws('**', 'www', array('face'), array('book', 'com', '1234')),
 concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')),
-       concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows)
+       concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n7 tablesample (1 rows)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n7
 #### A masked pattern was here ####
 www.face.book.com.1234 www-face-book-com-1234 wwwFfaceFbookFcomF1234 www_face_book_com_1234 www**face**book**com**1234 www[]face[]book[]com[]1234 wwwAAAfaceAAAbookAAAcomAAA1234
 PREHOOK: query: SELECT concat_ws(NULL, array('www', 'face', 'book', 'com'), '1234'),
@@ -139,9 +139,9 @@ PREHOOK: query: SELECT concat_ws(NULL, array('www', 'face', 'book', 'com'), '123
 concat_ws(NULL, array('www', 'face'), array('book', 'com', '1234')),
 concat_ws(NULL, 'www', array('face'), array('book', 'com', '1234')),
 concat_ws(NULL, array('www'), 'face', array('book', 'com', '1234')),
-       concat_ws(NULL, array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows)
+       concat_ws(NULL, array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n7 tablesample (1 rows)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1
+PREHOOK: Input: default@dest1_n7
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT concat_ws(NULL, array('www', 'face', 'book', 'com'), '1234'),
 concat_ws(NULL, 'www', array('face', 'book', 'com'), '1234'),
@@ -149,8 +149,8 @@ POSTHOOK: query: SELECT concat_ws(NULL, array('www', 'face', 'book', 'com'), '12
 concat_ws(NULL, array('www', 'face'), array('book', 'com', '1234')),
 concat_ws(NULL, 'www', array('face'), array('book', 'com', '1234')),
 concat_ws(NULL, array('www'), 'face', array('book', 'com', '1234')),
-       concat_ws(NULL, array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows)
+       concat_ws(NULL, array('www'), array('face', 'book', 'com'), '1234') FROM dest1_n7 tablesample (1 rows)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1
+POSTHOOK: Input: default@dest1_n7
 #### A masked pattern was here ####
 NULL NULL NULL NULL NULL NULL NULL
diff --git a/ql/src/test/results/clientpositive/udf_field.q.out b/ql/src/test/results/clientpositive/udf_field.q.out
index 186cdd976a..a1562bd9e5 100644
--- a/ql/src/test/results/clientpositive/udf_field.q.out
+++ b/ql/src/test/results/clientpositive/udf_field.q.out
@@ -70,22 +70,22 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 1 2 3 4 4
-PREHOOK: query: CREATE TABLE test_table(col1 STRING, col2 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE test_table_n10(col1 STRING, col2 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table
-POSTHOOK: query: CREATE TABLE test_table(col1 STRING, col2 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@test_table_n10
+POSTHOOK: query: CREATE TABLE test_table_n10(col1 STRING, col2 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table
+POSTHOOK: Output: default@test_table_n10
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table_n10
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@test_table
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table
+PREHOOK: Output: default@test_table_n10
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table_n10
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@test_table
+POSTHOOK: Output: default@test_table_n10
 PREHOOK: query: select col1,col2,
 field("66",col1),
 field("66",col1, col2),
@@ -97,9 +97,9 @@ PREHOOK: query: select col1,col2,
 field(col2, "66", "88"),
 field(col1, col2, col1),
 field(col1, col2, "66")
-from test_table where col1="86" or col1="66"
+from test_table_n10 where col1="86" or col1="66"
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table
+PREHOOK: Input: default@test_table_n10
 #### A masked pattern was here ####
 POSTHOOK: query: select col1,col2,
 field("66",col1),
@@ -112,28 +112,28 @@ POSTHOOK: query: select col1,col2,
 field(col2, "66", "88"),
 field(col1, col2, col1),
 field(col1, col2, "66")
-from test_table where col1="86" or col1="66"
+from test_table_n10 where col1="86" or col1="66"
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table
+POSTHOOK: Input: default@test_table_n10
 #### A masked pattern was here ####
 86 val_86 0 0 2 0 0 0 0 0 2 0
 66 val_66 1 1 0 0 0 1 0 0 2 2
-PREHOOK: query: CREATE TABLE test_table1(col1 int, col2 string) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE test_table1_n13(col1 int, col2 string) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: CREATE TABLE test_table1(col1 int, col2 string) STORED AS TEXTFILE
+PREHOOK: Output: default@test_table1_n13
+POSTHOOK: query: CREATE TABLE test_table1_n13(col1 int, col2 string) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_table1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table1
+POSTHOOK: Output: default@test_table1_n13
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table1_n13
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@test_table1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table1
+PREHOOK: Output: default@test_table1_n13
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table1_n13
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@test_table1
+POSTHOOK: Output: default@test_table1_n13
 PREHOOK: query: select col1,col2,
 field(66,col1),
 field(66,col1, col2),
@@ -142,9 +142,9 @@ PREHOOK: query: select col1,col2,
 field(86,col1,n,col2),
 field(NULL,col1,n,col2),
 field(col1, col2)
-from (select col1, col2, NULL as n from test_table1 where col1=86 or col1=66) t
+from (select col1, col2, NULL as n from test_table1_n13 where col1=86 or col1=66) t
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1_n13
 #### A masked pattern was here ####
 POSTHOOK: query: select col1,col2,
 field(66,col1),
@@ -154,9 +154,9 @@ POSTHOOK: query: select col1,col2,
 field(86,col1,n,col2),
 field(NULL,col1,n,col2),
 field(col1, col2)
-from (select col1, col2, NULL as n from test_table1 where col1=86 or col1=66) t
+from (select col1, col2, NULL as n from test_table1_n13 where col1=86 or col1=66) t
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1_n13
 #### A masked pattern was here ####
 86 val_86 0 0 2 1 1 0 0
 66 val_66 1 1 0 0 0 0 0
diff --git a/ql/src/test/results/clientpositive/udf_get_json_object.q.out b/ql/src/test/results/clientpositive/udf_get_json_object.q.out
index b23b29c72a..49bd4dc2ba 100644
--- a/ql/src/test/results/clientpositive/udf_get_json_object.q.out
+++ b/ql/src/test/results/clientpositive/udf_get_json_object.q.out
@@ -25,23 +25,23 @@ Syntax not supported that's worth noticing:
 Function class:org.apache.hadoop.hive.ql.udf.UDFJson
 Function type:BUILTIN
-PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest1_n57(c1 STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1
-POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE
+PREHOOK: Output: default@dest1_n57
+POSTHOOK: query: CREATE TABLE dest1_n57(c1 STRING) STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1
-PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+POSTHOOK: Output: default@dest1_n57
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n57 SELECT ' abc ' WHERE src.key = 86
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1
-POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86
+PREHOOK: Output: default@dest1_n57
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n57 SELECT ' abc ' WHERE src.key = 86
 POSTHOOK: type: QUERY
 POSTHOOK: Input:
default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 SIMPLE [] +POSTHOOK: Output: default@dest1_n57 +POSTHOOK: Lineage: dest1_n57.c1 SIMPLE [] PREHOOK: query: EXPLAIN #### A masked pattern was here #### PREHOOK: type: QUERY @@ -152,40 +152,40 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src_json #### A masked pattern was here #### 1234 -PREHOOK: query: CREATE TABLE dest2(c1 STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE dest2_n7(c1 STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest2 -POSTHOOK: query: CREATE TABLE dest2(c1 STRING) STORED AS RCFILE +PREHOOK: Output: default@dest2_n7 +POSTHOOK: query: CREATE TABLE dest2_n7(c1 STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest2 -PREHOOK: query: INSERT OVERWRITE TABLE dest2 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows) +POSTHOOK: Output: default@dest2_n7 +PREHOOK: query: INSERT OVERWRITE TABLE dest2_n7 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest2 -POSTHOOK: query: INSERT OVERWRITE TABLE dest2 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows) +PREHOOK: Output: default@dest2_n7 +POSTHOOK: query: INSERT OVERWRITE TABLE dest2_n7 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest2.c1 SIMPLE [] -PREHOOK: query: SELECT * FROM dest2 +POSTHOOK: Output: default@dest2_n7 +POSTHOOK: Lineage: dest2_n7.c1 SIMPLE [] +PREHOOK: query: SELECT * FROM dest2_n7 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n7 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM dest2 +POSTHOOK: query: SELECT * FROM dest2_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n7 #### A masked pattern was here #### {"a":"b c"} -PREHOOK: query: SELECT get_json_object(c1, '$.a') FROM dest2 +PREHOOK: query: SELECT get_json_object(c1, '$.a') FROM dest2_n7 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n7 #### A masked pattern was here #### -POSTHOOK: query: SELECT get_json_object(c1, '$.a') FROM dest2 +POSTHOOK: query: SELECT get_json_object(c1, '$.a') FROM dest2_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n7 #### A masked pattern was here #### b c diff --git a/ql/src/test/results/clientpositive/udf_isops.q.out b/ql/src/test/results/clientpositive/udf_isops.q.out index 636f8e8cd6..446972336e 100644 --- a/ql/src/test/results/clientpositive/udf_isops.q.out +++ b/ql/src/test/results/clientpositive/udf_isops.q.out @@ -1,151 +1,151 @@ -PREHOOK: query: drop table if exists t +PREHOOK: query: drop table if exists t_n29 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists t +POSTHOOK: query: drop table if exists t_n29 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table t (a int,v int, b boolean) +PREHOOK: query: create table t_n29 (a int,v int, b boolean) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (a int,v int, b boolean) +PREHOOK: Output: default@t_n29 +POSTHOOK: query: create table t_n29 (a int,v int, b boolean) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1,null, true) +POSTHOOK: 
Output: default@t_n29 +PREHOOK: query: insert into t_n29 values (1,null, true) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1,null, true) +PREHOOK: Output: default@t_n29 +POSTHOOK: query: insert into t_n29 values (1,null, true) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -POSTHOOK: Lineage: t.b SCRIPT [] -POSTHOOK: Lineage: t.v EXPRESSION [] -PREHOOK: query: insert into t values (2,1, false) +POSTHOOK: Output: default@t_n29 +POSTHOOK: Lineage: t_n29.a SCRIPT [] +POSTHOOK: Lineage: t_n29.b SCRIPT [] +POSTHOOK: Lineage: t_n29.v EXPRESSION [] +PREHOOK: query: insert into t_n29 values (2,1, false) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (2,1, false) +PREHOOK: Output: default@t_n29 +POSTHOOK: query: insert into t_n29 values (2,1, false) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -POSTHOOK: Lineage: t.b SCRIPT [] -POSTHOOK: Lineage: t.v SCRIPT [] -PREHOOK: query: insert into t values (3,2, null) +POSTHOOK: Output: default@t_n29 +POSTHOOK: Lineage: t_n29.a SCRIPT [] +POSTHOOK: Lineage: t_n29.b SCRIPT [] +POSTHOOK: Lineage: t_n29.v SCRIPT [] +PREHOOK: query: insert into t_n29 values (3,2, null) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (3,2, null) +PREHOOK: Output: default@t_n29 +POSTHOOK: query: insert into t_n29 values (3,2, null) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -POSTHOOK: Lineage: t.b EXPRESSION [] -POSTHOOK: Lineage: t.v SCRIPT [] -PREHOOK: query: select assert_true(sum(a*a) = 1) from t +POSTHOOK: Output: default@t_n29 +POSTHOOK: Lineage: t_n29.a SCRIPT [] +POSTHOOK: Lineage: t_n29.b EXPRESSION [] +POSTHOOK: Lineage: t_n29.v SCRIPT [] +PREHOOK: query: select assert_true(sum(a*a) = 1) from t_n29 where v is null PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### -POSTHOOK: query: select assert_true(sum(a*a) = 1) from t +POSTHOOK: query: select assert_true(sum(a*a) = 1) from t_n29 where v is null POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n29 #### A masked pattern was here #### NULL -PREHOOK: query: select assert_true(sum(a*a) = 2*2+3*3) from t +PREHOOK: query: select assert_true(sum(a*a) = 2*2+3*3) from t_n29 where v is not null PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### -POSTHOOK: query: select assert_true(sum(a*a) = 2*2+3*3) from t +POSTHOOK: query: select assert_true(sum(a*a) = 2*2+3*3) from t_n29 where v is not null POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n29 #### A masked pattern was here #### NULL -PREHOOK: query: select assert_true(sum(a*a) = 1) from t +PREHOOK: query: select assert_true(sum(a*a) = 1) from t_n29 where b is true PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### -POSTHOOK: query: select assert_true(sum(a*a) = 1) from t +POSTHOOK: query: select assert_true(sum(a*a) = 1) from t_n29 where b is true POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: 
default@t_n29 #### A masked pattern was here #### NULL -PREHOOK: query: select assert_true(sum(a*a) = 2*2 + 3*3) from t +PREHOOK: query: select assert_true(sum(a*a) = 2*2 + 3*3) from t_n29 where b is not true PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### -POSTHOOK: query: select assert_true(sum(a*a) = 2*2 + 3*3) from t +POSTHOOK: query: select assert_true(sum(a*a) = 2*2 + 3*3) from t_n29 where b is not true POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n29 #### A masked pattern was here #### NULL -PREHOOK: query: select assert_true(sum(a*a) = 4) from t +PREHOOK: query: select assert_true(sum(a*a) = 4) from t_n29 where b is false PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### -POSTHOOK: query: select assert_true(sum(a*a) = 4) from t +POSTHOOK: query: select assert_true(sum(a*a) = 4) from t_n29 where b is false POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n29 #### A masked pattern was here #### NULL -PREHOOK: query: select assert_true(sum(a*a) = 1*1 + 3*3) from t +PREHOOK: query: select assert_true(sum(a*a) = 1*1 + 3*3) from t_n29 where b is not false PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### -POSTHOOK: query: select assert_true(sum(a*a) = 1*1 + 3*3) from t +POSTHOOK: query: select assert_true(sum(a*a) = 1*1 + 3*3) from t_n29 where b is not false POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n29 #### A masked pattern was here #### NULL -PREHOOK: query: select assert_true(sum(a*a) = 2*2) from t +PREHOOK: query: select assert_true(sum(a*a) = 2*2) from t_n29 where (v>0 and v<2) is true PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### -POSTHOOK: query: select assert_true(sum(a*a) = 2*2) from t +POSTHOOK: query: select assert_true(sum(a*a) = 2*2) from t_n29 where (v>0 and v<2) is true POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n29 #### A masked pattern was here #### NULL -PREHOOK: query: select assert_true(sum(a*a) = 2*2) from t +PREHOOK: query: select assert_true(sum(a*a) = 2*2) from t_n29 where (v<2) is true PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### -POSTHOOK: query: select assert_true(sum(a*a) = 2*2) from t +POSTHOOK: query: select assert_true(sum(a*a) = 2*2) from t_n29 where (v<2) is true POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n29 #### A masked pattern was here #### NULL PREHOOK: query: select NULL is true, NULL is not true, NULL is false, NULL is not false -from t +from t_n29 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n29 #### A masked pattern was here #### POSTHOOK: query: select NULL is true, NULL is not true, NULL is false, NULL is not false -from t +from t_n29 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n29 #### A masked pattern was here #### false true false true false true false true diff --git a/ql/src/test/results/clientpositive/udf_length.q.out b/ql/src/test/results/clientpositive/udf_length.q.out index a1a8fec351..d01746bd03 100644 --- a/ql/src/test/results/clientpositive/udf_length.q.out +++ b/ql/src/test/results/clientpositive/udf_length.q.out @@ -13,17 +13,17 @@ Example: 8 Function 
class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFLength Function type:BUILTIN -PREHOOK: query: CREATE TABLE dest1(len INT) +PREHOOK: query: CREATE TABLE dest1_n111(len INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(len INT) +PREHOOK: Output: default@dest1_n111 +POSTHOOK: query: CREATE TABLE dest1_n111(len INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT length(src1.value) +POSTHOOK: Output: default@dest1_n111 +PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n111 SELECT length(src1.value) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT length(src1.value) +POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n111 SELECT length(src1.value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -53,7 +53,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n111 Select Operator expressions: _col0 (type: int) outputColumnNames: len @@ -98,7 +98,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n111 Stage: Stage-2 Stats Work @@ -106,7 +106,7 @@ STAGE PLANS: Column Stats Desc: Columns: len Column Types: int - Table: default.dest1 + Table: default.dest1_n111 Stage: Stage-3 Map Reduce @@ -118,7 +118,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n111 Stage: Stage-5 Map Reduce @@ -130,7 +130,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n111 Stage: Stage-6 Move Operator @@ -138,22 +138,22 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT length(src1.value) +PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n111 SELECT length(src1.value) PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT length(src1.value) +PREHOOK: Output: default@dest1_n111 +POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n111 SELECT length(src1.value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n111 +POSTHOOK: Lineage: dest1_n111.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n111.* FROM dest1_n111 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n111 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: 
query: SELECT dest1_n111.* FROM dest1_n111 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n111 #### A masked pattern was here #### 7 0 @@ -180,33 +180,33 @@ POSTHOOK: Input: default@dest1 0 0 0 -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n111 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n111 +PREHOOK: Output: default@dest1_n111 +POSTHOOK: query: DROP TABLE dest1_n111 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +POSTHOOK: Input: default@dest1_n111 +POSTHOOK: Output: default@dest1_n111 +PREHOOK: query: CREATE TABLE dest1_n111(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n111 +POSTHOOK: query: CREATE TABLE dest1_n111(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +POSTHOOK: Output: default@dest1_n111 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n111 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@dest1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +PREHOOK: Output: default@dest1_n111 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n111 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@dest1 -PREHOOK: query: EXPLAIN SELECT length(dest1.name) FROM dest1 +POSTHOOK: Output: default@dest1_n111 +PREHOOK: query: EXPLAIN SELECT length(dest1_n111.name) FROM dest1_n111 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT length(dest1.name) FROM dest1 +POSTHOOK: query: EXPLAIN SELECT length(dest1_n111.name) FROM dest1_n111 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -217,7 +217,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: dest1 + alias: dest1_n111 Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: length(name) (type: int) @@ -225,12 +225,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: SELECT length(dest1.name) FROM dest1 +PREHOOK: query: SELECT length(dest1_n111.name) FROM dest1_n111 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n111 #### A masked pattern was here #### -POSTHOOK: query: SELECT length(dest1.name) FROM dest1 +POSTHOOK: query: SELECT length(dest1_n111.name) FROM dest1_n111 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n111 #### A masked pattern was here #### 2 diff --git a/ql/src/test/results/clientpositive/udf_octet_length.q.out b/ql/src/test/results/clientpositive/udf_octet_length.q.out index ea5bb90ab5..976561c4e4 100644 --- a/ql/src/test/results/clientpositive/udf_octet_length.q.out +++ b/ql/src/test/results/clientpositive/udf_octet_length.q.out @@ -13,17 +13,17 @@ Example: 15 Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFOctetLength Function type:BUILTIN -PREHOOK: query: CREATE TABLE dest1(len INT) +PREHOOK: query: 
CREATE TABLE dest1_n139(len INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(len INT) +PREHOOK: Output: default@dest1_n139 +POSTHOOK: query: CREATE TABLE dest1_n139(len INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value) +POSTHOOK: Output: default@dest1_n139 +PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n139 SELECT octet_length(src1.value) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value) +POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n139 SELECT octet_length(src1.value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -53,7 +53,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n139 Select Operator expressions: _col0 (type: int) outputColumnNames: len @@ -98,7 +98,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n139 Stage: Stage-2 Stats Work @@ -106,7 +106,7 @@ STAGE PLANS: Column Stats Desc: Columns: len Column Types: int - Table: default.dest1 + Table: default.dest1_n139 Stage: Stage-3 Map Reduce @@ -118,7 +118,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n139 Stage: Stage-5 Map Reduce @@ -130,7 +130,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n139 Stage: Stage-6 Move Operator @@ -138,22 +138,22 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value) +PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n139 SELECT octet_length(src1.value) PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value) +PREHOOK: Output: default@dest1_n139 +POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n139 SELECT octet_length(src1.value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n139 +POSTHOOK: Lineage: dest1_n139.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n139.* FROM dest1_n139 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n139 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n139.* FROM dest1_n139 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 
+POSTHOOK: Input: default@dest1_n139 #### A masked pattern was here #### 7 0 @@ -180,42 +180,42 @@ POSTHOOK: Input: default@dest1 0 0 0 -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n139 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n139 +PREHOOK: Output: default@dest1_n139 +POSTHOOK: query: DROP TABLE dest1_n139 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +POSTHOOK: Input: default@dest1_n139 +POSTHOOK: Output: default@dest1_n139 +PREHOOK: query: CREATE TABLE dest1_n139(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n139 +POSTHOOK: query: CREATE TABLE dest1_n139(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +POSTHOOK: Output: default@dest1_n139 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n139 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@dest1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +PREHOOK: Output: default@dest1_n139 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n139 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@dest1 -PREHOOK: query: INSERT INTO dest1 VALUES(NULL) +POSTHOOK: Output: default@dest1_n139 +PREHOOK: query: INSERT INTO dest1_n139 VALUES(NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT INTO dest1 VALUES(NULL) +PREHOOK: Output: default@dest1_n139 +POSTHOOK: query: INSERT INTO dest1_n139 VALUES(NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.name EXPRESSION [] -PREHOOK: query: EXPLAIN SELECT octet_length(dest1.name) FROM dest1 +POSTHOOK: Output: default@dest1_n139 +POSTHOOK: Lineage: dest1_n139.name EXPRESSION [] +PREHOOK: query: EXPLAIN SELECT octet_length(dest1_n139.name) FROM dest1_n139 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT octet_length(dest1.name) FROM dest1 +POSTHOOK: query: EXPLAIN SELECT octet_length(dest1_n139.name) FROM dest1_n139 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -226,7 +226,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: dest1 + alias: dest1_n139 Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: octet_length(name) (type: int) @@ -234,21 +234,21 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: SELECT octet_length(dest1.name) FROM dest1 +PREHOOK: query: SELECT octet_length(dest1_n139.name) FROM dest1_n139 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n139 #### A masked pattern was here #### -POSTHOOK: query: SELECT octet_length(dest1.name) FROM dest1 +POSTHOOK: query: SELECT octet_length(dest1_n139.name) FROM dest1_n139 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: 
default@dest1_n139 #### A masked pattern was here #### NULL 6 -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n139 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n139 +PREHOOK: Output: default@dest1_n139 +POSTHOOK: query: DROP TABLE dest1_n139 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 +POSTHOOK: Input: default@dest1_n139 +POSTHOOK: Output: default@dest1_n139 diff --git a/ql/src/test/results/clientpositive/udf_printf.q.out b/ql/src/test/results/clientpositive/udf_printf.q.out index 732585a437..077028b61c 100644 --- a/ql/src/test/results/clientpositive/udf_printf.q.out +++ b/ql/src/test/results/clientpositive/udf_printf.q.out @@ -131,31 +131,31 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@binay_udf -PREHOOK: query: create table dest1 (key binary, value int) +PREHOOK: query: create table dest1_n92 (key binary, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: create table dest1 (key binary, value int) +PREHOOK: Output: default@dest1_n92 +POSTHOOK: query: create table dest1_n92 (key binary, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: insert overwrite table dest1 select transform(*) using 'cat' as key binary, value int from binay_udf +POSTHOOK: Output: default@dest1_n92 +PREHOOK: query: insert overwrite table dest1_n92 select transform(*) using 'cat' as key binary, value int from binay_udf PREHOOK: type: QUERY PREHOOK: Input: default@binay_udf -PREHOOK: Output: default@dest1 -POSTHOOK: query: insert overwrite table dest1 select transform(*) using 'cat' as key binary, value int from binay_udf +PREHOOK: Output: default@dest1_n92 +POSTHOOK: query: insert overwrite table dest1_n92 select transform(*) using 'cat' as key binary, value int from binay_udf POSTHOOK: type: QUERY POSTHOOK: Input: default@binay_udf -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.key SCRIPT [(binay_udf)binay_udf.FieldSchema(name:key, type:binary, comment:null), (binay_udf)binay_udf.FieldSchema(name:value, type:int, comment:null), ] -POSTHOOK: Lineage: dest1.value SCRIPT [(binay_udf)binay_udf.FieldSchema(name:key, type:binary, comment:null), (binay_udf)binay_udf.FieldSchema(name:value, type:int, comment:null), ] -PREHOOK: query: select value, printf("format key: %s", key) from dest1 +POSTHOOK: Output: default@dest1_n92 +POSTHOOK: Lineage: dest1_n92.key SCRIPT [(binay_udf)binay_udf.FieldSchema(name:key, type:binary, comment:null), (binay_udf)binay_udf.FieldSchema(name:value, type:int, comment:null), ] +POSTHOOK: Lineage: dest1_n92.value SCRIPT [(binay_udf)binay_udf.FieldSchema(name:key, type:binary, comment:null), (binay_udf)binay_udf.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: select value, printf("format key: %s", key) from dest1_n92 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n92 #### A masked pattern was here #### -POSTHOOK: query: select value, printf("format key: %s", key) from dest1 +POSTHOOK: query: select value, printf("format key: %s", key) from dest1_n92 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n92 #### A masked pattern was here #### 1 format key: 61 00 62 
63 01 02 01 00 2 format key: 00 74 65 73 74 00 @@ -167,14 +167,14 @@ POSTHOOK: Input: default@dest1 8 format key: 01 01 01 9 format key: 00 01 00 10 format key: 01 00 01 -PREHOOK: query: drop table dest1 +PREHOOK: query: drop table dest1_n92 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: drop table dest1 +PREHOOK: Input: default@dest1_n92 +PREHOOK: Output: default@dest1_n92 +POSTHOOK: query: drop table dest1_n92 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 +POSTHOOK: Input: default@dest1_n92 +POSTHOOK: Output: default@dest1_n92 PREHOOK: query: drop table binary_udf PREHOOK: type: DROPTABLE POSTHOOK: query: drop table binary_udf diff --git a/ql/src/test/results/clientpositive/udf_reverse.q.out b/ql/src/test/results/clientpositive/udf_reverse.q.out index abe326c9d6..71c435d163 100644 --- a/ql/src/test/results/clientpositive/udf_reverse.q.out +++ b/ql/src/test/results/clientpositive/udf_reverse.q.out @@ -13,17 +13,17 @@ Example: 'koobecaF' Function class:org.apache.hadoop.hive.ql.udf.UDFReverse Function type:BUILTIN -PREHOOK: query: CREATE TABLE dest1(len STRING) +PREHOOK: query: CREATE TABLE dest1_n40(len STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(len STRING) +PREHOOK: Output: default@dest1_n40 +POSTHOOK: query: CREATE TABLE dest1_n40(len STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT reverse(src1.value) +POSTHOOK: Output: default@dest1_n40 +PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n40 SELECT reverse(src1.value) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT reverse(src1.value) +POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n40 SELECT reverse(src1.value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -53,7 +53,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n40 Select Operator expressions: _col0 (type: string) outputColumnNames: len @@ -98,7 +98,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n40 Stage: Stage-2 Stats Work @@ -106,7 +106,7 @@ STAGE PLANS: Column Stats Desc: Columns: len Column Types: string - Table: default.dest1 + Table: default.dest1_n40 Stage: Stage-3 Map Reduce @@ -118,7 +118,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n40 Stage: Stage-5 Map Reduce @@ -130,7 +130,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n40 Stage: Stage-6 Move Operator @@ -138,22 +138,22 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: 
FROM src1 INSERT OVERWRITE TABLE dest1 SELECT reverse(src1.value) +PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n40 SELECT reverse(src1.value) PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT reverse(src1.value) +PREHOOK: Output: default@dest1_n40 +POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n40 SELECT reverse(src1.value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n40 +POSTHOOK: Lineage: dest1_n40.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n40.* FROM dest1_n40 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n40 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n40.* FROM dest1_n40 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n40 #### A masked pattern was here #### 832_lav @@ -180,36 +180,36 @@ POSTHOOK: Input: default@dest1 -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n40 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n40 +PREHOOK: Output: default@dest1_n40 +POSTHOOK: query: DROP TABLE dest1_n40 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +POSTHOOK: Input: default@dest1_n40 +POSTHOOK: Output: default@dest1_n40 +PREHOOK: query: CREATE TABLE dest1_n40(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n40 +POSTHOOK: query: CREATE TABLE dest1_n40(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +POSTHOOK: Output: default@dest1_n40 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n40 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@dest1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +PREHOOK: Output: default@dest1_n40 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n40 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@dest1 -PREHOOK: query: SELECT count(1) FROM dest1 WHERE reverse(dest1.name) = _UTF-8 0xE993AEE982B5 +POSTHOOK: Output: default@dest1_n40 +PREHOOK: query: SELECT count(1) FROM dest1_n40 WHERE reverse(dest1_n40.name) = _UTF-8 0xE993AEE982B5 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n40 #### A masked pattern was here #### -POSTHOOK: query: SELECT count(1) FROM dest1 WHERE reverse(dest1.name) = _UTF-8 0xE993AEE982B5 +POSTHOOK: query: SELECT count(1) FROM dest1_n40 WHERE reverse(dest1_n40.name) = _UTF-8 0xE993AEE982B5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n40 #### A masked pattern was here #### 1 diff --git 
a/ql/src/test/results/clientpositive/udf_round_2.q.out b/ql/src/test/results/clientpositive/udf_round_2.q.out index 4dbe8fc9f9..b018b53118 100644 --- a/ql/src/test/results/clientpositive/udf_round_2.q.out +++ b/ql/src/test/results/clientpositive/udf_round_2.q.out @@ -1,47 +1,47 @@ -PREHOOK: query: create table tstTbl1(n double) +PREHOOK: query: create table tstTbl1_n0(n double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tstTbl1 -POSTHOOK: query: create table tstTbl1(n double) +PREHOOK: Output: default@tstTbl1_n0 +POSTHOOK: query: create table tstTbl1_n0(n double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstTbl1 -PREHOOK: query: insert overwrite table tstTbl1 +POSTHOOK: Output: default@tstTbl1_n0 +PREHOOK: query: insert overwrite table tstTbl1_n0 select 'NaN' from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tsttbl1 -POSTHOOK: query: insert overwrite table tstTbl1 +PREHOOK: Output: default@tsttbl1_n0 +POSTHOOK: query: insert overwrite table tstTbl1_n0 select 'NaN' from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tsttbl1 -POSTHOOK: Lineage: tsttbl1.n EXPRESSION [] -PREHOOK: query: select * from tstTbl1 +POSTHOOK: Output: default@tsttbl1_n0 +POSTHOOK: Lineage: tsttbl1_n0.n EXPRESSION [] +PREHOOK: query: select * from tstTbl1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tsttbl1 +PREHOOK: Input: default@tsttbl1_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from tstTbl1 +POSTHOOK: query: select * from tstTbl1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tsttbl1 +POSTHOOK: Input: default@tsttbl1_n0 #### A masked pattern was here #### NaN -PREHOOK: query: select round(n, 1) from tstTbl1 +PREHOOK: query: select round(n, 1) from tstTbl1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tsttbl1 +PREHOOK: Input: default@tsttbl1_n0 #### A masked pattern was here #### -POSTHOOK: query: select round(n, 1) from tstTbl1 +POSTHOOK: query: select round(n, 1) from tstTbl1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tsttbl1 +POSTHOOK: Input: default@tsttbl1_n0 #### A masked pattern was here #### NaN -PREHOOK: query: select round(n) from tstTbl1 +PREHOOK: query: select round(n) from tstTbl1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tsttbl1 +PREHOOK: Input: default@tsttbl1_n0 #### A masked pattern was here #### -POSTHOOK: query: select round(n) from tstTbl1 +POSTHOOK: query: select round(n) from tstTbl1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tsttbl1 +POSTHOOK: Input: default@tsttbl1_n0 #### A masked pattern was here #### NaN PREHOOK: query: select round(1/0), round(1/0, 2), round(1.0/0.0), round(1.0/0.0, 2) from src tablesample (1 rows) diff --git a/ql/src/test/results/clientpositive/udf_sort_array.q.out b/ql/src/test/results/clientpositive/udf_sort_array.q.out index 0989a160fd..c8bdd1ada6 100644 --- a/ql/src/test/results/clientpositive/udf_sort_array.q.out +++ b/ql/src/test/results/clientpositive/udf_sort_array.q.out @@ -106,7 +106,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### [{"c":3,"b":5,"a":1},{"a":1,"c":6,"b":8},{"b":2,"a":9,"c":7}] -PREHOOK: query: CREATE TABLE dest1 ( +PREHOOK: query: CREATE TABLE dest1_n108 ( tinyints ARRAY, smallints ARRAY, ints ARRAY, @@ -119,8 +119,8 @@ PREHOOK: query: CREATE TABLE dest1 ( ) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: 
Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1 ( +PREHOOK: Output: default@dest1_n108 +POSTHOOK: query: CREATE TABLE dest1_n108 ( tinyints ARRAY, smallints ARRAY, ints ARRAY, @@ -133,27 +133,27 @@ POSTHOOK: query: CREATE TABLE dest1 ( ) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/primitive_type_arrays.txt' OVERWRITE INTO TABLE dest1 +POSTHOOK: Output: default@dest1_n108 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/primitive_type_arrays.txt' OVERWRITE INTO TABLE dest1_n108 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@dest1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/primitive_type_arrays.txt' OVERWRITE INTO TABLE dest1 +PREHOOK: Output: default@dest1_n108 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/primitive_type_arrays.txt' OVERWRITE INTO TABLE dest1_n108 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n108 PREHOOK: query: SELECT sort_array(tinyints), sort_array(smallints), sort_array(ints), sort_array(bigints), sort_array(booleans), sort_array(floats), sort_array(doubles), sort_array(strings), sort_array(timestamps) - FROM dest1 + FROM dest1_n108 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n108 #### A masked pattern was here #### POSTHOOK: query: SELECT sort_array(tinyints), sort_array(smallints), sort_array(ints), sort_array(bigints), sort_array(booleans), sort_array(floats), sort_array(doubles), sort_array(strings), sort_array(timestamps) - FROM dest1 + FROM dest1_n108 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n108 #### A masked pattern was here #### [1,2,3,4,5] [1,2,7,8,9] [4,8,16,32,64] [1,100,246,357,1000] [false,true] [1.414,1.618,2.718,3.141] [1.41421,1.61803,2.71828,3.14159] ["","aramis","athos","portos"] ["1970-01-05 13:51:04.042","1970-01-07 00:54:54.442","1970-01-16 12:50:35.242"] diff --git a/ql/src/test/results/clientpositive/udf_stddev_samp.q.out b/ql/src/test/results/clientpositive/udf_stddev_samp.q.out index eff960b50c..feefcc815b 100644 --- a/ql/src/test/results/clientpositive/udf_stddev_samp.q.out +++ b/ql/src/test/results/clientpositive/udf_stddev_samp.q.out @@ -34,51 +34,51 @@ If applied to a set with a single element: NULL is returned. 
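The stddev_samp hunks below only rename the test table from t to t_n23; the expected values are unchanged. For context, the 1.0 in the golden output can be checked by hand with a minimal sketch (hypothetical scratch table name, not part of this patch):

-- stddev_samp over {1, -1, 0}: mean = 0, squared deviations sum to 1 + 1 + 0 = 2,
-- var_samp = 2 / (3 - 1) = 1.0, stddev_samp = sqrt(1.0) = 1.0
CREATE TABLE scratch_stddev (a INT);
INSERT INTO scratch_stddev VALUES (1), (-1), (0);
SELECT stddev_samp(a) FROM scratch_stddev;              -- 1.0
SELECT stddev_samp(a) FROM scratch_stddev WHERE a = 0;  -- single element: NULL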
Otherwise it computes: sqrt(var_samp(x)) Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDAFStdSample Function type:BUILTIN -PREHOOK: query: drop table if exists t +PREHOOK: query: drop table if exists t_n23 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists t +POSTHOOK: query: drop table if exists t_n23 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table t (a int) +PREHOOK: query: create table t_n23 (a int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (a int) +PREHOOK: Output: default@t_n23 +POSTHOOK: query: create table t_n23 (a int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1),(-1),(0) +POSTHOOK: Output: default@t_n23 +PREHOOK: query: insert into t_n23 values (1),(-1),(0) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1),(-1),(0) +PREHOOK: Output: default@t_n23 +POSTHOOK: query: insert into t_n23 values (1),(-1),(0) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -PREHOOK: query: select stddev_samp(a) from t +POSTHOOK: Output: default@t_n23 +POSTHOOK: Lineage: t_n23.a SCRIPT [] +PREHOOK: query: select stddev_samp(a) from t_n23 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n23 #### A masked pattern was here #### -POSTHOOK: query: select stddev_samp(a) from t +POSTHOOK: query: select stddev_samp(a) from t_n23 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n23 #### A masked pattern was here #### 1.0 -PREHOOK: query: select stddev_samp(a) from t where a=0 +PREHOOK: query: select stddev_samp(a) from t_n23 where a=0 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n23 #### A masked pattern was here #### -POSTHOOK: query: select stddev_samp(a) from t where a=0 +POSTHOOK: query: select stddev_samp(a) from t_n23 where a=0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n23 #### A masked pattern was here #### NULL -PREHOOK: query: select round(stddev_samp(a),5) from t where a>=0 +PREHOOK: query: select round(stddev_samp(a),5) from t_n23 where a>=0 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n23 #### A masked pattern was here #### -POSTHOOK: query: select round(stddev_samp(a),5) from t where a>=0 +POSTHOOK: query: select round(stddev_samp(a),5) from t_n23 where a>=0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n23 #### A masked pattern was here #### 0.70711 diff --git a/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out b/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out index 778eac4d27..071a456f13 100644 --- a/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out +++ b/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out @@ -11,80 +11,80 @@ to_unix_timestamp(date[, pattern]) - Returns the UNIX timestamp Converts the specified time to number of seconds since 1970-01-01. 
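The to_unix_timestamp hunks that follow only swap oneline for oneline_n0. As an illustrative aside (not part of this patch), the behavior the renamed queries exercise can be sketched as below; results are session-time-zone dependent, and the fixed values in the golden output assume the zone the q-tests run under:

-- default pattern: 'yyyy-MM-dd HH:mm:ss'
SELECT to_unix_timestamp('2009-03-20 11:30:01');
-- explicit SimpleDateFormat-style pattern
SELECT to_unix_timestamp('2009-03-20', 'yyyy-MM-dd');
-- unparseable input yields NULL rather than an error
SELECT to_unix_timestamp('random_string');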
Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUnixTimeStamp Function type:BUILTIN -PREHOOK: query: create table oneline(key int, value string) +PREHOOK: query: create table oneline_n0(key int, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@oneline -POSTHOOK: query: create table oneline(key int, value string) +PREHOOK: Output: default@oneline_n0 +POSTHOOK: query: create table oneline_n0(key int, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@oneline -PREHOOK: query: load data local inpath '../../data/files/things.txt' into table oneline +POSTHOOK: Output: default@oneline_n0 +PREHOOK: query: load data local inpath '../../data/files/things.txt' into table oneline_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@oneline -POSTHOOK: query: load data local inpath '../../data/files/things.txt' into table oneline +PREHOOK: Output: default@oneline_n0 +POSTHOOK: query: load data local inpath '../../data/files/things.txt' into table oneline_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@oneline +POSTHOOK: Output: default@oneline_n0 PREHOOK: query: SELECT '2009-03-20 11:30:01', to_unix_timestamp('2009-03-20 11:30:01') -FROM oneline +FROM oneline_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@oneline +PREHOOK: Input: default@oneline_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT '2009-03-20 11:30:01', to_unix_timestamp('2009-03-20 11:30:01') -FROM oneline +FROM oneline_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@oneline +POSTHOOK: Input: default@oneline_n0 #### A masked pattern was here #### 2009-03-20 11:30:01 1237573801 PREHOOK: query: SELECT '2009-03-20', to_unix_timestamp('2009-03-20', 'yyyy-MM-dd') -FROM oneline +FROM oneline_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@oneline +PREHOOK: Input: default@oneline_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT '2009-03-20', to_unix_timestamp('2009-03-20', 'yyyy-MM-dd') -FROM oneline +FROM oneline_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@oneline +POSTHOOK: Input: default@oneline_n0 #### A masked pattern was here #### 2009-03-20 1237532400 PREHOOK: query: SELECT '2009 Mar 20 11:30:01 am', to_unix_timestamp('2009 Mar 20 11:30:01 am', 'yyyy MMM dd h:mm:ss a') -FROM oneline +FROM oneline_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@oneline +PREHOOK: Input: default@oneline_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT '2009 Mar 20 11:30:01 am', to_unix_timestamp('2009 Mar 20 11:30:01 am', 'yyyy MMM dd h:mm:ss a') -FROM oneline +FROM oneline_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@oneline +POSTHOOK: Input: default@oneline_n0 #### A masked pattern was here #### 2009 Mar 20 11:30:01 am 1237573801 PREHOOK: query: SELECT 'random_string', to_unix_timestamp('random_string') -FROM oneline +FROM oneline_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@oneline +PREHOOK: Input: default@oneline_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT 'random_string', to_unix_timestamp('random_string') -FROM oneline +FROM oneline_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@oneline +POSTHOOK: Input: default@oneline_n0 #### A masked pattern was here #### random_string NULL PREHOOK: query: explain select * from (select * from src) a where unix_timestamp(a.key) > 10 diff --git a/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out 
b/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out index fc0de7db52..c86a85cfed 100644 --- a/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out +++ b/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out @@ -74,32 +74,32 @@ POSTHOOK: Input: default@oneline 2009 Mar 20 11:30:01 am 1237573801 unix_timestamp(void) is deprecated. Use current_timestamp instead. unix_timestamp(void) is deprecated. Use current_timestamp instead. -PREHOOK: query: create table foo as SELECT +PREHOOK: query: create table foo_n3 as SELECT 'deprecated' as a, unix_timestamp() as b FROM oneline PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@oneline PREHOOK: Output: database:default -PREHOOK: Output: default@foo -POSTHOOK: query: create table foo as SELECT +PREHOOK: Output: default@foo_n3 +POSTHOOK: query: create table foo_n3 as SELECT 'deprecated' as a, unix_timestamp() as b FROM oneline POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@oneline POSTHOOK: Output: database:default -POSTHOOK: Output: default@foo -POSTHOOK: Lineage: foo.a SIMPLE [] -POSTHOOK: Lineage: foo.b SIMPLE [] -PREHOOK: query: drop table foo +POSTHOOK: Output: default@foo_n3 +POSTHOOK: Lineage: foo_n3.a SIMPLE [] +POSTHOOK: Lineage: foo_n3.b SIMPLE [] +PREHOOK: query: drop table foo_n3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@foo -PREHOOK: Output: default@foo -POSTHOOK: query: drop table foo +PREHOOK: Input: default@foo_n3 +PREHOOK: Output: default@foo_n3 +POSTHOOK: query: drop table foo_n3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@foo -POSTHOOK: Output: default@foo +POSTHOOK: Input: default@foo_n3 +POSTHOOK: Output: default@foo_n3 PREHOOK: query: SELECT 'random_string', unix_timestamp('random_string') diff --git a/ql/src/test/results/clientpositive/udf_var_samp.q.out b/ql/src/test/results/clientpositive/udf_var_samp.q.out index 6c79cf9b9e..296291cc15 100644 --- a/ql/src/test/results/clientpositive/udf_var_samp.q.out +++ b/ql/src/test/results/clientpositive/udf_var_samp.q.out @@ -34,51 +34,51 @@ If applied to a set with a single element: NULL is returned. 
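As with stddev_samp above, the var_samp hunks below (t renamed to t_n27) leave the expected values intact, and they can be verified against the (S2-S1*S1/N)/(N-1) formula from the DESCRIBE output. A minimal sketch, again with a hypothetical scratch table not taken from this patch:

-- {1, -1, 0}: S1 = 0, S2 = 2, N = 3  ->  (2 - 0/3) / 2 = 1.0
CREATE TABLE scratch_var (a INT);
INSERT INTO scratch_var VALUES (1), (-1), (0);
SELECT var_samp(a) FROM scratch_var;                         -- 1.0
-- a >= 0 keeps {1, 0}: S1 = 1, S2 = 1, N = 2  ->  (1 - 1/2) / 1 = 0.5
SELECT round(var_samp(a), 5) FROM scratch_var WHERE a >= 0;  -- 0.5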
Otherwise it computes: (S2-S1*S1/N)/(N-1) Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVarianceSample Function type:BUILTIN -PREHOOK: query: drop table if exists t +PREHOOK: query: drop table if exists t_n27 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists t +POSTHOOK: query: drop table if exists t_n27 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table t (a int) +PREHOOK: query: create table t_n27 (a int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (a int) +PREHOOK: Output: default@t_n27 +POSTHOOK: query: create table t_n27 (a int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into t values (1),(-1),(0) +POSTHOOK: Output: default@t_n27 +PREHOOK: query: insert into t_n27 values (1),(-1),(0) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into t values (1),(-1),(0) +PREHOOK: Output: default@t_n27 +POSTHOOK: query: insert into t_n27 values (1),(-1),(0) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.a SCRIPT [] -PREHOOK: query: select var_samp(a) from t +POSTHOOK: Output: default@t_n27 +POSTHOOK: Lineage: t_n27.a SCRIPT [] +PREHOOK: query: select var_samp(a) from t_n27 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n27 #### A masked pattern was here #### -POSTHOOK: query: select var_samp(a) from t +POSTHOOK: query: select var_samp(a) from t_n27 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n27 #### A masked pattern was here #### 1.0 -PREHOOK: query: select var_samp(a) from t where a=0 +PREHOOK: query: select var_samp(a) from t_n27 where a=0 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n27 #### A masked pattern was here #### -POSTHOOK: query: select var_samp(a) from t where a=0 +POSTHOOK: query: select var_samp(a) from t_n27 where a=0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n27 #### A masked pattern was here #### NULL -PREHOOK: query: select round(var_samp(a),5) from t where a>=0 +PREHOOK: query: select round(var_samp(a),5) from t_n27 where a>=0 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n27 #### A masked pattern was here #### -POSTHOOK: query: select round(var_samp(a),5) from t where a>=0 +POSTHOOK: query: select round(var_samp(a),5) from t_n27 where a>=0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n27 #### A masked pattern was here #### 0.5 diff --git a/ql/src/test/results/clientpositive/udtf_json_tuple.q.out b/ql/src/test/results/clientpositive/udtf_json_tuple.q.out index e9acb90536..7c3920e3b9 100644 --- a/ql/src/test/results/clientpositive/udtf_json_tuple.q.out +++ b/ql/src/test/results/clientpositive/udtf_json_tuple.q.out @@ -406,40 +406,40 @@ POSTHOOK: Input: default@json_t NULL 1 2 2 value2 1 -PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE dest1_n56(c1 STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS RCFILE +PREHOOK: Output: default@dest1_n56 +POSTHOOK: query: CREATE TABLE dest1_n56(c1 STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: 
default@dest1 -PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows) +POSTHOOK: Output: default@dest1_n56 +PREHOOK: query: INSERT OVERWRITE TABLE dest1_n56 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows) +PREHOOK: Output: default@dest1_n56 +POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n56 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 SIMPLE [] -PREHOOK: query: SELECT * FROM dest1 +POSTHOOK: Output: default@dest1_n56 +POSTHOOK: Lineage: dest1_n56.c1 SIMPLE [] +PREHOOK: query: SELECT * FROM dest1_n56 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n56 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM dest1 +POSTHOOK: query: SELECT * FROM dest1_n56 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n56 #### A masked pattern was here #### {"a":"b c"} -PREHOOK: query: SELECT json FROM dest1 a LATERAL VIEW json_tuple(c1, 'a') b AS json +PREHOOK: query: SELECT json FROM dest1_n56 a LATERAL VIEW json_tuple(c1, 'a') b AS json PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n56 #### A masked pattern was here #### -POSTHOOK: query: SELECT json FROM dest1 a LATERAL VIEW json_tuple(c1, 'a') b AS json +POSTHOOK: query: SELECT json FROM dest1_n56 a LATERAL VIEW json_tuple(c1, 'a') b AS json POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n56 #### A masked pattern was here #### b c diff --git a/ql/src/test/results/clientpositive/udtf_replicate_rows.q.out b/ql/src/test/results/clientpositive/udtf_replicate_rows.q.out index d402289522..c366f73b11 100644 --- a/ql/src/test/results/clientpositive/udtf_replicate_rows.q.out +++ b/ql/src/test/results/clientpositive/udtf_replicate_rows.q.out @@ -10,97 +10,97 @@ POSTHOOK: type: DESCFUNCTION replicate_rows(n, cols...) 
- turns 1 row into n rows Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDTFReplicateRows Function type:BUILTIN -PREHOOK: query: create table t (x bigint, y string, z int) +PREHOOK: query: create table t_n13 (x bigint, y string, z int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t -POSTHOOK: query: create table t (x bigint, y string, z int) +PREHOOK: Output: default@t_n13 +POSTHOOK: query: create table t_n13 (x bigint, y string, z int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t -PREHOOK: query: insert into table t values (3,'2',0),(2,'3',1),(0,'2',2),(-1,'k',3) +POSTHOOK: Output: default@t_n13 +PREHOOK: query: insert into table t_n13 values (3,'2',0),(2,'3',1),(0,'2',2),(-1,'k',3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t -POSTHOOK: query: insert into table t values (3,'2',0),(2,'3',1),(0,'2',2),(-1,'k',3) +PREHOOK: Output: default@t_n13 +POSTHOOK: query: insert into table t_n13 values (3,'2',0),(2,'3',1),(0,'2',2),(-1,'k',3) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t -POSTHOOK: Lineage: t.x SCRIPT [] -POSTHOOK: Lineage: t.y SCRIPT [] -POSTHOOK: Lineage: t.z SCRIPT [] -PREHOOK: query: SELECT replicate_rows(x,y) FROM t +POSTHOOK: Output: default@t_n13 +POSTHOOK: Lineage: t_n13.x SCRIPT [] +POSTHOOK: Lineage: t_n13.y SCRIPT [] +POSTHOOK: Lineage: t_n13.z SCRIPT [] +PREHOOK: query: SELECT replicate_rows(x,y) FROM t_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT replicate_rows(x,y) FROM t +POSTHOOK: query: SELECT replicate_rows(x,y) FROM t_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n13 #### A masked pattern was here #### 3 2 3 2 3 2 2 3 2 3 -PREHOOK: query: SELECT replicate_rows(x,y,y) FROM t +PREHOOK: query: SELECT replicate_rows(x,y,y) FROM t_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT replicate_rows(x,y,y) FROM t +POSTHOOK: query: SELECT replicate_rows(x,y,y) FROM t_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n13 #### A masked pattern was here #### 3 2 2 3 2 2 3 2 2 2 3 3 2 3 3 -PREHOOK: query: SELECT replicate_rows(x,y,y,y,z) FROM t +PREHOOK: query: SELECT replicate_rows(x,y,y,y,z) FROM t_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT replicate_rows(x,y,y,y,z) FROM t +POSTHOOK: query: SELECT replicate_rows(x,y,y,y,z) FROM t_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n13 #### A masked pattern was here #### 3 2 2 2 0 3 2 2 2 0 3 2 2 2 0 2 3 3 3 1 2 3 3 3 1 -PREHOOK: query: select y,x from (SELECT replicate_rows(x,y) as (x,y) FROM t)subq +PREHOOK: query: select y,x from (SELECT replicate_rows(x,y) as (x,y) FROM t_n13)subq PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n13 #### A masked pattern was here #### -POSTHOOK: query: select y,x from (SELECT replicate_rows(x,y) as (x,y) FROM t)subq +POSTHOOK: query: select y,x from (SELECT replicate_rows(x,y) as (x,y) FROM t_n13)subq POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n13 #### A masked pattern was here #### 2 3 2 3 2 3 3 2 3 2 -PREHOOK: query: select z,y,x from(SELECT 
replicate_rows(x,y,y) as (z,y,x) FROM t)subq +PREHOOK: query: select z,y,x from(SELECT replicate_rows(x,y,y) as (z,y,x) FROM t_n13)subq PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n13 #### A masked pattern was here #### -POSTHOOK: query: select z,y,x from(SELECT replicate_rows(x,y,y) as (z,y,x) FROM t)subq +POSTHOOK: query: select z,y,x from(SELECT replicate_rows(x,y,y) as (z,y,x) FROM t_n13)subq POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n13 #### A masked pattern was here #### 3 2 2 3 2 2 3 2 2 2 3 3 2 3 3 -PREHOOK: query: SELECT replicate_rows(x,concat(y,'...'),y) FROM t +PREHOOK: query: SELECT replicate_rows(x,concat(y,'...'),y) FROM t_n13 PREHOOK: type: QUERY -PREHOOK: Input: default@t +PREHOOK: Input: default@t_n13 #### A masked pattern was here #### -POSTHOOK: query: SELECT replicate_rows(x,concat(y,'...'),y) FROM t +POSTHOOK: query: SELECT replicate_rows(x,concat(y,'...'),y) FROM t_n13 POSTHOOK: type: QUERY -POSTHOOK: Input: default@t +POSTHOOK: Input: default@t_n13 #### A masked pattern was here #### 3 2... 2 3 2... 2 diff --git a/ql/src/test/results/clientpositive/union10.q.out b/ql/src/test/results/clientpositive/union10.q.out index 14a92509f9..b04e4ba7c8 100644 --- a/ql/src/test/results/clientpositive/union10.q.out +++ b/ql/src/test/results/clientpositive/union10.q.out @@ -1,13 +1,13 @@ -PREHOOK: query: create table tmptable(key string, value int) +PREHOOK: query: create table tmptable_n3(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string, value int) +PREHOOK: Output: default@tmptable_n3 +POSTHOOK: query: create table tmptable_n3(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable +POSTHOOK: Output: default@tmptable_n3 PREHOOK: query: explain -insert overwrite table tmptable +insert overwrite table tmptable_n3 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2 @@ -15,7 +15,7 @@ insert overwrite table tmptable select 'tst3' as key, count(1) as value from src s3) unionsrc PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table tmptable +insert overwrite table tmptable_n3 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2 @@ -88,7 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n3 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, value @@ -116,7 +116,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n3 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, value @@ -144,7 +144,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: 
default.tmptable_n3 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, value @@ -189,7 +189,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n3 Stage: Stage-3 Stats Work @@ -197,7 +197,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, int - Table: default.tmptable + Table: default.tmptable_n3 Stage: Stage-4 Map Reduce @@ -209,7 +209,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n3 Stage: Stage-6 Map Reduce @@ -221,7 +221,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n3 Stage: Stage-7 Move Operator @@ -299,7 +299,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -PREHOOK: query: insert overwrite table tmptable +PREHOOK: query: insert overwrite table tmptable_n3 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2 @@ -307,8 +307,8 @@ PREHOOK: query: insert overwrite table tmptable select 'tst3' as key, count(1) as value from src s3) unionsrc PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmptable -POSTHOOK: query: insert overwrite table tmptable +PREHOOK: Output: default@tmptable_n3 +POSTHOOK: query: insert overwrite table tmptable_n3 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2 @@ -316,16 +316,16 @@ POSTHOOK: query: insert overwrite table tmptable select 'tst3' as key, count(1) as value from src s3) unionsrc POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key EXPRESSION [] -POSTHOOK: Lineage: tmptable.value EXPRESSION [(src)s1.null, (src)s2.null, (src)s3.null, ] -PREHOOK: query: select * from tmptable x sort by x.key +POSTHOOK: Output: default@tmptable_n3 +POSTHOOK: Lineage: tmptable_n3.key EXPRESSION [] +POSTHOOK: Lineage: tmptable_n3.value EXPRESSION [(src)s1.null, (src)s2.null, (src)s3.null, ] +PREHOOK: query: select * from tmptable_n3 x sort by x.key PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n3 #### A masked pattern was here #### -POSTHOOK: query: select * from tmptable x sort by x.key +POSTHOOK: query: select * from tmptable_n3 x sort by x.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n3 #### A masked pattern was here #### tst1 500 tst2 500 diff --git a/ql/src/test/results/clientpositive/union12.q.out b/ql/src/test/results/clientpositive/union12.q.out index 218b9ba81b..fb8be48bd0 100644 --- a/ql/src/test/results/clientpositive/union12.q.out +++ b/ql/src/test/results/clientpositive/union12.q.out @@ -1,13 +1,13 @@ -PREHOOK: query: create table tmptable(key string, value int) +PREHOOK: query: create table 
tmptable_n10(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string, value int) +PREHOOK: Output: default@tmptable_n10 +POSTHOOK: query: create table tmptable_n10(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable +POSTHOOK: Output: default@tmptable_n10 PREHOOK: query: explain -insert overwrite table tmptable +insert overwrite table tmptable_n10 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src1 s2 @@ -15,7 +15,7 @@ insert overwrite table tmptable select 'tst3' as key, count(1) as value from srcbucket s3) unionsrc PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table tmptable +insert overwrite table tmptable_n10 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src1 s2 @@ -88,7 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n10 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, value @@ -116,7 +116,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n10 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, value @@ -144,7 +144,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n10 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, value @@ -189,7 +189,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n10 Stage: Stage-3 Stats Work @@ -197,7 +197,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, int - Table: default.tmptable + Table: default.tmptable_n10 Stage: Stage-4 Map Reduce @@ -209,7 +209,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n10 Stage: Stage-6 Map Reduce @@ -221,7 +221,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n10 Stage: Stage-7 Move Operator @@ -299,7 +299,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -PREHOOK: query: insert overwrite table tmptable +PREHOOK: query: insert overwrite table tmptable_n10 
select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src1 s2 @@ -309,8 +309,8 @@ PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 PREHOOK: Input: default@srcbucket -PREHOOK: Output: default@tmptable -POSTHOOK: query: insert overwrite table tmptable +PREHOOK: Output: default@tmptable_n10 +POSTHOOK: query: insert overwrite table tmptable_n10 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src1 s2 @@ -320,16 +320,16 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 POSTHOOK: Input: default@srcbucket -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key EXPRESSION [] -POSTHOOK: Lineage: tmptable.value EXPRESSION [(src)s1.null, (src1)s2.null, (srcbucket)s3.null, ] -PREHOOK: query: select * from tmptable x sort by x.key +POSTHOOK: Output: default@tmptable_n10 +POSTHOOK: Lineage: tmptable_n10.key EXPRESSION [] +POSTHOOK: Lineage: tmptable_n10.value EXPRESSION [(src)s1.null, (src1)s2.null, (srcbucket)s3.null, ] +PREHOOK: query: select * from tmptable_n10 x sort by x.key PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n10 #### A masked pattern was here #### -POSTHOOK: query: select * from tmptable x sort by x.key +POSTHOOK: query: select * from tmptable_n10 x sort by x.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n10 #### A masked pattern was here #### tst1 500 tst2 25 diff --git a/ql/src/test/results/clientpositive/union17.q.out b/ql/src/test/results/clientpositive/union17.q.out index 97e97f7c72..ef28c01610 100644 --- a/ql/src/test/results/clientpositive/union17.q.out +++ b/ql/src/test/results/clientpositive/union17.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n9(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n9 +POSTHOOK: query: CREATE TABLE DEST1_n9(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n9 +PREHOOK: query: CREATE TABLE DEST2_n7(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n7 +POSTHOOK: query: CREATE TABLE DEST2_n7(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n7 PREHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP 
BY unionsrc.key, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n9 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key +INSERT OVERWRITE TABLE DEST2_n7 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value PREHOOK: type: QUERY POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n9 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key +INSERT OVERWRITE TABLE DEST2_n7 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -154,7 +154,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n9 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -179,7 +179,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n9 Stage: Stage-4 Stats Work @@ -187,7 +187,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.dest1 + Table: default.dest1_n9 Stage: Stage-5 Map Reduce @@ -218,7 +218,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: string, string, string - Table: default.dest2 + Table: default.dest2_n7 Stage: Stage-6 Map Reduce @@ -248,7 +248,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n7 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -273,7 +273,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n7 Stage: Stage-8 Map Reduce @@ -301,33 +301,33 @@ STAGE PLANS: PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n9 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key +INSERT OVERWRITE TABLE DEST2_n7 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value PREHOOK: type: QUERY PREHOOK: 
Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n9 +PREHOOK: Output: default@dest2_n7 POSTHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n9 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key +INSERT OVERWRITE TABLE DEST2_n7 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val1 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val2 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: Output: default@dest1_n9 +POSTHOOK: Output: default@dest2_n7 +POSTHOOK: Lineage: dest1_n9.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n9.value EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n7.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n7.val1 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n7.val2 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n9.* FROM DEST1_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n9 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 +POSTHOOK: query: SELECT DEST1_n9.* FROM DEST1_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n9 #### A masked pattern was here #### 0 1 10 1 @@ -639,13 +639,13 @@ POSTHOOK: Input: default@dest1 97 1 98 1 tst1 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: query: SELECT DEST2_n7.* FROM DEST2_n7 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n7 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 +POSTHOOK: query: SELECT DEST2_n7.* FROM DEST2_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n7 #### A masked pattern was here #### 0 val_0 1 10 val_10 1 diff --git a/ql/src/test/results/clientpositive/union18.q.out b/ql/src/test/results/clientpositive/union18.q.out index a7b96c5e65..16cb5ca02e 100644 --- a/ql/src/test/results/clientpositive/union18.q.out +++ b/ql/src/test/results/clientpositive/union18.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE 
DEST1_n19(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n19 +POSTHOOK: query: CREATE TABLE DEST1_n19(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n19 +PREHOOK: query: CREATE TABLE DEST2_n17(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n17 +POSTHOOK: query: CREATE TABLE DEST2_n17(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n17 PREHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, unionsrc.value -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n19 SELECT unionsrc.key, unionsrc.value +INSERT OVERWRITE TABLE DEST2_n17 SELECT unionsrc.key, unionsrc.value, unionsrc.value PREHOOK: type: QUERY POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, unionsrc.value -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n19 SELECT unionsrc.key, unionsrc.value +INSERT OVERWRITE TABLE DEST2_n17 SELECT unionsrc.key, unionsrc.value, unionsrc.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -91,7 +91,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n19 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -116,7 +116,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n17 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -148,7 +148,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n19 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -173,7 +173,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: 
default.dest2_n17 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -220,7 +220,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n19 Stage: Stage-4 Stats Work @@ -228,7 +228,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.dest1 + Table: default.dest1_n19 Stage: Stage-5 Map Reduce @@ -240,7 +240,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n19 Stage: Stage-7 Map Reduce @@ -252,7 +252,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n19 Stage: Stage-8 Move Operator @@ -268,7 +268,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n17 Stage: Stage-10 Stats Work @@ -276,7 +276,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: string, string, string - Table: default.dest2 + Table: default.dest2_n17 Stage: Stage-11 Map Reduce @@ -304,33 +304,33 @@ STAGE PLANS: PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, unionsrc.value -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n19 SELECT unionsrc.key, unionsrc.value +INSERT OVERWRITE TABLE DEST2_n17 SELECT unionsrc.key, unionsrc.value, unionsrc.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n19 +PREHOOK: Output: default@dest2_n17 POSTHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, unionsrc.value -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n19 SELECT unionsrc.key, unionsrc.value +INSERT OVERWRITE TABLE DEST2_n17 SELECT unionsrc.key, unionsrc.value, unionsrc.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val1 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val2 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: 
query: SELECT DEST1.* FROM DEST1 SORT BY DEST1.key, DEST1.value +POSTHOOK: Output: default@dest1_n19 +POSTHOOK: Output: default@dest2_n17 +POSTHOOK: Lineage: dest1_n19.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n19.value EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n17.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n17.val1 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n17.val2 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n19.* FROM DEST1_n19 SORT BY DEST1_n19.key, DEST1_n19.value PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n19 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 SORT BY DEST1.key, DEST1.value +POSTHOOK: query: SELECT DEST1_n19.* FROM DEST1_n19 SORT BY DEST1_n19.key, DEST1_n19.value POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n19 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -833,13 +833,13 @@ POSTHOOK: Input: default@dest1 98 val_98 98 val_98 tst1 500 -PREHOOK: query: SELECT DEST2.* FROM DEST2 SORT BY DEST2.key, DEST2.val1, DEST2.val2 +PREHOOK: query: SELECT DEST2_n17.* FROM DEST2_n17 SORT BY DEST2_n17.key, DEST2_n17.val1, DEST2_n17.val2 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n17 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 SORT BY DEST2.key, DEST2.val1, DEST2.val2 +POSTHOOK: query: SELECT DEST2_n17.* FROM DEST2_n17 SORT BY DEST2_n17.key, DEST2_n17.val1, DEST2_n17.val2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n17 #### A masked pattern was here #### 0 val_0 val_0 0 val_0 val_0 diff --git a/ql/src/test/results/clientpositive/union19.q.out b/ql/src/test/results/clientpositive/union19.q.out index 571f95c176..637ef802be 100644 --- a/ql/src/test/results/clientpositive/union19.q.out +++ b/ql/src/test/results/clientpositive/union19.q.out @@ -1,32 +1,32 @@ -PREHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1_n12(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST1 -POSTHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST1_n12 +POSTHOOK: query: CREATE TABLE DEST1_n12(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST1 -PREHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@DEST1_n12 +PREHOOK: query: CREATE TABLE DEST2_n10(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DEST2 -POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: Output: default@DEST2_n10 +POSTHOOK: query: CREATE TABLE DEST2_n10(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DEST2 +POSTHOOK: Output: default@DEST2_n10 PREHOOK: query: explain FROM (select 
'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n12 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key +INSERT OVERWRITE TABLE DEST2_n10 SELECT unionsrc.key, unionsrc.value, unionsrc.value PREHOOK: type: QUERY POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n12 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key +INSERT OVERWRITE TABLE DEST2_n10 SELECT unionsrc.key, unionsrc.value, unionsrc.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -103,7 +103,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n10 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -151,7 +151,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n10 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: key, val1, val2 @@ -185,7 +185,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n12 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -210,7 +210,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n12 Stage: Stage-4 Stats Work @@ -218,7 +218,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.dest1 + Table: default.dest1_n12 Stage: Stage-5 Map Reduce @@ -249,7 +249,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, val1, val2 Column Types: string, string, string - Table: default.dest2 + Table: default.dest2_n10 Stage: Stage-1 Move Operator @@ -259,7 +259,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest2 + name: default.dest2_n10 Stage: Stage-7 Map Reduce @@ -287,33 +287,33 @@ STAGE PLANS: PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key -INSERT OVERWRITE TABLE 
DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n12 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key +INSERT OVERWRITE TABLE DEST2_n10 SELECT unionsrc.key, unionsrc.value, unionsrc.value PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@dest1 -PREHOOK: Output: default@dest2 +PREHOOK: Output: default@dest1_n12 +PREHOOK: Output: default@dest2_n10 POSTHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc -INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key -INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value +INSERT OVERWRITE TABLE DEST1_n12 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key +INSERT OVERWRITE TABLE DEST2_n10 SELECT unionsrc.key, unionsrc.value, unionsrc.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@dest1 -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest1.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest1.value EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val1 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: dest2.val2 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT DEST1.* FROM DEST1 SORT BY DEST1.key, DEST1.value +POSTHOOK: Output: default@dest1_n12 +POSTHOOK: Output: default@dest2_n10 +POSTHOOK: Lineage: dest1_n12.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest1_n12.value EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n10.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n10.val1 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: dest2_n10.val2 EXPRESSION [(src)s1.null, (src)s2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT DEST1_n12.* FROM DEST1_n12 SORT BY DEST1_n12.key, DEST1_n12.value PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n12 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST1.* FROM DEST1 SORT BY DEST1.key, DEST1.value +POSTHOOK: query: SELECT DEST1_n12.* FROM DEST1_n12 SORT BY DEST1_n12.key, DEST1_n12.value POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n12 #### A masked pattern was here #### 0 3 10 1 @@ -625,13 +625,13 @@ POSTHOOK: Input: default@dest1 97 2 98 2 tst1 1 -PREHOOK: query: SELECT DEST2.* FROM DEST2 SORT BY DEST2.key, DEST2.val1, DEST2.val2 +PREHOOK: query: SELECT DEST2_n10.* FROM DEST2_n10 SORT BY DEST2_n10.key, DEST2_n10.val1, DEST2_n10.val2 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n10 #### A masked pattern was here #### -POSTHOOK: query: SELECT DEST2.* FROM DEST2 SORT BY DEST2.key, DEST2.val1, DEST2.val2 +POSTHOOK: query: SELECT DEST2_n10.* FROM DEST2_n10 SORT BY DEST2_n10.key, DEST2_n10.val1, DEST2_n10.val2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: 
default@dest2_n10 #### A masked pattern was here #### 0 val_0 val_0 0 val_0 val_0 diff --git a/ql/src/test/results/clientpositive/union24.q.out b/ql/src/test/results/clientpositive/union24.q.out index 8920506a1b..cc3ef4d669 100644 --- a/ql/src/test/results/clientpositive/union24.q.out +++ b/ql/src/test/results/clientpositive/union24.q.out @@ -1,71 +1,71 @@ -PREHOOK: query: create table src2 as select key, count(1) as count from src group by key +PREHOOK: query: create table src2_n6 as select key, count(1) as count from src group by key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@src2 -POSTHOOK: query: create table src2 as select key, count(1) as count from src group by key +PREHOOK: Output: default@src2_n6 +POSTHOOK: query: create table src2_n6 as select key, count(1) as count from src group by key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@src2 -POSTHOOK: Lineage: src2.count EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: src2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: create table src3 as select * from src2 +POSTHOOK: Output: default@src2_n6 +POSTHOOK: Lineage: src2_n6.count EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: src2_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: create table src3_n2 as select * from src2_n6 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src2 +PREHOOK: Input: default@src2_n6 PREHOOK: Output: database:default -PREHOOK: Output: default@src3 -POSTHOOK: query: create table src3 as select * from src2 +PREHOOK: Output: default@src3_n2 +POSTHOOK: query: create table src3_n2 as select * from src2_n6 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src2 +POSTHOOK: Input: default@src2_n6 POSTHOOK: Output: database:default -POSTHOOK: Output: default@src3 -POSTHOOK: Lineage: src3.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ] -POSTHOOK: Lineage: src3.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: create table src4 as select * from src2 +POSTHOOK: Output: default@src3_n2 +POSTHOOK: Lineage: src3_n2.count SIMPLE [(src2_n6)src2_n6.FieldSchema(name:count, type:bigint, comment:null), ] +POSTHOOK: Lineage: src3_n2.key SIMPLE [(src2_n6)src2_n6.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: create table src4_n0 as select * from src2_n6 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src2 +PREHOOK: Input: default@src2_n6 PREHOOK: Output: database:default -PREHOOK: Output: default@src4 -POSTHOOK: query: create table src4 as select * from src2 +PREHOOK: Output: default@src4_n0 +POSTHOOK: query: create table src4_n0 as select * from src2_n6 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src2 +POSTHOOK: Input: default@src2_n6 POSTHOOK: Output: database:default -POSTHOOK: Output: default@src4 -POSTHOOK: Lineage: src4.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ] -POSTHOOK: Lineage: src4.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: create table src5 as select * from src2 +POSTHOOK: Output: default@src4_n0 +POSTHOOK: Lineage: src4_n0.count SIMPLE [(src2_n6)src2_n6.FieldSchema(name:count, type:bigint, comment:null), ] +POSTHOOK: Lineage: src4_n0.key SIMPLE 
[(src2_n6)src2_n6.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: create table src5_n3 as select * from src2_n6 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@src2 +PREHOOK: Input: default@src2_n6 PREHOOK: Output: database:default -PREHOOK: Output: default@src5 -POSTHOOK: query: create table src5 as select * from src2 +PREHOOK: Output: default@src5_n3 +POSTHOOK: query: create table src5_n3 as select * from src2_n6 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@src2 +POSTHOOK: Input: default@src2_n6 POSTHOOK: Output: database:default -POSTHOOK: Output: default@src5 -POSTHOOK: Lineage: src5.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ] -POSTHOOK: Lineage: src5.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Output: default@src5_n3 +POSTHOOK: Lineage: src5_n3.count SIMPLE [(src2_n6)src2_n6.FieldSchema(name:count, type:bigint, comment:null), ] +POSTHOOK: Lineage: src5_n3.key SIMPLE [(src2_n6)src2_n6.FieldSchema(name:key, type:string, comment:null), ] PREHOOK: query: explain extended select s.key, s.count from ( - select key, count from src2 where key < 10 + select key, count from src2_n6 where key < 10 union all - select key, count from src3 where key < 10 + select key, count from src3_n2 where key < 10 union all - select key, count from src4 where key < 10 + select key, count from src4_n0 where key < 10 union all - select key, count(1) as count from src5 where key < 10 group by key + select key, count(1) as count from src5_n3 where key < 10 group by key )s PREHOOK: type: QUERY POSTHOOK: query: explain extended select s.key, s.count from ( - select key, count from src2 where key < 10 + select key, count from src2_n6 where key < 10 union all - select key, count from src3 where key < 10 + select key, count from src3_n2 where key < 10 union all - select key, count from src4 where key < 10 + select key, count from src4_n0 where key < 10 union all - select key, count(1) as count from src5 where key < 10 group by key + select key, count(1) as count from src5_n3 where key < 10 group by key )s POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -78,7 +78,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src5 + alias: src5_n3 Statistics: Num rows: 309 Data size: 1482 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator @@ -106,7 +106,7 @@ STAGE PLANS: Path -> Partition: #### A masked pattern was here #### Partition - base file name: src5 + base file name: src5_n3 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -118,11 +118,11 @@ STAGE PLANS: columns.comments columns.types string:bigint #### A masked pattern was here #### - name default.src5 + name default.src5_n3 numFiles 1 numRows 309 rawDataSize 1482 - serialization.ddl struct src5 { string key, i64 count} + serialization.ddl struct src5_n3 { string key, i64 count} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 1791 @@ -140,20 +140,20 @@ STAGE PLANS: columns.comments columns.types string:bigint #### A masked pattern was here #### - name default.src5 + name default.src5_n3 numFiles 1 numRows 309 rawDataSize 1482 - serialization.ddl struct src5 { string key, i64 count} + serialization.ddl struct src5_n3 { string key, i64 count} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 1791 #### A masked 
pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src5
- name: default.src5
+ name: default.src5_n3
+ name: default.src5_n3
Truncated Path -> Alias:
- /src5 [null-subquery2:$hdt$_2-subquery2:src5]
+ /src5_n3 [null-subquery2:$hdt$_2-subquery2:src5_n3]
Needs Tagging: false
Reduce Operator Tree:
Group By Operator
@@ -185,7 +185,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: src2
+ alias: src2_n6
Statistics: Num rows: 309 Data size: 1482 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -221,7 +221,7 @@ STAGE PLANS:
GatherStats: false
MultiFileSpray: false
TableScan
- alias: src3
+ alias: src3_n2
Statistics: Num rows: 309 Data size: 1482 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -257,7 +257,7 @@ STAGE PLANS:
GatherStats: false
MultiFileSpray: false
TableScan
- alias: src4
+ alias: src4_n0
Statistics: Num rows: 309 Data size: 1482 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -345,7 +345,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
#### A masked pattern was here ####
Partition
- base file name: src2
+ base file name: src2_n6
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -357,11 +357,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src2
+ name default.src2_n6
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src2 { string key, i64 count}
+ serialization.ddl struct src2_n6 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -379,21 +379,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src2
+ name default.src2_n6
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src2 { string key, i64 count}
+ serialization.ddl struct src2_n6 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src2
- name: default.src2
+ name: default.src2_n6
+ name: default.src2_n6
#### A masked pattern was here ####
Partition
- base file name: src3
+ base file name: src3_n2
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -405,11 +405,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src3
+ name default.src3_n2
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src3 { string key, i64 count}
+ serialization.ddl struct src3_n2 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -427,21 +427,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src3
+ name default.src3_n2
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src3 { string key, i64 count}
+ serialization.ddl struct src3_n2 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src3
- name: default.src3
+ name: default.src3_n2
+ name: default.src3_n2
#### A masked pattern was here ####
Partition
- base file name: src4
+ base file name: src4_n0
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -453,11 +453,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src4
+ name default.src4_n0
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src4 { string key, i64 count}
+ serialization.ddl struct src4_n0 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -475,22 +475,22 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src4
+ name default.src4_n0
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src4 { string key, i64 count}
+ serialization.ddl struct src4_n0 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src4
- name: default.src4
+ name: default.src4_n0
+ name: default.src4_n0
Truncated Path -> Alias:
- /src2 [null-subquery1-subquery1-subquery1:$hdt$_2-subquery1-subquery1-subquery1:src2]
- /src3 [null-subquery1-subquery1-subquery2:$hdt$_2-subquery1-subquery1-subquery2:src3]
- /src4 [null-subquery1-subquery2:$hdt$_2-subquery1-subquery2:src4]
+ /src2_n6 [null-subquery1-subquery1-subquery1:$hdt$_2-subquery1-subquery1-subquery1:src2_n6]
+ /src3_n2 [null-subquery1-subquery1-subquery2:$hdt$_2-subquery1-subquery1-subquery2:src3_n2]
+ /src4_n0 [null-subquery1-subquery2:$hdt$_2-subquery1-subquery2:src4_n0]
#### A masked pattern was here ####
Stage: Stage-0
@@ -500,34 +500,34 @@ STAGE PLANS:
ListSink
PREHOOK: query: select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select key, count from src4 where key < 10
+ select key, count from src4_n0 where key < 10
union all
- select key, count(1) as count from src5 where key < 10 group by key
+ select key, count(1) as count from src5_n3 where key < 10 group by key
)s
PREHOOK: type: QUERY
-PREHOOK: Input: default@src2
-PREHOOK: Input: default@src3
-PREHOOK: Input: default@src4
-PREHOOK: Input: default@src5
+PREHOOK: Input: default@src2_n6
+PREHOOK: Input: default@src3_n2
+PREHOOK: Input: default@src4_n0
+PREHOOK: Input: default@src5_n3
#### A masked pattern was here ####
POSTHOOK: query: select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select key, count from src4 where key < 10
+ select key, count from src4_n0 where key < 10
union all
- select key, count(1) as count from src5 where key < 10 group by key
+ select key, count(1) as count from src5_n3 where key < 10 group by key
)s
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src2
-POSTHOOK: Input: default@src3
-POSTHOOK: Input: default@src4
-POSTHOOK: Input: default@src5
+POSTHOOK: Input: default@src2_n6
+POSTHOOK: Input: default@src3_n2
+POSTHOOK: Input: default@src4_n0
+POSTHOOK: Input: default@src5_n3
#### A masked pattern was here ####
0 1
0 3
@@ -555,20 +555,20 @@ POSTHOOK: Input: default@src5
9 1
PREHOOK: query: explain extended
select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10
+ select a.key as key, b.count as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10
)s
PREHOOK: type: QUERY
POSTHOOK: query: explain extended
select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10
+ select a.key as key, b.count as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10
)s
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -626,7 +626,7 @@ STAGE PLANS:
Path -> Partition:
#### A masked pattern was here ####
Partition
- base file name: src4
+ base file name: src4_n0
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -638,11 +638,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src4
+ name default.src4_n0
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src4 { string key, i64 count}
+ serialization.ddl struct src4_n0 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -660,21 +660,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src4
+ name default.src4_n0
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src4 { string key, i64 count}
+ serialization.ddl struct src4_n0 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src4
- name: default.src4
+ name: default.src4_n0
+ name: default.src4_n0
#### A masked pattern was here ####
Partition
- base file name: src5
+ base file name: src5_n3
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -686,11 +686,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src5
+ name default.src5_n3
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src5 { string key, i64 count}
+ serialization.ddl struct src5_n3 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -708,21 +708,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src5
+ name default.src5_n3
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src5 { string key, i64 count}
+ serialization.ddl struct src5_n3 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src5
- name: default.src5
+ name: default.src5_n3
+ name: default.src5_n3
Truncated Path -> Alias:
- /src4 [null-subquery2:$hdt$_1-subquery2:$hdt$_1:a]
- /src5 [null-subquery2:$hdt$_1-subquery2:$hdt$_2:b]
+ /src4_n0 [null-subquery2:$hdt$_1-subquery2:$hdt$_1:a]
+ /src5_n3 [null-subquery2:$hdt$_1-subquery2:$hdt$_2:b]
Needs Tagging: true
Reduce Operator Tree:
Join Operator
@@ -760,7 +760,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: src2
+ alias: src2_n6
Statistics: Num rows: 309 Data size: 1482 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -796,7 +796,7 @@ STAGE PLANS:
GatherStats: false
MultiFileSpray: false
TableScan
- alias: src3
+ alias: src3_n2
Statistics: Num rows: 309 Data size: 1482 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -884,7 +884,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
#### A masked pattern was here ####
Partition
- base file name: src2
+ base file name: src2_n6
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -896,11 +896,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src2
+ name default.src2_n6
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src2 { string key, i64 count}
+ serialization.ddl struct src2_n6 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -918,21 +918,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src2
+ name default.src2_n6
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src2 { string key, i64 count}
+ serialization.ddl struct src2_n6 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src2
- name: default.src2
+ name: default.src2_n6
+ name: default.src2_n6
#### A masked pattern was here ####
Partition
- base file name: src3
+ base file name: src3_n2
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -944,11 +944,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src3
+ name default.src3_n2
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src3 { string key, i64 count}
+ serialization.ddl struct src3_n2 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -966,21 +966,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src3
+ name default.src3_n2
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src3 { string key, i64 count}
+ serialization.ddl struct src3_n2 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src3
- name: default.src3
+ name: default.src3_n2
+ name: default.src3_n2
Truncated Path -> Alias:
- /src2 [null-subquery1-subquery1:$hdt$_1-subquery1-subquery1:src2]
- /src3 [null-subquery1-subquery2:$hdt$_1-subquery1-subquery2:src3]
+ /src2_n6 [null-subquery1-subquery1:$hdt$_1-subquery1-subquery1:src2_n6]
+ /src3_n2 [null-subquery1-subquery2:$hdt$_1-subquery1-subquery2:src3_n2]
#### A masked pattern was here ####
Stage: Stage-0
@@ -990,30 +990,30 @@ STAGE PLANS:
ListSink
PREHOOK: query: select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10
+ select a.key as key, b.count as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10
)s
PREHOOK: type: QUERY
-PREHOOK: Input: default@src2
-PREHOOK: Input: default@src3
-PREHOOK: Input: default@src4
-PREHOOK: Input: default@src5
+PREHOOK: Input: default@src2_n6
+PREHOOK: Input: default@src3_n2
+PREHOOK: Input: default@src4_n0
+PREHOOK: Input: default@src5_n3
#### A masked pattern was here ####
POSTHOOK: query: select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key where a.key < 10
+ select a.key as key, b.count as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10
)s
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src2
-POSTHOOK: Input: default@src3
-POSTHOOK: Input: default@src4
-POSTHOOK: Input: default@src5
+POSTHOOK: Input: default@src2_n6
+POSTHOOK: Input: default@src3_n2
+POSTHOOK: Input: default@src4_n0
+POSTHOOK: Input: default@src5_n3
#### A masked pattern was here ####
0 3
0 3
@@ -1035,20 +1035,20 @@ POSTHOOK: Input: default@src5
9 1
PREHOOK: query: explain extended
select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key
+ select a.key as key, count(1) as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10 group by a.key
)s
PREHOOK: type: QUERY
POSTHOOK: query: explain extended
select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key
+ select a.key as key, count(1) as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10 group by a.key
)s
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -1106,7 +1106,7 @@ STAGE PLANS:
Path -> Partition:
#### A masked pattern was here ####
Partition
- base file name: src4
+ base file name: src4_n0
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -1118,11 +1118,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src4
+ name default.src4_n0
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src4 { string key, i64 count}
+ serialization.ddl struct src4_n0 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -1140,21 +1140,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src4
+ name default.src4_n0
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src4 { string key, i64 count}
+ serialization.ddl struct src4_n0 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src4
- name: default.src4
+ name: default.src4_n0
+ name: default.src4_n0
#### A masked pattern was here ####
Partition
- base file name: src5
+ base file name: src5_n3
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -1166,11 +1166,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src5
+ name default.src5_n3
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src5 { string key, i64 count}
+ serialization.ddl struct src5_n3 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -1188,21 +1188,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src5
+ name default.src5_n3
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src5 { string key, i64 count}
+ serialization.ddl struct src5_n3 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src5
- name: default.src5
+ name: default.src5_n3
+ name: default.src5_n3
Truncated Path -> Alias:
- /src4 [null-subquery2:$hdt$_1-subquery2:$hdt$_1:a]
- /src5 [null-subquery2:$hdt$_1-subquery2:$hdt$_2:b]
+ /src4_n0 [null-subquery2:$hdt$_1-subquery2:$hdt$_1:a]
+ /src5_n3 [null-subquery2:$hdt$_1-subquery2:$hdt$_2:b]
Needs Tagging: true
Reduce Operator Tree:
Join Operator
@@ -1311,7 +1311,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: src2
+ alias: src2_n6
Statistics: Num rows: 309 Data size: 1482 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -1347,7 +1347,7 @@ STAGE PLANS:
GatherStats: false
MultiFileSpray: false
TableScan
- alias: src3
+ alias: src3_n2
Statistics: Num rows: 309 Data size: 1482 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
@@ -1435,7 +1435,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
#### A masked pattern was here ####
Partition
- base file name: src2
+ base file name: src2_n6
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -1447,11 +1447,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src2
+ name default.src2_n6
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src2 { string key, i64 count}
+ serialization.ddl struct src2_n6 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -1469,21 +1469,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src2
+ name default.src2_n6
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src2 { string key, i64 count}
+ serialization.ddl struct src2_n6 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src2
- name: default.src2
+ name: default.src2_n6
+ name: default.src2_n6
#### A masked pattern was here ####
Partition
- base file name: src3
+ base file name: src3_n2
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
@@ -1495,11 +1495,11 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src3
+ name default.src3_n2
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src3 { string key, i64 count}
+ serialization.ddl struct src3_n2 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
@@ -1517,21 +1517,21 @@ STAGE PLANS:
columns.comments
columns.types string:bigint
#### A masked pattern was here ####
- name default.src3
+ name default.src3_n2
numFiles 1
numRows 309
rawDataSize 1482
- serialization.ddl struct src3 { string key, i64 count}
+ serialization.ddl struct src3_n2 { string key, i64 count}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 1791
#### A masked pattern was here ####
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src3
- name: default.src3
+ name: default.src3_n2
+ name: default.src3_n2
Truncated Path -> Alias:
- /src2 [null-subquery1-subquery1:$hdt$_1-subquery1-subquery1:src2]
- /src3 [null-subquery1-subquery2:$hdt$_1-subquery1-subquery2:src3]
+ /src2_n6 [null-subquery1-subquery1:$hdt$_1-subquery1-subquery1:src2_n6]
+ /src3_n2 [null-subquery1-subquery2:$hdt$_1-subquery1-subquery2:src3_n2]
#### A masked pattern was here ####
Stage: Stage-0
@@ -1541,30 +1541,30 @@ STAGE PLANS:
ListSink
PREHOOK: query: select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key
+ select a.key as key, count(1) as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10 group by a.key
)s
PREHOOK: type: QUERY
-PREHOOK: Input: default@src2
-PREHOOK: Input: default@src3
-PREHOOK: Input: default@src4
-PREHOOK: Input: default@src5
+PREHOOK: Input: default@src2_n6
+PREHOOK: Input: default@src3_n2
+PREHOOK: Input: default@src4_n0
+PREHOOK: Input: default@src5_n3
#### A masked pattern was here ####
POSTHOOK: query: select s.key, s.count from (
- select key, count from src2 where key < 10
+ select key, count from src2_n6 where key < 10
union all
- select key, count from src3 where key < 10
+ select key, count from src3_n2 where key < 10
union all
- select a.key as key, count(1) as count from src4 a join src5 b on a.key=b.key where a.key < 10 group by a.key
+ select a.key as key, count(1) as count from src4_n0 a join src5_n3 b on a.key=b.key where a.key < 10 group by a.key
)s
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src2
-POSTHOOK: Input: default@src3
-POSTHOOK: Input: default@src4
-POSTHOOK: Input: default@src5
+POSTHOOK: Input: default@src2_n6
+POSTHOOK: Input: default@src3_n2
+POSTHOOK: Input: default@src4_n0
+POSTHOOK: Input: default@src5_n3
#### A masked pattern was here ####
0 1
0 3
diff --git a/ql/src/test/results/clientpositive/union27.q.out b/ql/src/test/results/clientpositive/union27.q.out
index 8e3dae9e57..5b5c43eed2 100644
--- a/ql/src/test/results/clientpositive/union27.q.out
+++ b/ql/src/test/results/clientpositive/union27.q.out
@@ -1,42 +1,42 @@
-PREHOOK: query: create table jackson_sev_same as select * from src
+PREHOOK: query: create table jackson_sev_same_n0 as select * from src
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@jackson_sev_same
-POSTHOOK: query: create table jackson_sev_same as select * from src
+PREHOOK: Output: default@jackson_sev_same_n0
+POSTHOOK: query: create table jackson_sev_same_n0 as select * from src
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@jackson_sev_same
-POSTHOOK: Lineage: jackson_sev_same.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: jackson_sev_same.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table dim_pho as select * from src
+POSTHOOK: Output: default@jackson_sev_same_n0
+POSTHOOK: Lineage: jackson_sev_same_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_same_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table dim_pho_n0 as select * from src
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@dim_pho
-POSTHOOK: query: create table dim_pho as select * from src
+PREHOOK: Output: default@dim_pho_n0
+POSTHOOK: query: create table dim_pho_n0 as select * from src
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dim_pho
-POSTHOOK: Lineage: dim_pho.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dim_pho.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table jackson_sev_add as select * from src
+POSTHOOK: Output: default@dim_pho_n0
+POSTHOOK: Lineage: dim_pho_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dim_pho_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table jackson_sev_add_n0 as select * from src
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@jackson_sev_add
-POSTHOOK: query: create table jackson_sev_add as select * from src
+PREHOOK: Output: default@jackson_sev_add_n0
+POSTHOOK: query: create table jackson_sev_add_n0 as select * from src
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@jackson_sev_add
-POSTHOOK: Lineage: jackson_sev_add.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: jackson_sev_add.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97
+POSTHOOK: Output: default@jackson_sev_add_n0
+POSTHOOK: Lineage: jackson_sev_add_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_add_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select b.* from jackson_sev_same_n0 a join (select * from dim_pho_n0 union all select * from jackson_sev_add_n0)b on a.key=b.key and b.key=97
PREHOOK: type: QUERY
-POSTHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97
+POSTHOOK: query: explain select b.* from jackson_sev_same_n0 a join (select * from dim_pho_n0 union all select * from jackson_sev_add_n0)b on a.key=b.key and b.key=97
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -62,7 +62,7 @@ STAGE PLANS:
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
TableScan
- alias: dim_pho
+ alias: dim_pho_n0
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (UDFToDouble(key) = 97.0D) (type: boolean)
@@ -80,7 +80,7 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
TableScan
- alias: jackson_sev_add
+ alias: jackson_sev_add_n0
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (UDFToDouble(key) = 97.0D) (type: boolean)
@@ -124,17 +124,17 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97
+PREHOOK: query: select b.* from jackson_sev_same_n0 a join (select * from dim_pho_n0 union all select * from jackson_sev_add_n0)b on a.key=b.key and b.key=97
PREHOOK: type: QUERY
-PREHOOK: Input: default@dim_pho
-PREHOOK: Input: default@jackson_sev_add
-PREHOOK: Input: default@jackson_sev_same
+PREHOOK: Input: default@dim_pho_n0
+PREHOOK: Input: default@jackson_sev_add_n0
+PREHOOK: Input: default@jackson_sev_same_n0
#### A masked pattern was here ####
-POSTHOOK: query: select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97
+POSTHOOK: query: select b.* from jackson_sev_same_n0 a join (select * from dim_pho_n0 union all select * from jackson_sev_add_n0)b on a.key=b.key and b.key=97
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dim_pho
-POSTHOOK: Input: default@jackson_sev_add
-POSTHOOK: Input: default@jackson_sev_same
+POSTHOOK: Input: default@dim_pho_n0
+POSTHOOK: Input: default@jackson_sev_add_n0
+POSTHOOK: Input: default@jackson_sev_same_n0
#### A masked pattern was here ####
97 val_97
97 val_97
diff --git a/ql/src/test/results/clientpositive/union29.q.out b/ql/src/test/results/clientpositive/union29.q.out
index 0eea64f3d6..68a28fb5f4 100644
--- a/ql/src/test/results/clientpositive/union29.q.out
+++ b/ql/src/test/results/clientpositive/union29.q.out
@@ -1,13 +1,13 @@
-PREHOOK: query: create table union_subq_union(key int, value string)
+PREHOOK: query: create table union_subq_union_n1(key int, value string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@union_subq_union
-POSTHOOK: query: create table union_subq_union(key int, value string)
+PREHOOK: Output: default@union_subq_union_n1
+POSTHOOK: query: create table union_subq_union_n1(key int, value string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@union_subq_union
+POSTHOOK: Output: default@union_subq_union_n1
PREHOOK: query: explain
-insert overwrite table union_subq_union
+insert overwrite table union_subq_union_n1
select * from (
select key, value from src
union all
@@ -20,7 +20,7 @@ select * from (
) a
PREHOOK: type: QUERY
POSTHOOK: query: explain
-insert overwrite table union_subq_union
+insert overwrite table union_subq_union_n1
select * from (
select key, value from src
union all
@@ -66,7 +66,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n1
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -100,7 +100,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n1
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -134,7 +134,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n1
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -179,7 +179,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n1
Stage: Stage-2
Stats Work
@@ -187,7 +187,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.union_subq_union
+ Table: default.union_subq_union_n1
Stage: Stage-3
Map Reduce
@@ -199,7 +199,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n1
Stage: Stage-5
Map Reduce
@@ -211,7 +211,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n1
Stage: Stage-6
Move Operator
@@ -219,7 +219,7 @@ STAGE PLANS:
hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: insert overwrite table union_subq_union
+PREHOOK: query: insert overwrite table union_subq_union_n1
select * from (
select key, value from src
union all
@@ -232,8 +232,8 @@ select * from (
) a
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@union_subq_union
-POSTHOOK: query: insert overwrite table union_subq_union
+PREHOOK: Output: default@union_subq_union_n1
+POSTHOOK: query: insert overwrite table union_subq_union_n1
select * from (
select key, value from src
union all
@@ -246,16 +246,16 @@ select * from (
) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@union_subq_union
-POSTHOOK: Lineage: union_subq_union.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: union_subq_union.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from union_subq_union order by key, value limit 20
+POSTHOOK: Output: default@union_subq_union_n1
+POSTHOOK: Lineage: union_subq_union_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: union_subq_union_n1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from union_subq_union_n1 order by key, value limit 20
PREHOOK: type: QUERY
-PREHOOK: Input: default@union_subq_union
+PREHOOK: Input: default@union_subq_union_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from union_subq_union order by key, value limit 20
+POSTHOOK: query: select * from union_subq_union_n1 order by key, value limit 20
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@union_subq_union
+POSTHOOK: Input: default@union_subq_union_n1
#### A masked pattern was here ####
0 val_0
0 val_0
diff --git a/ql/src/test/results/clientpositive/union3.q.out b/ql/src/test/results/clientpositive/union3.q.out
index 48bc45a9fb..37d70650e1 100644
--- a/ql/src/test/results/clientpositive/union3.q.out
+++ b/ql/src/test/results/clientpositive/union3.q.out
@@ -219,15 +219,15 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: CREATE TABLE union_out (id int)
+PREHOOK: query: CREATE TABLE union_out_n0 (id int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@union_out
-POSTHOOK: query: CREATE TABLE union_out (id int)
+PREHOOK: Output: default@union_out_n0
+POSTHOOK: query: CREATE TABLE union_out_n0 (id int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@union_out
-PREHOOK: query: insert overwrite table union_out
+POSTHOOK: Output: default@union_out_n0
+PREHOOK: query: insert overwrite table union_out_n0
SELECT *
FROM (
SELECT 1 AS id
@@ -245,8 +245,8 @@ FROM (
) a
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@union_out
-POSTHOOK: query: insert overwrite table union_out
+PREHOOK: Output: default@union_out_n0
+POSTHOOK: query: insert overwrite table union_out_n0
SELECT *
FROM (
SELECT 1 AS id
@@ -264,15 +264,15 @@ FROM (
) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@union_out
-POSTHOOK: Lineage: union_out.id EXPRESSION []
-PREHOOK: query: select * from union_out
+POSTHOOK: Output: default@union_out_n0
+POSTHOOK: Lineage: union_out_n0.id EXPRESSION []
+PREHOOK: query: select * from union_out_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@union_out
+PREHOOK: Input: default@union_out_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from union_out
+POSTHOOK: query: select * from union_out_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@union_out
+POSTHOOK: Input: default@union_out_n0
#### A masked pattern was here ####
1
2
diff --git a/ql/src/test/results/clientpositive/union30.q.out b/ql/src/test/results/clientpositive/union30.q.out
index a3120e1f27..271cbdf7f6 100644
--- a/ql/src/test/results/clientpositive/union30.q.out
+++ b/ql/src/test/results/clientpositive/union30.q.out
@@ -1,13 +1,13 @@
-PREHOOK: query: create table union_subq_union(key int, value string)
+PREHOOK: query: create table union_subq_union_n2(key int, value string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@union_subq_union
-POSTHOOK: query: create table union_subq_union(key int, value string)
+PREHOOK: Output: default@union_subq_union_n2
+POSTHOOK: query: create table union_subq_union_n2(key int, value string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@union_subq_union
+POSTHOOK: Output: default@union_subq_union_n2
PREHOOK: query: explain
-insert overwrite table union_subq_union
+insert overwrite table union_subq_union_n2
select * from (
select * from (
@@ -27,7 +27,7 @@ select key, value from src
) aa
PREHOOK: type: QUERY
POSTHOOK: query: explain
-insert overwrite table union_subq_union
+insert overwrite table union_subq_union_n2
select * from (
select * from (
@@ -116,7 +116,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n2
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -150,7 +150,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n2
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -178,7 +178,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n2
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -206,7 +206,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n2
Select Operator
expressions: _col0 (type: int), _col1 (type: string)
outputColumnNames: key, value
@@ -251,7 +251,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n2
Stage: Stage-3
Stats Work
@@ -259,7 +259,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, value
Column Types: int, string
- Table: default.union_subq_union
+ Table: default.union_subq_union_n2
Stage: Stage-4
Map Reduce
@@ -271,7 +271,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n2
Stage: Stage-6
Map Reduce
@@ -283,7 +283,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.union_subq_union
+ name: default.union_subq_union_n2
Stage: Stage-7
Move Operator
@@ -325,7 +325,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-PREHOOK: query: insert overwrite table union_subq_union
+PREHOOK: query: insert overwrite table union_subq_union_n2
select * from (
select * from (
@@ -345,8 +345,8 @@ select key, value from src
) aa
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: default@union_subq_union
-POSTHOOK: query: insert overwrite table union_subq_union
+PREHOOK: Output: default@union_subq_union_n2
+POSTHOOK: query: insert overwrite table union_subq_union_n2
select * from (
select * from (
@@ -366,16 +366,16 @@ select key, value from src
) aa
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: default@union_subq_union
-POSTHOOK: Lineage: union_subq_union.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: union_subq_union.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: select * from union_subq_union order by key, value limit 20
+POSTHOOK: Output: default@union_subq_union_n2
+POSTHOOK: Lineage: union_subq_union_n2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: union_subq_union_n2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from union_subq_union_n2 order by key, value limit 20
PREHOOK: type: QUERY
-PREHOOK: Input: default@union_subq_union
+PREHOOK: Input: default@union_subq_union_n2
#### A masked pattern was here ####
-POSTHOOK: query: select * from union_subq_union order by key, value limit 20
+POSTHOOK: query: select * from union_subq_union_n2 order by key, value limit 20
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@union_subq_union
+POSTHOOK: Input: default@union_subq_union_n2
#### A masked pattern was here ####
0 val_0
0 val_0
diff --git a/ql/src/test/results/clientpositive/union31.q.out b/ql/src/test/results/clientpositive/union31.q.out
index e3721aa2ed..754db381fd 100644
--- a/ql/src/test/results/clientpositive/union31.q.out
+++ b/ql/src/test/results/clientpositive/union31.q.out
@@ -1,71 +1,71 @@
-PREHOOK: query: drop table t1
+PREHOOK: query: drop table t1_n11
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table t1
+POSTHOOK: query: drop table t1_n11
POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table t2
+PREHOOK: query: drop table t2_n7
PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table t2
+POSTHOOK: query: drop table t2_n7
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table t1 as select * from src where key < 10
+PREHOOK: query: create table t1_n11 as select * from src where key < 10
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 as select * from src where key < 10
+PREHOOK: Output: default@t1_n11
+POSTHOOK: query: create table t1_n11 as select * from src where key < 10
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table t2 as select * from src where key < 10
+POSTHOOK: Output: default@t1_n11
+POSTHOOK: Lineage: t1_n11.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n11.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table t2_n7 as select * from src where key < 10
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2 as select * from src where key < 10
+PREHOOK: Output: default@t2_n7
+POSTHOOK: query: create table t2_n7 as select * from src where key < 10
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table t3(key string, cnt int)
+POSTHOOK: Output: default@t2_n7
+POSTHOOK: Lineage: t2_n7.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2_n7.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table t3_n1(key string, cnt int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t3
-POSTHOOK: query: create table t3(key string, cnt int)
+PREHOOK: Output: default@t3_n1
+POSTHOOK: query: create table t3_n1(key string, cnt int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t3
-PREHOOK: query: create table t4(value string, cnt int)
+POSTHOOK: Output: default@t3_n1
+PREHOOK: query: create table t4_n1(value string, cnt int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t4
-POSTHOOK: query: create table t4(value string, cnt int)
+PREHOOK: Output: default@t4_n1
+POSTHOOK: query: create table t4_n1(value string, cnt int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t4
+POSTHOOK: Output: default@t4_n1
PREHOOK: query: explain
from
-(select * from t1
+(select * from t1_n11
union all
- select * from t2
+ select * from t2_n7
) x
-insert overwrite table t3
+insert overwrite table t3_n1
select key, count(1) group by key
-insert overwrite table t4
+insert overwrite table t4_n1
select value, count(1) group by value
PREHOOK: type: QUERY
POSTHOOK: query: explain
from
-(select * from t1
+(select * from t1_n11
union all
- select * from t2
+ select * from t2_n7
) x
-insert overwrite table t3
+insert overwrite table t3_n1
select key, count(1) group by key
-insert overwrite table t4
+insert overwrite table t4_n1
select value, count(1) group by value
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -83,7 +83,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: t1
+ alias: t1_n11
Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
@@ -124,7 +124,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
TableScan
- alias: t2
+ alias: t2_n7
Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
@@ -182,7 +182,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t3
+ name: default.t3_n1
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: key, cnt
@@ -207,7 +207,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t3
+ name: default.t3_n1
Stage: Stage-3
Stats Work
@@ -215,7 +215,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: key, cnt
Column Types: string, int
- Table: default.t3
+ Table: default.t3_n1
Stage: Stage-4
Map Reduce
@@ -246,7 +246,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: value, cnt
Column Types: string, int
- Table: default.t4
+ Table: default.t4_n1
Stage: Stage-5
Map Reduce
@@ -277,7 +277,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t4
+ name: default.t4_n1
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: value, cnt
@@ -302,7 +302,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t4
+ name: default.t4_n1
Stage: Stage-7
Map Reduce
@@ -328,44 +328,44 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
PREHOOK: query: from
-(select * from t1
+(select * from t1_n11
union all
- select * from t2
+ select * from t2_n7
) x
-insert overwrite table t3
+insert overwrite table t3_n1
select key, count(1) group by key
-insert overwrite table t4
+insert overwrite table t4_n1
select value, count(1) group by value
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@t3
-PREHOOK: Output: default@t4
+PREHOOK: Input: default@t1_n11
+PREHOOK: Input: default@t2_n7
+PREHOOK: Output: default@t3_n1
+PREHOOK: Output: default@t4_n1
POSTHOOK: query: from
-(select * from t1
+(select * from t1_n11
union all
- select * from t2
+ select * from t2_n7
) x
-insert overwrite table t3
+insert overwrite table t3_n1
select key, count(1) group by key
-insert overwrite table t4
+insert overwrite table t4_n1
select value, count(1) group by value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@t3
-POSTHOOK: Output: default@t4
-POSTHOOK: Lineage: t3.cnt EXPRESSION [(t1)t1.null, (t2)t2.null, ]
-POSTHOOK: Lineage: t3.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t4.cnt EXPRESSION [(t1)t1.null, (t2)t2.null, ]
-POSTHOOK: Lineage: t4.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), (t2)t2.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: select * from t3
+POSTHOOK: Input: default@t1_n11
+POSTHOOK: Input: default@t2_n7
+POSTHOOK: Output: default@t3_n1
+POSTHOOK: Output: default@t4_n1
+POSTHOOK: Lineage: t3_n1.cnt EXPRESSION [(t1_n11)t1_n11.null, (t2_n7)t2_n7.null, ]
+POSTHOOK: Lineage: t3_n1.key EXPRESSION [(t1_n11)t1_n11.FieldSchema(name:key, type:string, comment:null), (t2_n7)t2_n7.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t4_n1.cnt EXPRESSION [(t1_n11)t1_n11.null, (t2_n7)t2_n7.null, ]
+POSTHOOK: Lineage: t4_n1.value EXPRESSION [(t1_n11)t1_n11.FieldSchema(name:value, type:string, comment:null), (t2_n7)t2_n7.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from t3_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@t3
+PREHOOK: Input: default@t3_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from t3
+POSTHOOK: query: select * from t3_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t3_n1
#### A masked pattern was here ####
0 6
2 2
@@ -373,13 +373,13 @@ POSTHOOK: Input: default@t3
5 6
8 2
9 2
-PREHOOK: query: select * from t4
+PREHOOK: query: select * from t4_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@t4
+PREHOOK: Input: default@t4_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from t4
+POSTHOOK: query: select * from t4_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4_n1
#### A masked pattern was here ####
val_0 6
val_2 2
@@ -387,44 +387,44 @@ val_4 2
val_5 6
val_8 2
val_9 2
-PREHOOK: query: create table t5(c1 string, cnt int)
+PREHOOK: query: create table t5_n0(c1 string, cnt int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t5
-POSTHOOK: query: create table t5(c1 string, cnt int)
+PREHOOK: Output: default@t5_n0
+POSTHOOK: query: create table t5_n0(c1 string, cnt int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t5
-PREHOOK: query: create table t6(c1 string, cnt int)
+POSTHOOK: Output: default@t5_n0
+PREHOOK: query: create table t6_n0(c1 string, cnt int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t6
-POSTHOOK: query: create table t6(c1 string, cnt int)
+PREHOOK: Output: default@t6_n0
+POSTHOOK: query: create table t6_n0(c1 string, cnt int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t6
+POSTHOOK: Output: default@t6_n0
PREHOOK: query: explain
from
(
- select key as c1, count(1) as cnt from t1 group by key
+ select key as c1, count(1) as cnt from t1_n11 group by key
union all
- select key as c1, count(1) as cnt from t2 group by key
+ select key as c1, count(1) as cnt from t2_n7 group by key
) x
-insert overwrite table t5
+insert overwrite table t5_n0
select c1, sum(cnt) group by c1
-insert overwrite table t6
+insert overwrite table t6_n0
select c1, sum(cnt) group by c1
PREHOOK: type: QUERY
POSTHOOK: query: explain
from
(
- select key as c1, count(1) as cnt from t1 group by key
+ select key as c1, count(1) as cnt from t1_n11 group by key
union all
- select key as c1, count(1) as cnt from t2 group by key
+ select key as c1, count(1) as cnt from t2_n7 group by key
) x
-insert overwrite table t5
+insert overwrite table t5_n0
select c1, sum(cnt) group by c1
-insert overwrite table t6
+insert overwrite table t6_n0
select c1, sum(cnt) group by c1
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -443,7 +443,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: t1
+ alias: t1_n11
Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
@@ -517,7 +517,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t5
+ name: default.t5_n0
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: c1, cnt
@@ -550,7 +550,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t6
+ name: default.t6_n0
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: c1, cnt
@@ -575,7 +575,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t5
+ name: default.t5_n0
Stage: Stage-4
Stats Work
@@ -583,7 +583,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: c1, cnt
Column Types: string, int
- Table: default.t5
+ Table: default.t5_n0
Stage: Stage-5
Map Reduce
@@ -614,7 +614,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: c1, cnt
Column Types: string, int
- Table: default.t6
+ Table: default.t6_n0
Stage: Stage-1
Move Operator
@@ -624,7 +624,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t6
+ name: default.t6_n0
Stage: Stage-7
Map Reduce
@@ -653,7 +653,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: t2
+ alias: t2_n7
Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
@@ -688,45 +688,45 @@ STAGE PLANS:
PREHOOK: query: from
(
- select key as c1, count(1) as cnt from t1 group by key
+ select key as c1, count(1) as cnt from t1_n11 group by key
union all
- select key as c1, count(1) as cnt from t2 group by key
+ select key as c1, count(1) as cnt from t2_n7 group by key
) x
-insert overwrite table t5
+insert overwrite table t5_n0
select c1, sum(cnt) group by c1
-insert overwrite table t6
+insert overwrite table t6_n0
select c1, sum(cnt) group by c1
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@t5
-PREHOOK: Output: default@t6
+PREHOOK: Input: default@t1_n11
+PREHOOK: Input: default@t2_n7
+PREHOOK: Output: default@t5_n0
+PREHOOK: Output: default@t6_n0
POSTHOOK: query: from
(
- select key as c1, count(1) as cnt from t1 group by key
+ select key as c1, count(1) as cnt from t1_n11 group by key
union all
- select key as c1, count(1) as cnt from t2 group by key
+ select key as c1, count(1) as cnt from t2_n7 group by key
) x
-insert overwrite table t5
+insert overwrite table t5_n0
select c1, sum(cnt) group by c1
-insert overwrite table t6
+insert overwrite table t6_n0
select c1, sum(cnt) group by c1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@t5
-POSTHOOK: Output: default@t6
-POSTHOOK: Lineage: t5.c1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t5.cnt EXPRESSION [(t1)t1.null, (t2)t2.null, ]
-POSTHOOK: Lineage: t6.c1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t6.cnt EXPRESSION [(t1)t1.null, (t2)t2.null, ]
-PREHOOK: query: select * from t5
+POSTHOOK: Input: default@t1_n11
+POSTHOOK: Input: default@t2_n7
+POSTHOOK: Output: default@t5_n0
+POSTHOOK: Output: default@t6_n0
+POSTHOOK: Lineage: t5_n0.c1 EXPRESSION [(t1_n11)t1_n11.FieldSchema(name:key, type:string, comment:null), (t2_n7)t2_n7.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t5_n0.cnt EXPRESSION [(t1_n11)t1_n11.null, (t2_n7)t2_n7.null, ]
+POSTHOOK: Lineage: t6_n0.c1 EXPRESSION [(t1_n11)t1_n11.FieldSchema(name:key, type:string, comment:null), (t2_n7)t2_n7.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t6_n0.cnt EXPRESSION [(t1_n11)t1_n11.null, (t2_n7)t2_n7.null, ]
+PREHOOK: query: select * from t5_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@t5
+PREHOOK: Input: default@t5_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from t5
+POSTHOOK: query: select * from t5_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t5
+POSTHOOK: Input: default@t5_n0
#### A masked pattern was here ####
0 6
2 2
@@ -734,13 +734,13 @@ POSTHOOK: Input: default@t5
5 6
8 2
9 2
-PREHOOK: query: select * from t6
+PREHOOK: query: select * from t6_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@t6
+PREHOOK: Input: default@t6_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from t6
+POSTHOOK: query: select * from t6_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t6
+POSTHOOK: Input: default@t6_n0
#### A masked pattern was here ####
0 6
2 2
@@ -748,84 +748,84 @@ POSTHOOK: Input: default@t6
5 6
8 2
9 2
-PREHOOK: query: drop table t1
+PREHOOK: query: drop table t1_n11
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t1
-PREHOOK: Output: default@t1
-POSTHOOK: query: drop table t1
+PREHOOK: Input: default@t1_n11
+PREHOOK: Output: default@t1_n11
+POSTHOOK: query: drop table t1_n11
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t1
-POSTHOOK: Output: default@t1
-PREHOOK: query: drop table t2
+POSTHOOK: Input: default@t1_n11
+POSTHOOK: Output: default@t1_n11
+PREHOOK: query: drop table t2_n7
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@t2
-POSTHOOK: query: drop table t2
+PREHOOK: Input: default@t2_n7
+PREHOOK: Output: default@t2_n7
+POSTHOOK: query: drop table t2_n7
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@t2
-PREHOOK: query: create table t1 as select * from src where key < 10
+POSTHOOK: Input: default@t2_n7
+POSTHOOK: Output: default@t2_n7
+PREHOOK: query: create table t1_n11 as select * from src where key < 10
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 as select * from src where key < 10
+PREHOOK: Output: default@t1_n11
+POSTHOOK: query: create table t1_n11 as select * from src where key < 10
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table t2 as select key, count(1) as cnt from src where key < 10 group by key
+POSTHOOK: Output: default@t1_n11
+POSTHOOK: Lineage: t1_n11.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n11.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table t2_n7 as select key, count(1) as cnt from src where key < 10 group by key
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2 as select key, count(1) as cnt from src where key < 10 group by key
+PREHOOK: Output: default@t2_n7
+POSTHOOK: query: create table t2_n7 as select key, count(1) as cnt from src where key < 10 group by key
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t2.cnt EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: create table t7(c1 string, cnt int)
+POSTHOOK: Output: default@t2_n7
+POSTHOOK: Lineage: t2_n7.cnt EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: t2_n7.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: create table t7_n1(c1 string, cnt int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t7
-POSTHOOK: query: create table t7(c1 string, cnt int)
+PREHOOK: Output: default@t7_n1
+POSTHOOK: query: create table t7_n1(c1 string, cnt int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t7
-PREHOOK: query: create table t8(c1 string, cnt int)
+POSTHOOK: Output: default@t7_n1
+PREHOOK: query: create table t8_n0(c1 string, cnt int)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t8
-POSTHOOK: query: create table t8(c1 string, cnt int)
+PREHOOK: Output: default@t8_n0
+POSTHOOK: query: create table t8_n0(c1 string, cnt int)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t8
+POSTHOOK: Output: default@t8_n0
PREHOOK: query: explain
from
(
- select key as c1, count(1) as cnt from t1 group by key
+ select key as c1, count(1) as cnt from t1_n11 group by key
union all
- select key as c1, cnt from t2
+ select key as c1, cnt from t2_n7
) x
-insert overwrite table t7
+insert overwrite table t7_n1
select c1, count(1) group by c1
-insert overwrite table t8
+insert overwrite table t8_n0
select c1, count(1) group by c1
PREHOOK: type: QUERY
POSTHOOK: query: explain
from
(
- select key as c1, count(1) as cnt from t1 group by key
+ select key as c1, count(1) as cnt from t1_n11 group by key
union all
- select key as c1, cnt from t2
+ select key as c1, cnt from t2_n7
) x
-insert overwrite table t7
+insert overwrite table t7_n1
select c1, count(1) group by c1
-insert overwrite table t8
+insert overwrite table t8_n0
select c1, count(1) group by c1
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -843,7 +843,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: t1
+ alias: t1_n11
Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
@@ -896,7 +896,7 @@ STAGE PLANS:
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 11 Data size: 53 Basic stats: COMPLETE Column stats: NONE
TableScan
- alias: t2
+ alias: t2_n7
Statistics: Num rows: 6 Data size: 18 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
@@ -933,7 +933,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t7
+ name: default.t7_n1
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: c1, cnt
@@ -966,7 +966,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t8
+ name: default.t8_n0
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: c1, cnt
@@ -991,7 +991,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t7
+ name: default.t7_n1
Stage: Stage-4
Stats Work
@@ -999,7 +999,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: c1, cnt
Column Types: string, int
- Table: default.t7
+ Table: default.t7_n1
Stage: Stage-5
Map Reduce
@@ -1030,7 +1030,7 @@ STAGE PLANS:
Column Stats Desc:
Columns: c1, cnt
Column Types: string, int
- Table: default.t8
+ Table: default.t8_n0
Stage: Stage-1
Move Operator
@@ -1040,7 +1040,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.t8
+ name: default.t8_n0
Stage: Stage-7
Map Reduce
@@ -1067,45 +1067,45 @@ STAGE PLANS:
PREHOOK: query: from
(
- select key as c1, count(1) as cnt from t1 group by key
+ select key as c1, count(1) as cnt from t1_n11 group by key
union all
- select key as c1, cnt from t2
+ select key as c1, cnt from t2_n7
) x
-insert overwrite table t7
+insert overwrite table t7_n1
select c1, count(1) group by c1
-insert overwrite table t8
+insert overwrite table t8_n0
select c1, count(1) group by c1
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
-PREHOOK: Output: default@t7
-PREHOOK: Output: default@t8
+PREHOOK: Input: default@t1_n11
+PREHOOK: Input: default@t2_n7
+PREHOOK: Output: default@t7_n1
+PREHOOK: Output: default@t8_n0
POSTHOOK: query: from
(
- select key as c1, count(1) as cnt from t1 group by key
+ select key as c1, count(1) as cnt from t1_n11 group by key
union all
- select key as c1, cnt from t2
+ select key as c1, cnt from t2_n7
) x
-insert overwrite table t7
+insert overwrite table t7_n1
select c1, count(1) group by c1
-insert overwrite table t8
+insert overwrite table t8_n0
select c1, count(1) group by c1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
-POSTHOOK: Output: default@t7
-POSTHOOK: Output: default@t8
-POSTHOOK: Lineage: t7.c1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t7.cnt EXPRESSION [(t1)t1.null, (t2)t2.null, ]
-POSTHOOK: Lineage: t8.c1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), (t2)t2.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t8.cnt EXPRESSION [(t1)t1.null, (t2)t2.null, ]
-PREHOOK: query: select * from t7
+POSTHOOK: Input: default@t1_n11
+POSTHOOK: Input: default@t2_n7
+POSTHOOK: Output: default@t7_n1
+POSTHOOK: Output: default@t8_n0
+POSTHOOK: Lineage: t7_n1.c1 EXPRESSION [(t1_n11)t1_n11.FieldSchema(name:key, type:string, comment:null), (t2_n7)t2_n7.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t7_n1.cnt EXPRESSION [(t1_n11)t1_n11.null, (t2_n7)t2_n7.null, ]
+POSTHOOK: Lineage: t8_n0.c1 EXPRESSION [(t1_n11)t1_n11.FieldSchema(name:key, type:string, comment:null), (t2_n7)t2_n7.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t8_n0.cnt EXPRESSION [(t1_n11)t1_n11.null, (t2_n7)t2_n7.null, ]
+PREHOOK: query: select * from t7_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@t7
+PREHOOK: Input: default@t7_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from t7
+POSTHOOK: query: select * from t7_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t7
+POSTHOOK: Input: default@t7_n1
#### A masked pattern was here ####
0 2
2 2
@@ -1113,13 +1113,13 @@ POSTHOOK: Input: default@t7
5 2
8 2
9 2
-PREHOOK: query: select * from t8
+PREHOOK: query: select * from t8_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@t8
+PREHOOK: Input: default@t8_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from t8
+POSTHOOK: query: select * from t8_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t8
+POSTHOOK: Input: default@t8_n0
#### A masked pattern was here ####
0 2
2 2
diff --git a/ql/src/test/results/clientpositive/union32.q.out b/ql/src/test/results/clientpositive/union32.q.out
index 186b8d5dc3..4ac4044fc1 100644
--- a/ql/src/test/results/clientpositive/union32.q.out
+++ b/ql/src/test/results/clientpositive/union32.q.out
@@ -1,38 +1,38 @@
-PREHOOK: query: CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10
+PREHOOK: query: CREATE TABLE t1_n26 AS SELECT * FROM src WHERE key < 10
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10
+PREHOOK: Output: default@t1_n26
+POSTHOOK: query: CREATE TABLE t1_n26 AS SELECT * FROM src WHERE key < 10
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10
+POSTHOOK: Output: default@t1_n26
+POSTHOOK: Lineage: t1_n26.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1_n26.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: CREATE TABLE t2_n14 AS SELECT * FROM src WHERE key < 10
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10
+PREHOOK: Output: default@t2_n14
+POSTHOOK: query: CREATE TABLE t2_n14 AS SELECT * FROM src WHERE key < 10
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Output: default@t2_n14
+POSTHOOK: Lineage: t2_n14.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2_n14.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: EXPLAIN
SELECT * FROM
-(SELECT CAST(key AS DOUBLE) AS key FROM t1
+(SELECT CAST(key AS DOUBLE) AS key FROM t1_n26
UNION ALL
-SELECT CAST(key AS BIGINT) AS key FROM t2) a
+SELECT CAST(key AS
BIGINT) AS key FROM t2_n14) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t1 +(SELECT CAST(key AS DOUBLE) AS key FROM t1_n26 UNION ALL -SELECT CAST(key AS BIGINT) AS key FROM t2) a +SELECT CAST(key AS BIGINT) AS key FROM t2_n14) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -43,7 +43,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t1 + alias: t1_n26 Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToDouble(key) (type: double) @@ -59,7 +59,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: t2 + alias: t2_n14 Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToDouble(UDFToLong(key)) (type: double) @@ -82,20 +82,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t1 +(SELECT CAST(key AS DOUBLE) AS key FROM t1_n26 UNION ALL -SELECT CAST(key AS BIGINT) AS key FROM t2) a +SELECT CAST(key AS BIGINT) AS key FROM t2_n14) a PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n26 +PREHOOK: Input: default@t2_n14 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t1 +(SELECT CAST(key AS DOUBLE) AS key FROM t1_n26 UNION ALL -SELECT CAST(key AS BIGINT) AS key FROM t2) a +SELECT CAST(key AS BIGINT) AS key FROM t2_n14) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n26 +POSTHOOK: Input: default@t2_n14 #### A masked pattern was here #### 0.0 0.0 @@ -119,15 +119,15 @@ POSTHOOK: Input: default@t2 9.0 PREHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key UNION ALL -SELECT CAST(key AS DOUBLE) AS key FROM t2) a +SELECT CAST(key AS DOUBLE) AS key FROM t2_n14) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key UNION ALL -SELECT CAST(key AS DOUBLE) AS key FROM t2) a +SELECT CAST(key AS DOUBLE) AS key FROM t2_n14) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -202,7 +202,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: t2 + alias: t2_n14 Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToDouble(key) (type: double) @@ -225,20 +225,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key UNION ALL -SELECT CAST(key AS DOUBLE) AS key FROM t2) a +SELECT CAST(key AS DOUBLE) AS key FROM t2_n14) a PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n26 +PREHOOK: Input: default@t2_n14 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) 
AS key FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key UNION ALL -SELECT CAST(key AS DOUBLE) AS key FROM t2) a +SELECT CAST(key AS DOUBLE) AS key FROM t2_n14) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n26 +POSTHOOK: Input: default@t2_n14 #### A masked pattern was here #### 0.0 0.0 @@ -274,15 +274,15 @@ POSTHOOK: Input: default@t2 9.0 PREHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t2 +(SELECT CAST(key AS DOUBLE) AS key FROM t2_n14 UNION ALL -SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t2 +(SELECT CAST(key AS DOUBLE) AS key FROM t2_n14 UNION ALL -SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -347,7 +347,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n14 Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToDouble(key) (type: double) @@ -380,20 +380,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t2 +(SELECT CAST(key AS DOUBLE) AS key FROM t2_n14 UNION ALL -SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key) a PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n26 +PREHOOK: Input: default@t2_n14 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key FROM t2 +(SELECT CAST(key AS DOUBLE) AS key FROM t2_n14 UNION ALL -SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n26 +POSTHOOK: Input: default@t2_n14 #### A masked pattern was here #### 0.0 0.0 @@ -429,15 +429,15 @@ POSTHOOK: Input: default@t2 9.0 PREHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key UNION ALL -SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a +SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n14) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key UNION ALL -SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a +SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n14) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -512,7 +512,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: t2 + alias: t2_n14 Statistics: 
Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToDouble(key) (type: double), key (type: string) @@ -535,20 +535,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key UNION ALL -SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a +SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n14) a PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n26 +PREHOOK: Input: default@t2_n14 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM -(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key +(SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key UNION ALL -SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a +SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n14) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n26 +POSTHOOK: Input: default@t2_n14 #### A masked pattern was here #### 0.0 0 0.0 0 @@ -584,15 +584,15 @@ POSTHOOK: Input: default@t2 9.0 9 PREHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 +(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n14 UNION ALL -SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 +(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n14 UNION ALL -SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -657,7 +657,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: t2 + alias: t2_n14 Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToDouble(key) (type: double), key (type: string) @@ -690,20 +690,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 +(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n14 UNION ALL -SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key) a PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n26 +PREHOOK: Input: default@t2_n14 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM -(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 +(SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2_n14 UNION ALL -SELECT CAST(a.key AS 
BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a +SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS VARCHAR(20)) AS value FROM t1_n26 a JOIN t2_n14 b ON a.key = b.key) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n26 +POSTHOOK: Input: default@t2_n14 #### A masked pattern was here #### 0.0 0 0.0 0 diff --git a/ql/src/test/results/clientpositive/union33.q.out b/ql/src/test/results/clientpositive/union33.q.out index c9fe892d53..c0429f0b2b 100644 --- a/ql/src/test/results/clientpositive/union33.q.out +++ b/ql/src/test/results/clientpositive/union33.q.out @@ -1,12 +1,12 @@ -PREHOOK: query: CREATE TABLE test_src (key STRING, value STRING) +PREHOOK: query: CREATE TABLE test_src_n1 (key STRING, value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_src -POSTHOOK: query: CREATE TABLE test_src (key STRING, value STRING) +PREHOOK: Output: default@test_src_n1 +POSTHOOK: query: CREATE TABLE test_src_n1 (key STRING, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_src -PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_src +POSTHOOK: Output: default@test_src_n1 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_src_n1 SELECT key, value FROM ( SELECT key, value FROM src WHERE key = 0 @@ -15,7 +15,7 @@ UNION ALL GROUP BY key )a PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_src +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_src_n1 SELECT key, value FROM ( SELECT key, value FROM src WHERE key = 0 @@ -125,7 +125,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -149,7 +149,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -194,7 +194,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Stage: Stage-3 Stats Work @@ -202,7 +202,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.test_src + Table: default.test_src_n1 Stage: Stage-4 Map Reduce @@ -214,7 +214,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Stage: Stage-6 Map Reduce @@ -226,7 +226,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Stage: Stage-7 Move Operator @@ -234,7 +234,7 @@ STAGE PLANS: hdfs directory: true #### A 
masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE test_src +PREHOOK: query: INSERT OVERWRITE TABLE test_src_n1 SELECT key, value FROM ( SELECT key, value FROM src WHERE key = 0 @@ -244,8 +244,8 @@ UNION ALL )a PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_src -POSTHOOK: query: INSERT OVERWRITE TABLE test_src +PREHOOK: Output: default@test_src_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_src_n1 SELECT key, value FROM ( SELECT key, value FROM src WHERE key = 0 @@ -255,19 +255,19 @@ UNION ALL )a POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_src -POSTHOOK: Lineage: test_src.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_src.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] -PREHOOK: query: SELECT COUNT(*) FROM test_src +POSTHOOK: Output: default@test_src_n1 +POSTHOOK: Lineage: test_src_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src_n1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), (src)src.null, ] +PREHOOK: query: SELECT COUNT(*) FROM test_src_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@test_src +PREHOOK: Input: default@test_src_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(*) FROM test_src +POSTHOOK: query: SELECT COUNT(*) FROM test_src_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_src +POSTHOOK: Input: default@test_src_n1 #### A masked pattern was here #### 312 -PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_src +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_src_n1 SELECT key, value FROM ( SELECT key, cast(COUNT(*) as string) AS value FROM src GROUP BY key @@ -276,7 +276,7 @@ UNION ALL WHERE key = 0 )a PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_src +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_src_n1 SELECT key, value FROM ( SELECT key, cast(COUNT(*) as string) AS value FROM src GROUP BY key @@ -377,7 +377,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -410,7 +410,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -455,7 +455,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Stage: Stage-4 Stats Work @@ -463,7 +463,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.test_src + Table: default.test_src_n1 Stage: Stage-5 Map Reduce @@ -475,7 +475,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Stage: Stage-7 Map Reduce @@ -487,7 +487,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test_src + name: default.test_src_n1 Stage: Stage-8 Move Operator @@ -495,7 +495,7 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: INSERT OVERWRITE TABLE test_src +PREHOOK: query: INSERT OVERWRITE TABLE test_src_n1 SELECT key, value FROM ( SELECT key, cast(COUNT(*) as string) AS value FROM src GROUP BY key @@ -505,8 +505,8 @@ UNION ALL )a PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@test_src -POSTHOOK: query: INSERT OVERWRITE TABLE test_src +PREHOOK: Output: default@test_src_n1 +POSTHOOK: query: INSERT OVERWRITE TABLE test_src_n1 SELECT key, value FROM ( SELECT key, cast(COUNT(*) as string) AS value FROM src GROUP BY key @@ -516,15 +516,15 @@ UNION ALL )a POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@test_src -POSTHOOK: Lineage: test_src.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: test_src.value EXPRESSION [(src)src.null, (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT COUNT(*) FROM test_src +POSTHOOK: Output: default@test_src_n1 +POSTHOOK: Lineage: test_src_n1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: test_src_n1.value EXPRESSION [(src)src.null, (src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT COUNT(*) FROM test_src_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@test_src +PREHOOK: Input: default@test_src_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(*) FROM test_src +POSTHOOK: query: SELECT COUNT(*) FROM test_src_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_src +POSTHOOK: Input: default@test_src_n1 #### A masked pattern was here #### 312 diff --git a/ql/src/test/results/clientpositive/union34.q.out b/ql/src/test/results/clientpositive/union34.q.out index 527b85a517..141c247060 100644 --- a/ql/src/test/results/clientpositive/union34.q.out +++ b/ql/src/test/results/clientpositive/union34.q.out @@ -1,77 +1,77 @@ -PREHOOK: query: create table src10_1 (key string, value string) +PREHOOK: query: create table src10_1_n0 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src10_1 -POSTHOOK: query: create table src10_1 (key string, value string) +PREHOOK: Output: default@src10_1_n0 +POSTHOOK: query: create table src10_1_n0 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src10_1 -PREHOOK: query: create table src10_2 (key string, value string) +POSTHOOK: Output: default@src10_1_n0 +PREHOOK: query: create table src10_2_n0 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src10_2 -POSTHOOK: query: create table src10_2 (key string, value string) +PREHOOK: Output: default@src10_2_n0 +POSTHOOK: query: create table src10_2_n0 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src10_2 -PREHOOK: query: create table src10_3 (key string, value string) +POSTHOOK: 
Output: default@src10_2_n0 +PREHOOK: query: create table src10_3_n0 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src10_3 -POSTHOOK: query: create table src10_3 (key string, value string) +PREHOOK: Output: default@src10_3_n0 +POSTHOOK: query: create table src10_3_n0 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src10_3 -PREHOOK: query: create table src10_4 (key string, value string) +POSTHOOK: Output: default@src10_3_n0 +PREHOOK: query: create table src10_4_n0 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src10_4 -POSTHOOK: query: create table src10_4 (key string, value string) +PREHOOK: Output: default@src10_4_n0 +POSTHOOK: query: create table src10_4_n0 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src10_4 +POSTHOOK: Output: default@src10_4_n0 PREHOOK: query: from (select * from src tablesample (10 rows)) a -insert overwrite table src10_1 select * -insert overwrite table src10_2 select * -insert overwrite table src10_3 select * -insert overwrite table src10_4 select * +insert overwrite table src10_1_n0 select * +insert overwrite table src10_2_n0 select * +insert overwrite table src10_3_n0 select * +insert overwrite table src10_4_n0 select * PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@src10_1 -PREHOOK: Output: default@src10_2 -PREHOOK: Output: default@src10_3 -PREHOOK: Output: default@src10_4 +PREHOOK: Output: default@src10_1_n0 +PREHOOK: Output: default@src10_2_n0 +PREHOOK: Output: default@src10_3_n0 +PREHOOK: Output: default@src10_4_n0 POSTHOOK: query: from (select * from src tablesample (10 rows)) a -insert overwrite table src10_1 select * -insert overwrite table src10_2 select * -insert overwrite table src10_3 select * -insert overwrite table src10_4 select * +insert overwrite table src10_1_n0 select * +insert overwrite table src10_2_n0 select * +insert overwrite table src10_3_n0 select * +insert overwrite table src10_4_n0 select * POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@src10_1 -POSTHOOK: Output: default@src10_2 -POSTHOOK: Output: default@src10_3 -POSTHOOK: Output: default@src10_4 -POSTHOOK: Lineage: src10_1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src10_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src10_2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src10_2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src10_3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src10_3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: src10_4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: src10_4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@src10_1_n0 +POSTHOOK: Output: default@src10_2_n0 +POSTHOOK: Output: default@src10_3_n0 +POSTHOOK: Output: default@src10_4_n0 +POSTHOOK: Lineage: src10_1_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src10_1_n0.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ] +POSTHOOK: Lineage: src10_2_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src10_2_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src10_3_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src10_3_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: src10_4_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src10_4_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain SELECT * FROM ( - SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) + SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key) UNION ALL - SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 + SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0 ) alias1 PREHOOK: type: QUERY POSTHOOK: query: explain SELECT * FROM ( - SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) + SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key) UNION ALL - SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 + SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0 ) alias1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -83,13 +83,13 @@ STAGE PLANS: Stage: Stage-7 Map Reduce Local Work Alias -> Map Local Tables: - null-subquery1-subquery1:$hdt$_1-subquery1-subquery1:$hdt$_0:src10_1 + null-subquery1-subquery1:$hdt$_1-subquery1-subquery1:$hdt$_0:src10_1_n0 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - null-subquery1-subquery1:$hdt$_1-subquery1-subquery1:$hdt$_0:src10_1 + null-subquery1-subquery1:$hdt$_1-subquery1-subquery1:$hdt$_0:src10_1_n0 TableScan - alias: src10_1 + alias: src10_1_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -107,7 +107,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src10_3 + alias: src10_3_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -123,7 +123,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src10_4 + alias: src10_4_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -139,7 +139,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src10_2 + alias: src10_2_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -175,26 +175,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM ( - SELECT sub1.key,sub1.value FROM (SELECT * FROM 
src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) + SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key) UNION ALL - SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 + SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0 ) alias1 PREHOOK: type: QUERY -PREHOOK: Input: default@src10_1 -PREHOOK: Input: default@src10_2 -PREHOOK: Input: default@src10_3 -PREHOOK: Input: default@src10_4 +PREHOOK: Input: default@src10_1_n0 +PREHOOK: Input: default@src10_2_n0 +PREHOOK: Input: default@src10_3_n0 +PREHOOK: Input: default@src10_4_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM ( - SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) + SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key) UNION ALL - SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 + SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0 ) alias1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src10_1 -POSTHOOK: Input: default@src10_2 -POSTHOOK: Input: default@src10_3 -POSTHOOK: Input: default@src10_4 +POSTHOOK: Input: default@src10_1_n0 +POSTHOOK: Input: default@src10_2_n0 +POSTHOOK: Input: default@src10_3_n0 +POSTHOOK: Input: default@src10_4_n0 #### A masked pattern was here #### 165 val_165 165 val_165 @@ -228,16 +228,16 @@ POSTHOOK: Input: default@src10_4 98 val_98 PREHOOK: query: explain SELECT * FROM ( - SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) + SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key) UNION ALL - SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 + SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0 ) alias1 PREHOOK: type: QUERY POSTHOOK: query: explain SELECT * FROM ( - SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) + SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key) UNION ALL - SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 + SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0 ) alias1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -250,7 +250,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src10_1 + alias: src10_1_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -266,7 +266,7 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) TableScan - alias: src10_2 + alias: src10_2_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) @@ -310,7 +310,7 @@ STAGE PLANS: output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src10_3 + alias: src10_3_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -326,7 +326,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src10_4 + alias: src10_4_n0 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -349,26 +349,26 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT * FROM ( - SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) + SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key) UNION ALL - SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 + SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0 ) alias1 PREHOOK: type: QUERY -PREHOOK: Input: default@src10_1 -PREHOOK: Input: default@src10_2 -PREHOOK: Input: default@src10_3 -PREHOOK: Input: default@src10_4 +PREHOOK: Input: default@src10_1_n0 +PREHOOK: Input: default@src10_2_n0 +PREHOOK: Input: default@src10_3_n0 +PREHOOK: Input: default@src10_4_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM ( - SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) + SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1_n0) sub1 JOIN (SELECT * FROM src10_2_n0) sub0 ON (sub0.key = sub1.key) UNION ALL - SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 + SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3_n0) sub2 UNION ALL SELECT * FROM src10_4_n0 ) alias0 ) alias1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@src10_1 -POSTHOOK: Input: default@src10_2 -POSTHOOK: Input: default@src10_3 -POSTHOOK: Input: default@src10_4 +POSTHOOK: Input: default@src10_1_n0 +POSTHOOK: Input: default@src10_2_n0 +POSTHOOK: Input: default@src10_3_n0 +POSTHOOK: Input: default@src10_4_n0 #### A masked pattern was here #### 165 val_165 165 val_165 diff --git a/ql/src/test/results/clientpositive/union4.q.out b/ql/src/test/results/clientpositive/union4.q.out index 996a4f3e1c..7f5844e809 100644 --- a/ql/src/test/results/clientpositive/union4.q.out +++ b/ql/src/test/results/clientpositive/union4.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: create table tmptable(key string, value int) +PREHOOK: query: create table tmptable_n12(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string, value int) +PREHOOK: Output: default@tmptable_n12 +POSTHOOK: query: create table tmptable_n12(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable +POSTHOOK: Output: default@tmptable_n12 PREHOOK: query: explain -insert overwrite table tmptable +insert overwrite table tmptable_n12 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2) unionsrc PREHOOK: type: 
QUERY POSTHOOK: query: explain -insert overwrite table tmptable +insert overwrite table tmptable_n12 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2) unionsrc @@ -83,7 +83,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n12 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, value @@ -111,7 +111,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n12 Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: key, value @@ -156,7 +156,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n12 Stage: Stage-3 Stats Work @@ -164,7 +164,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, int - Table: default.tmptable + Table: default.tmptable_n12 Stage: Stage-4 Map Reduce @@ -176,7 +176,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n12 Stage: Stage-6 Map Reduce @@ -188,7 +188,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n12 Stage: Stage-7 Move Operator @@ -231,29 +231,29 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe -PREHOOK: query: insert overwrite table tmptable +PREHOOK: query: insert overwrite table tmptable_n12 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2) unionsrc PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tmptable -POSTHOOK: query: insert overwrite table tmptable +PREHOOK: Output: default@tmptable_n12 +POSTHOOK: query: insert overwrite table tmptable_n12 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2) unionsrc POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key EXPRESSION [] -POSTHOOK: Lineage: tmptable.value EXPRESSION [(src)s1.null, (src)s2.null, ] -PREHOOK: query: select * from tmptable x sort by x.key +POSTHOOK: Output: default@tmptable_n12 +POSTHOOK: Lineage: tmptable_n12.key EXPRESSION [] +POSTHOOK: Lineage: tmptable_n12.value EXPRESSION [(src)s1.null, (src)s2.null, ] +PREHOOK: query: select * from tmptable_n12 x sort by x.key PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n12 #### A masked pattern was here #### 
-POSTHOOK: query: select * from tmptable x sort by x.key +POSTHOOK: query: select * from tmptable_n12 x sort by x.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n12 #### A masked pattern was here #### tst1 500 tst2 500 diff --git a/ql/src/test/results/clientpositive/union6.q.out b/ql/src/test/results/clientpositive/union6.q.out index d199fb514d..37c75214c3 100644 --- a/ql/src/test/results/clientpositive/union6.q.out +++ b/ql/src/test/results/clientpositive/union6.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: create table tmptable(key string, value string) +PREHOOK: query: create table tmptable_n5(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tmptable -POSTHOOK: query: create table tmptable(key string, value string) +PREHOOK: Output: default@tmptable_n5 +POSTHOOK: query: create table tmptable_n5(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmptable +POSTHOOK: Output: default@tmptable_n5 PREHOOK: query: explain -insert overwrite table tmptable +insert overwrite table tmptable_n5 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table tmptable +insert overwrite table tmptable_n5 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc @@ -78,7 +78,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -108,7 +108,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n5 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: key, value @@ -153,7 +153,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n5 Stage: Stage-3 Stats Work @@ -161,7 +161,7 @@ STAGE PLANS: Column Stats Desc: Columns: key, value Column Types: string, string - Table: default.tmptable + Table: default.tmptable_n5 Stage: Stage-4 Map Reduce @@ -173,7 +173,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n5 Stage: Stage-6 Map Reduce @@ -185,7 +185,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + name: default.tmptable_n5 Stage: Stage-7 Move Operator @@ -193,31 +193,31 @@ STAGE PLANS: hdfs directory: true #### A masked 
pattern was here #### -PREHOOK: query: insert overwrite table tmptable +PREHOOK: query: insert overwrite table tmptable_n5 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 -PREHOOK: Output: default@tmptable -POSTHOOK: query: insert overwrite table tmptable +PREHOOK: Output: default@tmptable_n5 +POSTHOOK: query: insert overwrite table tmptable_n5 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@tmptable -POSTHOOK: Lineage: tmptable.key EXPRESSION [(src1)s2.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tmptable.value EXPRESSION [(src)s1.null, (src1)s2.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from tmptable x sort by x.key, x.value +POSTHOOK: Output: default@tmptable_n5 +POSTHOOK: Lineage: tmptable_n5.key EXPRESSION [(src1)s2.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tmptable_n5.value EXPRESSION [(src)s1.null, (src1)s2.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from tmptable_n5 x sort by x.key, x.value PREHOOK: type: QUERY -PREHOOK: Input: default@tmptable +PREHOOK: Input: default@tmptable_n5 #### A masked pattern was here #### -POSTHOOK: query: select * from tmptable x sort by x.key, x.value +POSTHOOK: query: select * from tmptable_n5 x sort by x.key, x.value POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmptable +POSTHOOK: Input: default@tmptable_n5 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_paren.q.out b/ql/src/test/results/clientpositive/union_paren.q.out index b78dd20523..dc7cf0eadf 100644 --- a/ql/src/test/results/clientpositive/union_paren.q.out +++ b/ql/src/test/results/clientpositive/union_paren.q.out @@ -49,40 +49,40 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: create table t1(c int) +PREHOOK: query: create table t1_n0(c int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t1 -POSTHOOK: query: create table t1(c int) +PREHOOK: Output: default@t1_n0 +POSTHOOK: query: create table t1_n0(c int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@t1 -PREHOOK: query: insert into t1 values (1),(1),(2) +POSTHOOK: Output: default@t1_n0 +PREHOOK: query: insert into t1_n0 values (1),(1),(2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t1 -POSTHOOK: query: insert into t1 values (1),(1),(2) +PREHOOK: Output: default@t1_n0 +POSTHOOK: query: insert into t1_n0 values (1),(1),(2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t1 -POSTHOOK: Lineage: t1.c SCRIPT [] -PREHOOK: query: create table t2(c int) +POSTHOOK: Output: default@t1_n0 +POSTHOOK: Lineage: t1_n0.c SCRIPT [] +PREHOOK: query: create table t2_n0(c int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@t2 -POSTHOOK: query: create table t2(c int) +PREHOOK: Output: default@t2_n0 +POSTHOOK: query: create table t2_n0(c int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
-POSTHOOK: Output: default@t2 -PREHOOK: query: insert into t2 values (2),(1),(2) +POSTHOOK: Output: default@t2_n0 +PREHOOK: query: insert into t2_n0 values (2),(1),(2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t2 -POSTHOOK: query: insert into t2 values (2),(1),(2) +PREHOOK: Output: default@t2_n0 +POSTHOOK: query: insert into t2_n0 values (2),(1),(2) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t2.c SCRIPT [] +POSTHOOK: Output: default@t2_n0 +POSTHOOK: Lineage: t2_n0.c SCRIPT [] PREHOOK: query: create table t3(c int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -100,31 +100,31 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@t3 POSTHOOK: Lineage: t3.c SCRIPT [] -PREHOOK: query: (select * from t1) union all select * from t2 union select * from t3 order by c +PREHOOK: query: (select * from t1_n0) union all select * from t2_n0 union select * from t3 order by c PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n0 +PREHOOK: Input: default@t2_n0 PREHOOK: Input: default@t3 #### A masked pattern was here #### -POSTHOOK: query: (select * from t1) union all select * from t2 union select * from t3 order by c +POSTHOOK: query: (select * from t1_n0) union all select * from t2_n0 union select * from t3 order by c POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n0 +POSTHOOK: Input: default@t2_n0 POSTHOOK: Input: default@t3 #### A masked pattern was here #### 1 2 3 -PREHOOK: query: (select * from t1) union all (select * from t2 union select * from t3) order by c +PREHOOK: query: (select * from t1_n0) union all (select * from t2_n0 union select * from t3) order by c PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n0 +PREHOOK: Input: default@t2_n0 PREHOOK: Input: default@t3 #### A masked pattern was here #### -POSTHOOK: query: (select * from t1) union all (select * from t2 union select * from t3) order by c +POSTHOOK: query: (select * from t1_n0) union all (select * from t2_n0 union select * from t3) order by c POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n0 +POSTHOOK: Input: default@t2_n0 POSTHOOK: Input: default@t3 #### A masked pattern was here #### 1 diff --git a/ql/src/test/results/clientpositive/union_pos_alias.q.out b/ql/src/test/results/clientpositive/union_pos_alias.q.out index 665236eb9d..bf9d0d452c 100644 --- a/ql/src/test/results/clientpositive/union_pos_alias.q.out +++ b/ql/src/test/results/clientpositive/union_pos_alias.q.out @@ -309,27 +309,27 @@ POSTHOOK: query: drop table src_10 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@src_10 POSTHOOK: Output: default@src_10 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n8 PREHOOK: type: DROPVIEW -POSTHOOK: query: drop view v +POSTHOOK: query: drop view v_n8 POSTHOOK: type: DROPVIEW -PREHOOK: query: create view v as select key as k from src intersect all select key as k1 from src +PREHOOK: query: create view v_n8 as select key as k from src intersect all select key as k1 from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select key as k from src intersect all select key as k1 from src +PREHOOK: Output: 
default@v_n8 +POSTHOOK: query: create view v_n8 as select key as k from src intersect all select key as k1 from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v.k SCRIPT [(src)src.null, (src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n8 +POSTHOOK: Lineage: v_n8.k SCRIPT [(src)src.null, (src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: desc formatted v_n8 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n8 +POSTHOOK: query: desc formatted v_n8 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n8 # col_name data_type comment k string @@ -355,23 +355,23 @@ Sort Columns: [] View Original Text: select key as k from src intersect all select key as k1 from src View Expanded Text: select `src`.`key` as `k` from `default`.`src` intersect all select `src`.`key` as `k1` from `default`.`src` View Rewrite Enabled: No -PREHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: query: create table masking_test_n9 as select cast(key as int) as key, value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@masking_test -POSTHOOK: query: create table masking_test as select cast(key as int) as key, value from src +PREHOOK: Output: default@masking_test_n9 +POSTHOOK: query: create table masking_test_n9 as select cast(key as int) as key, value from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@masking_test -POSTHOOK: Lineage: masking_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@masking_test_n9 +POSTHOOK: Lineage: masking_test_n9.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: masking_test_n9.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain -select * from masking_test union all select * from masking_test +select * from masking_test_n9 union all select * from masking_test_n9 PREHOOK: type: QUERY POSTHOOK: query: explain -select * from masking_test union all select * from masking_test +select * from masking_test_n9 union all select * from masking_test_n9 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -382,43 +382,37 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n9 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE - table: - input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: masking_test + alias: masking_test_n9 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (((key % 2) = 0) and (key < 10)) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE - Union - Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Stage: Stage-0 Fetch Operator @@ -426,31 +420,1019 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from masking_test union all select * from masking_test +PREHOOK: query: select * from masking_test_n9 union all select * from masking_test_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from masking_test union all select * from masking_test +POSTHOOK: query: select * from masking_test_n9 union all select * from masking_test_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n9 #### A masked pattern was here #### -0 0_lav -0 0_lav -4 4_lav -4 4_lav -8 8_lav -8 8_lav -0 0_lav -0 0_lav -0 0_lav -0 0_lav -2 2_lav -2 2_lav +238 val_238 +238 val_238 +86 val_86 +86 val_86 +311 val_311 +311 val_311 +27 val_27 +27 val_27 +165 val_165 +165 val_165 +409 val_409 +409 val_409 +255 val_255 +255 val_255 +278 val_278 +278 val_278 +98 val_98 +98 val_98 +484 val_484 +484 val_484 +265 val_265 +265 val_265 +193 val_193 +193 val_193 +401 val_401 +401 val_401 +150 val_150 +150 val_150 +273 val_273 +273 val_273 
+224 val_224 +224 val_224 +369 val_369 +369 val_369 +66 val_66 +66 val_66 +128 val_128 +128 val_128 +213 val_213 +213 val_213 +146 val_146 +146 val_146 +406 val_406 +406 val_406 +429 val_429 +429 val_429 +374 val_374 +374 val_374 +152 val_152 +152 val_152 +469 val_469 +469 val_469 +145 val_145 +145 val_145 +495 val_495 +495 val_495 +37 val_37 +37 val_37 +327 val_327 +327 val_327 +281 val_281 +281 val_281 +277 val_277 +277 val_277 +209 val_209 +209 val_209 +15 val_15 +15 val_15 +82 val_82 +82 val_82 +403 val_403 +403 val_403 +166 val_166 +166 val_166 +417 val_417 +417 val_417 +430 val_430 +430 val_430 +252 val_252 +252 val_252 +292 val_292 +292 val_292 +219 val_219 +219 val_219 +287 val_287 +287 val_287 +153 val_153 +153 val_153 +193 val_193 +193 val_193 +338 val_338 +338 val_338 +446 val_446 +446 val_446 +459 val_459 +459 val_459 +394 val_394 +394 val_394 +237 val_237 +237 val_237 +482 val_482 +482 val_482 +174 val_174 +174 val_174 +413 val_413 +413 val_413 +494 val_494 +494 val_494 +207 val_207 +207 val_207 +199 val_199 +199 val_199 +466 val_466 +466 val_466 +208 val_208 +208 val_208 +174 val_174 +174 val_174 +399 val_399 +399 val_399 +396 val_396 +396 val_396 +247 val_247 +247 val_247 +417 val_417 +417 val_417 +489 val_489 +489 val_489 +162 val_162 +162 val_162 +377 val_377 +377 val_377 +397 val_397 +397 val_397 +309 val_309 +309 val_309 +365 val_365 +365 val_365 +266 val_266 +266 val_266 +439 val_439 +439 val_439 +342 val_342 +342 val_342 +367 val_367 +367 val_367 +325 val_325 +325 val_325 +167 val_167 +167 val_167 +195 val_195 +195 val_195 +475 val_475 +475 val_475 +17 val_17 +17 val_17 +113 val_113 +113 val_113 +155 val_155 +155 val_155 +203 val_203 +203 val_203 +339 val_339 +339 val_339 +0 val_0 +0 val_0 +455 val_455 +455 val_455 +128 val_128 +128 val_128 +311 val_311 +311 val_311 +316 val_316 +316 val_316 +57 val_57 +57 val_57 +302 val_302 +302 val_302 +205 val_205 +205 val_205 +149 val_149 +149 val_149 +438 val_438 +438 val_438 +345 val_345 +345 val_345 +129 val_129 +129 val_129 +170 val_170 +170 val_170 +20 val_20 +20 val_20 +489 val_489 +489 val_489 +157 val_157 +157 val_157 +378 val_378 +378 val_378 +221 val_221 +221 val_221 +92 val_92 +92 val_92 +111 val_111 +111 val_111 +47 val_47 +47 val_47 +72 val_72 +72 val_72 +4 val_4 +4 val_4 +280 val_280 +280 val_280 +35 val_35 +35 val_35 +427 val_427 +427 val_427 +277 val_277 +277 val_277 +208 val_208 +208 val_208 +356 val_356 +356 val_356 +399 val_399 +399 val_399 +169 val_169 +169 val_169 +382 val_382 +382 val_382 +498 val_498 +498 val_498 +125 val_125 +125 val_125 +386 val_386 +386 val_386 +437 val_437 +437 val_437 +469 val_469 +469 val_469 +192 val_192 +192 val_192 +286 val_286 +286 val_286 +187 val_187 +187 val_187 +176 val_176 +176 val_176 +54 val_54 +54 val_54 +459 val_459 +459 val_459 +51 val_51 +51 val_51 +138 val_138 +138 val_138 +103 val_103 +103 val_103 +239 val_239 +239 val_239 +213 val_213 +213 val_213 +216 val_216 +216 val_216 +430 val_430 +430 val_430 +278 val_278 +278 val_278 +176 val_176 +176 val_176 +289 val_289 +289 val_289 +221 val_221 +221 val_221 +65 val_65 +65 val_65 +318 val_318 +318 val_318 +332 val_332 +332 val_332 +311 val_311 +311 val_311 +275 val_275 +275 val_275 +137 val_137 +137 val_137 +241 val_241 +241 val_241 +83 val_83 +83 val_83 +333 val_333 +333 val_333 +180 val_180 +180 val_180 +284 val_284 +284 val_284 +12 val_12 +12 val_12 +230 val_230 +230 val_230 +181 val_181 +181 val_181 +67 val_67 +67 val_67 +260 val_260 +260 val_260 +404 val_404 +404 val_404 +384 val_384 +384 val_384 +489 val_489 +489 
val_489 +353 val_353 +353 val_353 +373 val_373 +373 val_373 +272 val_272 +272 val_272 +138 val_138 +138 val_138 +217 val_217 +217 val_217 +84 val_84 +84 val_84 +348 val_348 +348 val_348 +466 val_466 +466 val_466 +58 val_58 +58 val_58 +8 val_8 +8 val_8 +411 val_411 +411 val_411 +230 val_230 +230 val_230 +208 val_208 +208 val_208 +348 val_348 +348 val_348 +24 val_24 +24 val_24 +463 val_463 +463 val_463 +431 val_431 +431 val_431 +179 val_179 +179 val_179 +172 val_172 +172 val_172 +42 val_42 +42 val_42 +129 val_129 +129 val_129 +158 val_158 +158 val_158 +119 val_119 +119 val_119 +496 val_496 +496 val_496 +0 val_0 +0 val_0 +322 val_322 +322 val_322 +197 val_197 +197 val_197 +468 val_468 +468 val_468 +393 val_393 +393 val_393 +454 val_454 +454 val_454 +100 val_100 +100 val_100 +298 val_298 +298 val_298 +199 val_199 +199 val_199 +191 val_191 +191 val_191 +418 val_418 +418 val_418 +96 val_96 +96 val_96 +26 val_26 +26 val_26 +165 val_165 +165 val_165 +327 val_327 +327 val_327 +230 val_230 +230 val_230 +205 val_205 +205 val_205 +120 val_120 +120 val_120 +131 val_131 +131 val_131 +51 val_51 +51 val_51 +404 val_404 +404 val_404 +43 val_43 +43 val_43 +436 val_436 +436 val_436 +156 val_156 +156 val_156 +469 val_469 +469 val_469 +468 val_468 +468 val_468 +308 val_308 +308 val_308 +95 val_95 +95 val_95 +196 val_196 +196 val_196 +288 val_288 +288 val_288 +481 val_481 +481 val_481 +457 val_457 +457 val_457 +98 val_98 +98 val_98 +282 val_282 +282 val_282 +197 val_197 +197 val_197 +187 val_187 +187 val_187 +318 val_318 +318 val_318 +318 val_318 +318 val_318 +409 val_409 +409 val_409 +470 val_470 +470 val_470 +137 val_137 +137 val_137 +369 val_369 +369 val_369 +316 val_316 +316 val_316 +169 val_169 +169 val_169 +413 val_413 +413 val_413 +85 val_85 +85 val_85 +77 val_77 +77 val_77 +0 val_0 +0 val_0 +490 val_490 +490 val_490 +87 val_87 +87 val_87 +364 val_364 +364 val_364 +179 val_179 +179 val_179 +118 val_118 +118 val_118 +134 val_134 +134 val_134 +395 val_395 +395 val_395 +282 val_282 +282 val_282 +138 val_138 +138 val_138 +238 val_238 +238 val_238 +419 val_419 +419 val_419 +15 val_15 +15 val_15 +118 val_118 +118 val_118 +72 val_72 +72 val_72 +90 val_90 +90 val_90 +307 val_307 +307 val_307 +19 val_19 +19 val_19 +435 val_435 +435 val_435 +10 val_10 +10 val_10 +277 val_277 +277 val_277 +273 val_273 +273 val_273 +306 val_306 +306 val_306 +224 val_224 +224 val_224 +309 val_309 +309 val_309 +389 val_389 +389 val_389 +327 val_327 +327 val_327 +242 val_242 +242 val_242 +369 val_369 +369 val_369 +392 val_392 +392 val_392 +272 val_272 +272 val_272 +331 val_331 +331 val_331 +401 val_401 +401 val_401 +242 val_242 +242 val_242 +452 val_452 +452 val_452 +177 val_177 +177 val_177 +226 val_226 +226 val_226 +5 val_5 +5 val_5 +497 val_497 +497 val_497 +402 val_402 +402 val_402 +396 val_396 +396 val_396 +317 val_317 +317 val_317 +395 val_395 +395 val_395 +58 val_58 +58 val_58 +35 val_35 +35 val_35 +336 val_336 +336 val_336 +95 val_95 +95 val_95 +11 val_11 +11 val_11 +168 val_168 +168 val_168 +34 val_34 +34 val_34 +229 val_229 +229 val_229 +233 val_233 +233 val_233 +143 val_143 +143 val_143 +472 val_472 +472 val_472 +322 val_322 +322 val_322 +498 val_498 +498 val_498 +160 val_160 +160 val_160 +195 val_195 +195 val_195 +42 val_42 +42 val_42 +321 val_321 +321 val_321 +430 val_430 +430 val_430 +119 val_119 +119 val_119 +489 val_489 +489 val_489 +458 val_458 +458 val_458 +78 val_78 +78 val_78 +76 val_76 +76 val_76 +41 val_41 +41 val_41 +223 val_223 +223 val_223 +492 val_492 +492 val_492 +149 val_149 +149 val_149 +449 val_449 +449 
val_449 +218 val_218 +218 val_218 +228 val_228 +228 val_228 +138 val_138 +138 val_138 +453 val_453 +453 val_453 +30 val_30 +30 val_30 +209 val_209 +209 val_209 +64 val_64 +64 val_64 +468 val_468 +468 val_468 +76 val_76 +76 val_76 +74 val_74 +74 val_74 +342 val_342 +342 val_342 +69 val_69 +69 val_69 +230 val_230 +230 val_230 +33 val_33 +33 val_33 +368 val_368 +368 val_368 +103 val_103 +103 val_103 +296 val_296 +296 val_296 +113 val_113 +113 val_113 +216 val_216 +216 val_216 +367 val_367 +367 val_367 +344 val_344 +344 val_344 +167 val_167 +167 val_167 +274 val_274 +274 val_274 +219 val_219 +219 val_219 +239 val_239 +239 val_239 +485 val_485 +485 val_485 +116 val_116 +116 val_116 +223 val_223 +223 val_223 +256 val_256 +256 val_256 +263 val_263 +263 val_263 +70 val_70 +70 val_70 +487 val_487 +487 val_487 +480 val_480 +480 val_480 +401 val_401 +401 val_401 +288 val_288 +288 val_288 +191 val_191 +191 val_191 +5 val_5 +5 val_5 +244 val_244 +244 val_244 +438 val_438 +438 val_438 +128 val_128 +128 val_128 +467 val_467 +467 val_467 +432 val_432 +432 val_432 +202 val_202 +202 val_202 +316 val_316 +316 val_316 +229 val_229 +229 val_229 +469 val_469 +469 val_469 +463 val_463 +463 val_463 +280 val_280 +280 val_280 +2 val_2 +2 val_2 +35 val_35 +35 val_35 +283 val_283 +283 val_283 +331 val_331 +331 val_331 +235 val_235 +235 val_235 +80 val_80 +80 val_80 +44 val_44 +44 val_44 +193 val_193 +193 val_193 +321 val_321 +321 val_321 +335 val_335 +335 val_335 +104 val_104 +104 val_104 +466 val_466 +466 val_466 +366 val_366 +366 val_366 +175 val_175 +175 val_175 +403 val_403 +403 val_403 +483 val_483 +483 val_483 +53 val_53 +53 val_53 +105 val_105 +105 val_105 +257 val_257 +257 val_257 +406 val_406 +406 val_406 +409 val_409 +409 val_409 +190 val_190 +190 val_190 +406 val_406 +406 val_406 +401 val_401 +401 val_401 +114 val_114 +114 val_114 +258 val_258 +258 val_258 +90 val_90 +90 val_90 +203 val_203 +203 val_203 +262 val_262 +262 val_262 +348 val_348 +348 val_348 +424 val_424 +424 val_424 +12 val_12 +12 val_12 +396 val_396 +396 val_396 +201 val_201 +201 val_201 +217 val_217 +217 val_217 +164 val_164 +164 val_164 +431 val_431 +431 val_431 +454 val_454 +454 val_454 +478 val_478 +478 val_478 +298 val_298 +298 val_298 +125 val_125 +125 val_125 +431 val_431 +431 val_431 +164 val_164 +164 val_164 +424 val_424 +424 val_424 +187 val_187 +187 val_187 +382 val_382 +382 val_382 +5 val_5 +5 val_5 +70 val_70 +70 val_70 +397 val_397 +397 val_397 +480 val_480 +480 val_480 +291 val_291 +291 val_291 +24 val_24 +24 val_24 +351 val_351 +351 val_351 +255 val_255 +255 val_255 +104 val_104 +104 val_104 +70 val_70 +70 val_70 +163 val_163 +163 val_163 +438 val_438 +438 val_438 +119 val_119 +119 val_119 +414 val_414 +414 val_414 +200 val_200 +200 val_200 +491 val_491 +491 val_491 +237 val_237 +237 val_237 +439 val_439 +439 val_439 +360 val_360 +360 val_360 +248 val_248 +248 val_248 +479 val_479 +479 val_479 +305 val_305 +305 val_305 +417 val_417 +417 val_417 +199 val_199 +199 val_199 +444 val_444 +444 val_444 +120 val_120 +120 val_120 +429 val_429 +429 val_429 +169 val_169 +169 val_169 +443 val_443 +443 val_443 +323 val_323 +323 val_323 +325 val_325 +325 val_325 +277 val_277 +277 val_277 +230 val_230 +230 val_230 +478 val_478 +478 val_478 +178 val_178 +178 val_178 +468 val_468 +468 val_468 +310 val_310 +310 val_310 +317 val_317 +317 val_317 +333 val_333 +333 val_333 +493 val_493 +493 val_493 +460 val_460 +460 val_460 +207 val_207 +207 val_207 +249 val_249 +249 val_249 +265 val_265 +265 val_265 +480 val_480 +480 val_480 +83 val_83 +83 
val_83 +136 val_136 +136 val_136 +353 val_353 +353 val_353 +172 val_172 +172 val_172 +214 val_214 +214 val_214 +462 val_462 +462 val_462 +233 val_233 +233 val_233 +406 val_406 +406 val_406 +133 val_133 +133 val_133 +175 val_175 +175 val_175 +189 val_189 +189 val_189 +454 val_454 +454 val_454 +375 val_375 +375 val_375 +401 val_401 +401 val_401 +421 val_421 +421 val_421 +407 val_407 +407 val_407 +384 val_384 +384 val_384 +256 val_256 +256 val_256 +26 val_26 +26 val_26 +134 val_134 +134 val_134 +67 val_67 +67 val_67 +384 val_384 +384 val_384 +379 val_379 +379 val_379 +18 val_18 +18 val_18 +462 val_462 +462 val_462 +492 val_492 +492 val_492 +100 val_100 +100 val_100 +298 val_298 +298 val_298 +9 val_9 +9 val_9 +341 val_341 +341 val_341 +498 val_498 +498 val_498 +146 val_146 +146 val_146 +458 val_458 +458 val_458 +362 val_362 +362 val_362 +186 val_186 +186 val_186 +285 val_285 +285 val_285 +348 val_348 +348 val_348 +167 val_167 +167 val_167 +18 val_18 +18 val_18 +273 val_273 +273 val_273 +183 val_183 +183 val_183 +281 val_281 +281 val_281 +344 val_344 +344 val_344 +97 val_97 +97 val_97 +469 val_469 +469 val_469 +315 val_315 +315 val_315 +84 val_84 +84 val_84 +28 val_28 +28 val_28 +37 val_37 +37 val_37 +448 val_448 +448 val_448 +152 val_152 +152 val_152 +348 val_348 +348 val_348 +307 val_307 +307 val_307 +194 val_194 +194 val_194 +414 val_414 +414 val_414 +477 val_477 +477 val_477 +222 val_222 +222 val_222 +126 val_126 +126 val_126 +90 val_90 +90 val_90 +169 val_169 +169 val_169 +403 val_403 +403 val_403 +400 val_400 +400 val_400 +200 val_200 +200 val_200 +97 val_97 +97 val_97 PREHOOK: query: explain -select key as k1, value as v1 from masking_test where key > 0 intersect all select key as k2, value as v2 from masking_test where key > 0 +select key as k1, value as v1 from masking_test_n9 where key > 0 intersect all select key as k2, value as v2 from masking_test_n9 where key > 0 PREHOOK: type: QUERY POSTHOOK: query: explain -select key as k1, value as v1 from masking_test where key > 0 intersect all select key as k2, value as v2 from masking_test where key > 0 +select key as k1, value as v1 from masking_test_n9 where key > 0 intersect all select key as k2, value as v2 from masking_test_n9 where key > 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -463,27 +1445,23 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n9 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - keys: _col0 (type: int), _col1 (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - value expressions: _col2 (type: bigint) + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Group By Operator + 
aggregations: count() + keys: key (type: int), value (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: string) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -491,7 +1469,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 137 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -504,33 +1482,33 @@ STAGE PLANS: Map Operator Tree: TableScan Union - Statistics: Num rows: 26 Data size: 274 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: int), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 26 Data size: 274 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 26 Data size: 274 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint), _col3 (type: bigint) TableScan Union - Statistics: Num rows: 26 Data size: 274 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(_col2), count(_col2) keys: _col0 (type: int), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 26 Data size: 274 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 26 Data size: 274 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 166 Data size: 1762 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint), _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -538,7 +1516,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 13 Data size: 137 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col3 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE @@ -565,27 +1543,23 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: masking_test + alias: masking_test_n9 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((key % 2) = 0) and 
(key < 10) and (key > 0)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), reverse(value) (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - keys: _col0 (type: int), _col1 (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE - value expressions: _col2 (type: bigint) + predicate: (key > 0) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: key (type: int), value (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: string) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -593,7 +1567,7 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 13 Data size: 137 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -607,14 +1581,508 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select key as k1, value as v1 from masking_test where key > 0 intersect all select key as k2, value as v2 from masking_test where key > 0 +PREHOOK: query: select key as k1, value as v1 from masking_test_n9 where key > 0 intersect all select key as k2, value as v2 from masking_test_n9 where key > 0 PREHOOK: type: QUERY -PREHOOK: Input: default@masking_test +PREHOOK: Input: default@masking_test_n9 #### A masked pattern was here #### -POSTHOOK: query: select key as k1, value as v1 from masking_test where key > 0 intersect all select key as k2, value as v2 from masking_test where key > 0 +POSTHOOK: query: select key as k1, value as v1 from masking_test_n9 where key > 0 intersect all select key as k2, value as v2 from masking_test_n9 where key > 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@masking_test +POSTHOOK: Input: default@masking_test_n9 #### A masked pattern was here #### -2 2_lav -4 4_lav -8 8_lav +2 val_2 +4 val_4 +5 val_5 +5 val_5 +5 val_5 +8 val_8 +9 val_9 +10 val_10 +11 val_11 +12 val_12 +12 val_12 +15 val_15 +15 val_15 +17 val_17 +18 val_18 +18 val_18 +19 val_19 +20 val_20 +24 val_24 +24 val_24 +26 val_26 +26 val_26 +27 val_27 +28 val_28 +30 val_30 +33 val_33 +34 val_34 +35 val_35 +35 val_35 +35 val_35 +37 val_37 +37 val_37 +41 val_41 +42 val_42 +42 val_42 +43 val_43 +44 val_44 +47 val_47 +51 val_51 +51 val_51 +53 val_53 +54 val_54 +57 val_57 +58 val_58 +58 val_58 +64 val_64 +65 val_65 +66 val_66 +67 val_67 +67 val_67 +69 val_69 +70 val_70 +70 val_70 +70 val_70 +72 val_72 +72 val_72 +74 val_74 +76 val_76 +76 
val_76 +77 val_77 +78 val_78 +80 val_80 +82 val_82 +83 val_83 +83 val_83 +84 val_84 +84 val_84 +85 val_85 +86 val_86 +87 val_87 +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +200 val_200 +200 val_200 +201 val_201 +202 val_202 +203 val_203 +203 val_203 +205 val_205 +205 val_205 +207 val_207 +207 val_207 +208 val_208 +208 val_208 +208 val_208 +209 val_209 +209 val_209 +213 val_213 +213 val_213 +214 val_214 +216 val_216 +216 val_216 +217 val_217 +217 val_217 +218 val_218 +219 val_219 +219 val_219 +221 val_221 +221 val_221 +222 val_222 +223 val_223 +223 val_223 +224 val_224 +224 val_224 +226 val_226 +228 val_228 +229 val_229 +229 val_229 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +230 val_230 +233 val_233 +233 val_233 +235 val_235 +237 val_237 +237 val_237 +238 val_238 +238 val_238 +239 val_239 +239 val_239 +241 val_241 +242 val_242 +242 val_242 +244 val_244 +247 val_247 +248 val_248 +249 val_249 +252 val_252 +255 val_255 +255 val_255 +256 val_256 +256 val_256 +257 val_257 +258 val_258 +260 val_260 +262 val_262 +263 val_263 +265 val_265 +265 val_265 +266 val_266 +272 val_272 +272 val_272 +273 val_273 +273 val_273 +273 val_273 +274 val_274 +275 val_275 +277 val_277 +277 val_277 +277 val_277 +277 val_277 +278 val_278 +278 val_278 +280 val_280 +280 val_280 +281 val_281 +281 val_281 +282 val_282 +282 val_282 +283 val_283 +284 val_284 +285 val_285 +286 val_286 +287 val_287 +288 val_288 +288 val_288 +289 val_289 +291 val_291 +292 val_292 +296 val_296 +298 val_298 +298 val_298 +298 val_298 +302 val_302 +305 val_305 +306 val_306 +307 val_307 +307 val_307 +308 val_308 +309 val_309 +309 val_309 +310 val_310 +311 val_311 +311 val_311 +311 val_311 +315 val_315 +316 val_316 +316 val_316 +316 val_316 +317 val_317 +317 val_317 +318 val_318 +318 val_318 +318 val_318 +321 val_321 +321 val_321 +322 val_322 +322 val_322 +323 val_323 +325 val_325 +325 val_325 +327 val_327 +327 val_327 +327 val_327 +331 val_331 +331 val_331 +332 val_332 +333 val_333 +333 val_333 +335 val_335 +336 val_336 +338 val_338 +339 val_339 +341 val_341 +342 val_342 +342 val_342 +344 val_344 +344 val_344 +345 val_345 
+348 val_348 +348 val_348 +348 val_348 +348 val_348 +348 val_348 +351 val_351 +353 val_353 +353 val_353 +356 val_356 +360 val_360 +362 val_362 +364 val_364 +365 val_365 +366 val_366 +367 val_367 +367 val_367 +368 val_368 +369 val_369 +369 val_369 +369 val_369 +373 val_373 +374 val_374 +375 val_375 +377 val_377 +378 val_378 +379 val_379 +382 val_382 +382 val_382 +384 val_384 +384 val_384 +384 val_384 +386 val_386 +389 val_389 +392 val_392 +393 val_393 +394 val_394 +395 val_395 +395 val_395 +396 val_396 +396 val_396 +396 val_396 +397 val_397 +397 val_397 +399 val_399 +399 val_399 +400 val_400 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +401 val_401 +402 val_402 +403 val_403 +403 val_403 +403 val_403 +404 val_404 +404 val_404 +406 val_406 +406 val_406 +406 val_406 +406 val_406 +407 val_407 +409 val_409 +409 val_409 +409 val_409 +411 val_411 +413 val_413 +413 val_413 +414 val_414 +414 val_414 +417 val_417 +417 val_417 +417 val_417 +418 val_418 +419 val_419 +421 val_421 +424 val_424 +424 val_424 +427 val_427 +429 val_429 +429 val_429 +430 val_430 +430 val_430 +430 val_430 +431 val_431 +431 val_431 +431 val_431 +432 val_432 +435 val_435 +436 val_436 +437 val_437 +438 val_438 +438 val_438 +438 val_438 +439 val_439 +439 val_439 +443 val_443 +444 val_444 +446 val_446 +448 val_448 +449 val_449 +452 val_452 +453 val_453 +454 val_454 +454 val_454 +454 val_454 +455 val_455 +457 val_457 +458 val_458 +458 val_458 +459 val_459 +459 val_459 +460 val_460 +462 val_462 +462 val_462 +463 val_463 +463 val_463 +466 val_466 +466 val_466 +466 val_466 +467 val_467 +468 val_468 +468 val_468 +468 val_468 +468 val_468 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +469 val_469 +470 val_470 +472 val_472 +475 val_475 +477 val_477 +478 val_478 +478 val_478 +479 val_479 +480 val_480 +480 val_480 +480 val_480 +481 val_481 +482 val_482 +483 val_483 +484 val_484 +485 val_485 +487 val_487 +489 val_489 +489 val_489 +489 val_489 +489 val_489 +490 val_490 +491 val_491 +492 val_492 +492 val_492 +493 val_493 +494 val_494 +495 val_495 +496 val_496 +497 val_497 +498 val_498 +498 val_498 +498 val_498 diff --git a/ql/src/test/results/clientpositive/union_remove_10.q.out b/ql/src/test/results/clientpositive/union_remove_10.q.out index 70ff8f2270..8efe7c52d5 100644 --- a/ql/src/test/results/clientpositive/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/union_remove_10.q.out @@ -1,50 +1,50 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n7(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n7 +POSTHOOK: query: create table inputTbl1_n7(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: Output: default@inputTbl1_n7 +PREHOOK: query: create table outputTbl1_n9(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: Output: default@outputTbl1_n9 +POSTHOOK: query: create table outputTbl1_n9(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE 
POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n9 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n7 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n7 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n7 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n7 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n9 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n7 union all select * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n7 group by key UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n7 ) a )b PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n9 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n7 union all select * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n7 group by key UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n7 ) a )b POSTHOOK: type: QUERY @@ -64,7 +64,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n7 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 1L (type: bigint) @@ -77,7 +77,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n9 Execution mode: vectorized Stage: Stage-6 @@ -97,7 +97,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n9 Stage: Stage-2 Merge File Operator @@ -123,7 +123,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n7 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -156,13 +156,13 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n9 Stage: Stage-8 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n7 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 2L (type: bigint) @@ -175,45 +175,45 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n9 Execution mode: vectorized -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n9 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n7 union all select * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n7 group by key UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n7 ) a )b PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n7 +PREHOOK: Output: default@outputtbl1_n9 +POSTHOOK: query: insert overwrite table outputTbl1_n9 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n7 union all select * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n7 group by key UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n7 ) a )b POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n7 +POSTHOOK: Output: default@outputtbl1_n9 +POSTHOOK: Lineage: outputtbl1_n9.key EXPRESSION [(inputtbl1_n7)inputtbl1_n7.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n9.values EXPRESSION [(inputtbl1_n7)inputtbl1_n7.null, ] +PREHOOK: query: desc formatted outputTbl1_n9 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n9 +POSTHOOK: query: desc formatted outputTbl1_n9 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n9 # col_name data_type comment key string values bigint @@ -240,13 +240,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n9 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n9 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_11.q.out b/ql/src/test/results/clientpositive/union_remove_11.q.out index 4fc7659ae9..37f6fb5103 100644 --- a/ql/src/test/results/clientpositive/union_remove_11.q.out +++ b/ql/src/test/results/clientpositive/union_remove_11.q.out @@ -1,50 +1,50 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n14(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n14 +POSTHOOK: query: create 
table inputTbl1_n14(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: Output: default@inputTbl1_n14 +PREHOOK: query: create table outputTbl1_n21(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: Output: default@outputTbl1_n21 +POSTHOOK: query: create table outputTbl1_n21(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n21 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n14 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n14 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n14 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n14 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n21 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n14 union all select * FROM ( - SELECT key, 2 `values` from inputTbl1 + SELECT key, 2 `values` from inputTbl1_n14 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n14 ) a )b PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n21 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n14 union all select * FROM ( - SELECT key, 2 `values` from inputTbl1 + SELECT key, 2 `values` from inputTbl1_n14 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n14 ) a )b POSTHOOK: type: QUERY @@ -62,7 +62,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n14 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 1 (type: int) @@ -81,9 +81,9 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n21 TableScan - alias: inputtbl1 + alias: inputtbl1_n14 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 2 (type: int) @@ -102,9 +102,9 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n21 TableScan - alias: inputtbl1 + alias: inputtbl1_n14 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 3 (type: int) @@ -123,7 
+123,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n21 Stage: Stage-6 Conditional Operator @@ -142,7 +142,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n21 Stage: Stage-2 Merge File Operator @@ -164,42 +164,42 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n21 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n14 union all select * FROM ( - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n14 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n14 ) a )b PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n14 +PREHOOK: Output: default@outputtbl1_n21 +POSTHOOK: query: insert overwrite table outputTbl1_n21 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n14 union all select * FROM ( - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n14 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n14 ) a )b POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n14 +POSTHOOK: Output: default@outputtbl1_n21 +POSTHOOK: Lineage: outputtbl1_n21.key EXPRESSION [(inputtbl1_n14)inputtbl1_n14.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n21.values EXPRESSION [] +PREHOOK: query: desc formatted outputTbl1_n21 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n21 +POSTHOOK: query: desc formatted outputTbl1_n21 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n21 # col_name data_type comment key string values bigint @@ -226,13 +226,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n21 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n21 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n21 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n21 #### A masked pattern was here #### 1 1 1 2 diff --git a/ql/src/test/results/clientpositive/union_remove_12.q.out b/ql/src/test/results/clientpositive/union_remove_12.q.out index 35bff6a64d..465150b33b 100644 --- a/ql/src/test/results/clientpositive/union_remove_12.q.out +++ 
b/ql/src/test/results/clientpositive/union_remove_12.q.out @@ -1,45 +1,45 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n21(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n21 +POSTHOOK: query: create table inputTbl1_n21(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: Output: default@inputTbl1_n21 +PREHOOK: query: create table outputTbl1_n29(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: Output: default@outputTbl1_n29 +POSTHOOK: query: create table outputTbl1_n29(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n29 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n21 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n21 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n21 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n21 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n29 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n21 union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n21 a join inputTbl1_n21 b on a.key=b.key )c PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n29 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n21 union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n21 a join inputTbl1_n21 b on a.key=b.key )c POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -58,7 +58,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n21 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 1L (type: bigint) @@ -71,7 +71,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n29 Execution mode: vectorized Stage: Stage-6 @@ -91,7 +91,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n29 Stage: Stage-2 Merge File Operator @@ -168,41 +168,41 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n29 Execution mode: vectorized Local Work: Map Reduce Local Work -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n29 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n21 union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n21 a join inputTbl1_n21 b on a.key=b.key )c PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n21 +PREHOOK: Output: default@outputtbl1_n29 +POSTHOOK: query: insert overwrite table outputTbl1_n29 SELECT * FROM ( -select key, 1 as `values` from inputTbl1 +select key, 1 as `values` from inputTbl1_n21 union all select a.key as key, cast(b.val as bigint) as `values` -FROM inputTbl1 a join inputTbl1 b on a.key=b.key +FROM inputTbl1_n21 a join inputTbl1_n21 b on a.key=b.key )c POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n21 +POSTHOOK: Output: default@outputtbl1_n29 +POSTHOOK: Lineage: outputtbl1_n29.key EXPRESSION [(inputtbl1_n21)inputtbl1_n21.FieldSchema(name:key, type:string, comment:null), (inputtbl1_n21)a.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n29.values EXPRESSION [(inputtbl1_n21)b.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: desc formatted outputTbl1_n29 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n29 +POSTHOOK: query: desc formatted outputTbl1_n29 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n29 # col_name data_type comment key string values bigint @@ -229,13 +229,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n29 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n29 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n29 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n29 #### A masked pattern was here #### 1 1 1 11 diff --git a/ql/src/test/results/clientpositive/union_remove_13.q.out b/ql/src/test/results/clientpositive/union_remove_13.q.out index cfc89f90cc..b6e2d3f31f 100644 --- a/ql/src/test/results/clientpositive/union_remove_13.q.out +++ b/ql/src/test/results/clientpositive/union_remove_13.q.out @@ -1,45 +1,45 @@ -PREHOOK: query: 
create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: query: create table inputTbl1_n2(key string, val string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@inputTbl1
-POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: Output: default@inputTbl1_n2
+POSTHOOK: query: create table inputTbl1_n2(key string, val string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
+POSTHOOK: Output: default@inputTbl1_n2
+PREHOOK: query: create table outputTbl1_n3(key string, `values` bigint) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
+PREHOOK: Output: default@outputTbl1_n3
+POSTHOOK: query: create table outputTbl1_n3(key string, `values` bigint) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
-PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+POSTHOOK: Output: default@outputTbl1_n3
+PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n2
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@inputtbl1
-POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+PREHOOK: Output: default@inputtbl1_n2
+POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n2
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@inputtbl1
+POSTHOOK: Output: default@inputtbl1_n2
 PREHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n3
 SELECT * FROM (
-select key, count(1) as `values` from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1_n2 group by key
 union all
 select a.key as key, cast(b.val as bigint) as `values`
-FROM inputTbl1 a join inputTbl1 b on a.key=b.key
+FROM inputTbl1_n2 a join inputTbl1_n2 b on a.key=b.key
 )c
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n3
 SELECT * FROM (
-select key, count(1) as `values` from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1_n2 group by key
 union all
 select a.key as key, cast(b.val as bigint) as `values`
-FROM inputTbl1 a join inputTbl1 b on a.key=b.key
+FROM inputTbl1_n2 a join inputTbl1_n2 b on a.key=b.key
 )c
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -58,7 +58,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n2
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -91,7 +91,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n3

   Stage: Stage-6
     Conditional Operator
@@ -110,7 +110,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n3

   Stage: Stage-2
     Merge File Operator
@@ -187,41 +187,41 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n3
       Execution mode: vectorized
       Local Work:
         Map Reduce Local Work

-PREHOOK: query: insert overwrite table outputTbl1
+PREHOOK: query: insert overwrite table outputTbl1_n3
 SELECT * FROM (
-select key, count(1) as `values` from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1_n2 group by key
 union all
 select a.key as key, cast(b.val as bigint) as `values`
-FROM inputTbl1 a join inputTbl1 b on a.key=b.key
+FROM inputTbl1_n2 a join inputTbl1_n2 b on a.key=b.key
 )c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1
+PREHOOK: Input: default@inputtbl1_n2
+PREHOOK: Output: default@outputtbl1_n3
+POSTHOOK: query: insert overwrite table outputTbl1_n3
 SELECT * FROM (
-select key, count(1) as `values` from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1_n2 group by key
 union all
 select a.key as key, cast(b.val as bigint) as `values`
-FROM inputTbl1 a join inputTbl1 b on a.key=b.key
+FROM inputTbl1_n2 a join inputTbl1_n2 b on a.key=b.key
 )c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: desc formatted outputTbl1
+POSTHOOK: Input: default@inputtbl1_n2
+POSTHOOK: Output: default@outputtbl1_n3
+POSTHOOK: Lineage: outputtbl1_n3.key EXPRESSION [(inputtbl1_n2)inputtbl1_n2.FieldSchema(name:key, type:string, comment:null), (inputtbl1_n2)a.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n3.values EXPRESSION [(inputtbl1_n2)inputtbl1_n2.null, (inputtbl1_n2)b.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: desc formatted outputTbl1_n3
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: desc formatted outputTbl1
+PREHOOK: Input: default@outputtbl1_n3
+POSTHOOK: query: desc formatted outputTbl1_n3
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n3
 # col_name	data_type	comment
 key	string	
 values	bigint	
@@ -248,13 +248,13 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: select * from outputTbl1
+PREHOOK: query: select * from outputTbl1_n3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n3
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1
+POSTHOOK: query: select * from outputTbl1_n3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n3
 #### A masked pattern was here ####
 1	1
 1	11
diff --git a/ql/src/test/results/clientpositive/union_remove_14.q.out b/ql/src/test/results/clientpositive/union_remove_14.q.out
index 35bff6a64d..b4b8a8efe1 100644
--- a/ql/src/test/results/clientpositive/union_remove_14.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_14.q.out
@@ -1,45 +1,45 @@
-PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: query: create table inputTbl1_n11(key string, val string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@inputTbl1
-POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: Output: default@inputTbl1_n11
+POSTHOOK: query: create table inputTbl1_n11(key string, val string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
+POSTHOOK: Output: default@inputTbl1_n11
+PREHOOK: query: create table outputTbl1_n16(key string, `values` bigint) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
+PREHOOK: Output: default@outputTbl1_n16
+POSTHOOK: query: create table outputTbl1_n16(key string, `values` bigint) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
-PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+POSTHOOK: Output: default@outputTbl1_n16
+PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n11
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@inputtbl1
-POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+PREHOOK: Output: default@inputtbl1_n11
+POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n11
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@inputtbl1
+POSTHOOK: Output: default@inputtbl1_n11
 PREHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n16
 SELECT * FROM (
-select key, 1 as `values` from inputTbl1
+select key, 1 as `values` from inputTbl1_n11
 union all
 select a.key as key, cast(b.val as bigint) as `values`
-FROM inputTbl1 a join inputTbl1 b on a.key=b.key
+FROM inputTbl1_n11 a join inputTbl1_n11 b on a.key=b.key
 )c
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n16
 SELECT * FROM (
-select key, 1 as `values` from inputTbl1
+select key, 1 as `values` from inputTbl1_n11
 union all
 select a.key as key, cast(b.val as bigint) as `values`
-FROM inputTbl1 a join inputTbl1 b on a.key=b.key
+FROM inputTbl1_n11 a join inputTbl1_n11 b on a.key=b.key
 )c
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -58,7 +58,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n11
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), 1L (type: bigint)
@@ -71,7 +71,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n16
       Execution mode: vectorized

   Stage: Stage-6
@@ -91,7 +91,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n16

   Stage: Stage-2
     Merge File Operator
@@ -168,41 +168,41 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n16
       Execution mode: vectorized
       Local Work:
         Map Reduce Local Work

-PREHOOK: query: insert overwrite table outputTbl1
+PREHOOK: query: insert overwrite table outputTbl1_n16
 SELECT * FROM (
-select key, 1 as `values` from inputTbl1
+select key, 1 as `values` from inputTbl1_n11
 union all
 select a.key as key, cast(b.val as bigint) as `values`
-FROM inputTbl1 a join inputTbl1 b on a.key=b.key
+FROM inputTbl1_n11 a join inputTbl1_n11 b on a.key=b.key
 )c
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1
+PREHOOK: Input: default@inputtbl1_n11
+PREHOOK: Output: default@outputtbl1_n16
+POSTHOOK: query: insert overwrite table outputTbl1_n16
 SELECT * FROM (
-select key, 1 as `values` from inputTbl1
+select key, 1 as `values` from inputTbl1_n11
 union all
 select a.key as key, cast(b.val as bigint) as `values`
-FROM inputTbl1 a join inputTbl1 b on a.key=b.key
+FROM inputTbl1_n11 a join inputTbl1_n11 b on a.key=b.key
 )c
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ]
-PREHOOK: query: desc formatted outputTbl1
+POSTHOOK: Input: default@inputtbl1_n11
+POSTHOOK: Output: default@outputtbl1_n16
+POSTHOOK: Lineage: outputtbl1_n16.key EXPRESSION [(inputtbl1_n11)inputtbl1_n11.FieldSchema(name:key, type:string, comment:null), (inputtbl1_n11)a.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n16.values EXPRESSION [(inputtbl1_n11)b.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: desc formatted outputTbl1_n16
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: desc formatted outputTbl1
+PREHOOK: Input: default@outputtbl1_n16
+POSTHOOK: query: desc formatted outputTbl1_n16
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n16
 # col_name	data_type	comment
 key	string	
 values	bigint	
@@ -229,13 +229,13 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: select * from outputTbl1
+PREHOOK: query: select * from outputTbl1_n16
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n16
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1
+POSTHOOK: query: select * from outputTbl1_n16
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n16
 #### A masked pattern was here ####
 1	1
 1	11
diff --git a/ql/src/test/results/clientpositive/union_remove_15.q.out b/ql/src/test/results/clientpositive/union_remove_15.q.out
index f693223805..1517534d8e 100644
--- a/ql/src/test/results/clientpositive/union_remove_15.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_15.q.out
@@ -1,43 +1,43 @@
-PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: query: create table inputTbl1_n18(key string, val string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@inputTbl1
-POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: Output: default@inputTbl1_n18
+POSTHOOK: query: create table inputTbl1_n18(key string, val string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
+POSTHOOK: Output: default@inputTbl1_n18
+PREHOOK: query: create table outputTbl1_n25(key string, `values` bigint) partitioned by (ds string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
+PREHOOK: Output: default@outputTbl1_n25
+POSTHOOK: query: create table outputTbl1_n25(key string, `values` bigint) partitioned by (ds string) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
-PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+POSTHOOK: Output: default@outputTbl1_n25
+PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n18
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@inputtbl1
-POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+PREHOOK: Output: default@inputtbl1_n18
+POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n18
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@inputtbl1
+POSTHOOK: Output: default@inputtbl1_n18
 PREHOOK: query: explain
-insert overwrite table outputTbl1 partition (ds)
+insert overwrite table outputTbl1_n25 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n18 group by key
   UNION ALL
-  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n18 group by key
 ) a
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1 partition (ds)
+insert overwrite table outputTbl1_n25 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n18 group by key
   UNION ALL
-  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n18 group by key
 ) a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -50,7 +50,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n18
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -87,7 +87,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n25

   Stage: Stage-0
     Move Operator
@@ -99,13 +99,13 @@ STAGE PLANS:
               input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.outputtbl1
+              name: default.outputtbl1_n25

   Stage: Stage-2
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n18
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -142,39 +142,39 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n25

-PREHOOK: query: insert overwrite table outputTbl1 partition (ds)
+PREHOOK: query: insert overwrite table outputTbl1_n25 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n18 group by key
   UNION ALL
-  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n18 group by key
 ) a
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1 partition (ds)
+PREHOOK: Input: default@inputtbl1_n18
+PREHOOK: Output: default@outputtbl1_n25
+POSTHOOK: query: insert overwrite table outputTbl1_n25 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n18 group by key
   UNION ALL
-  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n18 group by key
 ) a
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1@ds=1
-POSTHOOK: Output: default@outputtbl1@ds=2
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-PREHOOK: query: desc formatted outputTbl1
+POSTHOOK: Input: default@inputtbl1_n18
+POSTHOOK: Output: default@outputtbl1_n25@ds=1
+POSTHOOK: Output: default@outputtbl1_n25@ds=2
+POSTHOOK: Lineage: outputtbl1_n25 PARTITION(ds=1).key EXPRESSION [(inputtbl1_n18)inputtbl1_n18.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n25 PARTITION(ds=1).values EXPRESSION [(inputtbl1_n18)inputtbl1_n18.null, ]
+POSTHOOK: Lineage: outputtbl1_n25 PARTITION(ds=2).key EXPRESSION [(inputtbl1_n18)inputtbl1_n18.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n25 PARTITION(ds=2).values EXPRESSION [(inputtbl1_n18)inputtbl1_n18.null, ]
+PREHOOK: query: desc formatted outputTbl1_n25
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: desc formatted outputTbl1
+PREHOOK: Input: default@outputtbl1_n25
+POSTHOOK: query: desc formatted outputTbl1_n25
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n25
 # col_name	data_type	comment
 key	string	
 values	bigint	
@@ -208,38 +208,38 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: show partitions outputTbl1
+PREHOOK: query: show partitions outputTbl1_n25
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: show partitions outputTbl1
+PREHOOK: Input: default@outputtbl1_n25
+POSTHOOK: query: show partitions outputTbl1_n25
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n25
 ds=1
 ds=2
-PREHOOK: query: select * from outputTbl1 where ds = '1'
+PREHOOK: query: select * from outputTbl1_n25 where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=1
+PREHOOK: Input: default@outputtbl1_n25
+PREHOOK: Input: default@outputtbl1_n25@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '1'
+POSTHOOK: query: select * from outputTbl1_n25 where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=1
+POSTHOOK: Input: default@outputtbl1_n25
+POSTHOOK: Input: default@outputtbl1_n25@ds=1
 #### A masked pattern was here ####
 1	1	1
 2	1	1
 3	1	1
 7	1	1
 8	2	1
-PREHOOK: query: select * from outputTbl1 where ds = '2'
+PREHOOK: query: select * from outputTbl1_n25 where ds = '2'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=2
+PREHOOK: Input: default@outputtbl1_n25
+PREHOOK: Input: default@outputtbl1_n25@ds=2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '2'
+POSTHOOK: query: select * from outputTbl1_n25 where ds = '2'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=2
+POSTHOOK: Input: default@outputtbl1_n25
+POSTHOOK: Input: default@outputtbl1_n25@ds=2
 #### A masked pattern was here ####
 1	1	2
 2	1	2
diff --git a/ql/src/test/results/clientpositive/union_remove_16.q.out b/ql/src/test/results/clientpositive/union_remove_16.q.out
index f3f26f17aa..0d25a7859a 100644
--- a/ql/src/test/results/clientpositive/union_remove_16.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_16.q.out
@@ -1,43 +1,43 @@
-PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: query: create table inputTbl1_n23(key string, val string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@inputTbl1
-POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: Output: default@inputTbl1_n23
+POSTHOOK: query: create table inputTbl1_n23(key string, val string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
+POSTHOOK: Output: default@inputTbl1_n23
+PREHOOK: query: create table outputTbl1_n32(key string, `values` bigint) partitioned by (ds string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
+PREHOOK: Output: default@outputTbl1_n32
+POSTHOOK: query: create table outputTbl1_n32(key string, `values` bigint) partitioned by (ds string) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
-PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+POSTHOOK: Output: default@outputTbl1_n32
+PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n23
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@inputtbl1
-POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+PREHOOK: Output: default@inputtbl1_n23
+POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n23
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@inputtbl1
+POSTHOOK: Output: default@inputtbl1_n23
 PREHOOK: query: explain
-insert overwrite table outputTbl1 partition (ds)
+insert overwrite table outputTbl1_n32 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n23 group by key
   UNION ALL
-  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n23 group by key
 ) a
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1 partition (ds)
+insert overwrite table outputTbl1_n32 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n23 group by key
   UNION ALL
-  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n23 group by key
 ) a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -55,7 +55,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n23
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -92,7 +92,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n32

   Stage: Stage-6
     Conditional Operator
@@ -113,7 +113,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n32

   Stage: Stage-2
     Merge File Operator
@@ -139,7 +139,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n23
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -176,39 +176,39 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n32

-PREHOOK: query: insert overwrite table outputTbl1 partition (ds)
+PREHOOK: query: insert overwrite table outputTbl1_n32 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n23 group by key
   UNION ALL
-  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n23 group by key
 ) a
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1 partition (ds)
+PREHOOK: Input: default@inputtbl1_n23
+PREHOOK: Output: default@outputtbl1_n32
+POSTHOOK: query: insert overwrite table outputTbl1_n32 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '1' as ds from inputTbl1_n23 group by key
   UNION ALL
-  SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
+  SELECT key, count(1) as `values`, '2' as ds from inputTbl1_n23 group by key
 ) a
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1@ds=1
-POSTHOOK: Output: default@outputtbl1@ds=2
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-PREHOOK: query: desc formatted outputTbl1
+POSTHOOK: Input: default@inputtbl1_n23
+POSTHOOK: Output: default@outputtbl1_n32@ds=1
+POSTHOOK: Output: default@outputtbl1_n32@ds=2
+POSTHOOK: Lineage: outputtbl1_n32 PARTITION(ds=1).key EXPRESSION [(inputtbl1_n23)inputtbl1_n23.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n32 PARTITION(ds=1).values EXPRESSION [(inputtbl1_n23)inputtbl1_n23.null, ]
+POSTHOOK: Lineage: outputtbl1_n32 PARTITION(ds=2).key EXPRESSION [(inputtbl1_n23)inputtbl1_n23.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n32 PARTITION(ds=2).values EXPRESSION [(inputtbl1_n23)inputtbl1_n23.null, ]
+PREHOOK: query: desc formatted outputTbl1_n32
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: desc formatted outputTbl1
+PREHOOK: Input: default@outputtbl1_n32
+POSTHOOK: query: desc formatted outputTbl1_n32
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n32
 # col_name	data_type	comment
 key	string	
 values	bigint	
@@ -242,38 +242,38 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: show partitions outputTbl1
+PREHOOK: query: show partitions outputTbl1_n32
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: show partitions outputTbl1
+PREHOOK: Input: default@outputtbl1_n32
+POSTHOOK: query: show partitions outputTbl1_n32
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n32
 ds=1
 ds=2
-PREHOOK: query: select * from outputTbl1 where ds = '1'
+PREHOOK: query: select * from outputTbl1_n32 where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=1
+PREHOOK: Input: default@outputtbl1_n32
+PREHOOK: Input: default@outputtbl1_n32@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '1'
+POSTHOOK: query: select * from outputTbl1_n32 where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=1
+POSTHOOK: Input: default@outputtbl1_n32
+POSTHOOK: Input: default@outputtbl1_n32@ds=1
 #### A masked pattern was here ####
 1	1	1
 2	1	1
 3	1	1
 7	1	1
 8	2	1
-PREHOOK: query: select * from outputTbl1 where ds = '2'
+PREHOOK: query: select * from outputTbl1_n32 where ds = '2'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=2
+PREHOOK: Input: default@outputtbl1_n32
+PREHOOK: Input: default@outputtbl1_n32@ds=2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '2'
+POSTHOOK: query: select * from outputTbl1_n32 where ds = '2'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=2
+POSTHOOK: Input: default@outputtbl1_n32
+POSTHOOK: Input: default@outputtbl1_n32@ds=2
 #### A masked pattern was here ####
 1	1	2
 2	1	2
diff --git a/ql/src/test/results/clientpositive/union_remove_17.q.out b/ql/src/test/results/clientpositive/union_remove_17.q.out
index 460d085f15..38dfd74d64 100644
--- a/ql/src/test/results/clientpositive/union_remove_17.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_17.q.out
@@ -1,43 +1,43 @@
-PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: query: create table inputTbl1_n3(key string, val string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@inputTbl1
-POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: Output: default@inputTbl1_n3
+POSTHOOK: query: create table inputTbl1_n3(key string, val string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
+POSTHOOK: Output: default@inputTbl1_n3
+PREHOOK: query: create table outputTbl1_n4(key string, `values` bigint) partitioned by (ds string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
+PREHOOK: Output: default@outputTbl1_n4
+POSTHOOK: query: create table outputTbl1_n4(key string, `values` bigint) partitioned by (ds string) stored as rcfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
-PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+POSTHOOK: Output: default@outputTbl1_n4
+PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n3
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@inputtbl1
-POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+PREHOOK: Output: default@inputtbl1_n3
+POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n3
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@inputtbl1
+POSTHOOK: Output: default@inputtbl1_n3
 PREHOOK: query: explain
-insert overwrite table outputTbl1 partition (ds)
+insert overwrite table outputTbl1_n4 partition (ds)
 SELECT * FROM (
-  SELECT key, 1 as `values`, '1' as ds from inputTbl1
+  SELECT key, 1 as `values`, '1' as ds from inputTbl1_n3
   UNION ALL
-  SELECT key, 2 as `values`, '2' as ds from inputTbl1
+  SELECT key, 2 as `values`, '2' as ds from inputTbl1_n3
 ) a
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1 partition (ds)
+insert overwrite table outputTbl1_n4 partition (ds)
 SELECT * FROM (
-  SELECT key, 1 as `values`, '1' as ds from inputTbl1
+  SELECT key, 1 as `values`, '1' as ds from inputTbl1_n3
   UNION ALL
-  SELECT key, 2 as `values`, '2' as ds from inputTbl1
+  SELECT key, 2 as `values`, '2' as ds from inputTbl1_n3
 ) a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -49,7 +49,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n3
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), 1 (type: int), '1' (type: string)
@@ -68,9 +68,9 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n4
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n3
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), 2 (type: int), '2' (type: string)
@@ -89,7 +89,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n4

   Stage: Stage-0
     Move Operator
@@ -101,39 +101,39 @@ STAGE PLANS:
               input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.outputtbl1
+              name: default.outputtbl1_n4

-PREHOOK: query: insert overwrite table outputTbl1 partition (ds)
+PREHOOK: query: insert overwrite table outputTbl1_n4 partition (ds)
 SELECT * FROM (
-  SELECT key, 1 as `values`, '1' as ds from inputTbl1
+  SELECT key, 1 as `values`, '1' as ds from inputTbl1_n3
   UNION ALL
-  SELECT key, 2 as `values`, '2' as ds from inputTbl1
+  SELECT key, 2 as `values`, '2' as ds from inputTbl1_n3
 ) a
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1 partition (ds)
+PREHOOK: Input: default@inputtbl1_n3
+PREHOOK: Output: default@outputtbl1_n4
+POSTHOOK: query: insert overwrite table outputTbl1_n4 partition (ds)
 SELECT * FROM (
-  SELECT key, 1 as `values`, '1' as ds from inputTbl1
+  SELECT key, 1 as `values`, '1' as ds from inputTbl1_n3
   UNION ALL
-  SELECT key, 2 as `values`, '2' as ds from inputTbl1
+  SELECT key, 2 as `values`, '2' as ds from inputTbl1_n3
 ) a
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1@ds=1
-POSTHOOK: Output: default@outputtbl1@ds=2
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=1).values EXPRESSION []
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2).values EXPRESSION []
-PREHOOK: query: desc formatted outputTbl1
+POSTHOOK: Input: default@inputtbl1_n3
+POSTHOOK: Output: default@outputtbl1_n4@ds=1
+POSTHOOK: Output: default@outputtbl1_n4@ds=2
+POSTHOOK: Lineage: outputtbl1_n4 PARTITION(ds=1).key EXPRESSION [(inputtbl1_n3)inputtbl1_n3.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n4 PARTITION(ds=1).values EXPRESSION []
+POSTHOOK: Lineage: outputtbl1_n4 PARTITION(ds=2).key EXPRESSION [(inputtbl1_n3)inputtbl1_n3.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n4 PARTITION(ds=2).values EXPRESSION []
+PREHOOK: query: desc formatted outputTbl1_n4
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: desc formatted outputTbl1
+PREHOOK: Input: default@outputtbl1_n4
+POSTHOOK: query: desc formatted outputTbl1_n4
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n4
 # col_name	data_type	comment
 key	string	
 values	bigint	
@@ -167,23 +167,23 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: show partitions outputTbl1
+PREHOOK: query: show partitions outputTbl1_n4
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: show partitions outputTbl1
+PREHOOK: Input: default@outputtbl1_n4
+POSTHOOK: query: show partitions outputTbl1_n4
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n4
 ds=1
 ds=2
-PREHOOK: query: select * from outputTbl1 where ds = '1'
+PREHOOK: query: select * from outputTbl1_n4 where ds = '1'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=1
+PREHOOK: Input: default@outputtbl1_n4
+PREHOOK: Input: default@outputtbl1_n4@ds=1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '1'
+POSTHOOK: query: select * from outputTbl1_n4 where ds = '1'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=1
+POSTHOOK: Input: default@outputtbl1_n4
+POSTHOOK: Input: default@outputtbl1_n4@ds=1
 #### A masked pattern was here ####
 1	1	1
 2	1	1
@@ -191,15 +191,15 @@ POSTHOOK: Input: default@outputtbl1@ds=1
 7	1	1
 8	1	1
 8	1	1
-PREHOOK: query: select * from outputTbl1 where ds = '2'
+PREHOOK: query: select * from outputTbl1_n4 where ds = '2'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=2
+PREHOOK: Input: default@outputtbl1_n4
+PREHOOK: Input: default@outputtbl1_n4@ds=2
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '2'
+POSTHOOK: query: select * from outputTbl1_n4 where ds = '2'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=2
+POSTHOOK: Input: default@outputtbl1_n4
+POSTHOOK: Input: default@outputtbl1_n4@ds=2
 #### A masked pattern was here ####
 1	2	2
 2	2	2
diff --git a/ql/src/test/results/clientpositive/union_remove_18.q.out b/ql/src/test/results/clientpositive/union_remove_18.q.out
index 1149d4c143..62d6787b6b 100644
--- a/ql/src/test/results/clientpositive/union_remove_18.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_18.q.out
@@ -1,43 +1,43 @@
-PREHOOK: query: create table inputTbl1(key string, ds string) stored as textfile
+PREHOOK: query: create table inputTbl1_n22(key string, ds string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@inputTbl1
-POSTHOOK: query: create table inputTbl1(key string, ds string) stored as textfile
+PREHOOK: Output: default@inputTbl1_n22
+POSTHOOK: query: create table inputTbl1_n22(key string, ds string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile
+POSTHOOK: Output: default@inputTbl1_n22
+PREHOOK: query: create table outputTbl1_n30(key string, `values` bigint) partitioned by (ds string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile
+PREHOOK: Output: default@outputTbl1_n30
+POSTHOOK: query: create table outputTbl1_n30(key string, `values` bigint) partitioned by (ds string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
-PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+POSTHOOK: Output: default@outputTbl1_n30
+PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n22
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@inputtbl1
-POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+PREHOOK: Output: default@inputtbl1_n22
+POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n22
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@inputtbl1
+POSTHOOK: Output: default@inputtbl1_n22
 PREHOOK: query: explain
-insert overwrite table outputTbl1 partition (ds)
+insert overwrite table outputTbl1_n30 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds
   UNION ALL
-  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds
 ) a
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1 partition (ds)
+insert overwrite table outputTbl1_n30 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds
   UNION ALL
-  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds
 ) a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -50,7 +50,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n22
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), ds (type: string)
@@ -87,7 +87,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n30

   Stage: Stage-0
     Move Operator
@@ -99,13 +99,13 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.outputtbl1
+              name: default.outputtbl1_n30

   Stage: Stage-2
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n22
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), ds (type: string)
@@ -142,51 +142,51 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n30

-PREHOOK: query: insert overwrite table outputTbl1 partition (ds)

+PREHOOK: query: insert overwrite table outputTbl1_n30 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds
   UNION ALL
-  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds
 ) a
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1 partition (ds)
+PREHOOK: Input: default@inputtbl1_n22
+PREHOOK: Output: default@outputtbl1_n30
+POSTHOOK: query: insert overwrite table outputTbl1_n30 partition (ds)
 SELECT * FROM (
-  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds
   UNION ALL
-  SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
+  SELECT key, count(1) as `values`, ds from inputTbl1_n22 group by key, ds
 ) a
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1@ds=11
-POSTHOOK: Output: default@outputtbl1@ds=12
-POSTHOOK: Output: default@outputtbl1@ds=13
-POSTHOOK: Output: default@outputtbl1@ds=17
-POSTHOOK: Output: default@outputtbl1@ds=18
-POSTHOOK: Output: default@outputtbl1@ds=28
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=11).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=11).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=12).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=12).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=13).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=13).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=17).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=17).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=18).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=18).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=28).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1 PARTITION(ds=28).values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-PREHOOK: query: desc formatted outputTbl1
+POSTHOOK: Input: default@inputtbl1_n22
+POSTHOOK: Output: default@outputtbl1_n30@ds=11
+POSTHOOK: Output: default@outputtbl1_n30@ds=12
+POSTHOOK: Output: default@outputtbl1_n30@ds=13
+POSTHOOK: Output: default@outputtbl1_n30@ds=17
+POSTHOOK: Output: default@outputtbl1_n30@ds=18
+POSTHOOK: Output: default@outputtbl1_n30@ds=28
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=11).key EXPRESSION [(inputtbl1_n22)inputtbl1_n22.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=11).values EXPRESSION [(inputtbl1_n22)inputtbl1_n22.null, ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=12).key EXPRESSION [(inputtbl1_n22)inputtbl1_n22.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=12).values EXPRESSION [(inputtbl1_n22)inputtbl1_n22.null, ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=13).key EXPRESSION [(inputtbl1_n22)inputtbl1_n22.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=13).values EXPRESSION [(inputtbl1_n22)inputtbl1_n22.null, ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=17).key EXPRESSION [(inputtbl1_n22)inputtbl1_n22.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=17).values EXPRESSION [(inputtbl1_n22)inputtbl1_n22.null, ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=18).key EXPRESSION [(inputtbl1_n22)inputtbl1_n22.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=18).values EXPRESSION [(inputtbl1_n22)inputtbl1_n22.null, ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=28).key EXPRESSION [(inputtbl1_n22)inputtbl1_n22.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n30 PARTITION(ds=28).values EXPRESSION [(inputtbl1_n22)inputtbl1_n22.null, ]
+PREHOOK: query: desc formatted outputTbl1_n30
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: desc formatted outputTbl1
+PREHOOK: Input: default@outputtbl1_n30
+POSTHOOK: query: desc formatted outputTbl1_n30
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n30
 # col_name	data_type	comment
 key	string	
 values	bigint	
@@ -220,61 +220,61 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: show partitions outputTbl1
+PREHOOK: query: show partitions outputTbl1_n30
 PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: show partitions outputTbl1
+PREHOOK: Input: default@outputtbl1_n30
+POSTHOOK: query: show partitions outputTbl1_n30
 POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n30
 ds=11
 ds=12
 ds=13
 ds=17
 ds=18
 ds=28
-PREHOOK: query: select * from outputTbl1 where ds = '11'
+PREHOOK: query: select * from outputTbl1_n30 where ds = '11'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=11
+PREHOOK: Input: default@outputtbl1_n30
+PREHOOK: Input: default@outputtbl1_n30@ds=11
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '11'
+POSTHOOK: query: select * from outputTbl1_n30 where ds = '11'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=11
+POSTHOOK: Input: default@outputtbl1_n30
+POSTHOOK: Input: default@outputtbl1_n30@ds=11
 #### A masked pattern was here ####
 1	1	11
 1	1	11
-PREHOOK: query: select * from outputTbl1 where ds = '18'
+PREHOOK: query: select * from outputTbl1_n30 where ds = '18'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=18
+PREHOOK: Input: default@outputtbl1_n30
+PREHOOK: Input: default@outputtbl1_n30@ds=18
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '18'
+POSTHOOK: query: select * from outputTbl1_n30 where ds = '18'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=18
+POSTHOOK: Input: default@outputtbl1_n30
+POSTHOOK: Input: default@outputtbl1_n30@ds=18
 #### A masked pattern was here ####
 8	1	18
 8	1	18
-PREHOOK: query: select * from outputTbl1 where ds is not null
+PREHOOK: query: select * from outputTbl1_n30 where ds is not null
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
-PREHOOK: Input: default@outputtbl1@ds=11
-PREHOOK: Input: default@outputtbl1@ds=12
-PREHOOK: Input: default@outputtbl1@ds=13
-PREHOOK: Input: default@outputtbl1@ds=17
-PREHOOK: Input: default@outputtbl1@ds=18
-PREHOOK: Input: default@outputtbl1@ds=28
+PREHOOK: Input: default@outputtbl1_n30
+PREHOOK: Input: default@outputtbl1_n30@ds=11
+PREHOOK: Input: default@outputtbl1_n30@ds=12
+PREHOOK: Input: default@outputtbl1_n30@ds=13
+PREHOOK: Input: default@outputtbl1_n30@ds=17
+PREHOOK: Input: default@outputtbl1_n30@ds=18
+PREHOOK: Input: default@outputtbl1_n30@ds=28
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds is not null
+POSTHOOK: query: select * from outputTbl1_n30 where ds is not null
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
-POSTHOOK: Input: default@outputtbl1@ds=11
-POSTHOOK: Input: default@outputtbl1@ds=12
-POSTHOOK: Input: default@outputtbl1@ds=13
-POSTHOOK: Input: default@outputtbl1@ds=17
-POSTHOOK: Input: default@outputtbl1@ds=18
-POSTHOOK: Input: default@outputtbl1@ds=28
+POSTHOOK: Input: default@outputtbl1_n30
+POSTHOOK: Input: default@outputtbl1_n30@ds=11
+POSTHOOK: Input: default@outputtbl1_n30@ds=12
+POSTHOOK: Input: default@outputtbl1_n30@ds=13
+POSTHOOK: Input: default@outputtbl1_n30@ds=17
+POSTHOOK: Input: default@outputtbl1_n30@ds=18
+POSTHOOK: Input: default@outputtbl1_n30@ds=28
 #### A masked pattern was here ####
 1	1	11
 1	1	11
diff --git a/ql/src/test/results/clientpositive/union_remove_19.q.out b/ql/src/test/results/clientpositive/union_remove_19.q.out
index 1d26782933..a80b6d2b4b 100644
--- a/ql/src/test/results/clientpositive/union_remove_19.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_19.q.out
@@ -1,43 +1,43 @@
-PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: query: create table inputTbl1_n1(key string, val string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@inputTbl1
-POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile
+PREHOOK: Output: default@inputTbl1_n1
+POSTHOOK: query: create table inputTbl1_n1(key string, val string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile
+POSTHOOK: Output: default@inputTbl1_n1
+PREHOOK: query: create table outputTbl1_n1(key string, `values` bigint) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile
+PREHOOK: Output: default@outputTbl1_n1
+POSTHOOK: query: create table outputTbl1_n1(key string, `values` bigint) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@outputTbl1
-PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+POSTHOOK: Output: default@outputTbl1_n1
+PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n1
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@inputtbl1
-POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1
+PREHOOK: Output: default@inputtbl1_n1
+POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n1
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@inputtbl1
+POSTHOOK: Output: default@inputtbl1_n1
 PREHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n1
 SELECT a.key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n1
 SELECT a.key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -50,7 +50,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n1
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -83,7 +83,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n1

   Stage: Stage-0
     Move Operator
@@ -93,13 +93,13 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.outputtbl1
+              name: default.outputtbl1_n1

   Stage: Stage-2
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n1
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
@@ -132,36 +132,36 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n1

-PREHOOK: query: insert overwrite table outputTbl1
+PREHOOK: query: insert overwrite table outputTbl1_n1
 SELECT a.key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1
+PREHOOK: Input: default@inputtbl1_n1
+PREHOOK: Output: default@outputtbl1_n1
+POSTHOOK: query: insert overwrite table outputTbl1_n1
 SELECT a.key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-PREHOOK: query: desc formatted outputTbl1
+POSTHOOK: Input: default@inputtbl1_n1
+POSTHOOK: Output: default@outputtbl1_n1
+POSTHOOK: Lineage: outputtbl1_n1.key EXPRESSION [(inputtbl1_n1)inputtbl1_n1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n1.values EXPRESSION [(inputtbl1_n1)inputtbl1_n1.null, ]
+PREHOOK: query: desc formatted outputTbl1_n1
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@outputtbl1
-POSTHOOK: query: desc formatted outputTbl1
+PREHOOK: Input: default@outputtbl1_n1
+POSTHOOK: query: desc formatted outputTbl1_n1
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n1
 # col_name	data_type	comment
 key	string	
 values	bigint	
@@ -188,13 +188,13 @@ Bucket Columns: []
 Sort Columns: []
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: select * from outputTbl1
+PREHOOK: query: select * from outputTbl1_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1
+POSTHOOK: query: select * from outputTbl1_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n1
 #### A masked pattern was here ####
 1	1
 1	1
@@ -207,21 +207,21 @@ POSTHOOK: Input: default@outputtbl1
 8	2
 8	2
 PREHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n1
 SELECT a.key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a where a.key = 7
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n1
 SELECT a.key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a where a.key = 7
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -234,7 +234,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n1
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (UDFToDouble(key) = 7.0D) (type: boolean)
@@ -266,7 +266,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n1

   Stage: Stage-0
     Move Operator
@@ -276,13 +276,13 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.outputtbl1
+              name: default.outputtbl1_n1

   Stage: Stage-2
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n1
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (UDFToDouble(key) = 7.0D) (type: boolean)
@@ -314,61 +314,61 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n1

-PREHOOK: query: insert overwrite table outputTbl1
+PREHOOK: query: insert overwrite table outputTbl1_n1
 SELECT a.key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a where a.key = 7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1
+PREHOOK: Input: default@inputtbl1_n1
+PREHOOK: Output: default@outputtbl1_n1
+POSTHOOK: query: insert overwrite table outputTbl1_n1
 SELECT a.key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a where a.key = 7
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-PREHOOK: query: select * from outputTbl1
+POSTHOOK: Input: default@inputtbl1_n1
+POSTHOOK: Output: default@outputtbl1_n1
+POSTHOOK: Lineage: outputtbl1_n1.key EXPRESSION [(inputtbl1_n1)inputtbl1_n1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n1.values EXPRESSION [(inputtbl1_n1)inputtbl1_n1.null, ]
+PREHOOK: query: select * from outputTbl1_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1
+POSTHOOK: query: select * from outputTbl1_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n1
 #### A masked pattern was here ####
 7	1
 7	1
 PREHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n1
 select key, `values` from (
 SELECT a.key + a.key as key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a
 ) b where b.key >= 7
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
-insert overwrite table outputTbl1
+insert overwrite table outputTbl1_n1
 select key, `values` from (
 SELECT a.key + a.key as key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a
 ) b where b.key >= 7
 POSTHOOK: type: QUERY
@@ -382,7 +382,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n1
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0D) (type: boolean)
@@ -418,7 +418,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n1

   Stage: Stage-0
     Move Operator
@@ -428,13 +428,13 @@ STAGE PLANS:
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.outputtbl1
+              name: default.outputtbl1_n1

   Stage: Stage-2
     Map Reduce
      Map Operator Tree:
          TableScan
-            alias: inputtbl1
+            alias: inputtbl1_n1
             Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0D) (type: boolean)
@@ -470,43 +470,43 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.outputtbl1
+                  name: default.outputtbl1_n1

-PREHOOK: query: insert overwrite table outputTbl1
+PREHOOK: query: insert overwrite table outputTbl1_n1
 select key, `values` from (
 SELECT a.key + a.key as key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a
 ) b where b.key >= 7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@inputtbl1
-PREHOOK: Output: default@outputtbl1
-POSTHOOK: query: insert overwrite table outputTbl1
+PREHOOK: Input: default@inputtbl1_n1
+PREHOOK: Output: default@outputtbl1_n1
+POSTHOOK: query: insert overwrite table outputTbl1_n1
 select key, `values` from (
 SELECT a.key + a.key as key, a.`values` FROM (
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
   UNION ALL
-  SELECT key, count(1) as `values` from inputTbl1 group by key
+  SELECT key, count(1) as `values` from inputTbl1_n1 group by key
 ) a
 ) b where b.key >= 7
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@inputtbl1
-POSTHOOK: Output: default@outputtbl1
-POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ]
-PREHOOK: query: select * from outputTbl1
+POSTHOOK: Input: default@inputtbl1_n1
+POSTHOOK: Output: default@outputtbl1_n1
+POSTHOOK: Lineage: outputtbl1_n1.key EXPRESSION [(inputtbl1_n1)inputtbl1_n1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: outputtbl1_n1.values EXPRESSION [(inputtbl1_n1)inputtbl1_n1.null, ]
+PREHOOK: query: select * from outputTbl1_n1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@outputtbl1
+PREHOOK: Input: default@outputtbl1_n1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1
+POSTHOOK: query: select * from outputTbl1_n1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@outputtbl1
+POSTHOOK: Input: default@outputtbl1_n1
 #### A masked pattern was here ####
 14.0	1
 14.0	1
diff --git a/ql/src/test/results/clientpositive/union_remove_2.q.out b/ql/src/test/results/clientpositive/union_remove_2.q.out
index 66d8fe68de..761005963a 100644
--- a/ql/src/test/results/clientpositive/union_remove_2.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_2.q.out
@@ -1,47 +1,47
@@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n8(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n8 +POSTHOOK: query: create table inputTbl1_n8(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n8 +PREHOOK: query: create table outputTbl1_n11(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n11 +POSTHOOK: query: create table outputTbl1_n11(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n11 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n8 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n8 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n8 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n8 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n11 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n8 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n8 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n8 ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n11 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n8 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n8 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n8 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -55,7 +55,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n8 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -88,7 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n11 Stage: Stage-0 Move Operator @@ -98,13 +98,13 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat 
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n11 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n8 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 1L (type: bigint) @@ -117,14 +117,14 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n11 Execution mode: vectorized Stage: Stage-3 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n8 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 2L (type: bigint) @@ -137,41 +137,41 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n11 Execution mode: vectorized -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n11 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n8 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n8 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n8 ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n8 +PREHOOK: Output: default@outputtbl1_n11 +POSTHOOK: query: insert overwrite table outputTbl1_n11 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n8 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n8 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n8 ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n8 +POSTHOOK: Output: default@outputtbl1_n11 +POSTHOOK: Lineage: outputtbl1_n11.key EXPRESSION [(inputtbl1_n8)inputtbl1_n8.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n11.values EXPRESSION [(inputtbl1_n8)inputtbl1_n8.null, ] +PREHOOK: query: desc formatted outputTbl1_n11 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n11 +POSTHOOK: query: desc formatted outputTbl1_n11 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n11 # col_name data_type comment key string values bigint @@ -198,13 +198,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n11 PREHOOK: type: QUERY -PREHOOK: 
Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n11 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n11 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n11 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_20.q.out b/ql/src/test/results/clientpositive/union_remove_20.q.out index 8b89407d1d..e4b7729da0 100644 --- a/ql/src/test/results/clientpositive/union_remove_20.q.out +++ b/ql/src/test/results/clientpositive/union_remove_20.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n19(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n19 +POSTHOOK: query: create table inputTbl1_n19(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(`values` bigint, key string) stored as textfile +POSTHOOK: Output: default@inputTbl1_n19 +PREHOOK: query: create table outputTbl1_n27(`values` bigint, key string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(`values` bigint, key string) stored as textfile +PREHOOK: Output: default@outputTbl1_n27 +POSTHOOK: query: create table outputTbl1_n27(`values` bigint, key string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n27 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n19 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n19 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n19 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n19 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n27 SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n27 SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -50,7 +50,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n19 Statistics: Num rows: 1 Data 
size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -87,7 +87,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n27 Stage: Stage-0 Move Operator @@ -97,13 +97,13 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n27 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n19 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -140,36 +140,36 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n27 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n27 SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n19 +PREHOOK: Output: default@outputtbl1_n27 +POSTHOOK: query: insert overwrite table outputTbl1_n27 SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n19 group by key ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n19 +POSTHOOK: Output: default@outputtbl1_n27 +POSTHOOK: Lineage: outputtbl1_n27.key EXPRESSION [(inputtbl1_n19)inputtbl1_n19.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n27.values EXPRESSION [(inputtbl1_n19)inputtbl1_n19.null, ] +PREHOOK: query: desc formatted outputTbl1_n27 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n27 +POSTHOOK: query: desc formatted outputTbl1_n27 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n27 # col_name data_type comment values bigint key string @@ -196,13 +196,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n27 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n27 #### A masked pattern was here #### 
-POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n27 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n27 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_21.q.out b/ql/src/test/results/clientpositive/union_remove_21.q.out index 5f36d8f0f1..4bf7c6a9d7 100644 --- a/ql/src/test/results/clientpositive/union_remove_21.q.out +++ b/ql/src/test/results/clientpositive/union_remove_21.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n12(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n12 +POSTHOOK: query: create table inputTbl1_n12(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string) stored as textfile +POSTHOOK: Output: default@inputTbl1_n12 +PREHOOK: query: create table outputTbl1_n17(key string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string) stored as textfile +PREHOOK: Output: default@outputTbl1_n17 +POSTHOOK: query: create table outputTbl1_n17(key string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n17 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n12 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n12 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n12 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n12 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n17 SELECT a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n17 SELECT a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -50,7 +50,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n12 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -80,7 +80,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output 
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n17 Stage: Stage-0 Move Operator @@ -90,13 +90,13 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n17 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n12 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -126,35 +126,35 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n17 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n17 SELECT a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n12 +PREHOOK: Output: default@outputtbl1_n17 +POSTHOOK: query: insert overwrite table outputTbl1_n17 SELECT a.key FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n12 group by key ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n12 +POSTHOOK: Output: default@outputtbl1_n17 +POSTHOOK: Lineage: outputtbl1_n17.key EXPRESSION [(inputtbl1_n12)inputtbl1_n12.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: desc formatted outputTbl1_n17 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n17 +POSTHOOK: query: desc formatted outputTbl1_n17 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n17 # col_name data_type comment key string @@ -180,13 +180,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n17 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n17 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n17 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n17 #### A masked pattern was here #### 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_22.q.out b/ql/src/test/results/clientpositive/union_remove_22.q.out index e9ed8e0a87..247db091ee 100644 --- 
a/ql/src/test/results/clientpositive/union_remove_22.q.out +++ b/ql/src/test/results/clientpositive/union_remove_22.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n5(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n5 +POSTHOOK: query: create table inputTbl1_n5(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint, values2 bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n5 +PREHOOK: query: create table outputTbl1_n7(key string, `values` bigint, values2 bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint, values2 bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n7 +POSTHOOK: query: create table outputTbl1_n7(key string, `values` bigint, values2 bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n7 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n5 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n5 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n5 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n5 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n7 SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n7 SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -50,7 +50,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n5 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -87,7 +87,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n7 Stage: Stage-0 Move Operator @@ -97,13 +97,13 @@ STAGE PLANS: 
input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n7 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n5 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -140,37 +140,37 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n7 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n7 SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n5 +PREHOOK: Output: default@outputtbl1_n7 +POSTHOOK: query: insert overwrite table outputTbl1_n7 SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -POSTHOOK: Lineage: outputtbl1.values2 EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n5 +POSTHOOK: Output: default@outputtbl1_n7 +POSTHOOK: Lineage: outputtbl1_n7.key EXPRESSION [(inputtbl1_n5)inputtbl1_n5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n7.values EXPRESSION [(inputtbl1_n5)inputtbl1_n5.null, ] +POSTHOOK: Lineage: outputtbl1_n7.values2 EXPRESSION [(inputtbl1_n5)inputtbl1_n5.null, ] +PREHOOK: query: desc formatted outputTbl1_n7 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n7 +POSTHOOK: query: desc formatted outputTbl1_n7 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n7 # col_name data_type comment key string values bigint @@ -198,13 +198,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n7 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n7 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n7 #### A masked pattern was here #### 1 1 1 1 1 1 @@ -217,21 +217,21 @@ POSTHOOK: 
Input: default@outputtbl1 8 2 2 8 2 2 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n7 SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n7 SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -244,7 +244,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n5 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -281,7 +281,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n7 Stage: Stage-0 Move Operator @@ -291,13 +291,13 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n7 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n5 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -334,38 +334,38 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n7 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n7 SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n5 +PREHOOK: Output: default@outputtbl1_n7 +POSTHOOK: query: insert overwrite table outputTbl1_n7 SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n5 group by key ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: 
outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -POSTHOOK: Lineage: outputtbl1.values2 EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 +POSTHOOK: Input: default@inputtbl1_n5 +POSTHOOK: Output: default@outputtbl1_n7 +POSTHOOK: Lineage: outputtbl1_n7.key EXPRESSION [(inputtbl1_n5)inputtbl1_n5.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n7.values EXPRESSION [(inputtbl1_n5)inputtbl1_n5.null, ] +POSTHOOK: Lineage: outputtbl1_n7.values2 EXPRESSION [(inputtbl1_n5)inputtbl1_n5.null, ] +PREHOOK: query: select * from outputTbl1_n7 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n7 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n7 #### A masked pattern was here #### 1 11 11 1 11 11 diff --git a/ql/src/test/results/clientpositive/union_remove_23.q.out b/ql/src/test/results/clientpositive/union_remove_23.q.out index 29e7969b78..158303468b 100644 --- a/ql/src/test/results/clientpositive/union_remove_23.q.out +++ b/ql/src/test/results/clientpositive/union_remove_23.q.out @@ -1,45 +1,45 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n25(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n25 +POSTHOOK: query: create table inputTbl1_n25(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n25 +PREHOOK: query: create table outputTbl1_n34(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n34 +POSTHOOK: query: create table outputTbl1_n34(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n34 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n25 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n25 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n25 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n25 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n34 SELECT * FROM ( SELECT key, count(1) as `values` from - (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + 
(SELECT a.key, b.val from inputTbl1_n25 a join inputTbl1_n25 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n25 group by key ) subq2 PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n34 SELECT * FROM ( SELECT key, count(1) as `values` from - (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + (SELECT a.key, b.val from inputTbl1_n25 a join inputTbl1_n25 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n25 group by key ) subq2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -129,7 +129,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n34 Stage: Stage-0 Move Operator @@ -139,13 +139,13 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n34 Stage: Stage-4 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n25 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -178,38 +178,38 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n34 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n34 SELECT * FROM ( SELECT key, count(1) as `values` from - (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + (SELECT a.key, b.val from inputTbl1_n25 a join inputTbl1_n25 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n25 group by key ) subq2 PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n25 +PREHOOK: Output: default@outputtbl1_n34 +POSTHOOK: query: insert overwrite table outputTbl1_n34 SELECT * FROM ( SELECT key, count(1) as `values` from - (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + (SELECT a.key, b.val from inputTbl1_n25 a join inputTbl1_n25 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n25 group by key ) subq2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)a.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)a.null, (inputtbl1)b.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n25 +POSTHOOK: 
Output: default@outputtbl1_n34 +POSTHOOK: Lineage: outputtbl1_n34.key EXPRESSION [(inputtbl1_n25)a.FieldSchema(name:key, type:string, comment:null), (inputtbl1_n25)inputtbl1_n25.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n34.values EXPRESSION [(inputtbl1_n25)a.null, (inputtbl1_n25)b.null, (inputtbl1_n25)inputtbl1_n25.null, ] +PREHOOK: query: desc formatted outputTbl1_n34 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n34 +POSTHOOK: query: desc formatted outputTbl1_n34 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n34 # col_name data_type comment key string values bigint @@ -236,13 +236,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n34 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n34 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n34 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n34 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_24.q.out b/ql/src/test/results/clientpositive/union_remove_24.q.out index e59171cff8..bac7d48b6f 100644 --- a/ql/src/test/results/clientpositive/union_remove_24.q.out +++ b/ql/src/test/results/clientpositive/union_remove_24.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n20(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n20 +POSTHOOK: query: create table inputTbl1_n20(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key double, `values` bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n20 +PREHOOK: query: create table outputTbl1_n28(key double, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key double, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n28 +POSTHOOK: query: create table outputTbl1_n28(key double, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n28 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n20 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n20 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n20 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n20 PREHOOK: query: EXPLAIN 
-INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n28 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1_n20 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1_n20 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -INSERT OVERWRITE TABLE outputTbl1 +INSERT OVERWRITE TABLE outputTbl1_n28 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1_n20 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1_n20 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -50,7 +50,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n20 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -87,7 +87,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n28 Stage: Stage-0 Move Operator @@ -97,13 +97,13 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n28 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n20 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -140,36 +140,36 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n28 -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n28 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1_n20 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1_n20 group by key ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 +PREHOOK: Input: default@inputtbl1_n20 +PREHOOK: Output: default@outputtbl1_n28 +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1_n28 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1_n20 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1_n20 group by key ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: 
Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n20 +POSTHOOK: Output: default@outputtbl1_n28 +POSTHOOK: Lineage: outputtbl1_n28.key EXPRESSION [(inputtbl1_n20)inputtbl1_n20.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n28.values EXPRESSION [(inputtbl1_n20)inputtbl1_n20.null, ] +PREHOOK: query: desc formatted outputTbl1_n28 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n28 +POSTHOOK: query: desc formatted outputTbl1_n28 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n28 # col_name data_type comment key double values bigint @@ -196,13 +196,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n28 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n28 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n28 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n28 #### A masked pattern was here #### 1.0 1 1.0 1 diff --git a/ql/src/test/results/clientpositive/union_remove_25.q.out b/ql/src/test/results/clientpositive/union_remove_25.q.out index 763e0e327e..cd457ce171 100644 --- a/ql/src/test/results/clientpositive/union_remove_25.q.out +++ b/ql/src/test/results/clientpositive/union_remove_25.q.out @@ -1,59 +1,59 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n13(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n13 +POSTHOOK: query: create table inputTbl1_n13(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile +POSTHOOK: Output: default@inputTbl1_n13 +PREHOOK: query: create table outputTbl1_n19(key string, `values` bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile +PREHOOK: Output: default@outputTbl1_n19 +POSTHOOK: query: create table outputTbl1_n19(key string, `values` bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, `values` bigint) partitioned by (ds string) stored as textfile +POSTHOOK: Output: default@outputTbl1_n19 +PREHOOK: query: create table outputTbl2_n6(key string, `values` bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl2 
-POSTHOOK: query: create table outputTbl2(key string, `values` bigint) partitioned by (ds string) stored as textfile +PREHOOK: Output: default@outputTbl2_n6 +POSTHOOK: query: create table outputTbl2_n6(key string, `values` bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: create table outputTbl3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile +POSTHOOK: Output: default@outputTbl2_n6 +PREHOOK: query: create table outputTbl3_n3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl3 -POSTHOOK: query: create table outputTbl3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile +PREHOOK: Output: default@outputTbl3_n3 +POSTHOOK: query: create table outputTbl3_n3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl3 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl3_n3 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n13 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n13 PREHOOK: query: explain -insert overwrite table outputTbl1 partition(ds='2004') +insert overwrite table outputTbl1_n19 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 partition(ds='2004') +insert overwrite table outputTbl1_n19 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -66,7 +66,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n13 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -99,7 +99,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n19 Stage: Stage-0 Move Operator @@ -111,13 +111,13 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 
+ name: default.outputtbl1_n19 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n13 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -150,36 +150,36 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n19 -PREHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') +PREHOOK: query: insert overwrite table outputTbl1_n19 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1@ds=2004 -POSTHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') +PREHOOK: Input: default@inputtbl1_n13 +PREHOOK: Output: default@outputtbl1_n19@ds=2004 +POSTHOOK: query: insert overwrite table outputTbl1_n19 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n13 group by key ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1@ds=2004 -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2004).key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1 PARTITION(ds=2004).values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 partition(ds='2004') +POSTHOOK: Input: default@inputtbl1_n13 +POSTHOOK: Output: default@outputtbl1_n19@ds=2004 +POSTHOOK: Lineage: outputtbl1_n19 PARTITION(ds=2004).key EXPRESSION [(inputtbl1_n13)inputtbl1_n13.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n19 PARTITION(ds=2004).values EXPRESSION [(inputtbl1_n13)inputtbl1_n13.null, ] +PREHOOK: query: desc formatted outputTbl1_n19 partition(ds='2004') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 partition(ds='2004') +PREHOOK: Input: default@outputtbl1_n19 +POSTHOOK: query: desc formatted outputTbl1_n19 partition(ds='2004') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n19 # col_name data_type comment key string values bigint @@ -191,7 +191,7 @@ ds string # Detailed Partition Information Partition Value: [2004] Database: default -Table: outputtbl1 +Table: outputtbl1_n19 #### A masked pattern was here #### Partition Parameters: numFiles 2 @@ -208,15 +208,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n19 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 -PREHOOK: Input: default@outputtbl1@ds=2004 +PREHOOK: Input: default@outputtbl1_n19 +PREHOOK: Input: default@outputtbl1_n19@ds=2004 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n19 POSTHOOK: 
type: QUERY -POSTHOOK: Input: default@outputtbl1 -POSTHOOK: Input: default@outputtbl1@ds=2004 +POSTHOOK: Input: default@outputtbl1_n19 +POSTHOOK: Input: default@outputtbl1_n19@ds=2004 #### A masked pattern was here #### 1 1 2004 1 1 2004 @@ -229,7 +229,7 @@ POSTHOOK: Input: default@outputtbl1@ds=2004 8 2 2004 8 2 2004 PREHOOK: query: explain -insert overwrite table outputTbl2 partition(ds) +insert overwrite table outputTbl2_n6 partition(ds) SELECT * FROM ( select * from (SELECT key, value, ds from srcpart where ds='2008-04-08' limit 500)a @@ -238,7 +238,7 @@ FROM ( ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl2 partition(ds) +insert overwrite table outputTbl2_n6 partition(ds) SELECT * FROM ( select * from (SELECT key, value, ds from srcpart where ds='2008-04-08' limit 500)a @@ -290,7 +290,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n6 Stage: Stage-0 Move Operator @@ -302,7 +302,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n6 Stage: Stage-2 Map Reduce @@ -342,9 +342,9 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n6 -PREHOOK: query: insert overwrite table outputTbl2 partition(ds) +PREHOOK: query: insert overwrite table outputTbl2_n6 partition(ds) SELECT * FROM ( select * from (SELECT key, value, ds from srcpart where ds='2008-04-08' limit 500)a @@ -355,8 +355,8 @@ PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@outputtbl2 -POSTHOOK: query: insert overwrite table outputTbl2 partition(ds) +PREHOOK: Output: default@outputtbl2_n6 +POSTHOOK: query: insert overwrite table outputTbl2_n6 partition(ds) SELECT * FROM ( select * from (SELECT key, value, ds from srcpart where ds='2008-04-08' limit 500)a @@ -367,22 +367,22 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@outputtbl2@ds=2008-04-08 -POSTHOOK: Lineage: outputtbl2 PARTITION(ds=2008-04-08).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl2 PARTITION(ds=2008-04-08).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions outputTbl2 +POSTHOOK: Output: default@outputtbl2_n6@ds=2008-04-08 +POSTHOOK: Lineage: outputtbl2_n6 PARTITION(ds=2008-04-08).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl2_n6 PARTITION(ds=2008-04-08).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions outputTbl2_n6 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@outputtbl2 -POSTHOOK: query: show partitions outputTbl2 +PREHOOK: Input: default@outputtbl2_n6 
+POSTHOOK: query: show partitions outputTbl2_n6 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@outputtbl2 +POSTHOOK: Input: default@outputtbl2_n6 ds=2008-04-08 -PREHOOK: query: desc formatted outputTbl2 partition(ds='2008-04-08') +PREHOOK: query: desc formatted outputTbl2_n6 partition(ds='2008-04-08') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl2 -POSTHOOK: query: desc formatted outputTbl2 partition(ds='2008-04-08') +PREHOOK: Input: default@outputtbl2_n6 +POSTHOOK: query: desc formatted outputTbl2_n6 partition(ds='2008-04-08') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl2 +POSTHOOK: Input: default@outputtbl2_n6 # col_name data_type comment key string values bigint @@ -394,7 +394,7 @@ ds string # Detailed Partition Information Partition Value: [2008-04-08] Database: default -Table: outputtbl2 +Table: outputtbl2_n6 #### A masked pattern was here #### Partition Parameters: numFiles 2 @@ -411,7 +411,7 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: explain insert overwrite table outputTbl3 partition(ds, hr) +PREHOOK: query: explain insert overwrite table outputTbl3_n3 partition(ds, hr) SELECT * FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)a @@ -419,7 +419,7 @@ FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)b ) a PREHOOK: type: QUERY -POSTHOOK: query: explain insert overwrite table outputTbl3 partition(ds, hr) +POSTHOOK: query: explain insert overwrite table outputTbl3_n3 partition(ds, hr) SELECT * FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)a @@ -471,7 +471,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n3 Stage: Stage-0 Move Operator @@ -484,7 +484,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n3 Stage: Stage-2 Map Reduce @@ -524,9 +524,9 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 + name: default.outputtbl3_n3 -PREHOOK: query: insert overwrite table outputTbl3 partition(ds, hr) +PREHOOK: query: insert overwrite table outputTbl3_n3 partition(ds, hr) SELECT * FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)a @@ -537,8 +537,8 @@ PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@outputtbl3 -POSTHOOK: query: insert overwrite table outputTbl3 partition(ds, hr) +PREHOOK: Output: default@outputtbl3_n3 +POSTHOOK: query: insert overwrite table outputTbl3_n3 partition(ds, hr) SELECT * FROM ( select * from (SELECT key, value, ds, hr from srcpart where ds='2008-04-08' limit 1000)a @@ -549,26 +549,26 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: 
default@outputtbl3@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@outputtbl3@ds=2008-04-08/hr=12 -POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=11).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: outputtbl3 PARTITION(ds=2008-04-08,hr=12).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: show partitions outputTbl3 +POSTHOOK: Output: default@outputtbl3_n3@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@outputtbl3_n3@ds=2008-04-08/hr=12 +POSTHOOK: Lineage: outputtbl3_n3 PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl3_n3 PARTITION(ds=2008-04-08,hr=11).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl3_n3 PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: outputtbl3_n3 PARTITION(ds=2008-04-08,hr=12).values EXPRESSION [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show partitions outputTbl3_n3 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@outputtbl3 -POSTHOOK: query: show partitions outputTbl3 +PREHOOK: Input: default@outputtbl3_n3 +POSTHOOK: query: show partitions outputTbl3_n3 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@outputtbl3 +POSTHOOK: Input: default@outputtbl3_n3 ds=2008-04-08/hr=11 ds=2008-04-08/hr=12 -PREHOOK: query: desc formatted outputTbl3 partition(ds='2008-04-08', hr='11') +PREHOOK: query: desc formatted outputTbl3_n3 partition(ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl3 -POSTHOOK: query: desc formatted outputTbl3 partition(ds='2008-04-08', hr='11') +PREHOOK: Input: default@outputtbl3_n3 +POSTHOOK: query: desc formatted outputTbl3_n3 partition(ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl3 +POSTHOOK: Input: default@outputtbl3_n3 # col_name data_type comment key string values bigint @@ -581,7 +581,7 @@ hr string # Detailed Partition Information Partition Value: [2008-04-08, 11] Database: default -Table: outputtbl3 +Table: outputtbl3_n3 #### A masked pattern was here #### Partition Parameters: numFiles 2 diff --git a/ql/src/test/results/clientpositive/union_remove_3.q.out b/ql/src/test/results/clientpositive/union_remove_3.q.out index 28fb92b7e8..67b534efa7 100644 --- a/ql/src/test/results/clientpositive/union_remove_3.q.out +++ b/ql/src/test/results/clientpositive/union_remove_3.q.out @@ -1,47 +1,47 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n16(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n16 +POSTHOOK: query: create table inputTbl1_n16(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: 
Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n16 +PREHOOK: query: create table outputTbl1_n23(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n23 +POSTHOOK: query: create table outputTbl1_n23(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n23 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n16 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n16 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n16 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n16 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n23 SELECT * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n16 ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n23 SELECT * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n16 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -53,7 +53,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n16 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 1 (type: int) @@ -72,9 +72,9 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n23 TableScan - alias: inputtbl1 + alias: inputtbl1_n16 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 2 (type: int) @@ -93,9 +93,9 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n23 TableScan - alias: inputtbl1 + alias: inputtbl1_n16 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 3 (type: int) @@ -114,7 +114,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n23 Stage: Stage-0 Move Operator @@ -124,40 +124,40 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n23 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n23 SELECT * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n16 ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n16 +PREHOOK: Output: default@outputtbl1_n23 +POSTHOOK: query: insert overwrite table outputTbl1_n23 SELECT * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n16 UNION ALL - SELECT key, 3 as `values` from inputTbl1 + SELECT key, 3 as `values` from inputTbl1_n16 ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n16 +POSTHOOK: Output: default@outputtbl1_n23 +POSTHOOK: Lineage: outputtbl1_n23.key EXPRESSION [(inputtbl1_n16)inputtbl1_n16.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n23.values EXPRESSION [] +PREHOOK: query: desc formatted outputTbl1_n23 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n23 +POSTHOOK: query: desc formatted outputTbl1_n23 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n23 # col_name data_type comment key string values bigint @@ -184,13 +184,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n23 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n23 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n23 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n23 #### A masked pattern was here #### 1 1 1 2 diff --git a/ql/src/test/results/clientpositive/union_remove_4.q.out b/ql/src/test/results/clientpositive/union_remove_4.q.out index 93b4f5153f..49272aabaa 100644 --- a/ql/src/test/results/clientpositive/union_remove_4.q.out +++ b/ql/src/test/results/clientpositive/union_remove_4.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n24(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n24 +POSTHOOK: query: create table inputTbl1_n24(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n24 +PREHOOK: query: create table outputTbl1_n33(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n33 +POSTHOOK: query: create table outputTbl1_n33(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n33 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n24 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n24 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n24 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n24 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n33 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n33 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -55,7 +55,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n24 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -88,7 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n33 Stage: Stage-6 Conditional Operator @@ -107,7 +107,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n33 Stage: Stage-2 Map Reduce @@ -119,7 +119,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n33 Stage: Stage-4 Map Reduce @@ -131,7 +131,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n33 Stage: Stage-5 Move Operator @@ -143,7 +143,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n24 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -176,36 +176,36 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n33 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n33 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n24 +PREHOOK: Output: default@outputtbl1_n33 +POSTHOOK: query: insert overwrite table outputTbl1_n33 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n24 group by key ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n24 +POSTHOOK: Output: default@outputtbl1_n33 +POSTHOOK: Lineage: outputtbl1_n33.key EXPRESSION [(inputtbl1_n24)inputtbl1_n24.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n33.values EXPRESSION [(inputtbl1_n24)inputtbl1_n24.null, ] +PREHOOK: query: desc formatted outputTbl1_n33 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n33 +POSTHOOK: query: desc formatted outputTbl1_n33 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n33 # col_name data_type comment key string values bigint @@ -232,13 +232,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n33 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n33 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n33 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n33 #### A masked pattern was here #### 1 1 1 1 diff --git 
a/ql/src/test/results/clientpositive/union_remove_5.q.out b/ql/src/test/results/clientpositive/union_remove_5.q.out index 5ed99885f5..5fb29a8a4e 100644 --- a/ql/src/test/results/clientpositive/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/union_remove_5.q.out @@ -1,47 +1,47 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n4(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n4 +POSTHOOK: query: create table inputTbl1_n4(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n4 +PREHOOK: query: create table outputTbl1_n6(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n6 +POSTHOOK: query: create table outputTbl1_n6(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n6 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n4 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n4 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n6 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n4 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n4 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n4 ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n6 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n4 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n4 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n4 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -60,7 +60,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n4 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -93,7 +93,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n6 Stage: Stage-6 Conditional Operator @@ -112,7 +112,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n6 Stage: Stage-2 Map Reduce @@ -124,7 +124,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n6 Stage: Stage-4 Map Reduce @@ -136,7 +136,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n6 Stage: Stage-5 Move Operator @@ -148,7 +148,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n4 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 1L (type: bigint) @@ -161,14 +161,14 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n6 Execution mode: vectorized Stage: Stage-8 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n4 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 2L (type: bigint) @@ -181,41 +181,41 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n6 Execution mode: vectorized -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n6 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n4 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n4 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n4 ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n4 +PREHOOK: Output: default@outputtbl1_n6 +POSTHOOK: query: insert overwrite table outputTbl1_n6 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n4 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n4 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n4 ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: 
query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n4 +POSTHOOK: Output: default@outputtbl1_n6 +POSTHOOK: Lineage: outputtbl1_n6.key EXPRESSION [(inputtbl1_n4)inputtbl1_n4.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n6.values EXPRESSION [(inputtbl1_n4)inputtbl1_n4.null, ] +PREHOOK: query: desc formatted outputTbl1_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n6 +POSTHOOK: query: desc formatted outputTbl1_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n6 # col_name data_type comment key string values bigint @@ -242,13 +242,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n6 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_6.q.out b/ql/src/test/results/clientpositive/union_remove_6.q.out index 2456545014..08a68b77ad 100644 --- a/ql/src/test/results/clientpositive/union_remove_6.q.out +++ b/ql/src/test/results/clientpositive/union_remove_6.q.out @@ -1,52 +1,52 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n10(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n10 +POSTHOOK: query: create table inputTbl1_n10(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n10 +PREHOOK: query: create table outputTbl1_n14(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n14 +POSTHOOK: query: create table outputTbl1_n14(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile +POSTHOOK: Output: default@outputTbl1_n14 +PREHOOK: query: create table outputTbl2_n4(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl2_n4 +POSTHOOK: query: create table outputTbl2_n4(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: 
Output: default@outputTbl2_n4 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n10 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n10 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n10 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n10 PREHOOK: query: explain FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key ) a -insert overwrite table outputTbl1 select * -insert overwrite table outputTbl2 select * +insert overwrite table outputTbl1_n14 select * +insert overwrite table outputTbl2_n4 select * PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key ) a -insert overwrite table outputTbl1 select * -insert overwrite table outputTbl2 select * +insert overwrite table outputTbl1_n14 select * +insert overwrite table outputTbl2_n4 select * POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -60,7 +60,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n10 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -106,7 +106,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n14 File Output Operator compressed: false Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE @@ -114,7 +114,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n4 TableScan Union Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE @@ -125,7 +125,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n14 File Output Operator compressed: false Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE @@ -133,7 +133,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n4 Stage: Stage-0 Move Operator @@ -143,7 +143,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n14 Stage: Stage-1 Move Operator @@ -153,13 +153,13 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 + name: default.outputtbl2_n4 Stage: Stage-4 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n10 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -193,38 +193,38 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe PREHOOK: query: FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key ) a -insert overwrite table outputTbl1 select * -insert overwrite table outputTbl2 select * +insert overwrite table outputTbl1_n14 select * +insert overwrite table outputTbl2_n4 select * PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -PREHOOK: Output: default@outputtbl2 +PREHOOK: Input: default@inputtbl1_n10 +PREHOOK: Output: default@outputtbl1_n14 +PREHOOK: Output: default@outputtbl2_n4 POSTHOOK: query: FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n10 group by key ) a -insert overwrite table outputTbl1 select * -insert overwrite table outputTbl2 select * +insert overwrite table outputTbl1_n14 select * +insert overwrite table outputTbl2_n4 select * POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Output: default@outputtbl2 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 +POSTHOOK: Input: default@inputtbl1_n10 +POSTHOOK: Output: default@outputtbl1_n14 +POSTHOOK: Output: default@outputtbl2_n4 +POSTHOOK: Lineage: outputtbl1_n14.key EXPRESSION [(inputtbl1_n10)inputtbl1_n10.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n14.values EXPRESSION [(inputtbl1_n10)inputtbl1_n10.null, ] +POSTHOOK: Lineage: outputtbl2_n4.key EXPRESSION [(inputtbl1_n10)inputtbl1_n10.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl2_n4.values EXPRESSION [(inputtbl1_n10)inputtbl1_n10.null, ] +PREHOOK: query: select * from outputTbl1_n14 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n14 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n14 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n14 #### A masked pattern was here #### 1 1 1 1 @@ -236,13 +236,13 @@ POSTHOOK: Input: 
default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: select * from outputTbl2 +PREHOOK: query: select * from outputTbl2_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl2 +PREHOOK: Input: default@outputtbl2_n4 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl2 +POSTHOOK: query: select * from outputTbl2_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl2 +POSTHOOK: Input: default@outputtbl2_n4 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out index 7c51d92b69..6fdc7aafdc 100644 --- a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out +++ b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out @@ -1,19 +1,19 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n0(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n0 +POSTHOOK: query: create table inputTbl1_n0(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: Output: default@inputTbl1_n0 +PREHOOK: query: create table outputTbl1_n0(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: Output: default@outputTbl1_n0 +POSTHOOK: query: create table outputTbl1_n0(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 +POSTHOOK: Output: default@outputTbl1_n0 PREHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -22,34 +22,34 @@ POSTHOOK: query: create table outputTbl2(key string, `values` bigint) stored as POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n0 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n0 PREHOOK: query: explain FROM ( select * from( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key )subq ) a -insert overwrite table outputTbl1 select * +insert overwrite table outputTbl1_n0 select * insert overwrite table outputTbl2 select * PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( select 
* from( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key )subq ) a -insert overwrite table outputTbl1 select * +insert overwrite table outputTbl1_n0 select * insert overwrite table outputTbl2 select * POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -64,7 +64,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n0 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -110,7 +110,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n0 File Output Operator compressed: false Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE @@ -129,7 +129,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n0 File Output Operator compressed: false Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE @@ -147,7 +147,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl1 + name: default.outputtbl1_n0 Stage: Stage-1 Move Operator @@ -163,7 +163,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n0 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -198,41 +198,41 @@ STAGE PLANS: PREHOOK: query: FROM ( select * from( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key )subq ) a -insert overwrite table outputTbl1 select * +insert overwrite table outputTbl1_n0 select * insert overwrite table outputTbl2 select * PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 +PREHOOK: Input: default@inputtbl1_n0 +PREHOOK: Output: default@outputtbl1_n0 PREHOOK: Output: default@outputtbl2 POSTHOOK: query: FROM ( select * from( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n0 group by key )subq ) a -insert overwrite table outputTbl1 select * +insert overwrite table outputTbl1_n0 select * insert overwrite table outputTbl2 select * POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Input: default@inputtbl1_n0 +POSTHOOK: Output: default@outputtbl1_n0 POSTHOOK: Output: default@outputtbl2 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: 
outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 +POSTHOOK: Lineage: outputtbl1_n0.key EXPRESSION [(inputtbl1_n0)inputtbl1_n0.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n0.values EXPRESSION [(inputtbl1_n0)inputtbl1_n0.null, ] +POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1_n0)inputtbl1_n0.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1_n0)inputtbl1_n0.null, ] +PREHOOK: query: select * from outputTbl1_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n0 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_7.q.out b/ql/src/test/results/clientpositive/union_remove_7.q.out index 437b08aa7a..3ef0787acb 100644 --- a/ql/src/test/results/clientpositive/union_remove_7.q.out +++ b/ql/src/test/results/clientpositive/union_remove_7.q.out @@ -1,43 +1,43 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n17(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n17 +POSTHOOK: query: create table inputTbl1_n17(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: Output: default@inputTbl1_n17 +PREHOOK: query: create table outputTbl1_n24(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: Output: default@outputTbl1_n24 +POSTHOOK: query: create table outputTbl1_n24(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n24 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n17 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n17 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n17 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n17 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n24 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from 
inputTbl1_n17 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n24 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -50,7 +50,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n17 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -83,7 +83,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n24 Stage: Stage-0 Move Operator @@ -93,13 +93,13 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n24 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n17 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -132,36 +132,36 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n24 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n24 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n17 +PREHOOK: Output: default@outputtbl1_n24 +POSTHOOK: query: insert overwrite table outputTbl1_n24 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key UNION ALL - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n17 group by key ) a POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n17 +POSTHOOK: Output: default@outputtbl1_n24 +POSTHOOK: Lineage: outputtbl1_n24.key EXPRESSION [(inputtbl1_n17)inputtbl1_n17.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n24.values EXPRESSION [(inputtbl1_n17)inputtbl1_n17.null, ] 
+PREHOOK: query: desc formatted outputTbl1_n24 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n24 +POSTHOOK: query: desc formatted outputTbl1_n24 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n24 # col_name data_type comment key string values bigint @@ -188,13 +188,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n24 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n24 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n24 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n24 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_8.q.out b/ql/src/test/results/clientpositive/union_remove_8.q.out index 4971a745af..b6c474f9f5 100644 --- a/ql/src/test/results/clientpositive/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/union_remove_8.q.out @@ -1,47 +1,47 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n9(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n9 +POSTHOOK: query: create table inputTbl1_n9(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: Output: default@inputTbl1_n9 +PREHOOK: query: create table outputTbl1_n12(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: Output: default@outputTbl1_n12 +POSTHOOK: query: create table outputTbl1_n12(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n12 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n9 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n9 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n9 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n9 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n12 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n9 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n9 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as 
`values` from inputTbl1_n9 ) a PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n12 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n9 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n9 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n9 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -55,7 +55,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n9 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -88,7 +88,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n12 Stage: Stage-0 Move Operator @@ -98,13 +98,13 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n12 Stage: Stage-2 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n9 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 1L (type: bigint) @@ -117,14 +117,14 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n12 Execution mode: vectorized Stage: Stage-3 Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n9 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 2L (type: bigint) @@ -137,41 +137,41 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n12 Execution mode: vectorized -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n12 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n9 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n9 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n9 ) a PREHOOK: type: QUERY -PREHOOK: Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n9 +PREHOOK: Output: default@outputtbl1_n12 +POSTHOOK: query: insert overwrite table outputTbl1_n12 SELECT * FROM ( - SELECT key, count(1) as `values` from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1_n9 group by key UNION ALL - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n9 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n9 ) a POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n9 +POSTHOOK: Output: default@outputtbl1_n12 +POSTHOOK: Lineage: outputtbl1_n12.key EXPRESSION [(inputtbl1_n9)inputtbl1_n9.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n12.values EXPRESSION [(inputtbl1_n9)inputtbl1_n9.null, ] +PREHOOK: query: desc formatted outputTbl1_n12 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n12 +POSTHOOK: query: desc formatted outputTbl1_n12 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n12 # col_name data_type comment key string values bigint @@ -198,13 +198,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n12 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n12 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n12 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n12 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_remove_9.q.out b/ql/src/test/results/clientpositive/union_remove_9.q.out index 786a81fdd4..5ab05209cd 100644 --- a/ql/src/test/results/clientpositive/union_remove_9.q.out +++ b/ql/src/test/results/clientpositive/union_remove_9.q.out @@ -1,50 +1,50 @@ -PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1_n15(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile +PREHOOK: Output: default@inputTbl1_n15 +POSTHOOK: query: create table inputTbl1_n15(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: Output: default@inputTbl1_n15 +PREHOOK: query: create table outputTbl1_n22(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: Output: default@outputTbl1_n22 +POSTHOOK: query: create table outputTbl1_n22(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: Output: default@outputTbl1_n22 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1_n15 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@inputtbl1 -POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: Output: default@inputtbl1_n15 +POSTHOOK: query: 
load data local inpath '../../data/files/T1.txt' into table inputTbl1_n15 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@inputtbl1 +POSTHOOK: Output: default@inputtbl1_n15 PREHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n22 SELECT * FROM ( -select key, count(1) as `values` from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1_n15 group by key union all select * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n15 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n15 ) a )b PREHOOK: type: QUERY POSTHOOK: query: explain -insert overwrite table outputTbl1 +insert overwrite table outputTbl1_n22 SELECT * FROM ( -select key, count(1) as `values` from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1_n15 group by key union all select * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n15 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n15 ) a )b POSTHOOK: type: QUERY @@ -63,7 +63,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n15 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -96,7 +96,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n22 Stage: Stage-6 Conditional Operator @@ -115,7 +115,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n22 Stage: Stage-2 Merge File Operator @@ -141,7 +141,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: inputtbl1 + alias: inputtbl1_n15 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 1 (type: int) @@ -160,9 +160,9 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n22 TableScan - alias: inputtbl1 + alias: inputtbl1_n15 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 2 (type: int) @@ -181,44 +181,44 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + name: default.outputtbl1_n22 -PREHOOK: query: insert overwrite table outputTbl1 +PREHOOK: query: insert overwrite table outputTbl1_n22 SELECT * FROM ( -select key, count(1) as `values` from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1_n15 group by key union all select * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n15 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n15 ) a )b PREHOOK: type: QUERY -PREHOOK: 
Input: default@inputtbl1 -PREHOOK: Output: default@outputtbl1 -POSTHOOK: query: insert overwrite table outputTbl1 +PREHOOK: Input: default@inputtbl1_n15 +PREHOOK: Output: default@outputtbl1_n22 +POSTHOOK: query: insert overwrite table outputTbl1_n22 SELECT * FROM ( -select key, count(1) as `values` from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1_n15 group by key union all select * FROM ( - SELECT key, 1 as `values` from inputTbl1 + SELECT key, 1 as `values` from inputTbl1_n15 UNION ALL - SELECT key, 2 as `values` from inputTbl1 + SELECT key, 2 as `values` from inputTbl1_n15 ) a )b POSTHOOK: type: QUERY -POSTHOOK: Input: default@inputtbl1 -POSTHOOK: Output: default@outputtbl1 -POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, ] -PREHOOK: query: desc formatted outputTbl1 +POSTHOOK: Input: default@inputtbl1_n15 +POSTHOOK: Output: default@outputtbl1_n22 +POSTHOOK: Lineage: outputtbl1_n22.key EXPRESSION [(inputtbl1_n15)inputtbl1_n15.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1_n22.values EXPRESSION [(inputtbl1_n15)inputtbl1_n15.null, ] +PREHOOK: query: desc formatted outputTbl1_n22 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@outputtbl1 -POSTHOOK: query: desc formatted outputTbl1 +PREHOOK: Input: default@outputtbl1_n22 +POSTHOOK: query: desc formatted outputTbl1_n22 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n22 # col_name data_type comment key string values bigint @@ -245,13 +245,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 +PREHOOK: query: select * from outputTbl1_n22 PREHOOK: type: QUERY -PREHOOK: Input: default@outputtbl1 +PREHOOK: Input: default@outputtbl1_n22 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 +POSTHOOK: query: select * from outputTbl1_n22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@outputtbl1 +POSTHOOK: Input: default@outputtbl1_n22 #### A masked pattern was here #### 1 1 1 1 diff --git a/ql/src/test/results/clientpositive/union_view.q.out b/ql/src/test/results/clientpositive/union_view.q.out index becad2c5da..ba98ef0587 100644 --- a/ql/src/test/results/clientpositive/union_view.q.out +++ b/ql/src/test/results/clientpositive/union_view.q.out @@ -1,27 +1,27 @@ -PREHOOK: query: CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE src_union_1_n0 (key int, value string) PARTITIONED BY (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_union_1 -POSTHOOK: query: CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY (ds string) +PREHOOK: Output: default@src_union_1_n0 +POSTHOOK: query: CREATE TABLE src_union_1_n0 (key int, value string) PARTITIONED BY (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_union_1 -PREHOOK: query: CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY (ds string, part_1 string) +POSTHOOK: Output: default@src_union_1_n0 +PREHOOK: query: CREATE TABLE src_union_2_n0 (key int, value string) PARTITIONED BY (ds string, part_1 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_union_2 -POSTHOOK: query: CREATE TABLE src_union_2 (key int, value string) 
PARTITIONED BY (ds string, part_1 string) +PREHOOK: Output: default@src_union_2_n0 +POSTHOOK: query: CREATE TABLE src_union_2_n0 (key int, value string) PARTITIONED BY (ds string, part_1 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_union_2 -PREHOOK: query: CREATE TABLE src_union_3(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string) +POSTHOOK: Output: default@src_union_2_n0 +PREHOOK: query: CREATE TABLE src_union_3_n0(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@src_union_3 -POSTHOOK: query: CREATE TABLE src_union_3(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string) +PREHOOK: Output: default@src_union_3_n0 +POSTHOOK: query: CREATE TABLE src_union_3_n0(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@src_union_3 +POSTHOOK: Output: default@src_union_3_n0 STAGE DEPENDENCIES: Stage-0 is a root stage @@ -31,7 +31,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: ((key = 86) and (ds = '1')) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator @@ -52,7 +52,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: ((key = 86) and (ds = '2')) (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Filter Operator @@ -73,7 +73,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: ((key = 86) and (ds = '3')) (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Filter Operator @@ -132,7 +132,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: ((key = 86) and (ds = '1')) (type: boolean) properties: insideView TRUE @@ -158,7 +158,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: ((key = 86) and (ds = '1')) (type: boolean) properties: insideView TRUE @@ -184,7 +184,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: ((key = 86) and (ds = '1')) (type: boolean) properties: insideView TRUE @@ -225,7 +225,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: ((key = 86) and (ds = '2')) (type: boolean) properties: insideView TRUE @@ -251,7 +251,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: ((key = 86) and (ds = '2')) (type: boolean) properties: insideView TRUE @@ -277,7 +277,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: ((key = 86) and (ds = '2')) (type: 
boolean) properties: insideView TRUE @@ -318,7 +318,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: ((key = 86) and (ds = '3')) (type: boolean) properties: insideView TRUE @@ -344,7 +344,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: ((key = 86) and (ds = '3')) (type: boolean) properties: insideView TRUE @@ -370,7 +370,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: ((key = 86) and (ds = '3')) (type: boolean) properties: insideView TRUE @@ -411,7 +411,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: ((key = 86) and ds is not null) (type: boolean) properties: insideView TRUE @@ -437,7 +437,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: ((key = 86) and ds is not null) (type: boolean) properties: insideView TRUE @@ -463,7 +463,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: ((key = 86) and ds is not null) (type: boolean) properties: insideView TRUE @@ -514,7 +514,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: (ds = '1') (type: boolean) properties: insideView TRUE @@ -535,7 +535,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: bigint) TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: (ds = '1') (type: boolean) properties: insideView TRUE @@ -559,7 +559,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: bigint) TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: (ds = '1') (type: boolean) properties: insideView TRUE @@ -611,7 +611,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: (ds = '2') (type: boolean) properties: insideView TRUE @@ -635,7 +635,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: bigint) TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: (ds = '2') (type: boolean) properties: insideView TRUE @@ -656,7 +656,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: bigint) TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: (ds = '2') (type: boolean) properties: insideView TRUE @@ -708,7 +708,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: (ds = '3') (type: boolean) properties: insideView TRUE @@ -732,7 +732,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: bigint) TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: (ds = '3') (type: boolean) 
properties: insideView TRUE @@ -756,7 +756,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: bigint) TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: (ds = '3') (type: boolean) properties: insideView TRUE @@ -808,7 +808,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: ((key = 86) and (ds = '4')) (type: boolean) properties: insideView TRUE @@ -834,7 +834,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: ((key = 86) and (ds = '4')) (type: boolean) properties: insideView TRUE @@ -860,7 +860,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: ((key = 86) and (ds = '4')) (type: boolean) properties: insideView TRUE @@ -902,7 +902,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: src_union_1 + alias: src_union_1_n0 filterExpr: (ds = '4') (type: boolean) properties: insideView TRUE @@ -926,7 +926,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: bigint) TableScan - alias: src_union_2 + alias: src_union_2_n0 filterExpr: (ds = '4') (type: boolean) properties: insideView TRUE @@ -950,7 +950,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE value expressions: _col0 (type: bigint) TableScan - alias: src_union_3 + alias: src_union_3_n0 filterExpr: (ds = '4') (type: boolean) properties: insideView TRUE diff --git a/ql/src/test/results/clientpositive/uniquejoin.q.out b/ql/src/test/results/clientpositive/uniquejoin.q.out index fcfe4dcf98..6b224eb438 100644 --- a/ql/src/test/results/clientpositive/uniquejoin.q.out +++ b/ql/src/test/results/clientpositive/uniquejoin.q.out @@ -1,64 +1,64 @@ -PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1_n0(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T1 -POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T1_n0 +POSTHOOK: query: CREATE TABLE T1_n0(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T1 -PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@T1_n0 +PREHOOK: query: CREATE TABLE T2_n0(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T2 -POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: Output: default@T2_n0 +POSTHOOK: query: CREATE TABLE T2_n0(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T2 -PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: Output: default@T2_n0 +PREHOOK: query: CREATE TABLE T3_n0(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@T3 -POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) 
STORED AS TEXTFILE +PREHOOK: Output: default@T3_n0 +POSTHOOK: query: CREATE TABLE T3_n0(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@T3 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +POSTHOOK: Output: default@T3_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 +PREHOOK: Output: default@t1_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +POSTHOOK: Output: default@t1_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t2 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 +PREHOOK: Output: default@t2_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t2 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +POSTHOOK: Output: default@t2_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@t3 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 +PREHOOK: Output: default@t3_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@t3 -PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key), PRESERVE T2 b (b.key), PRESERVE T3 c (c.key) +POSTHOOK: Output: default@t3_n0 +PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1_n0 a (a.key), PRESERVE T2_n0 b (b.key), PRESERVE T3_n0 c (c.key) SELECT a.key, b.key, c.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t1_n0 +PREHOOK: Input: default@t2_n0 +PREHOOK: Input: default@t3_n0 #### A masked pattern was here #### -POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key), PRESERVE T2 b (b.key), PRESERVE T3 c (c.key) +POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1_n0 a (a.key), PRESERVE T2_n0 b (b.key), PRESERVE T3_n0 c (c.key) SELECT a.key, b.key, c.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t1_n0 +POSTHOOK: Input: default@t2_n0 +POSTHOOK: Input: default@t3_n0 #### A masked pattern was here #### 1 NULL NULL 2 2 2 @@ -71,51 +71,51 @@ POSTHOOK: Input: default@t3 NULL 4 4 NULL 5 NULL NULL NULL 6 -PREHOOK: query: FROM UNIQUEJOIN T1 a (a.key), T2 b (b.key), T3 c (c.key) +PREHOOK: query: FROM UNIQUEJOIN T1_n0 a (a.key), T2_n0 b (b.key), T3_n0 c (c.key) SELECT a.key, b.key, c.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t1_n0 +PREHOOK: Input: default@t2_n0 +PREHOOK: Input: default@t3_n0 #### A masked pattern was here #### -POSTHOOK: query: FROM UNIQUEJOIN T1 a (a.key), T2 b (b.key), T3 c (c.key) +POSTHOOK: query: FROM UNIQUEJOIN T1_n0 a (a.key), T2_n0 
b (b.key), T3_n0 c (c.key) SELECT a.key, b.key, c.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t1_n0 +POSTHOOK: Input: default@t2_n0 +POSTHOOK: Input: default@t3_n0 #### A masked pattern was here #### 2 2 2 -PREHOOK: query: FROM UNIQUEJOIN T1 a (a.key), T2 b (b.key-1), T3 c (c.key) +PREHOOK: query: FROM UNIQUEJOIN T1_n0 a (a.key), T2_n0 b (b.key-1), T3_n0 c (c.key) SELECT a.key, b.key, c.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t1_n0 +PREHOOK: Input: default@t2_n0 +PREHOOK: Input: default@t3_n0 #### A masked pattern was here #### -POSTHOOK: query: FROM UNIQUEJOIN T1 a (a.key), T2 b (b.key-1), T3 c (c.key) +POSTHOOK: query: FROM UNIQUEJOIN T1_n0 a (a.key), T2_n0 b (b.key-1), T3_n0 c (c.key) SELECT a.key, b.key, c.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t1_n0 +POSTHOOK: Input: default@t2_n0 +POSTHOOK: Input: default@t3_n0 #### A masked pattern was here #### 2 3 2 7 8 7 7 8 7 -PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key, a.val), PRESERVE T2 b (b.key, b.val), PRESERVE T3 c (c.key, c.val) +PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1_n0 a (a.key, a.val), PRESERVE T2_n0 b (b.key, b.val), PRESERVE T3_n0 c (c.key, c.val) SELECT a.key, a.val, b.key, b.val, c.key, c.val PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t1_n0 +PREHOOK: Input: default@t2_n0 +PREHOOK: Input: default@t3_n0 #### A masked pattern was here #### -POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key, a.val), PRESERVE T2 b (b.key, b.val), PRESERVE T3 c (c.key, c.val) +POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1_n0 a (a.key, a.val), PRESERVE T2_n0 b (b.key, b.val), PRESERVE T3_n0 c (c.key, c.val) SELECT a.key, a.val, b.key, b.val, c.key, c.val POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t1_n0 +POSTHOOK: Input: default@t2_n0 +POSTHOOK: Input: default@t3_n0 #### A masked pattern was here #### 1 11 NULL NULL NULL NULL 2 12 NULL NULL 2 12 @@ -128,19 +128,19 @@ NULL NULL 2 22 NULL NULL NULL NULL 4 14 4 14 NULL NULL 5 15 NULL NULL NULL NULL NULL NULL 6 16 -PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key), T2 b (b.key), PRESERVE T3 c (c.key) +PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1_n0 a (a.key), T2_n0 b (b.key), PRESERVE T3_n0 c (c.key) SELECT a.key, b.key, c.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 -PREHOOK: Input: default@t3 +PREHOOK: Input: default@t1_n0 +PREHOOK: Input: default@t2_n0 +PREHOOK: Input: default@t3_n0 #### A masked pattern was here #### -POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key), T2 b (b.key), PRESERVE T3 c (c.key) +POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1_n0 a (a.key), T2_n0 b (b.key), PRESERVE T3_n0 c (c.key) SELECT a.key, b.key, c.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 -POSTHOOK: Input: default@t3 +POSTHOOK: Input: default@t1_n0 +POSTHOOK: Input: default@t2_n0 +POSTHOOK: Input: default@t3_n0 #### A masked pattern was here #### 1 NULL NULL 2 2 2 @@ -152,17 +152,17 @@ POSTHOOK: Input: default@t3 8 8 NULL NULL 4 4 NULL NULL 6 -PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key), T2 b(b.key) +PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1_n0 a (a.key), 
T2_n0 b(b.key) SELECT a.key, b.key PREHOOK: type: QUERY -PREHOOK: Input: default@t1 -PREHOOK: Input: default@t2 +PREHOOK: Input: default@t1_n0 +PREHOOK: Input: default@t2_n0 #### A masked pattern was here #### -POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key), T2 b(b.key) +POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1_n0 a (a.key), T2_n0 b(b.key) SELECT a.key, b.key POSTHOOK: type: QUERY -POSTHOOK: Input: default@t1 -POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t1_n0 +POSTHOOK: Input: default@t2_n0 #### A masked pattern was here #### 1 NULL 2 2 diff --git a/ql/src/test/results/clientpositive/updateAccessTime.q.out b/ql/src/test/results/clientpositive/updateAccessTime.q.out index d7b96301a0..093aa2a791 100644 --- a/ql/src/test/results/clientpositive/updateAccessTime.q.out +++ b/ql/src/test/results/clientpositive/updateAccessTime.q.out @@ -1,101 +1,101 @@ -PREHOOK: query: drop table tstsrc +PREHOOK: query: drop table tstsrc_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrc +POSTHOOK: query: drop table tstsrc_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tstsrc as select * from src +PREHOOK: query: create table tstsrc_n0 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrc -POSTHOOK: query: create table tstsrc as select * from src +PREHOOK: Output: default@tstsrc_n0 +POSTHOOK: query: create table tstsrc_n0 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrc -POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc extended tstsrc +POSTHOOK: Output: default@tstsrc_n0 +POSTHOOK: Lineage: tstsrc_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrc_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc extended tstsrc_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tstsrc -POSTHOOK: query: desc extended tstsrc +PREHOOK: Input: default@tstsrc_n0 +POSTHOOK: query: desc extended tstsrc_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tstsrc +POSTHOOK: Input: default@tstsrc_n0 key string value string #### A masked pattern was here #### -PREHOOK: query: select count(1) from tstsrc +PREHOOK: query: select count(1) from tstsrc_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@tstsrc +PREHOOK: Input: default@tstsrc_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from tstsrc +POSTHOOK: query: select count(1) from tstsrc_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstsrc +POSTHOOK: Input: default@tstsrc_n0 #### A masked pattern was here #### 500 -PREHOOK: query: desc extended tstsrc +PREHOOK: query: desc extended tstsrc_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tstsrc -POSTHOOK: query: desc extended tstsrc +PREHOOK: Input: default@tstsrc_n0 +POSTHOOK: query: desc extended tstsrc_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tstsrc +POSTHOOK: Input: default@tstsrc_n0 key string value string #### A masked pattern was here #### -PREHOOK: query: drop table tstsrc +PREHOOK: query: drop table tstsrc_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tstsrc -PREHOOK: Output: default@tstsrc -POSTHOOK: query: drop table tstsrc +PREHOOK: 
Input: default@tstsrc_n0 +PREHOOK: Output: default@tstsrc_n0 +POSTHOOK: query: drop table tstsrc_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tstsrc -POSTHOOK: Output: default@tstsrc -PREHOOK: query: drop table tstsrcpart +POSTHOOK: Input: default@tstsrc_n0 +POSTHOOK: Output: default@tstsrc_n0 +PREHOOK: query: drop table tstsrcpart_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrcpart +POSTHOOK: query: drop table tstsrcpart_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tstsrcpart like srcpart +PREHOOK: query: create table tstsrcpart_n1 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: create table tstsrcpart like srcpart +PREHOOK: Output: default@tstsrcpart_n1 +POSTHOOK: query: create table tstsrcpart_n1 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrcpart -PREHOOK: query: insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr from srcpart +POSTHOOK: Output: default@tstsrcpart_n1 +PREHOOK: query: insert overwrite table tstsrcpart_n1 partition (ds, hr) select key, value, ds, hr from srcpart PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr from srcpart +PREHOOK: Output: default@tstsrcpart_n1 +POSTHOOK: query: insert overwrite table tstsrcpart_n1 partition (ds, hr) select key, value, ds, hr from srcpart POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=12 -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc extended tstsrcpart +POSTHOOK: Output: default@tstsrcpart_n1@ds=2008-04-08/hr=11 
+POSTHOOK: Output: default@tstsrcpart_n1@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@tstsrcpart_n1@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@tstsrcpart_n1@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: tstsrcpart_n1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n1 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n1 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n1 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tstsrcpart_n1 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc extended tstsrcpart_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tstsrcpart -POSTHOOK: query: desc extended tstsrcpart +PREHOOK: Input: default@tstsrcpart_n1 +POSTHOOK: query: desc extended tstsrcpart_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n1 key string default value string default ds string @@ -107,12 +107,12 @@ ds string hr string #### A masked pattern was here #### -PREHOOK: query: desc extended tstsrcpart partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tstsrcpart -POSTHOOK: query: desc extended tstsrcpart partition (ds='2008-04-08', hr='11') +PREHOOK: Input: default@tstsrcpart_n1 +POSTHOOK: query: desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n1 key string default value string default ds string @@ -124,12 +124,12 @@ ds string hr string #### A masked pattern was here #### -PREHOOK: query: desc extended tstsrcpart partition (ds='2008-04-08', hr='12') +PREHOOK: query: desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='12') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tstsrcpart -POSTHOOK: query: desc extended tstsrcpart partition (ds='2008-04-08', hr='12') +PREHOOK: Input: default@tstsrcpart_n1 +POSTHOOK: query: desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='12') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n1 key string default value string default ds string @@ -141,21 +141,21 @@ ds string hr string #### A masked pattern was here #### -PREHOOK: query: select count(1) from tstsrcpart where ds = '2008-04-08' and hr = '11' +PREHOOK: query: select count(1) from tstsrcpart_n1 where ds = '2008-04-08' and hr = '11' PREHOOK: type: QUERY -PREHOOK: Input: default@tstsrcpart +PREHOOK: Input: default@tstsrcpart_n1 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from tstsrcpart where ds = '2008-04-08' and 
hr = '11' +POSTHOOK: query: select count(1) from tstsrcpart_n1 where ds = '2008-04-08' and hr = '11' POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n1 #### A masked pattern was here #### 500 -PREHOOK: query: desc extended tstsrcpart +PREHOOK: query: desc extended tstsrcpart_n1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tstsrcpart -POSTHOOK: query: desc extended tstsrcpart +PREHOOK: Input: default@tstsrcpart_n1 +POSTHOOK: query: desc extended tstsrcpart_n1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n1 key string default value string default ds string @@ -167,12 +167,12 @@ ds string hr string #### A masked pattern was here #### -PREHOOK: query: desc extended tstsrcpart partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tstsrcpart -POSTHOOK: query: desc extended tstsrcpart partition (ds='2008-04-08', hr='11') +PREHOOK: Input: default@tstsrcpart_n1 +POSTHOOK: query: desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n1 key string default value string default ds string @@ -184,12 +184,12 @@ ds string hr string #### A masked pattern was here #### -PREHOOK: query: desc extended tstsrcpart partition (ds='2008-04-08', hr='12') +PREHOOK: query: desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='12') PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tstsrcpart -POSTHOOK: query: desc extended tstsrcpart partition (ds='2008-04-08', hr='12') +PREHOOK: Input: default@tstsrcpart_n1 +POSTHOOK: query: desc extended tstsrcpart_n1 partition (ds='2008-04-08', hr='12') POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n1 key string default value string default ds string @@ -201,14 +201,14 @@ ds string hr string #### A masked pattern was here #### -PREHOOK: query: drop table tstsrcpart +PREHOOK: query: drop table tstsrcpart_n1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: drop table tstsrcpart +PREHOOK: Input: default@tstsrcpart_n1 +PREHOOK: Output: default@tstsrcpart_n1 +POSTHOOK: query: drop table tstsrcpart_n1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Output: default@tstsrcpart +POSTHOOK: Input: default@tstsrcpart_n1 +POSTHOOK: Output: default@tstsrcpart_n1 PREHOOK: query: ANALYZE TABLE src COMPUTE STATISTICS PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/updateBasicStats.q.out b/ql/src/test/results/clientpositive/updateBasicStats.q.out index 7bae1aec9f..1882bb9967 100644 --- a/ql/src/test/results/clientpositive/updateBasicStats.q.out +++ b/ql/src/test/results/clientpositive/updateBasicStats.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: create table s as select * from src limit 10 +PREHOOK: query: create table s_n5 as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@s -POSTHOOK: query: create table s as select * from src limit 10 +PREHOOK: Output: default@s_n5 +POSTHOOK: query: create table s_n5 as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@s -POSTHOOK: 
Lineage: s.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: explain select * from s +POSTHOOK: Output: default@s_n5 +POSTHOOK: Lineage: s_n5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: s_n5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain select * from s_n5 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from s +POSTHOOK: query: explain select * from s_n5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -23,7 +23,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: s + alias: s_n5 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -31,17 +31,17 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: alter table s update statistics set('numRows'='12') +PREHOOK: query: alter table s_n5 update statistics set('numRows'='12') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: Input: default@s -PREHOOK: Output: default@s -POSTHOOK: query: alter table s update statistics set('numRows'='12') +PREHOOK: Input: default@s_n5 +PREHOOK: Output: default@s_n5 +POSTHOOK: query: alter table s_n5 update statistics set('numRows'='12') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: Input: default@s -POSTHOOK: Output: default@s -PREHOOK: query: explain select * from s +POSTHOOK: Input: default@s_n5 +POSTHOOK: Output: default@s_n5 +PREHOOK: query: explain select * from s_n5 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from s +POSTHOOK: query: explain select * from s_n5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -52,7 +52,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: s + alias: s_n5 Statistics: Num rows: 12 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -60,17 +60,17 @@ STAGE PLANS: Statistics: Num rows: 12 Data size: 104 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: analyze table s compute statistics +PREHOOK: query: analyze table s_n5 compute statistics PREHOOK: type: QUERY -PREHOOK: Input: default@s -PREHOOK: Output: default@s -POSTHOOK: query: analyze table s compute statistics +PREHOOK: Input: default@s_n5 +PREHOOK: Output: default@s_n5 +POSTHOOK: query: analyze table s_n5 compute statistics POSTHOOK: type: QUERY -POSTHOOK: Input: default@s -POSTHOOK: Output: default@s -PREHOOK: query: explain select * from s +POSTHOOK: Input: default@s_n5 +POSTHOOK: Output: default@s_n5 +PREHOOK: query: explain select * from s_n5 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from s +POSTHOOK: query: explain select * from s_n5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -81,7 +81,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: s + alias: s_n5 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -89,17 +89,17 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: alter table s update statistics set('numRows'='1212', 'rawDataSize'='500500') +PREHOOK: query: alter table s_n5 update statistics 
set('numRows'='1212', 'rawDataSize'='500500') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: Input: default@s -PREHOOK: Output: default@s -POSTHOOK: query: alter table s update statistics set('numRows'='1212', 'rawDataSize'='500500') +PREHOOK: Input: default@s_n5 +PREHOOK: Output: default@s_n5 +POSTHOOK: query: alter table s_n5 update statistics set('numRows'='1212', 'rawDataSize'='500500') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: Input: default@s -POSTHOOK: Output: default@s -PREHOOK: query: explain select * from s +POSTHOOK: Input: default@s_n5 +POSTHOOK: Output: default@s_n5 +PREHOOK: query: explain select * from s_n5 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from s +POSTHOOK: query: explain select * from s_n5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -110,7 +110,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: s + alias: s_n5 Statistics: Num rows: 1212 Data size: 500500 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -118,26 +118,26 @@ STAGE PLANS: Statistics: Num rows: 1212 Data size: 500500 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: CREATE TABLE calendarp (`year` int) partitioned by (p int) +PREHOOK: query: CREATE TABLE calendarp_n0 (`year` int) partitioned by (p int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@calendarp -POSTHOOK: query: CREATE TABLE calendarp (`year` int) partitioned by (p int) +PREHOOK: Output: default@calendarp_n0 +POSTHOOK: query: CREATE TABLE calendarp_n0 (`year` int) partitioned by (p int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@calendarp -PREHOOK: query: insert into table calendarp partition (p=1) values (2010), (2011), (2012) +POSTHOOK: Output: default@calendarp_n0 +PREHOOK: query: insert into table calendarp_n0 partition (p=1) values (2010), (2011), (2012) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@calendarp@p=1 -POSTHOOK: query: insert into table calendarp partition (p=1) values (2010), (2011), (2012) +PREHOOK: Output: default@calendarp_n0@p=1 +POSTHOOK: query: insert into table calendarp_n0 partition (p=1) values (2010), (2011), (2012) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@calendarp@p=1 -POSTHOOK: Lineage: calendarp PARTITION(p=1).year SCRIPT [] -PREHOOK: query: explain select * from calendarp where p=1 +POSTHOOK: Output: default@calendarp_n0@p=1 +POSTHOOK: Lineage: calendarp_n0 PARTITION(p=1).year SCRIPT [] +PREHOOK: query: explain select * from calendarp_n0 where p=1 PREHOOK: type: QUERY -POSTHOOK: query: explain select * from calendarp where p=1 +POSTHOOK: query: explain select * from calendarp_n0 where p=1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -148,7 +148,7 @@ STAGE PLANS: limit: -1 Processor Tree: TableScan - alias: calendarp + alias: calendarp_n0 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: year (type: int), 1 (type: int) @@ -156,18 +156,18 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: alter table calendarp partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000') +PREHOOK: query: alter table calendarp_n0 partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000') 
 PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
-PREHOOK: Input: default@calendarp
-PREHOOK: Output: default@calendarp@p=1
-POSTHOOK: query: alter table calendarp partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+PREHOOK: Input: default@calendarp_n0
+PREHOOK: Output: default@calendarp_n0@p=1
+POSTHOOK: query: alter table calendarp_n0 partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
 POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
-POSTHOOK: Input: default@calendarp
-POSTHOOK: Input: default@calendarp@p=1
-POSTHOOK: Output: default@calendarp@p=1
-PREHOOK: query: explain select * from calendarp where p=1
+POSTHOOK: Input: default@calendarp_n0
+POSTHOOK: Input: default@calendarp_n0@p=1
+POSTHOOK: Output: default@calendarp_n0@p=1
+PREHOOK: query: explain select * from calendarp_n0 where p=1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from calendarp where p=1
+POSTHOOK: query: explain select * from calendarp_n0 where p=1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -178,7 +178,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: calendarp
+          alias: calendarp_n0
           Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: year (type: int), 1 (type: int)
@@ -186,53 +186,53 @@ STAGE PLANS:
             Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
             ListSink
-PREHOOK: query: create table src_stat_part_two(key string, value string) partitioned by (px int, py string)
+PREHOOK: query: create table src_stat_part_two_n0(key string, value string) partitioned by (px int, py string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@src_stat_part_two
-POSTHOOK: query: create table src_stat_part_two(key string, value string) partitioned by (px int, py string)
+PREHOOK: Output: default@src_stat_part_two_n0
+POSTHOOK: query: create table src_stat_part_two_n0(key string, value string) partitioned by (px int, py string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_stat_part_two
-PREHOOK: query: insert overwrite table src_stat_part_two partition (px=1, py='a')
+POSTHOOK: Output: default@src_stat_part_two_n0
+PREHOOK: query: insert overwrite table src_stat_part_two_n0 partition (px=1, py='a')
 select * from src limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@src_stat_part_two@px=1/py=a
-POSTHOOK: query: insert overwrite table src_stat_part_two partition (px=1, py='a')
+PREHOOK: Output: default@src_stat_part_two_n0@px=1/py=a
+POSTHOOK: query: insert overwrite table src_stat_part_two_n0 partition (px=1, py='a')
 select * from src limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
-POSTHOOK: Lineage: src_stat_part_two PARTITION(px=1,py=a).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_stat_part_two PARTITION(px=1,py=a).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table src_stat_part_two partition (px=1, py='b')
+POSTHOOK: Output: default@src_stat_part_two_n0@px=1/py=a
+POSTHOOK: Lineage: src_stat_part_two_n0 PARTITION(px=1,py=a).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part_two_n0 PARTITION(px=1,py=a).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table src_stat_part_two_n0 partition (px=1, py='b')
 select * from src limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@src_stat_part_two@px=1/py=b
-POSTHOOK: query: insert overwrite table src_stat_part_two partition (px=1, py='b')
+PREHOOK: Output: default@src_stat_part_two_n0@px=1/py=b
+POSTHOOK: query: insert overwrite table src_stat_part_two_n0 partition (px=1, py='b')
 select * from src limit 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_stat_part_two@px=1/py=b
-POSTHOOK: Lineage: src_stat_part_two PARTITION(px=1,py=b).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_stat_part_two PARTITION(px=1,py=b).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table src_stat_part_two partition (px=2, py='b')
+POSTHOOK: Output: default@src_stat_part_two_n0@px=1/py=b
+POSTHOOK: Lineage: src_stat_part_two_n0 PARTITION(px=1,py=b).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part_two_n0 PARTITION(px=1,py=b).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table src_stat_part_two_n0 partition (px=2, py='b')
 select * from src limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@src_stat_part_two@px=2/py=b
-POSTHOOK: query: insert overwrite table src_stat_part_two partition (px=2, py='b')
+PREHOOK: Output: default@src_stat_part_two_n0@px=2/py=b
+POSTHOOK: query: insert overwrite table src_stat_part_two_n0 partition (px=2, py='b')
 select * from src limit 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_stat_part_two@px=2/py=b
-POSTHOOK: Lineage: src_stat_part_two PARTITION(px=2,py=b).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_stat_part_two PARTITION(px=2,py=b).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: Output: default@src_stat_part_two_n0@px=2/py=b
+POSTHOOK: Lineage: src_stat_part_two_n0 PARTITION(px=2,py=b).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part_two_n0 PARTITION(px=2,py=b).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select * from src_stat_part_two_n0 where px=1 and py='a'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: query: explain select * from src_stat_part_two_n0 where px=1 and py='a'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -243,7 +243,7 @@ STAGE PLANS:
       limit: -1
      Processor Tree:
         TableScan
-          alias: src_stat_part_two
+          alias: src_stat_part_two_n0
           Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), 1 (type: int), 'a' (type: string)
@@ -251,9 +251,9 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
             ListSink
-PREHOOK: query: explain select * from src_stat_part_two where px=1
+PREHOOK: query: explain select * from src_stat_part_two_n0 where px=1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from src_stat_part_two where px=1
+POSTHOOK: query: explain select * from src_stat_part_two_n0 where px=1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -264,7 +264,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: src_stat_part_two
+          alias: src_stat_part_two_n0
           Statistics: Num rows: 11 Data size: 115 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), 1 (type: int), py (type: string)
@@ -272,18 +272,18 @@ STAGE PLANS:
             Statistics: Num rows: 11 Data size: 115 Basic stats: COMPLETE Column stats: NONE
             ListSink
-PREHOOK: query: alter table src_stat_part_two partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+PREHOOK: query: alter table src_stat_part_two_n0 partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
 PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
-PREHOOK: Input: default@src_stat_part_two
-PREHOOK: Output: default@src_stat_part_two@px=1/py=a
-POSTHOOK: query: alter table src_stat_part_two partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+PREHOOK: Input: default@src_stat_part_two_n0
+PREHOOK: Output: default@src_stat_part_two_n0@px=1/py=a
+POSTHOOK: query: alter table src_stat_part_two_n0 partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
 POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
-POSTHOOK: Input: default@src_stat_part_two
-POSTHOOK: Input: default@src_stat_part_two@px=1/py=a
-POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
-PREHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: Input: default@src_stat_part_two_n0
+POSTHOOK: Input: default@src_stat_part_two_n0@px=1/py=a
+POSTHOOK: Output: default@src_stat_part_two_n0@px=1/py=a
+PREHOOK: query: explain select * from src_stat_part_two_n0 where px=1 and py='a'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: query: explain select * from src_stat_part_two_n0 where px=1 and py='a'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -294,7 +294,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: src_stat_part_two
+          alias: src_stat_part_two_n0
           Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), 1 (type: int), 'a' (type: string)
@@ -302,9 +302,9 @@ STAGE PLANS:
             Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
             ListSink
-PREHOOK: query: explain select * from src_stat_part_two where px=1
+PREHOOK: query: explain select * from src_stat_part_two_n0 where px=1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from src_stat_part_two where px=1
+POSTHOOK: query: explain select * from src_stat_part_two_n0 where px=1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -315,7 +315,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: src_stat_part_two
+          alias: src_stat_part_two_n0
           Statistics: Num rows: 1000020010 Data size: 300040104 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), 1 (type: int), py (type: string)
@@ -323,21 +323,21 @@ STAGE PLANS:
             Statistics: Num rows: 1000020010 Data size: 300040104 Basic stats: COMPLETE Column stats: NONE
             ListSink
-PREHOOK: query: alter table src_stat_part_two partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+PREHOOK: query: alter table src_stat_part_two_n0 partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
 PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
-PREHOOK: Input: default@src_stat_part_two
-PREHOOK: Output: default@src_stat_part_two@px=1/py=a
-PREHOOK: Output: default@src_stat_part_two@px=1/py=b
-POSTHOOK: query: alter table src_stat_part_two partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+PREHOOK: Input: default@src_stat_part_two_n0
+PREHOOK: Output: default@src_stat_part_two_n0@px=1/py=a
+PREHOOK: Output: default@src_stat_part_two_n0@px=1/py=b
+POSTHOOK: query: alter table src_stat_part_two_n0 partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
 POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
-POSTHOOK: Input: default@src_stat_part_two
-POSTHOOK: Input: default@src_stat_part_two@px=1/py=a
-POSTHOOK: Input: default@src_stat_part_two@px=1/py=b
-POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
-POSTHOOK: Output: default@src_stat_part_two@px=1/py=b
-PREHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: Input: default@src_stat_part_two_n0
+POSTHOOK: Input: default@src_stat_part_two_n0@px=1/py=a
+POSTHOOK: Input: default@src_stat_part_two_n0@px=1/py=b
+POSTHOOK: Output: default@src_stat_part_two_n0@px=1/py=a
+POSTHOOK: Output: default@src_stat_part_two_n0@px=1/py=b
+PREHOOK: query: explain select * from src_stat_part_two_n0 where px=1 and py='a'
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: query: explain select * from src_stat_part_two_n0 where px=1 and py='a'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -348,7 +348,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: src_stat_part_two
+          alias: src_stat_part_two_n0
           Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), 1 (type: int), 'a' (type: string)
@@ -356,9 +356,9 @@ STAGE PLANS:
             Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
             ListSink
-PREHOOK: query: explain select * from src_stat_part_two where px=1
+PREHOOK: query: explain select * from src_stat_part_two_n0 where px=1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from src_stat_part_two where px=1
+POSTHOOK: query: explain select * from src_stat_part_two_n0 where px=1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -369,7 +369,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: src_stat_part_two
+          alias: src_stat_part_two_n0
           Statistics: Num rows: 2000040000 Data size: 600080000 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), 1 (type: int), py (type: string)
diff --git a/ql/src/test/results/clientpositive/varchar_join1.q.out b/ql/src/test/results/clientpositive/varchar_join1.q.out
index b433a68824..55b1063dda 100644
--- a/ql/src/test/results/clientpositive/varchar_join1.q.out
+++ b/ql/src/test/results/clientpositive/varchar_join1.q.out
@@ -1,139 +1,139 @@
-PREHOOK: query: drop table varchar_join1_vc1
+PREHOOK: query: drop table varchar_join1_vc1_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table varchar_join1_vc1
+POSTHOOK: query: drop table varchar_join1_vc1_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table varchar_join1_vc2
+PREHOOK: query: drop table varchar_join1_vc2_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table varchar_join1_vc2
+POSTHOOK: query: drop table varchar_join1_vc2_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table varchar_join1_str
+PREHOOK: query: drop table varchar_join1_str_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table varchar_join1_str
+POSTHOOK: query: drop table varchar_join1_str_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table varchar_join1_vc1 (
+PREHOOK: query: create table varchar_join1_vc1_n0 (
   c1 int,
   c2 varchar(10)
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_join1_vc1
-POSTHOOK: query: create table varchar_join1_vc1 (
+PREHOOK: Output: default@varchar_join1_vc1_n0
+POSTHOOK: query: create table varchar_join1_vc1_n0 (
   c1 int,
   c2 varchar(10)
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_join1_vc1
-PREHOOK: query: create table varchar_join1_vc2 (
+POSTHOOK: Output: default@varchar_join1_vc1_n0
+PREHOOK: query: create table varchar_join1_vc2_n0 (
  c1 int,
  c2 varchar(20)
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_join1_vc2
-POSTHOOK: query: create table varchar_join1_vc2 (
+PREHOOK: Output: default@varchar_join1_vc2_n0
+POSTHOOK: query: create table varchar_join1_vc2_n0 (
  c1 int,
  c2 varchar(20)
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_join1_vc2
-PREHOOK: query: create table varchar_join1_str (
+POSTHOOK: Output: default@varchar_join1_vc2_n0
+PREHOOK: query: create table varchar_join1_str_n0 (
  c1 int,
  c2 string
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_join1_str
-POSTHOOK: query: create table varchar_join1_str (
+PREHOOK: Output: default@varchar_join1_str_n0
+POSTHOOK: query: create table varchar_join1_str_n0 (
  c1 int,
  c2 string
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_join1_str
-PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc1
+POSTHOOK: Output: default@varchar_join1_str_n0
+PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc1_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@varchar_join1_vc1
-POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc1
+PREHOOK: Output: default@varchar_join1_vc1_n0
+POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc1_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@varchar_join1_vc1
-PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc2
+POSTHOOK: Output: default@varchar_join1_vc1_n0
+PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc2_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@varchar_join1_vc2
-POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc2
+PREHOOK: Output: default@varchar_join1_vc2_n0
+POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc2_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@varchar_join1_vc2
-PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_str
+POSTHOOK: Output: default@varchar_join1_vc2_n0
+PREHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_str_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@varchar_join1_str
-POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_str
+PREHOOK: Output: default@varchar_join1_str_n0
+POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table varchar_join1_str_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@varchar_join1_str
-PREHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_vc1 b on (a.c2 = b.c2) order by a.c1
+POSTHOOK: Output: default@varchar_join1_str_n0
+PREHOOK: query: select * from varchar_join1_vc1_n0 a join varchar_join1_vc1_n0 b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_join1_vc1
+PREHOOK: Input: default@varchar_join1_vc1_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_vc1 b on (a.c2 = b.c2) order by a.c1
+POSTHOOK: query: select * from varchar_join1_vc1_n0 a join varchar_join1_vc1_n0 b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_join1_vc1
+POSTHOOK: Input: default@varchar_join1_vc1_n0
 #### A masked pattern was here ####
 1 abc 1 abc
 2 abc 2 abc
 3 abc 3 abc
-PREHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_vc2 b on (a.c2 = b.c2) order by a.c1
+PREHOOK: query: select * from varchar_join1_vc1_n0 a join varchar_join1_vc2_n0 b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_join1_vc1
-PREHOOK: Input: default@varchar_join1_vc2
+PREHOOK: Input: default@varchar_join1_vc1_n0
+PREHOOK: Input: default@varchar_join1_vc2_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_vc2 b on (a.c2 = b.c2) order by a.c1
+POSTHOOK: query: select * from varchar_join1_vc1_n0 a join varchar_join1_vc2_n0 b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_join1_vc1
-POSTHOOK: Input: default@varchar_join1_vc2
+POSTHOOK: Input: default@varchar_join1_vc1_n0
+POSTHOOK: Input: default@varchar_join1_vc2_n0
 #### A masked pattern was here ####
 1 abc 1 abc
 2 abc 2 abc
 3 abc 3 abc
-PREHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_str b on (a.c2 = b.c2) order by a.c1
+PREHOOK: query: select * from varchar_join1_vc1_n0 a join varchar_join1_str_n0 b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_join1_str
-PREHOOK: Input: default@varchar_join1_vc1
+PREHOOK: Input: default@varchar_join1_str_n0
+PREHOOK: Input: default@varchar_join1_vc1_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_str b on (a.c2 = b.c2) order by a.c1
+POSTHOOK: query: select * from varchar_join1_vc1_n0 a join varchar_join1_str_n0 b on (a.c2 = b.c2) order by a.c1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_join1_str
-POSTHOOK: Input: default@varchar_join1_vc1
+POSTHOOK: Input: default@varchar_join1_str_n0
+POSTHOOK: Input: default@varchar_join1_vc1_n0
 #### A masked pattern was here ####
 1 abc 1 abc
 2 abc 2 abc
 3 abc 3 abc
-PREHOOK: query: drop table varchar_join1_vc1
+PREHOOK: query: drop table varchar_join1_vc1_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@varchar_join1_vc1
-PREHOOK: Output: default@varchar_join1_vc1
-POSTHOOK: query: drop table varchar_join1_vc1
+PREHOOK: Input: default@varchar_join1_vc1_n0
+PREHOOK: Output: default@varchar_join1_vc1_n0
+POSTHOOK: query: drop table varchar_join1_vc1_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@varchar_join1_vc1
-POSTHOOK: Output: default@varchar_join1_vc1
-PREHOOK: query: drop table varchar_join1_vc2
+POSTHOOK: Input: default@varchar_join1_vc1_n0
+POSTHOOK: Output: default@varchar_join1_vc1_n0
+PREHOOK: query: drop table varchar_join1_vc2_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@varchar_join1_vc2
-PREHOOK: Output: default@varchar_join1_vc2
-POSTHOOK: query: drop table varchar_join1_vc2
+PREHOOK: Input: default@varchar_join1_vc2_n0
+PREHOOK: Output: default@varchar_join1_vc2_n0
+POSTHOOK: query: drop table varchar_join1_vc2_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@varchar_join1_vc2
-POSTHOOK: Output: default@varchar_join1_vc2
-PREHOOK: query: drop table varchar_join1_str
+POSTHOOK: Input: default@varchar_join1_vc2_n0
+POSTHOOK: Output: default@varchar_join1_vc2_n0
+PREHOOK: query: drop table varchar_join1_str_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@varchar_join1_str
-PREHOOK: Output: default@varchar_join1_str
-POSTHOOK: query: drop table varchar_join1_str
+PREHOOK: Input: default@varchar_join1_str_n0
+PREHOOK: Output: default@varchar_join1_str_n0
+POSTHOOK: query: drop table varchar_join1_str_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@varchar_join1_str
-POSTHOOK: Output: default@varchar_join1_str
+POSTHOOK: Input: default@varchar_join1_str_n0
+POSTHOOK: Output: default@varchar_join1_str_n0
diff --git a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
index 0f7fcc16c2..fe7fbfdd56 100644
--- a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: create table vectortab2k(
+PREHOOK: query: create table vectortab2k_n4(
             t tinyint,
             si smallint,
             i int,
@@ -16,8 +16,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@vectortab2k
-POSTHOOK: query: create table vectortab2k(
+PREHOOK: Output: default@vectortab2k_n4
+POSTHOOK: query: create table vectortab2k_n4(
             t tinyint,
             si smallint,
             i int,
@@ -35,16 +35,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@vectortab2k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: Output: default@vectortab2k_n4
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n4
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@vectortab2k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k
+PREHOOK: Output: default@vectortab2k_n4
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n4
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@vectortab2k
-PREHOOK: query: create table vectortab2korc(
+POSTHOOK: Output: default@vectortab2k_n4
+PREHOOK: query: create table vectortab2korc_n4(
             t tinyint,
             si smallint,
             i int,
@@ -61,8 +61,8 @@ PREHOOK: query: create table vectortab2korc(
 STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@vectortab2korc
-POSTHOOK: query: create table vectortab2korc(
+PREHOOK: Output: default@vectortab2korc_n4
+POSTHOOK: query: create table vectortab2korc_n4(
             t tinyint,
             si smallint,
             i int,
@@ -79,33 +79,33 @@ POSTHOOK: query: create table vectortab2korc(
 STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@vectortab2korc
-PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
+POSTHOOK: Output: default@vectortab2korc_n4
+PREHOOK: query: INSERT INTO TABLE vectortab2korc_n4 SELECT * FROM vectortab2k_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@vectortab2k
-PREHOOK: Output: default@vectortab2korc
-POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
+PREHOOK: Input: default@vectortab2k_n4
+PREHOOK: Output: default@vectortab2korc_n4
+POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n4 SELECT * FROM vectortab2k_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@vectortab2k
-POSTHOOK: Output: default@vectortab2korc
-POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
+POSTHOOK: Input: default@vectortab2k_n4
+POSTHOOK: Output: default@vectortab2korc_n4
+POSTHOOK: Lineage: vectortab2korc_n4.b SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.bo SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.d SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.dc SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.dt SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:dt, type:date, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.f SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.i SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.s SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.s2 SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.si SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.t SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.ts SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc_n4.ts2 SIMPLE [(vectortab2k_n4)vectortab2k_n4.FieldSchema(name:ts2, type:timestamp, comment:null), ]
 PREHOOK: query: explain vectorization detail
-select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
+select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc_n4
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
+select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc_n4
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -120,7 +120,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: vectortab2korc
+            alias: vectortab2korc_n4
            Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
@@ -198,20 +198,20 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
+PREHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@vectortab2korc
+PREHOOK: Input: default@vectortab2korc_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc
+POSTHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@vectortab2korc
+POSTHOOK: Input: default@vectortab2korc_n4
 #### A masked pattern was here ####
 -4997414117561.546875000000000000 4994550248722.298828000000000000 -10252745435816.024410000000000000 -5399023399.587163986308583465
 PREHOOK: query: explain vectorization detail
-select min(d), max(d), sum(d), avg(d) from vectortab2korc
+select min(d), max(d), sum(d), avg(d) from vectortab2korc_n4
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select min(d), max(d), sum(d), avg(d) from vectortab2korc
+select min(d), max(d), sum(d), avg(d) from vectortab2korc_n4
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -226,7 +226,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: vectortab2korc
+            alias: vectortab2korc_n4
            Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
@@ -304,20 +304,20 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select min(d), max(d), sum(d), avg(d) from vectortab2korc
+PREHOOK: query: select min(d), max(d), sum(d), avg(d) from vectortab2korc_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@vectortab2korc
+PREHOOK: Input: default@vectortab2korc_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select min(d), max(d), sum(d), avg(d) from vectortab2korc
+POSTHOOK: query: select min(d), max(d), sum(d), avg(d) from vectortab2korc_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@vectortab2korc
+POSTHOOK: Input: default@vectortab2korc_n4
 #### A masked pattern was here ####
 -4999829.07 4997627.14 -1.7516847286999977E8 -92193.93308947356
 PREHOOK: query: explain vectorization detail
-select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc
+select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc_n4
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization detail
-select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc
+select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc_n4
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -332,7 +332,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: vectortab2korc
+            alias: vectortab2korc_n4
            Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
@@ -410,12 +410,12 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc
+PREHOOK: query: select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc_n4
 PREHOOK: type: QUERY
-PREHOOK: Input: default@vectortab2korc
+PREHOOK: Input: default@vectortab2korc_n4
 #### A masked pattern was here ####
-POSTHOOK: query: select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc
+POSTHOOK: query: select min(ts), max(ts), sum(ts), avg(ts) from vectortab2korc_n4
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@vectortab2korc
+POSTHOOK: Input: default@vectortab2korc_n4
 #### A masked pattern was here ####
 2013-02-18 21:06:48 2081-02-22 01:21:53 4.591384881081E12 2.4254542425150557E9
diff --git a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
index 3f9e90b905..b66c0b000f 100644
--- a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
@@ -1,12 +1,12 @@
-PREHOOK: query: DROP TABLE over1k
+PREHOOK: query: DROP TABLE over1k_n7
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1k
+POSTHOOK: query: DROP TABLE over1k_n7
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: DROP TABLE hundredorc
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hundredorc
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE over1k(t tinyint,
+PREHOOK: query: CREATE TABLE over1k_n7(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -21,8 +21,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: CREATE TABLE over1k(t tinyint,
+PREHOOK: Output: default@over1k_n7
+POSTHOOK: query: CREATE TABLE over1k_n7(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -37,15 +37,15 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+POSTHOOK: Output: default@over1k_n7
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n7
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+PREHOOK: Output: default@over1k_n7
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n7
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
+POSTHOOK: Output: default@over1k_n7
 PREHOOK: query: CREATE TABLE hundredorc(t tinyint,
            si smallint,
            i int,
@@ -76,25 +76,25 @@ STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hundredorc
-PREHOOK: query: INSERT INTO TABLE hundredorc SELECT * FROM over1k LIMIT 100
+PREHOOK: query: INSERT INTO TABLE hundredorc SELECT * FROM over1k_n7 LIMIT 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
+PREHOOK: Input: default@over1k_n7
 PREHOOK: Output: default@hundredorc
-POSTHOOK: query: INSERT INTO TABLE hundredorc SELECT * FROM over1k LIMIT 100
+POSTHOOK: query: INSERT INTO TABLE hundredorc SELECT * FROM over1k_n7 LIMIT 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
+POSTHOOK: Input: default@over1k_n7
 POSTHOOK: Output: default@hundredorc
-POSTHOOK: Lineage: hundredorc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: hundredorc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: hundredorc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: hundredorc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: hundredorc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: hundredorc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: hundredorc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: hundredorc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: hundredorc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: hundredorc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: hundredorc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: hundredorc.b SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: hundredorc.bin SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: hundredorc.bo SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: hundredorc.d SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: hundredorc.dec SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: hundredorc.f SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: hundredorc.i SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: hundredorc.s SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: hundredorc.si SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: hundredorc.t SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: hundredorc.ts SIMPLE [(over1k_n7)over1k_n7.FieldSchema(name:ts, type:timestamp, comment:null), ]
 PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
 SELECT sum(hash(*)) k
     FROM hundredorc t1 JOIN hundredorc t2 ON t1.bin = t2.bin
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
index 3d3d761932..d8a534fa9e 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
@@ -1,12 +1,12 @@
-PREHOOK: query: DROP TABLE over1k
+PREHOOK: query: DROP TABLE over1k_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1k
+POSTHOOK: query: DROP TABLE over1k_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
+PREHOOK: query: DROP TABLE over1korc_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
+POSTHOOK: query: DROP TABLE over1korc_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE over1k(t tinyint,
+PREHOOK: query: CREATE TABLE over1k_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -21,8 +21,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: CREATE TABLE over1k(t tinyint,
+PREHOOK: Output: default@over1k_n0
+POSTHOOK: query: CREATE TABLE over1k_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -37,16 +37,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+POSTHOOK: Output: default@over1k_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+PREHOOK: Output: default@over1k_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
+POSTHOOK: Output: default@over1k_n0
+PREHOOK: query: CREATE TABLE over1korc_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -60,8 +60,8 @@ PREHOOK: query: CREATE TABLE over1korc(t tinyint,
 STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
+PREHOOK: Output: default@over1korc_n0
+POSTHOOK: query: CREATE TABLE over1korc_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -75,39 +75,39 @@ POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
 STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+POSTHOOK: Output: default@over1korc_n0
+PREHOOK: query: INSERT INTO TABLE over1korc_n0 SELECT * FROM over1k_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+PREHOOK: Input: default@over1k_n0
+PREHOOK: Output: default@over1korc_n0
+POSTHOOK: query: INSERT INTO TABLE over1korc_n0 SELECT * FROM over1k_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Input: default@over1k_n0
+POSTHOOK: Output: default@over1korc_n0
+POSTHOOK: Lineage: over1korc_n0.b SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.bin SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.bo SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.d SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.dec SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.f SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.i SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.s SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.si SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.t SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: over1korc_n0.ts SIMPLE [(over1k_n0)over1k_n0.FieldSchema(name:ts, type:timestamp, comment:null), ]
 PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
 SELECT i,
        AVG(CAST(50 AS INT)) AS `avg_int_ok`,
        AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
        AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+  FROM over1korc_n0 GROUP BY i ORDER BY i LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
 SELECT i,
        AVG(CAST(50 AS INT)) AS `avg_int_ok`,
        AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
        AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+  FROM over1korc_n0 GROUP BY i ORDER BY i LIMIT 10
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -123,7 +123,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: over1korc
+            alias: over1korc_n0
            Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
@@ -251,18 +251,18 @@ PREHOOK: query: SELECT
        AVG(CAST(50 AS INT)) AS `avg_int_ok`,
        AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
        AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+  FROM over1korc_n0 GROUP BY i ORDER BY i LIMIT 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
+PREHOOK: Input: default@over1korc_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT i,
        AVG(CAST(50 AS INT)) AS `avg_int_ok`,
        AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
        AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+  FROM over1korc_n0 GROUP BY i ORDER BY i LIMIT 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
+POSTHOOK: Input: default@over1korc_n0
 #### A masked pattern was here ####
 65536 50.0 50.0 50.0000
 65537 50.0 50.0 50.0000
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out b/ql/src/test/results/clientpositive/vector_char_2.q.out
index b38cbe71de..ae9910dff1 100644
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -1,41 +1,41 @@
-PREHOOK: query: drop table char_2
+PREHOOK: query: drop table char_2_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table char_2
+POSTHOOK: query: drop table char_2_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table char_2 (
+PREHOOK: query: create table char_2_n0 (
   key char(10),
   value char(20)
 ) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@char_2
-POSTHOOK: query: create table char_2 (
+PREHOOK: Output: default@char_2_n0
+POSTHOOK: query: create table char_2_n0 (
  key char(10),
  value char(20)
 ) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_2
-PREHOOK: query: insert overwrite table char_2 select * from src
+POSTHOOK: Output: default@char_2_n0
+PREHOOK: query: insert overwrite table char_2_n0 select * from src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: default@char_2
-POSTHOOK: query: insert overwrite table char_2 select * from src
+PREHOOK: Output: default@char_2_n0
+POSTHOOK: query: insert overwrite table char_2_n0 select * from src
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: default@char_2
-POSTHOOK: Lineage: char_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: char_2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert into char_2 values (NULL, NULL)
+POSTHOOK: Output: default@char_2_n0
+POSTHOOK: Lineage: char_2_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: char_2_n0.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into char_2_n0 values (NULL, NULL)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@char_2
-POSTHOOK: query: insert into char_2 values (NULL, NULL)
+PREHOOK: Output: default@char_2_n0
+POSTHOOK: query: insert into char_2_n0 values (NULL, NULL)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@char_2
-POSTHOOK: Lineage: char_2.key EXPRESSION []
-POSTHOOK: Lineage: char_2.value EXPRESSION []
+POSTHOOK: Output: default@char_2_n0
+POSTHOOK: Lineage: char_2_n0.key EXPRESSION []
+POSTHOOK: Lineage: char_2_n0.value EXPRESSION []
 PREHOOK: query: select value, sum(cast(key as int)), count(*) numrows
 from src
 group by value
@@ -58,13 +58,13 @@ val_100 200 2
 val_103 206 2
 val_104 208 2
 PREHOOK: query: explain vectorization expression
 select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n0
 group by value
 order by value asc
 limit 5
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization expression
 select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n0
 group by value
 order by value asc
 limit 5
@@ -83,7 +83,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: char_2
+            alias: char_2_n0
            Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
@@ -204,20 +204,20 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n0
 group by value
 order by value asc
 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@char_2
+PREHOOK: Input: default@char_2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n0
 group by value
 order by value asc
 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_2
+POSTHOOK: Input: default@char_2_n0
 #### A masked pattern was here ####
 NULL NULL 1
 val_0 0 3
@@ -246,13 +246,13 @@ val_96 96 1
 val_95 190 2
 val_92 92 1
 PREHOOK: query: explain vectorization expression
 select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n0
 group by value
 order by value desc
 limit 5
 PREHOOK: type: QUERY
 POSTHOOK: query: explain vectorization expression
 select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n0
 group by value
 order by value desc
 limit 5
@@ -271,7 +271,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: char_2
+            alias: char_2_n0
            Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
@@ -392,31 +392,31 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n0
 group by value
 order by value desc
 limit 5
 PREHOOK: type: QUERY
-PREHOOK: Input: default@char_2
+PREHOOK: Input: default@char_2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select value, sum(cast(key as int)), count(*) numrows
-from char_2
+from char_2_n0
 group by value
 order by value desc
 limit 5
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_2
+POSTHOOK: Input: default@char_2_n0
 #### A masked pattern was here ####
 val_98 196 2
 val_97 194 2
 val_96 96 1
 val_95 190 2
 val_92 92 1
-PREHOOK: query: drop table char_2
+PREHOOK: query: drop table char_2_n0
 PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@char_2
-PREHOOK: Output: default@char_2
-POSTHOOK: query: drop table char_2
+PREHOOK: Input: default@char_2_n0
+PREHOOK: Output: default@char_2_n0
+POSTHOOK: query: drop table char_2_n0
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@char_2
-POSTHOOK: Output: default@char_2
+POSTHOOK: Input: default@char_2_n0
+POSTHOOK: Output: default@char_2_n0
diff --git a/ql/src/test/results/clientpositive/vector_char_cast.q.out b/ql/src/test/results/clientpositive/vector_char_cast.q.out
index 14b0d6bc77..2f26b1cc3b 100644
--- a/ql/src/test/results/clientpositive/vector_char_cast.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_cast.q.out
@@ -1,35 +1,35 @@
-PREHOOK: query: create table s1(id smallint) stored as orc
+PREHOOK: query: create table s1_n2(id smallint) stored as orc
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@s1
-POSTHOOK: query: create table s1(id smallint) stored as orc
+PREHOOK: Output: default@s1_n2
+POSTHOOK: query: create table s1_n2(id smallint) stored as orc
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@s1
-PREHOOK: query: insert into table s1 values (1000),(1001),(1002),(1003),(1000)
+POSTHOOK: Output: default@s1_n2
+PREHOOK: query: insert into table s1_n2 values (1000),(1001),(1002),(1003),(1000)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@s1
-POSTHOOK: query: insert into table s1 values (1000),(1001),(1002),(1003),(1000)
+PREHOOK: Output: default@s1_n2
+POSTHOOK: query: insert into table s1_n2 values (1000),(1001),(1002),(1003),(1000)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@s1
-POSTHOOK: Lineage: s1.id SCRIPT []
-PREHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+POSTHOOK: Output: default@s1_n2
+POSTHOOK: Lineage: s1_n2.id SCRIPT []
+PREHOOK: query: select count(1) from s1_n2 where cast(id as char(4))='1000'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@s1
+PREHOOK: Input: default@s1_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+POSTHOOK: query: select count(1) from s1_n2 where cast(id as char(4))='1000'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@s1
+POSTHOOK: Input: default@s1_n2
 #### A masked pattern was here ####
 2
-PREHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+PREHOOK: query: select count(1) from s1_n2 where cast(id as char(4))='1000'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@s1
+PREHOOK: Input: default@s1_n2
 #### A masked pattern was here ####
-POSTHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+POSTHOOK: query: select count(1) from s1_n2 where cast(id as char(4))='1000'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@s1
+POSTHOOK: Input: default@s1_n2
 #### A masked pattern was here ####
 2
diff --git a/ql/src/test/results/clientpositive/vector_coalesce_3.q.out b/ql/src/test/results/clientpositive/vector_coalesce_3.q.out
index 94a8af320f..6b7e21b5b2 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce_3.q.out
@@ -1,64 +1,64 @@
-PREHOOK: query: CREATE TABLE test_1 (member BIGINT, attr BIGINT) STORED AS ORC
+PREHOOK: query: CREATE TABLE test_1_n0 (member BIGINT, attr BIGINT) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_1
-POSTHOOK: query: CREATE TABLE test_1 (member BIGINT, attr BIGINT) STORED AS ORC
+PREHOOK: Output: default@test_1_n0
+POSTHOOK: query: CREATE TABLE test_1_n0 (member BIGINT, attr BIGINT) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_1
-PREHOOK: query: CREATE TABLE test_2 (member BIGINT) STORED AS ORC
+POSTHOOK: Output: default@test_1_n0
+PREHOOK: query: CREATE TABLE test_2_n0 (member BIGINT) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@test_2
-POSTHOOK: query: CREATE TABLE test_2 (member BIGINT) STORED AS ORC
+PREHOOK: Output: default@test_2_n0
+POSTHOOK: query: CREATE TABLE test_2_n0 (member BIGINT) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_2
-PREHOOK: query: INSERT INTO test_1 VALUES (3,1),(2,2)
+POSTHOOK: Output: default@test_2_n0
+PREHOOK: query: INSERT INTO test_1_n0 VALUES (3,1),(2,2)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_1
-POSTHOOK: query: INSERT INTO test_1 VALUES (3,1),(2,2)
+PREHOOK: Output: default@test_1_n0
+POSTHOOK: query: INSERT INTO test_1_n0 VALUES (3,1),(2,2)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_1
-POSTHOOK: Lineage: test_1.attr SCRIPT []
-POSTHOOK: Lineage: test_1.member SCRIPT []
-PREHOOK: query: INSERT INTO test_2 VALUES (1),(2),(3),(4)
+POSTHOOK: Output: default@test_1_n0
+POSTHOOK: Lineage: test_1_n0.attr SCRIPT []
+POSTHOOK: Lineage: test_1_n0.member SCRIPT []
+PREHOOK: query: INSERT INTO test_2_n0 VALUES (1),(2),(3),(4)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_2
-POSTHOOK: query: INSERT INTO test_2 VALUES (1),(2),(3),(4)
+PREHOOK: Output: default@test_2_n0
+POSTHOOK: query: INSERT INTO test_2_n0 VALUES (1),(2),(3),(4)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_2
-POSTHOOK: Lineage: test_2.member SCRIPT []
-PREHOOK: query: insert into test_1 values (NULL, NULL)
+POSTHOOK: Output: default@test_2_n0
+POSTHOOK: Lineage: test_2_n0.member SCRIPT []
+PREHOOK: query: insert into test_1_n0 values (NULL, NULL)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_1
-POSTHOOK: query: insert into test_1 values (NULL, NULL)
+PREHOOK: Output: default@test_1_n0
+POSTHOOK: query: insert into test_1_n0 values (NULL, NULL)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_1
-POSTHOOK: Lineage: test_1.attr EXPRESSION []
-POSTHOOK: Lineage: test_1.member EXPRESSION []
-PREHOOK: query: insert into test_2 values (NULL)
+POSTHOOK: Output: default@test_1_n0
+POSTHOOK: Lineage: test_1_n0.attr EXPRESSION []
+POSTHOOK: Lineage: test_1_n0.member EXPRESSION []
+PREHOOK: query: insert into test_2_n0 values (NULL)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@test_2
-POSTHOOK: query: insert into test_2 values (NULL)
+PREHOOK: Output: default@test_2_n0
+POSTHOOK: query: insert into test_2_n0 values (NULL)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@test_2
-POSTHOOK: Lineage: test_2.member EXPRESSION []
+POSTHOOK: Output: default@test_2_n0
+POSTHOOK: Lineage: test_2_n0.member EXPRESSION []
 PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
 SELECT m.member, (CASE WHEN COALESCE(n.attr, 5)>1 THEN n.attr END) AS attr
-FROM test_2 m LEFT JOIN test_1 n ON m.member = n.member
+FROM test_2_n0 m LEFT JOIN test_1_n0 n ON m.member = n.member
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
 SELECT m.member, (CASE WHEN COALESCE(n.attr, 5)>1 THEN n.attr END) AS attr
-FROM test_2 m LEFT JOIN test_1 n ON m.member = n.member
+FROM test_2_n0 m LEFT JOIN test_1_n0 n ON m.member = n.member
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -167,16 +167,16 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: SELECT m.member, (CASE WHEN COALESCE(n.attr, 5)>1 THEN n.attr END) AS attr
-FROM test_2 m LEFT JOIN test_1 n ON m.member = n.member
+FROM test_2_n0 m LEFT JOIN test_1_n0 n ON m.member = n.member
 PREHOOK: type: QUERY
-PREHOOK: Input: default@test_1
-PREHOOK: Input: default@test_2
+PREHOOK: Input: default@test_1_n0
+PREHOOK: Input: default@test_2_n0
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT m.member, (CASE WHEN COALESCE(n.attr, 5)>1 THEN n.attr END) AS attr
-FROM test_2 m LEFT JOIN test_1 n ON m.member = n.member
+FROM test_2_n0 m LEFT JOIN test_1_n0 n ON m.member = n.member
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_1
-POSTHOOK: Input: default@test_2
+POSTHOOK: Input: default@test_1_n0
+POSTHOOK: Input: default@test_2_n0
 #### A masked pattern was here ####
 1 NULL
 2 2
diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out
index 688e6a6dba..c2a2fce075 100644
--- a/ql/src/test/results/clientpositive/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/vector_data_types.q.out
@@ -1,12 +1,12 @@
-PREHOOK: query: DROP TABLE over1k
+PREHOOK: query: DROP TABLE over1k_n8
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1k
+POSTHOOK: query: DROP TABLE over1k_n8
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
+PREHOOK: query: DROP TABLE over1korc_n1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
+POSTHOOK: query: DROP TABLE over1korc_n1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE over1k(t tinyint,
+PREHOOK: query: CREATE TABLE over1k_n8(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -21,8 +21,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: CREATE TABLE over1k(t tinyint,
+PREHOOK: Output: default@over1k_n8
+POSTHOOK: query: CREATE TABLE over1k_n8(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -37,16 +37,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+POSTHOOK: Output: default@over1k_n8
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n8
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+PREHOOK: Output: default@over1k_n8
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n8
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
+POSTHOOK: Output: default@over1k_n8
+PREHOOK: query: CREATE TABLE over1korc_n1(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -60,8 +60,8 @@ PREHOOK: query: CREATE TABLE over1korc(t tinyint,
 STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
+PREHOOK: Output: default@over1korc_n1
+POSTHOOK: query: CREATE TABLE over1korc_n1(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -75,48 +75,48 @@ POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
 STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+POSTHOOK: Output: default@over1korc_n1
+PREHOOK: query: INSERT INTO TABLE over1korc_n1 SELECT * FROM over1k_n8
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+PREHOOK: Input: default@over1k_n8
+PREHOOK: Output: default@over1korc_n1
+POSTHOOK: query: INSERT INTO TABLE over1korc_n1 SELECT * FROM over1k_n8
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: insert into over1korc values (NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL)
+POSTHOOK: Input: default@over1k_n8
+POSTHOOK: Output: default@over1korc_n1
+POSTHOOK: Lineage: over1korc_n1.b SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.bin SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.bo SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.d SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.dec SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.f SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.i SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.s SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.si SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.t SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: over1korc_n1.ts SIMPLE [(over1k_n8)over1k_n8.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: insert into over1korc_n1 values (NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: insert into over1korc values (NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL)
+PREHOOK: Output: default@over1korc_n1
+POSTHOOK: query: insert into over1korc_n1 values (NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b EXPRESSION []
-POSTHOOK: Lineage: over1korc.bin EXPRESSION []
-POSTHOOK: Lineage: over1korc.bo EXPRESSION []
-POSTHOOK: Lineage: over1korc.d EXPRESSION []
-POSTHOOK: Lineage: over1korc.dec EXPRESSION []
-POSTHOOK: Lineage: over1korc.f EXPRESSION []
-POSTHOOK: Lineage: over1korc.i EXPRESSION []
-POSTHOOK: Lineage: over1korc.s EXPRESSION []
-POSTHOOK: Lineage: over1korc.si EXPRESSION []
-POSTHOOK: Lineage: over1korc.t EXPRESSION []
-POSTHOOK: Lineage: over1korc.ts EXPRESSION []
-PREHOOK: query: EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: Output: default@over1korc_n1
+POSTHOOK: Lineage: over1korc_n1.b EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.bin EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.bo EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.d EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.dec EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.f EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.i EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.s EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.si EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.t EXPRESSION []
+POSTHOOK: Lineage: over1korc_n1.ts EXPRESSION []
+PREHOOK: query: EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: query: EXPLAIN SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -127,7 +127,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: over1korc
+            alias: over1korc_n1
            Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary)
@@ -161,13 +161,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+PREHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
+PREHOOK: Input: default@over1korc_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
+POSTHOOK: Input: default@over1korc_n1
 #### A masked pattern was here ####
 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
 NULL 374 65560 4294967516 65.43 22.48 true oscar quirinius 2013-03-01 09:11:58.703316 16.86 mathematics
@@ -190,19 +190,19 @@ NULL 473 65720 4294967324 80.74 40.6 false holly falkner 2013-03-01 09:11:58.703
 -1 268 65778 4294967418 56.33 44.73 true calvin falkner 2013-03-01 09:11:58.70322 7.37 history
 -1 281 65643 4294967323 15.1 45.0 false irene nixon 2013-03-01 09:11:58.703223 80.96 undecided
 PREHOOK: query: SELECT SUM(HASH(*))
-FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
+PREHOOK: Input: default@over1korc_n1
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT SUM(HASH(*))
-FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
+POSTHOOK: Input: default@over1korc_n1
 #### A masked pattern was here ####
 -17045922556
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -217,7 +217,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: over1korc
+            alias: over1korc_n1
            Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
@@ -276,13 +276,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+PREHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
+PREHOOK: Input: default@over1korc_n1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20
+POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i LIMIT 20
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
+POSTHOOK: Input: default@over1korc_n1
 #### A masked pattern was here ####
 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
 NULL 374 65560 4294967516 65.43 22.48 true oscar quirinius 2013-03-01 09:11:58.703316 16.86 mathematics
@@ -306,11 +306,11 @@ NULL 473 65720 4294967324 80.74 40.6 false holly falkner 2013-03-01 09:11:58.703
 -1 281 65643 4294967323 15.1 45.0 false irene nixon 2013-03-01 09:11:58.703223 80.96 undecided
 PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
 SELECT SUM(HASH(*))
-FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
 SELECT SUM(HASH(*))
-FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
   enabled: true
@@ -325,7 +325,7 @@ STAGE PLANS:
     Map Reduce
      Map Operator Tree:
           TableScan
-            alias: over1korc
+            alias: over1korc_n1
            Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
@@ -394,13 +394,13 @@ STAGE PLANS:
         ListSink
 PREHOOK: query: SELECT SUM(HASH(*))
-FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
+PREHOOK: Input: default@over1korc_n1
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT SUM(HASH(*))
-FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc_n1 ORDER BY t, si, i) as q
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
+POSTHOOK: Input: default@over1korc_n1
 #### A masked pattern was here ####
 -17045922556
diff --git 
diff --git a/ql/src/test/results/clientpositive/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/vector_decimal_3.q.out
index 537d568fa9..3e9a1ee909 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_3.q.out
@@ -1,52 +1,52 @@
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt
+PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt
+POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3
+PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_n1
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3
+POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_n1
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int)
+PREHOOK: query: CREATE TABLE DECIMAL_3_txt_n0(key decimal(38,18), value int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_3_txt
-POSTHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int)
+PREHOOK: Output: default@DECIMAL_3_txt_n0
+POSTHOOK: query: CREATE TABLE DECIMAL_3_txt_n0(key decimal(38,18), value int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_3_txt
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt
+POSTHOOK: Output: default@DECIMAL_3_txt_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt_n0
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@decimal_3_txt
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt
+PREHOOK: Output: default@decimal_3_txt_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt_n0
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@decimal_3_txt
-PREHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt
+POSTHOOK: Output: default@decimal_3_txt_n0
+PREHOOK: query: CREATE TABLE DECIMAL_3_n1 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt_n0
PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@decimal_3_txt
+PREHOOK: Input: default@decimal_3_txt_n0
PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_3
-POSTHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt
+PREHOOK: Output: default@DECIMAL_3_n1
+POSTHOOK: query: CREATE TABLE DECIMAL_3_n1 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt_n0
POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@decimal_3_txt
+POSTHOOK: Input: default@decimal_3_txt_n0
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_3
-POSTHOOK: Lineage: decimal_3.key SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:key, type:decimal(38,18), comment:null), ]
-POSTHOOK: Lineage: decimal_3.value SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:value, type:int, comment:null), ]
-PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
+POSTHOOK: Output: default@DECIMAL_3_n1
+POSTHOOK: Lineage: decimal_3_n1.key SIMPLE [(decimal_3_txt_n0)decimal_3_txt_n0.FieldSchema(name:key, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: decimal_3_n1.value SIMPLE [(decimal_3_txt_n0)decimal_3_txt_n0.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: SELECT * FROM DECIMAL_3_n1 ORDER BY key, value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM DECIMAL_3_n1 ORDER BY key, value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
NULL 0
-1234567890.123456789000000000 -1234567890
@@ -86,13 +86,13 @@ NULL 0
125.200000000000000000 125
200.000000000000000000 200
1234567890.123456780000000000 1234567890
-PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
+PREHOOK: query: SELECT * FROM DECIMAL_3_n1 ORDER BY key DESC, value DESC
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC
+POSTHOOK: query: SELECT * FROM DECIMAL_3_n1 ORDER BY key DESC, value DESC
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
1234567890.123456780000000000 1234567890
200.000000000000000000 200
@@ -132,13 +132,13 @@ POSTHOOK: Input: default@decimal_3
-4400.000000000000000000 4400
-1234567890.123456789000000000 -1234567890
NULL 0
-PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
+PREHOOK: query: SELECT * FROM DECIMAL_3_n1 ORDER BY key, value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM DECIMAL_3_n1 ORDER BY key, value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
NULL 0
-1234567890.123456789000000000 -1234567890
@@ -178,13 +178,13 @@ NULL 0
125.200000000000000000 125
200.000000000000000000 200
1234567890.123456780000000000 1234567890
-PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key
+PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3_n1 ORDER BY key
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key
+POSTHOOK: query: SELECT DISTINCT key FROM DECIMAL_3_n1 ORDER BY key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
NULL
-1234567890.123456789000000000
@@ -215,13 +215,13 @@ NULL
125.200000000000000000
200.000000000000000000
1234567890.123456780000000000
-PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key
+PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3_n1 GROUP BY key ORDER BY key
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key
+POSTHOOK: query: SELECT key, sum(value) FROM DECIMAL_3_n1 GROUP BY key ORDER BY key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
NULL 0
-1234567890.123456789000000000 -1234567890
@@ -252,13 +252,13 @@ NULL 0
125.200000000000000000 125
200.000000000000000000 200
1234567890.123456780000000000 1234567890
-PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value
+PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3_n1 GROUP BY value ORDER BY value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value
+POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3_n1 GROUP BY value ORDER BY value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-1234567890 -1234567890.123456789000000000
-1255 -1255.490000000000000000
@@ -277,13 +277,13 @@ POSTHOOK: Input: default@decimal_3
200 200.000000000000000000
4400 -4400.000000000000000000
1234567890 1234567890.123456780000000000
-PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
+PREHOOK: query: SELECT * FROM DECIMAL_3_n1 a JOIN DECIMAL_3_n1 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
+POSTHOOK: query: SELECT * FROM DECIMAL_3_n1 a JOIN DECIMAL_3_n1 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-1234567890.123456789000000000 -1234567890 -1234567890.123456789000000000 -1234567890
-4400.000000000000000000 4400 -4400.000000000000000000 4400
@@ -350,43 +350,43 @@ POSTHOOK: Input: default@decimal_3
125.200000000000000000 125 125.200000000000000000 125
200.000000000000000000 200 200.000000000000000000 200
1234567890.123456780000000000 1234567890 1234567890.123456780000000000 1234567890
-PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
+PREHOOK: query: SELECT * FROM DECIMAL_3_n1 WHERE key=3.14 ORDER BY key, value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM DECIMAL_3_n1 WHERE key=3.14 ORDER BY key, value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
3.140000000000000000 3
3.140000000000000000 3
3.140000000000000000 3
3.140000000000000000 4
-PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
+PREHOOK: query: SELECT * FROM DECIMAL_3_n1 WHERE key=3.140 ORDER BY key, value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_3
+PREHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM DECIMAL_3_n1 WHERE key=3.140 ORDER BY key, value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
#### A masked pattern was here ####
3.140000000000000000 3
3.140000000000000000 3
3.140000000000000000 3
3.140000000000000000 4
-PREHOOK: query: DROP TABLE DECIMAL_3_txt
+PREHOOK: query: DROP TABLE DECIMAL_3_txt_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_3_txt
-PREHOOK: Output: default@decimal_3_txt
-POSTHOOK: query: DROP TABLE DECIMAL_3_txt
+PREHOOK: Input: default@decimal_3_txt_n0
+PREHOOK: Output: default@decimal_3_txt_n0
+POSTHOOK: query: DROP TABLE DECIMAL_3_txt_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_3_txt
-POSTHOOK: Output: default@decimal_3_txt
-PREHOOK: query: DROP TABLE DECIMAL_3
+POSTHOOK: Input: default@decimal_3_txt_n0
+POSTHOOK: Output: default@decimal_3_txt_n0
+PREHOOK: query: DROP TABLE DECIMAL_3_n1
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_3
-PREHOOK: Output: default@decimal_3
-POSTHOOK: query: DROP TABLE DECIMAL_3
+PREHOOK: Input: default@decimal_3_n1
+PREHOOK: Output: default@decimal_3_n1
+POSTHOOK: query: DROP TABLE DECIMAL_3_n1
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_3
-POSTHOOK: Output: default@decimal_3
+POSTHOOK: Input: default@decimal_3_n1
+POSTHOOK: Output: default@decimal_3_n1
diff --git a/ql/src/test/results/clientpositive/vector_decimal_4.q.out b/ql/src/test/results/clientpositive/vector_decimal_4.q.out
index c7d3d9eb80..d365fb99ad 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_4.q.out
@@ -1,60 +1,60 @@
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_1
+PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_1_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_1
+POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_1_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_2
+PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_2_n0
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_2
+POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_4_2_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int)
+PREHOOK: query: CREATE TABLE DECIMAL_4_1_n0(key decimal(35,25), value int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_4_1
-POSTHOOK: query: CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int)
+PREHOOK: Output: default@DECIMAL_4_1_n0
+POSTHOOK: query: CREATE TABLE DECIMAL_4_1_n0(key decimal(35,25), value int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_4_1
-PREHOOK: query: CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25))
+POSTHOOK: Output: default@DECIMAL_4_1_n0
+PREHOOK: query: CREATE TABLE DECIMAL_4_2_n0(key decimal(35,25), value decimal(35,25))
STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_4_2
-POSTHOOK: query: CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25))
+PREHOOK: Output: default@DECIMAL_4_2_n0
+POSTHOOK: query: CREATE TABLE DECIMAL_4_2_n0(key decimal(35,25), value decimal(35,25))
STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_4_2
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1
+POSTHOOK: Output: default@DECIMAL_4_2_n0
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1_n0
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@decimal_4_1
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1
+PREHOOK: Output: default@decimal_4_1_n0
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1_n0
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@decimal_4_1
-PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1
+POSTHOOK: Output: default@decimal_4_1_n0
+PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_4_2_n0 SELECT key, key * 3 FROM DECIMAL_4_1_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_4_1
-PREHOOK: Output: default@decimal_4_2
-POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1
+PREHOOK: Input: default@decimal_4_1_n0
+PREHOOK: Output: default@decimal_4_2_n0
+POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_4_2_n0 SELECT key, key * 3 FROM DECIMAL_4_1_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_4_1
-POSTHOOK: Output: default@decimal_4_2
-POSTHOOK: Lineage: decimal_4_2.key SIMPLE [(decimal_4_1)decimal_4_1.FieldSchema(name:key, type:decimal(35,25), comment:null), ]
-POSTHOOK: Lineage: decimal_4_2.value EXPRESSION [(decimal_4_1)decimal_4_1.FieldSchema(name:key, type:decimal(35,25), comment:null), ]
-PREHOOK: query: SELECT * FROM DECIMAL_4_1 ORDER BY key, value
+POSTHOOK: Input: default@decimal_4_1_n0
+POSTHOOK: Output: default@decimal_4_2_n0
+POSTHOOK: Lineage: decimal_4_2_n0.key SIMPLE [(decimal_4_1_n0)decimal_4_1_n0.FieldSchema(name:key, type:decimal(35,25), comment:null), ]
+POSTHOOK: Lineage: decimal_4_2_n0.value EXPRESSION [(decimal_4_1_n0)decimal_4_1_n0.FieldSchema(name:key, type:decimal(35,25), comment:null), ]
+PREHOOK: query: SELECT * FROM DECIMAL_4_1_n0 ORDER BY key, value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_4_1
+PREHOOK: Input: default@decimal_4_1_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_4_1 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM DECIMAL_4_1_n0 ORDER BY key, value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_4_1
+POSTHOOK: Input: default@decimal_4_1_n0
#### A masked pattern was here ####
NULL 0
-1234567890.1234567890000000000000000 -1234567890
@@ -94,13 +94,13 @@ NULL 0
125.2000000000000000000000000 125
200.0000000000000000000000000 200
1234567890.1234567800000000000000000 1234567890
-PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
+PREHOOK: query: SELECT * FROM DECIMAL_4_2_n0 ORDER BY key, value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_4_2
+PREHOOK: Input: default@decimal_4_2_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM DECIMAL_4_2_n0 ORDER BY key, value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_4_2
+POSTHOOK: Input: default@decimal_4_2_n0
#### A masked pattern was here ####
NULL NULL
-1234567890.1234567890000000000000000 -3703703670.3703703670000000000000000
@@ -140,13 +140,13 @@ NULL NULL
125.2000000000000000000000000 375.6000000000000000000000000
200.0000000000000000000000000 600.0000000000000000000000000
1234567890.1234567800000000000000000 3703703670.3703703400000000000000000
-PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key
+PREHOOK: query: SELECT * FROM DECIMAL_4_2_n0 ORDER BY key
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_4_2
+PREHOOK: Input: default@decimal_4_2_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key
+POSTHOOK: query: SELECT * FROM DECIMAL_4_2_n0 ORDER BY key
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_4_2
+POSTHOOK: Input: default@decimal_4_2_n0
#### A masked pattern was here ####
NULL NULL
-1234567890.1234567890000000000000000 -3703703670.3703703670000000000000000
@@ -186,13 +186,13 @@ NULL NULL
125.2000000000000000000000000 375.6000000000000000000000000
200.0000000000000000000000000 600.0000000000000000000000000
1234567890.1234567800000000000000000 3703703670.3703703400000000000000000
-PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
+PREHOOK: query: SELECT * FROM DECIMAL_4_2_n0 ORDER BY key, value
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_4_2
+PREHOOK: Input: default@decimal_4_2_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value
+POSTHOOK: query: SELECT * FROM DECIMAL_4_2_n0 ORDER BY key, value
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_4_2
+POSTHOOK: Input: default@decimal_4_2_n0
#### A masked pattern was here ####
NULL NULL
-1234567890.1234567890000000000000000 -3703703670.3703703670000000000000000
@@ -232,19 +232,19 @@ NULL NULL
125.2000000000000000000000000 375.6000000000000000000000000
200.0000000000000000000000000 600.0000000000000000000000000
1234567890.1234567800000000000000000 3703703670.3703703400000000000000000
-PREHOOK: query: DROP TABLE DECIMAL_4_1
+PREHOOK: query: DROP TABLE DECIMAL_4_1_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_4_1
-PREHOOK: Output: default@decimal_4_1
-POSTHOOK: query: DROP TABLE DECIMAL_4_1
+PREHOOK: Input: default@decimal_4_1_n0
+PREHOOK: Output: default@decimal_4_1_n0
+POSTHOOK: query: DROP TABLE DECIMAL_4_1_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_4_1
-POSTHOOK: Output: default@decimal_4_1
-PREHOOK: query: DROP TABLE DECIMAL_4_2
+POSTHOOK: Input: default@decimal_4_1_n0
+POSTHOOK: Output: default@decimal_4_1_n0
+PREHOOK: query: DROP TABLE DECIMAL_4_2_n0
PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@decimal_4_2
-PREHOOK: Output: default@decimal_4_2
-POSTHOOK: query: DROP TABLE DECIMAL_4_2
+PREHOOK: Input: default@decimal_4_2_n0
+PREHOOK: Output: default@decimal_4_2_n0
+POSTHOOK: query: DROP TABLE DECIMAL_4_2_n0
POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@decimal_4_2
-POSTHOOK: Output: default@decimal_4_2
+POSTHOOK: Input: default@decimal_4_2_n0
+POSTHOOK: Output: default@decimal_4_2_n0
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index 51ed89693e..c296c306b3 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -1,40 +1,40 @@
-PREHOOK: query: CREATE TABLE decimal_test (cdouble double,cdecimal1 DECIMAL(20,10), cdecimal2 DECIMAL(23,14)) STORED AS ORC
+PREHOOK: query: CREATE TABLE decimal_test_n1 (cdouble double,cdecimal1 DECIMAL(20,10), cdecimal2 DECIMAL(23,14)) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@decimal_test
-POSTHOOK: query: CREATE TABLE decimal_test (cdouble double,cdecimal1 DECIMAL(20,10), cdecimal2 DECIMAL(23,14)) STORED AS ORC
+PREHOOK: Output: default@decimal_test_n1
+POSTHOOK: query: CREATE TABLE decimal_test_n1 (cdouble double,cdecimal1 DECIMAL(20,10), cdecimal2 DECIMAL(23,14)) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@decimal_test
-PREHOOK: query: insert into decimal_test values (NULL, NULL, NULL)
+POSTHOOK: Output: default@decimal_test_n1
+PREHOOK: query: insert into decimal_test_n1 values (NULL, NULL, NULL)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@decimal_test
-POSTHOOK: query: insert into decimal_test values (NULL, NULL, NULL)
+PREHOOK: Output: default@decimal_test_n1
+POSTHOOK: query: insert into decimal_test_n1 values (NULL, NULL, NULL)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@decimal_test
-POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION []
-POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION []
-POSTHOOK: Lineage: decimal_test.cdouble EXPRESSION []
-PREHOOK: query: INSERT INTO TABLE decimal_test SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
+POSTHOOK: Output: default@decimal_test_n1
+POSTHOOK: Lineage: decimal_test_n1.cdecimal1 EXPRESSION []
+POSTHOOK: Lineage: decimal_test_n1.cdecimal2 EXPRESSION []
+POSTHOOK: Lineage: decimal_test_n1.cdouble EXPRESSION []
+PREHOOK: query: INSERT INTO TABLE decimal_test_n1 SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
PREHOOK: type: QUERY
PREHOOK: Input: default@alltypesorc
-PREHOOK: Output: default@decimal_test
-POSTHOOK: query: INSERT INTO TABLE decimal_test SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
+PREHOOK: Output: default@decimal_test_n1
+POSTHOOK: query: INSERT INTO TABLE decimal_test_n1 SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alltypesorc
-POSTHOOK: Output: default@decimal_test
-POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Output: default@decimal_test_n1
+POSTHOOK: Lineage: decimal_test_n1.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test_n1.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test_n1.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
-SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
-SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
LIMIT 10
POSTHOOK: type: QUERY
@@ -51,7 +51,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
-              alias: decimal_test
+              alias: decimal_test_n1
Statistics: Num rows: 12289 Data size: 2128368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
@@ -124,17 +124,17 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+PREHOOK: query: SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
LIMIT 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_test
+PREHOOK: Input: default@decimal_test_n1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+POSTHOOK: query: SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
LIMIT 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_test
+POSTHOOK: Input: default@decimal_test_n1
#### A masked pattern was here ####
1836.44199584197700 -1166.02723492725400 0.8372697814834 245972.55810810255804469 5.6189189189 835 1000 NULL 835 true 1000.823076923077 835.6189 1000.823076923077 1969-12-31 16:13:55.618918918
1856.13222453224620 -1178.52931392929240 0.8372449787014 251275.44324324968747899 4.5783783784 844 1011 NULL 844 true 1011.5538461538462 844.57837 1011.5538461538462 1969-12-31 16:14:04.578378378
@@ -147,38 +147,38 @@ POSTHOOK: Input: default@decimal_test
1909.95218295221550 -1212.70166320163100 0.8371797936946 266058.54729730725574014 9.0675675676 869 1040 NULL 869 true 1040.8846153846155 869.06757 1040.8846153846155 1969-12-31 16:14:29.067567567
1913.89022869026920 -1215.20207900203840 0.8371751679996 267156.82702703945592392 0.8594594595 870 1043 NULL 870 true 1043.0307692307692 870.85944 1043.0307692307692 1969-12-31 16:14:30.859459459
PREHOOK: query: SELECT SUM(HASH(*))
-FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_test
+PREHOOK: Input: default@decimal_test_n1
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(HASH(*))
-FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_n1 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_test
+POSTHOOK: Input: default@decimal_test_n1
#### A masked pattern was here ####
-1300490595129
-PREHOOK: query: CREATE TABLE decimal_test_small STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(10,3)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(7,2)) AS cdecimal2 FROM alltypesorc
+PREHOOK: query: CREATE TABLE decimal_test_small_n0 STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(10,3)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(7,2)) AS cdecimal2 FROM alltypesorc
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: database:default
-PREHOOK: Output: default@decimal_test_small
-POSTHOOK: query: CREATE TABLE decimal_test_small STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(10,3)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(7,2)) AS cdecimal2 FROM alltypesorc
+PREHOOK: Output: default@decimal_test_small_n0
+POSTHOOK: query: CREATE TABLE decimal_test_small_n0 STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(10,3)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(7,2)) AS cdecimal2 FROM alltypesorc
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@decimal_test_small
-POSTHOOK: Lineage: decimal_test_small.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: decimal_test_small.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: decimal_test_small.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Output: default@decimal_test_small_n0
+POSTHOOK: Lineage: decimal_test_small_n0.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test_small_n0.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test_small_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
LIMIT 10
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
LIMIT 10
POSTHOOK: type: QUERY
@@ -195,7 +195,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
-              alias: decimal_test_small
+              alias: decimal_test_small_n0
Statistics: Num rows: 12288 Data size: 2127808 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
@@ -268,17 +268,17 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+PREHOOK: query: SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
LIMIT 10
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_test_small
+PREHOOK: Input: default@decimal_test_small_n0
#### A masked pattern was here ####
-POSTHOOK: query: SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+POSTHOOK: query: SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
LIMIT 10
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_test_small
+POSTHOOK: Input: default@decimal_test_small_n0
#### A masked pattern was here ####
1836.439 -1166.021 0.83727243660 245971.826152056 5.619 835 1000 NULL 835 true 1000.82 835.619 1000.82 1969-12-31 16:13:55.619
1856.128 -1178.522 0.83724778805 251274.375364068 4.578 844 1011 NULL 844 true 1011.55 844.578 1011.55 1969-12-31 16:14:04.578
@@ -291,15 +291,15 @@ POSTHOOK: Input: default@decimal_test_small
1909.948 -1212.692 0.83718392130 266057.499543968 9.068 869 1040 NULL 869 true 1040.88 869.068 1040.88 1969-12-31 16:14:29.068
1913.889 -1215.201 0.83717534491 267156.488691411 0.859 870 1043 NULL 870 true 1043.03 870.859 1043.03 1969-12-31 16:14:30.859
PREHOOK: query: SELECT SUM(HASH(*))
-FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_test_small
+PREHOOK: Input: default@decimal_test_small_n0
#### A masked pattern was here ####
POSTHOOK: query: SELECT SUM(HASH(*))
-FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small_n0 WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_test_small
+POSTHOOK: Input: default@decimal_test_small_n0
#### A masked pattern was here ####
774841630076
diff --git a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
index cf8e3df284..69217aec13 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
@@ -1,4 +1,4 @@
-PREHOOK: query: CREATE TABLE over1k(t tinyint,
+PREHOOK: query: CREATE TABLE over1k_n2(t tinyint,
si smallint,
i int,
b bigint,
@@ -13,8 +13,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: CREATE TABLE over1k(t tinyint,
+PREHOOK: Output: default@over1k_n2
+POSTHOOK: query: CREATE TABLE over1k_n2(t tinyint,
si smallint,
i int,
b bigint,
@@ -29,56 +29,56 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+POSTHOOK: Output: default@over1k_n2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n2
PREHOOK: type: LOAD
#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+PREHOOK: Output: default@over1k_n2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k_n2
POSTHOOK: type: LOAD
#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE t1(`dec` decimal(22,2), value_dec decimal(22,2)) STORED AS ORC
+POSTHOOK: Output: default@over1k_n2
+PREHOOK: query: CREATE TABLE t1_n16(`dec` decimal(22,2), value_dec decimal(22,2)) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: CREATE TABLE t1(`dec` decimal(22,2), value_dec decimal(22,2)) STORED AS ORC
+PREHOOK: Output: default@t1_n16
+POSTHOOK: query: CREATE TABLE t1_n16(`dec` decimal(22,2), value_dec decimal(22,2)) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: INSERT INTO TABLE t1 select `dec`, cast(d as decimal(22,2)) from over1k
+POSTHOOK: Output: default@t1_n16
+PREHOOK: query: INSERT INTO TABLE t1_n16 select `dec`, cast(d as decimal(22,2)) from over1k_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@t1
-POSTHOOK: query: INSERT INTO TABLE t1 select `dec`, cast(d as decimal(22,2)) from over1k
+PREHOOK: Input: default@over1k_n2
+PREHOOK: Output: default@t1_n16
+POSTHOOK: query: INSERT INTO TABLE t1_n16 select `dec`, cast(d as decimal(22,2)) from over1k_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.dec EXPRESSION [(over1k)over1k.FieldSchema(name:dec, type:decimal(20,2), comment:null), ]
-POSTHOOK: Lineage: t1.value_dec EXPRESSION [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-PREHOOK: query: CREATE TABLE t2(`dec` decimal(24,0), value_dec decimal(24,0)) STORED AS ORC
+POSTHOOK: Input: default@over1k_n2
+POSTHOOK: Output: default@t1_n16
+POSTHOOK: Lineage: t1_n16.dec EXPRESSION [(over1k_n2)over1k_n2.FieldSchema(name:dec, type:decimal(20,2), comment:null), ]
+POSTHOOK: Lineage: t1_n16.value_dec EXPRESSION [(over1k_n2)over1k_n2.FieldSchema(name:d, type:double, comment:null), ]
+PREHOOK: query: CREATE TABLE t2_n9(`dec` decimal(24,0), value_dec decimal(24,0)) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: CREATE TABLE t2(`dec` decimal(24,0), value_dec decimal(24,0)) STORED AS ORC
+PREHOOK: Output: default@t2_n9
+POSTHOOK: query: CREATE TABLE t2_n9(`dec` decimal(24,0), value_dec decimal(24,0)) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-PREHOOK: query: INSERT INTO TABLE t2 select `dec`, cast(d as decimal(24,0)) from over1k
+POSTHOOK: Output: default@t2_n9
+PREHOOK: query: INSERT INTO TABLE t2_n9 select `dec`, cast(d as decimal(24,0)) from over1k_n2
PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@t2
-POSTHOOK: query: INSERT INTO TABLE t2 select `dec`, cast(d as decimal(24,0)) from over1k
+PREHOOK: Input: default@over1k_n2
+PREHOOK: Output: default@t2_n9
+POSTHOOK: query: INSERT INTO TABLE t2_n9 select `dec`, cast(d as decimal(24,0)) from over1k_n2
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t2.dec EXPRESSION [(over1k)over1k.FieldSchema(name:dec, type:decimal(20,2), comment:null), ]
-POSTHOOK: Lineage: t2.value_dec EXPRESSION [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Input: default@over1k_n2
+POSTHOOK: Output: default@t2_n9
+POSTHOOK: Lineage: t2_n9.dec EXPRESSION [(over1k_n2)over1k_n2.FieldSchema(name:dec, type:decimal(20,2), comment:null), ]
+POSTHOOK: Lineage: t2_n9.value_dec EXPRESSION [(over1k_n2)over1k_n2.FieldSchema(name:d, type:double, comment:null), ]
PREHOOK: query: explain vectorization detail
-select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`)
+select t1_n16.`dec`, t2_n9.`dec` from t1_n16 join t2_n9 on (t1_n16.`dec`=t2_n9.`dec`)
PREHOOK: type: QUERY
POSTHOOK: query: explain vectorization detail
-select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`)
+select t1_n16.`dec`, t2_n9.`dec` from t1_n16 join t2_n9 on (t1_n16.`dec`=t2_n9.`dec`)
POSTHOOK: type: QUERY
PLAN VECTORIZATION:
enabled: true
@@ -93,13 +93,13 @@ STAGE PLANS:
Stage: Stage-4
Map Reduce Local Work
Alias -> Map Local Tables:
-        $hdt$_1:t2
+        $hdt$_1:t2_n9
Fetch Operator
limit: -1
Alias -> Map Local Operator Tree:
-        $hdt$_1:t2
+        $hdt$_1:t2_n9
TableScan
-            alias: t2
+            alias: t2_n9
Statistics: Num rows: 1049 Data size: 234976 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: dec is not null (type: boolean)
@@ -117,7 +117,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
-            alias: t1
+            alias: t1_n16
Statistics: Num rows: 1049 Data size: 234976 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
@@ -188,15 +188,15 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`)
+PREHOOK: query: select t1_n16.`dec`, t2_n9.`dec` from t1_n16 join t2_n9 on (t1_n16.`dec`=t2_n9.`dec`)
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n16
+PREHOOK: Input: default@t2_n9
#### A masked pattern was here ####
-POSTHOOK: query: select t1.`dec`, t2.`dec` from t1 join t2 on (t1.`dec`=t2.`dec`)
+POSTHOOK: query: select t1_n16.`dec`, t2_n9.`dec` from t1_n16 join t2_n9 on (t1_n16.`dec`=t2_n9.`dec`)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n16
+POSTHOOK: Input: default@t2_n9
#### A masked pattern was here ####
14.00 14
14.00 14
@@ -305,10 +305,10 @@ POSTHOOK: Input: default@t2
9.00 9
9.00 9
PREHOOK: query: explain vectorization detail
-select t1.`dec`, t1.value_dec, t2.`dec`, t2.value_dec from t1 join t2 on (t1.`dec`=t2.`dec`)
+select t1_n16.`dec`, t1_n16.value_dec, t2_n9.`dec`, t2_n9.value_dec from t1_n16 join t2_n9 on (t1_n16.`dec`=t2_n9.`dec`)
PREHOOK: type: QUERY
POSTHOOK: query: explain vectorization detail
-select t1.`dec`, t1.value_dec, t2.`dec`, t2.value_dec from t1 join t2 on (t1.`dec`=t2.`dec`)
+select t1_n16.`dec`, t1_n16.value_dec, t2_n9.`dec`, t2_n9.value_dec from t1_n16 join t2_n9 on (t1_n16.`dec`=t2_n9.`dec`)
POSTHOOK: type: QUERY
PLAN VECTORIZATION:
enabled: true
@@ -323,13 +323,13 @@ STAGE PLANS:
Stage: Stage-4
Map Reduce Local Work
Alias -> Map Local Tables:
-        $hdt$_1:t2
+        $hdt$_1:t2_n9
Fetch Operator
limit: -1
Alias -> Map Local Operator Tree:
-        $hdt$_1:t2
+        $hdt$_1:t2_n9
TableScan
-            alias: t2
+            alias: t2_n9
Statistics: Num rows: 1049 Data size: 234976 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: dec is not null (type: boolean)
@@ -347,7 +347,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
-            alias: t1
+            alias: t1_n16
Statistics: Num rows: 1049 Data size: 234976 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
@@ -418,15 +418,15 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select t1.`dec`, t1.value_dec, t2.`dec`, t2.value_dec from t1 join t2 on (t1.`dec`=t2.`dec`)
+PREHOOK: query: select t1_n16.`dec`, t1_n16.value_dec, t2_n9.`dec`, t2_n9.value_dec from t1_n16 join t2_n9 on (t1_n16.`dec`=t2_n9.`dec`)
PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t1_n16
+PREHOOK: Input: default@t2_n9
#### A masked pattern was here ####
-POSTHOOK: query: select t1.`dec`, t1.value_dec, t2.`dec`, t2.value_dec from t1 join t2 on (t1.`dec`=t2.`dec`)
+POSTHOOK: query: select t1_n16.`dec`, t1_n16.value_dec, t2_n9.`dec`, t2_n9.value_dec from t1_n16 join t2_n9 on (t1_n16.`dec`=t2_n9.`dec`)
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t1_n16
+POSTHOOK: Input: default@t2_n9
#### A masked pattern was here ####
14.00 33.66 14 10
14.00 33.66 14 22
diff --git a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
index 0b03610078..87596e7647 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
@@ -1,29 +1,29 @@
-PREHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
+PREHOOK: query: CREATE TABLE decimal_test_n0 STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: database:default
-PREHOOK: Output: default@decimal_test
-POSTHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
+PREHOOK: Output: default@decimal_test_n0
+POSTHOOK: query: CREATE TABLE decimal_test_n0 STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@decimal_test
-POSTHOOK: Lineage: decimal_test.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-PREHOOK: query: insert into decimal_test values (NULL, NULL, NULL, NULL)
+POSTHOOK: Output: default@decimal_test_n0
+POSTHOOK: Lineage: decimal_test_n0.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: decimal_test_n0.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test_n0.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+PREHOOK: query: insert into decimal_test_n0 values (NULL, NULL, NULL, NULL)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@decimal_test
-POSTHOOK: query: insert into decimal_test values (NULL, NULL, NULL, NULL)
+PREHOOK: Output: default@decimal_test_n0
+POSTHOOK: query: insert into decimal_test_n0 values (NULL, NULL, NULL, NULL)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@decimal_test
-POSTHOOK: Lineage: decimal_test.cbigint EXPRESSION []
-POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION []
-POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION []
-POSTHOOK: Lineage: decimal_test.cdouble EXPRESSION []
+POSTHOOK: Output: default@decimal_test_n0
+POSTHOOK: Lineage: decimal_test_n0.cbigint EXPRESSION []
+POSTHOOK: Lineage: decimal_test_n0.cdecimal1 EXPRESSION []
+POSTHOOK: Lineage: decimal_test_n0.cdecimal2 EXPRESSION []
+POSTHOOK: Lineage: decimal_test_n0.cdouble EXPRESSION []
PREHOOK: query: explain vectorization detail
select
cdecimal1
@@ -55,7 +55,7 @@ select
,Sign(cdecimal1)
-- Test nesting
,cos(-sin(log(cdecimal1)) + 3.14159)
-from decimal_test
+from decimal_test_n0
where cbigint % 500 = 0
@@ -92,7 +92,7 @@ select
,Sign(cdecimal1)
-- Test nesting
,cos(-sin(log(cdecimal1)) + 3.14159)
-from decimal_test
+from decimal_test_n0
where cbigint % 500 = 0
@@ -111,7 +111,7 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
-            alias: decimal_test
+            alias: decimal_test_n0
Statistics: Num rows: 12289 Data size: 2201752 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
@@ -195,13 +195,13 @@ PREHOOK: query: select
,Sign(cdecimal1)
-- Test nesting
,cos(-sin(log(cdecimal1)) + 3.14159)
-from decimal_test
+from decimal_test_n0
where cbigint % 500 = 0
and sin(cdecimal1) >= -1.0
PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_test
+PREHOOK: Input: default@decimal_test_n0
#### A masked pattern was here ####
POSTHOOK: query: select
cdecimal1
@@ -233,13 +233,13 @@ POSTHOOK: query: select
,Sign(cdecimal1)
-- Test nesting
,cos(-sin(log(cdecimal1)) + 3.14159)
-from decimal_test
+from decimal_test_n0
where cbigint % 500 = 0
and sin(cdecimal1) >= -1.0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_test
+POSTHOOK: Input: default@decimal_test_n0
#### A masked pattern was here ####
-119.4594594595 -119.46 -119 -120 -119 1.316485E-52 NULL NULL NULL NULL NULL NULL NULL NULL 119.4594594595 -0.07885666683797002 NULL 0.9968859644388647 NULL -1.5624254815943668 -6844.522849943508 -2.0849608902209606 -119.4594594595 119.4594594595 -1 NULL
9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NULL 0.8909676185918236 NULL 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353
diff --git 
a/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out b/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out index 19505240ea..c1eddcabc4 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out @@ -2,9 +2,9 @@ PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_n0 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE DECIMAL_UDF2_txt (key decimal(14,5), value int) ROW FORMAT DELIMITED @@ -28,43 +28,43 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DE POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@decimal_udf2_txt -PREHOOK: query: CREATE TABLE DECIMAL_UDF2 (key decimal(14,5), value int) +PREHOOK: query: CREATE TABLE DECIMAL_UDF2_n0 (key decimal(14,5), value int) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL_UDF2 -POSTHOOK: query: CREATE TABLE DECIMAL_UDF2 (key decimal(14,5), value int) +PREHOOK: Output: default@DECIMAL_UDF2_n0 +POSTHOOK: query: CREATE TABLE DECIMAL_UDF2_n0 (key decimal(14,5), value int) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL_UDF2 -PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt +POSTHOOK: Output: default@DECIMAL_UDF2_n0 +PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF2_n0 SELECT * FROM DECIMAL_UDF2_txt PREHOOK: type: QUERY PREHOOK: Input: default@decimal_udf2_txt -PREHOOK: Output: default@decimal_udf2 -POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF2 SELECT * FROM DECIMAL_UDF2_txt +PREHOOK: Output: default@decimal_udf2_n0 +POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF2_n0 SELECT * FROM DECIMAL_UDF2_txt POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf2_txt -POSTHOOK: Output: default@decimal_udf2 -POSTHOOK: Lineage: decimal_udf2.key SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:key, type:decimal(14,5), comment:null), ] -POSTHOOK: Lineage: decimal_udf2.value SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:value, type:int, comment:null), ] -PREHOOK: query: insert into DECIMAL_UDF2 values (NULL, NULL) +POSTHOOK: Output: default@decimal_udf2_n0 +POSTHOOK: Lineage: decimal_udf2_n0.key SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:key, type:decimal(14,5), comment:null), ] +POSTHOOK: Lineage: decimal_udf2_n0.value SIMPLE [(decimal_udf2_txt)decimal_udf2_txt.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: insert into DECIMAL_UDF2_n0 values (NULL, NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@decimal_udf2 -POSTHOOK: query: insert into DECIMAL_UDF2 values (NULL, NULL) +PREHOOK: Output: default@decimal_udf2_n0 +POSTHOOK: query: insert into DECIMAL_UDF2_n0 values (NULL, NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@decimal_udf2 -POSTHOOK: Lineage: decimal_udf2.key EXPRESSION [] -POSTHOOK: Lineage: decimal_udf2.value EXPRESSION [] +POSTHOOK: Output: default@decimal_udf2_n0 +POSTHOOK: Lineage: decimal_udf2_n0.key EXPRESSION [] 
+POSTHOOK: Lineage: decimal_udf2_n0.value EXPRESSION [] PREHOOK: query: EXPLAIN VECTORIZATION DETAIL SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2 WHERE key = 10 +FROM DECIMAL_UDF2_n0 WHERE key = 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2 WHERE key = 10 +FROM DECIMAL_UDF2_n0 WHERE key = 10 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -79,7 +79,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: decimal_udf2 + alias: decimal_udf2_n0 Statistics: Num rows: 39 Data size: 4072 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -134,27 +134,27 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2 WHERE key = 10 +FROM DECIMAL_UDF2_n0 WHERE key = 10 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_udf2 +PREHOOK: Input: default@decimal_udf2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2 WHERE key = 10 +FROM DECIMAL_UDF2_n0 WHERE key = 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_udf2 +POSTHOOK: Input: default@decimal_udf2_n0 #### A masked pattern was here #### NULL NULL 1.4711276743037347 -0.8390715290764524 -0.5440211108893698 0.6483608274590866 0.17453292519943295 PREHOOK: query: SELECT SUM(HASH(*)) FROM (SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2) q +FROM DECIMAL_UDF2_n0) q PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_udf2 +PREHOOK: Input: default@decimal_udf2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(*)) FROM (SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) -FROM DECIMAL_UDF2) q +FROM DECIMAL_UDF2_n0) q POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_udf2 +POSTHOOK: Input: default@decimal_udf2_n0 #### A masked pattern was here #### -3806952922 PREHOOK: query: EXPLAIN VECTORIZATION DETAIL @@ -162,14 +162,14 @@ SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2 WHERE key = 10 +FROM DECIMAL_UDF2_n0 WHERE key = 10 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2 WHERE key = 10 +FROM DECIMAL_UDF2_n0 WHERE key = 10 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -184,7 +184,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: decimal_udf2 + alias: decimal_udf2_n0 Statistics: Num rows: 39 Data size: 4072 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -242,17 +242,17 @@ PREHOOK: query: SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2 WHERE key = 10 +FROM DECIMAL_UDF2_n0 WHERE key = 10 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_udf2 +PREHOOK: Input: default@decimal_udf2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2 WHERE key = 10 +FROM DECIMAL_UDF2_n0 WHERE key = 10 POSTHOOK: type: QUERY -POSTHOOK: Input: 
default@decimal_udf2 +POSTHOOK: Input: default@decimal_udf2_n0 #### A masked pattern was here #### 22026.465794806718 2.302585092994046 2.302585092994046 1.0 1.0 1.0 1.0 3.1622776601683795 PREHOOK: query: SELECT SUM(HASH(*)) @@ -260,18 +260,18 @@ FROM (SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2) q +FROM DECIMAL_UDF2_n0) q PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_udf2 +PREHOOK: Input: default@decimal_udf2_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(*)) FROM (SELECT exp(key), ln(key), log(key), log(key, key), log(key, value), log(value, key), log10(key), sqrt(key) -FROM DECIMAL_UDF2) q +FROM DECIMAL_UDF2_n0) q POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_udf2 +POSTHOOK: Input: default@decimal_udf2_n0 #### A masked pattern was here #### 1514360349 PREHOOK: query: EXPLAIN VECTORIZATION DETAIL @@ -498,11 +498,11 @@ POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_txt POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@decimal_udf2_txt POSTHOOK: Output: default@decimal_udf2_txt -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal_udf2 -PREHOOK: Output: default@decimal_udf2 -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: Input: default@decimal_udf2_n0 +PREHOOK: Output: default@decimal_udf2_n0 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal_udf2 -POSTHOOK: Output: default@decimal_udf2 +POSTHOOK: Input: default@decimal_udf2_n0 +POSTHOOK: Output: default@decimal_udf2_n0 diff --git a/ql/src/test/results/clientpositive/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/vector_distinct_2.q.out index c3d2d89c85..41c61ceadb 100644 --- a/ql/src/test/results/clientpositive/vector_distinct_2.q.out +++ b/ql/src/test/results/clientpositive/vector_distinct_2.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table vectortab2k( +PREHOOK: query: create table vectortab2k_n3( t tinyint, si smallint, i int, @@ -16,8 +16,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: create table vectortab2k( +PREHOOK: Output: default@vectortab2k_n3 +POSTHOOK: query: create table vectortab2k_n3( t tinyint, si smallint, i int, @@ -35,16 +35,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: Output: default@vectortab2k_n3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n3 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: Output: default@vectortab2k_n3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n3 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: create table vectortab2korc( +POSTHOOK: Output: default@vectortab2k_n3 +PREHOOK: query: create table vectortab2korc_n3( t tinyint, si smallint, i int, @@ -61,8 
+61,8 @@ PREHOOK: query: create table vectortab2korc( STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: create table vectortab2korc( +PREHOOK: Output: default@vectortab2korc_n3 +POSTHOOK: query: create table vectortab2korc_n3( t tinyint, si smallint, i int, @@ -79,33 +79,33 @@ POSTHOOK: query: create table vectortab2korc( STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2korc -PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: Output: default@vectortab2korc_n3 +PREHOOK: query: INSERT INTO TABLE vectortab2korc_n3 SELECT * FROM vectortab2k_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2k -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: Input: default@vectortab2k_n3 +PREHOOK: Output: default@vectortab2korc_n3 +POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n3 SELECT * FROM vectortab2k_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2k -POSTHOOK: Output: default@vectortab2korc -POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +POSTHOOK: Input: default@vectortab2k_n3 +POSTHOOK: Output: default@vectortab2korc_n3 +POSTHOOK: Lineage: vectortab2korc_n3.b SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.bo SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.d SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.dc SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.dt SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.f SIMPLE 
[(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.i SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.s SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.s2 SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.si SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.t SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.ts SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n3.ts2 SIMPLE [(vectortab2k_n3)vectortab2k_n3.FieldSchema(name:ts2, type:timestamp, comment:null), ] PREHOOK: query: explain vectorization expression -select distinct s, t from vectortab2korc +select distinct s, t from vectortab2korc_n3 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select distinct s, t from vectortab2korc +select distinct s, t from vectortab2korc_n3 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -120,7 +120,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: vectortab2korc + alias: vectortab2korc_n3 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -192,13 +192,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select distinct s, t from vectortab2korc +PREHOOK: query: select distinct s, t from vectortab2korc_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2korc +PREHOOK: Input: default@vectortab2korc_n3 #### A masked pattern was here #### -POSTHOOK: query: select distinct s, t from vectortab2korc +POSTHOOK: query: select distinct s, t from vectortab2korc_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2korc +POSTHOOK: Input: default@vectortab2korc_n3 #### A masked pattern was here #### -10 -104 diff --git a/ql/src/test/results/clientpositive/vector_groupby4.q.out b/ql/src/test/results/clientpositive/vector_groupby4.q.out index 443385b9f3..7621ce6c2f 100644 --- a/ql/src/test/results/clientpositive/vector_groupby4.q.out +++ b/ql/src/test/results/clientpositive/vector_groupby4.q.out @@ -1,30 +1,30 @@ -PREHOOK: query: CREATE TABLE srcorc STORED AS ORC AS SELECT * FROM src +PREHOOK: query: CREATE TABLE srcorc_n1 STORED AS ORC AS SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@srcorc -POSTHOOK: query: CREATE TABLE srcorc STORED AS ORC AS SELECT * FROM src +PREHOOK: Output: default@srcorc_n1 +POSTHOOK: query: CREATE TABLE srcorc_n1 STORED AS ORC AS SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcorc -POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcorc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS ORC +POSTHOOK: Output: default@srcorc_n1 +POSTHOOK: Lineage: srcorc_n1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcorc_n1.value 
SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE TABLE dest1_n129(c1 STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS ORC +PREHOOK: Output: default@dest1_n129 +POSTHOOK: query: CREATE TABLE dest1_n129(c1 STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n129 PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION -FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1) +FROM srcorc_n1 +INSERT OVERWRITE TABLE dest1_n129 SELECT substr(srcorc_n1.key,1,1) GROUP BY substr(srcorc_n1.key,1,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION -FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1) +FROM srcorc_n1 +INSERT OVERWRITE TABLE dest1_n129 SELECT substr(srcorc_n1.key,1,1) GROUP BY substr(srcorc_n1.key,1,1) POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -43,7 +43,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: srcorc + alias: srcorc_n1 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -136,7 +136,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.dest1 + name: default.dest1_n129 Select Operator expressions: _col0 (type: string) outputColumnNames: c1 @@ -156,7 +156,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.dest1 + name: default.dest1_n129 Stage: Stage-3 Stats Work @@ -164,7 +164,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1 Column Types: string - Table: default.dest1 + Table: default.dest1_n129 Stage: Stage-4 Map Reduce @@ -252,24 +252,24 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1) +PREHOOK: query: FROM srcorc_n1 +INSERT OVERWRITE TABLE dest1_n129 SELECT substr(srcorc_n1.key,1,1) GROUP BY substr(srcorc_n1.key,1,1) PREHOOK: type: QUERY -PREHOOK: Input: default@srcorc -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1) +PREHOOK: Input: default@srcorc_n1 +PREHOOK: Output: default@dest1_n129 +POSTHOOK: query: FROM srcorc_n1 +INSERT OVERWRITE TABLE dest1_n129 SELECT substr(srcorc_n1.key,1,1) GROUP BY substr(srcorc_n1.key,1,1) POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcorc -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(srcorc)srcorc.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Input: default@srcorc_n1 +POSTHOOK: Output: default@dest1_n129 +POSTHOOK: Lineage: dest1_n129.c1 EXPRESSION [(srcorc_n1)srcorc_n1.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT dest1_n129.* FROM dest1_n129 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: 
default@dest1_n129 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n129.* FROM dest1_n129 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n129 #### A masked pattern was here #### 0 1 diff --git a/ql/src/test/results/clientpositive/vector_groupby6.q.out b/ql/src/test/results/clientpositive/vector_groupby6.q.out index af206f35a8..d2d1120a45 100644 --- a/ql/src/test/results/clientpositive/vector_groupby6.q.out +++ b/ql/src/test/results/clientpositive/vector_groupby6.q.out @@ -1,30 +1,30 @@ -PREHOOK: query: CREATE TABLE srcorc STORED AS ORC AS SELECT * FROM src +PREHOOK: query: CREATE TABLE srcorc_n0 STORED AS ORC AS SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@srcorc -POSTHOOK: query: CREATE TABLE srcorc STORED AS ORC AS SELECT * FROM src +PREHOOK: Output: default@srcorc_n0 +POSTHOOK: query: CREATE TABLE srcorc_n0 STORED AS ORC AS SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@srcorc -POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: srcorc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS ORC +POSTHOOK: Output: default@srcorc_n0 +POSTHOOK: Lineage: srcorc_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcorc_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE TABLE dest1_n63(c1 STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS ORC +PREHOOK: Output: default@dest1_n63 +POSTHOOK: query: CREATE TABLE dest1_n63(c1 STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest1_n63 PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION -FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1) +FROM srcorc_n0 +INSERT OVERWRITE TABLE dest1_n63 SELECT DISTINCT substr(srcorc_n0.value,5,1) PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION -FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1) +FROM srcorc_n0 +INSERT OVERWRITE TABLE dest1_n63 SELECT DISTINCT substr(srcorc_n0.value,5,1) POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -43,7 +43,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: srcorc + alias: srcorc_n0 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -136,7 +136,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.dest1 + name: default.dest1_n63 Select Operator expressions: _col0 (type: string) outputColumnNames: c1 @@ -156,7 +156,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.dest1 + name: default.dest1_n63 Stage: Stage-3 Stats Work @@ -164,7 
+164,7 @@ STAGE PLANS: Column Stats Desc: Columns: c1 Column Types: string - Table: default.dest1 + Table: default.dest1_n63 Stage: Stage-4 Map Reduce @@ -252,24 +252,24 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -PREHOOK: query: FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1) +PREHOOK: query: FROM srcorc_n0 +INSERT OVERWRITE TABLE dest1_n63 SELECT DISTINCT substr(srcorc_n0.value,5,1) PREHOOK: type: QUERY -PREHOOK: Input: default@srcorc -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM srcorc -INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1) +PREHOOK: Input: default@srcorc_n0 +PREHOOK: Output: default@dest1_n63 +POSTHOOK: query: FROM srcorc_n0 +INSERT OVERWRITE TABLE dest1_n63 SELECT DISTINCT substr(srcorc_n0.value,5,1) POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcorc -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.c1 EXPRESSION [(srcorc)srcorc.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Input: default@srcorc_n0 +POSTHOOK: Output: default@dest1_n63 +POSTHOOK: Lineage: dest1_n63.c1 EXPRESSION [(srcorc_n0)srcorc_n0.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT dest1_n63.* FROM dest1_n63 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n63 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n63.* FROM dest1_n63 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n63 #### A masked pattern was here #### 0 1 diff --git a/ql/src/test/results/clientpositive/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/vector_groupby_3.q.out index dfac04dd95..dbdbf4608d 100644 --- a/ql/src/test/results/clientpositive/vector_groupby_3.q.out +++ b/ql/src/test/results/clientpositive/vector_groupby_3.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table vectortab2k( +PREHOOK: query: create table vectortab2k_n9( t tinyint, si smallint, i int, @@ -16,8 +16,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: create table vectortab2k( +PREHOOK: Output: default@vectortab2k_n9 +POSTHOOK: query: create table vectortab2k_n9( t tinyint, si smallint, i int, @@ -35,16 +35,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: Output: default@vectortab2k_n9 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n9 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: Output: default@vectortab2k_n9 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n9 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: create table vectortab2korc( +POSTHOOK: Output: default@vectortab2k_n9 +PREHOOK: query: create table vectortab2korc_n8( t tinyint, si smallint, i 
int, @@ -61,8 +61,8 @@ PREHOOK: query: create table vectortab2korc( STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: create table vectortab2korc( +PREHOOK: Output: default@vectortab2korc_n8 +POSTHOOK: query: create table vectortab2korc_n8( t tinyint, si smallint, i int, @@ -79,33 +79,33 @@ POSTHOOK: query: create table vectortab2korc( STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2korc -PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: Output: default@vectortab2korc_n8 +PREHOOK: query: INSERT INTO TABLE vectortab2korc_n8 SELECT * FROM vectortab2k_n9 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2k -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: Input: default@vectortab2k_n9 +PREHOOK: Output: default@vectortab2korc_n8 +POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n8 SELECT * FROM vectortab2k_n9 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2k -POSTHOOK: Output: default@vectortab2korc -POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +POSTHOOK: Input: default@vectortab2k_n9 +POSTHOOK: Output: default@vectortab2korc_n8 +POSTHOOK: Lineage: vectortab2korc_n8.b SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.bo SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.d SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.dc SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.dt SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: 
vectortab2korc_n8.f SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.i SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.s SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.s2 SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.si SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.t SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.ts SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n8.ts2 SIMPLE [(vectortab2k_n9)vectortab2k_n9.FieldSchema(name:ts2, type:timestamp, comment:null), ] PREHOOK: query: explain vectorization expression -select s, t, max(b) from vectortab2korc group by s, t +select s, t, max(b) from vectortab2korc_n8 group by s, t PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select s, t, max(b) from vectortab2korc group by s, t +select s, t, max(b) from vectortab2korc_n8 group by s, t POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -120,7 +120,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: vectortab2korc + alias: vectortab2korc_n8 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -196,13 +196,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select s, t, max(b) from vectortab2korc group by s, t +PREHOOK: query: select s, t, max(b) from vectortab2korc_n8 group by s, t PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2korc +PREHOOK: Input: default@vectortab2korc_n8 #### A masked pattern was here #### -POSTHOOK: query: select s, t, max(b) from vectortab2korc group by s, t +POSTHOOK: query: select s, t, max(b) from vectortab2korc_n8 group by s, t POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2korc +POSTHOOK: Input: default@vectortab2korc_n8 #### A masked pattern was here #### -10 8991442360387584000 -104 8268875586442256384 diff --git a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out index 8a6135eada..56f0de2785 100644 --- a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out +++ b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table store_sales_txt +PREHOOK: query: create table store_sales_txt_n0 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -28,8 +28,8 @@ row format delimited fields terminated by '|' stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@store_sales_txt -POSTHOOK: query: create table store_sales_txt +PREHOOK: Output: default@store_sales_txt_n0 +POSTHOOK: query: create table store_sales_txt_n0 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -59,16 +59,16 @@ row format delimited fields terminated by '|' stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@store_sales_txt -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt +POSTHOOK: Output: default@store_sales_txt_n0 
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@store_sales_txt -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt +PREHOOK: Output: default@store_sales_txt_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERWRITE INTO TABLE store_sales_txt_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@store_sales_txt -PREHOOK: query: create table store_sales +POSTHOOK: Output: default@store_sales_txt_n0 +PREHOOK: query: create table store_sales_n3 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -99,8 +99,8 @@ stored as orc tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@store_sales -POSTHOOK: query: create table store_sales +PREHOOK: Output: default@store_sales_n3 +POSTHOOK: query: create table store_sales_n3 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -131,8 +131,8 @@ stored as orc tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@store_sales -PREHOOK: query: insert overwrite table store_sales +POSTHOOK: Output: default@store_sales_n3 +PREHOOK: query: insert overwrite table store_sales_n3 select ss_sold_date_sk , ss_sold_time_sk , @@ -158,11 +158,11 @@ ss_sold_date_sk , ss_net_paid , ss_net_paid_inc_tax , ss_net_profit - from store_sales_txt + from store_sales_txt_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@store_sales_txt -PREHOOK: Output: default@store_sales -POSTHOOK: query: insert overwrite table store_sales +PREHOOK: Input: default@store_sales_txt_n0 +PREHOOK: Output: default@store_sales_n3 +POSTHOOK: query: insert overwrite table store_sales_n3 select ss_sold_date_sk , ss_sold_time_sk , @@ -188,39 +188,39 @@ ss_sold_date_sk , ss_net_paid , ss_net_paid_inc_tax , ss_net_profit - from store_sales_txt + from store_sales_txt_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@store_sales_txt -POSTHOOK: Output: default@store_sales -POSTHOOK: Lineage: store_sales.ss_addr_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_addr_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_cdemo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_cdemo_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_coupon_amt SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_coupon_amt, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_customer_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_customer_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_discount_amt SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_discount_amt, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_list_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_list_price, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_sales_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_sales_price, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_tax SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_tax, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_wholesale_cost SIMPLE 
[(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_wholesale_cost, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_hdemo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_hdemo_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_item_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_item_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_list_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_list_price, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_net_paid SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_paid, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_net_paid_inc_tax SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_paid_inc_tax, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_net_profit SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_profit, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_promo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_promo_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_quantity SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_quantity, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_sales_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sales_price, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_sold_date_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sold_date_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_sold_time_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sold_time_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_store_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_store_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ticket_number SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ticket_number, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_wholesale_cost SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_wholesale_cost, type:double, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_wholesale_cost_decimal EXPRESSION [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_wholesale_cost, type:double, comment:null), ] +POSTHOOK: Input: default@store_sales_txt_n0 +POSTHOOK: Output: default@store_sales_n3 +POSTHOOK: Lineage: store_sales_n3.ss_addr_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_cdemo_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_coupon_amt SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_coupon_amt, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_customer_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_ext_discount_amt SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_ext_discount_amt, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_ext_list_price SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_ext_list_price, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_ext_sales_price SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_ext_sales_price, type:double, comment:null), ] 
+POSTHOOK: Lineage: store_sales_n3.ss_ext_tax SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_ext_tax, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_ext_wholesale_cost SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_ext_wholesale_cost, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_hdemo_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_item_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_list_price SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_list_price, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_net_paid SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_net_paid, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_net_paid_inc_tax SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_net_paid_inc_tax, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_net_profit SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_net_profit, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_promo_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_quantity SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_sales_price SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_sales_price, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_sold_date_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_sold_time_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_store_sk SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_store_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_ticket_number SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_ticket_number, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_wholesale_cost SIMPLE [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_wholesale_cost, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales_n3.ss_wholesale_cost_decimal EXPRESSION [(store_sales_txt_n0)store_sales_txt_n0.FieldSchema(name:ss_wholesale_cost, type:double, comment:null), ] PREHOOK: query: explain vectorization expression select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number order by ss_ticket_number limit 20 @@ -229,7 +229,7 @@ POSTHOOK: query: explain vectorization expression select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number order by ss_ticket_number limit 20 @@ -248,7 +248,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store_sales + alias: store_sales_n3 Statistics: Num rows: 1000 Data size: 241204 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -365,22 +365,22 @@ STAGE PLANS: PREHOOK: query: select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number order by ss_ticket_number limit 20 PREHOOK: type: QUERY -PREHOOK: Input: default@store_sales +PREHOOK: Input: 
default@store_sales_n3 #### A masked pattern was here #### POSTHOOK: query: select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number order by ss_ticket_number limit 20 POSTHOOK: type: QUERY -POSTHOOK: Input: default@store_sales +POSTHOOK: Input: default@store_sales_n3 #### A masked pattern was here #### 1 2 @@ -409,7 +409,7 @@ from (select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number) a group by ss_ticket_number order by m @@ -421,7 +421,7 @@ from (select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number) a group by ss_ticket_number order by m @@ -440,7 +440,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store_sales + alias: store_sales_n3 Statistics: Num rows: 1000 Data size: 241204 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -565,12 +565,12 @@ from (select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number) a group by ss_ticket_number order by m PREHOOK: type: QUERY -PREHOOK: Input: default@store_sales +PREHOOK: Input: default@store_sales_n3 #### A masked pattern was here #### POSTHOOK: query: select min(ss_ticket_number) m @@ -578,12 +578,12 @@ from (select ss_ticket_number from - store_sales + store_sales_n3 group by ss_ticket_number) a group by ss_ticket_number order by m POSTHOOK: type: QUERY -POSTHOOK: Input: default@store_sales +POSTHOOK: Input: default@store_sales_n3 #### A masked pattern was here #### 1 2 @@ -674,7 +674,7 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 where ss_ticket_number = 1 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number @@ -687,7 +687,7 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 where ss_ticket_number = 1 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number @@ -707,7 +707,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store_sales + alias: store_sales_n3 Statistics: Num rows: 1000 Data size: 241204 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -851,13 +851,13 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 where ss_ticket_number = 1 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number order by ss_ticket_number PREHOOK: type: QUERY -PREHOOK: Input: default@store_sales +PREHOOK: Input: default@store_sales_n3 #### A masked pattern was here #### POSTHOOK: query: select ss_ticket_number, sum(ss_item_sk), sum(q), avg(q), sum(np), avg(np), sum(decwc), avg(decwc) @@ -865,13 +865,13 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 where ss_ticket_number = 1 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number order by ss_ticket_number POSTHOOK: type: QUERY -POSTHOOK: Input: default@store_sales +POSTHOOK: Input: default@store_sales_n3 #### A masked pattern was here #### 1 85411 816 58.285714285714285 -5080.17 -362.8692857142857 621.350000000000000000 44.382142857142857143 PREHOOK: query: explain vectorization expression @@ -881,7 +881,7 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, 
max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number, ss_item_sk order by ss_ticket_number, ss_item_sk @@ -893,7 +893,7 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_net_profit) np, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number, ss_item_sk order by ss_ticket_number, ss_item_sk @@ -912,7 +912,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store_sales + alias: store_sales_n3 Statistics: Num rows: 1000 Data size: 241204 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -1046,12 +1046,12 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_wholesale_cost) wc, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number, ss_item_sk order by ss_ticket_number, ss_item_sk PREHOOK: type: QUERY -PREHOOK: Input: default@store_sales +PREHOOK: Input: default@store_sales_n3 #### A masked pattern was here #### POSTHOOK: query: select ss_ticket_number, ss_item_sk, sum(q), avg(q), sum(wc), avg(wc), sum(decwc), avg(decwc) @@ -1059,12 +1059,12 @@ from (select ss_ticket_number, ss_item_sk, min(ss_quantity) q, max(ss_wholesale_cost) wc, max(ss_wholesale_cost_decimal) decwc from - store_sales + store_sales_n3 group by ss_ticket_number, ss_item_sk) a group by ss_ticket_number, ss_item_sk order by ss_ticket_number, ss_item_sk POSTHOOK: type: QUERY -POSTHOOK: Input: default@store_sales +POSTHOOK: Input: default@store_sales_n3 #### A masked pattern was here #### 1 49 5 5.0 10.68 10.68 10.680000000000000000 10.680000000000000000 1 173 65 65.0 27.16 27.16 27.160000000000000000 27.160000000000000000 diff --git a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out index e89b6bca19..cf0ec94c4e 100644 --- a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out +++ b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out @@ -80,57 +80,57 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/store_200' OVERWRITE I POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@store_txt -PREHOOK: query: create table store +PREHOOK: query: create table store_n1 stored as orc as select * from store_txt PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@store_txt PREHOOK: Output: database:default -PREHOOK: Output: default@store -POSTHOOK: query: create table store +PREHOOK: Output: default@store_n1 +POSTHOOK: query: create table store_n1 stored as orc as select * from store_txt POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@store_txt POSTHOOK: Output: database:default -POSTHOOK: Output: default@store -POSTHOOK: Lineage: store.s_city SIMPLE [(store_txt)store_txt.FieldSchema(name:s_city, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_closed_date_sk SIMPLE [(store_txt)store_txt.FieldSchema(name:s_closed_date_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store.s_company_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_company_id, type:int, comment:null), ] -POSTHOOK: Lineage: store.s_company_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_company_name, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_country SIMPLE [(store_txt)store_txt.FieldSchema(name:s_country, type:string, comment:null), ] -POSTHOOK: 
Lineage: store.s_county SIMPLE [(store_txt)store_txt.FieldSchema(name:s_county, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_division_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_division_id, type:int, comment:null), ] -POSTHOOK: Lineage: store.s_division_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_division_name, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_floor_space SIMPLE [(store_txt)store_txt.FieldSchema(name:s_floor_space, type:int, comment:null), ] -POSTHOOK: Lineage: store.s_geography_class SIMPLE [(store_txt)store_txt.FieldSchema(name:s_geography_class, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_gmt_offset SIMPLE [(store_txt)store_txt.FieldSchema(name:s_gmt_offset, type:decimal(5,2), comment:null), ] -POSTHOOK: Lineage: store.s_hours SIMPLE [(store_txt)store_txt.FieldSchema(name:s_hours, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_manager SIMPLE [(store_txt)store_txt.FieldSchema(name:s_manager, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_market_desc SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_desc, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_market_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_id, type:int, comment:null), ] -POSTHOOK: Lineage: store.s_market_manager SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_manager, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_number_employees SIMPLE [(store_txt)store_txt.FieldSchema(name:s_number_employees, type:int, comment:null), ] -POSTHOOK: Lineage: store.s_rec_end_date SIMPLE [(store_txt)store_txt.FieldSchema(name:s_rec_end_date, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_rec_start_date SIMPLE [(store_txt)store_txt.FieldSchema(name:s_rec_start_date, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_state SIMPLE [(store_txt)store_txt.FieldSchema(name:s_state, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_store_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_id, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_store_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_name, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_store_sk SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store.s_street_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_name, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_street_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_number, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_street_type SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_type, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_suite_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_suite_number, type:string, comment:null), ] -POSTHOOK: Lineage: store.s_tax_precentage SIMPLE [(store_txt)store_txt.FieldSchema(name:s_tax_precentage, type:decimal(5,2), comment:null), ] -POSTHOOK: Lineage: store.s_zip SIMPLE [(store_txt)store_txt.FieldSchema(name:s_zip, type:string, comment:null), ] +POSTHOOK: Output: default@store_n1 +POSTHOOK: Lineage: store_n1.s_city SIMPLE [(store_txt)store_txt.FieldSchema(name:s_city, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_closed_date_sk SIMPLE [(store_txt)store_txt.FieldSchema(name:s_closed_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_n1.s_company_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_company_id, type:int, comment:null), ] +POSTHOOK: Lineage: 
store_n1.s_company_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_company_name, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_country SIMPLE [(store_txt)store_txt.FieldSchema(name:s_country, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_county SIMPLE [(store_txt)store_txt.FieldSchema(name:s_county, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_division_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_division_id, type:int, comment:null), ] +POSTHOOK: Lineage: store_n1.s_division_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_division_name, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_floor_space SIMPLE [(store_txt)store_txt.FieldSchema(name:s_floor_space, type:int, comment:null), ] +POSTHOOK: Lineage: store_n1.s_geography_class SIMPLE [(store_txt)store_txt.FieldSchema(name:s_geography_class, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_gmt_offset SIMPLE [(store_txt)store_txt.FieldSchema(name:s_gmt_offset, type:decimal(5,2), comment:null), ] +POSTHOOK: Lineage: store_n1.s_hours SIMPLE [(store_txt)store_txt.FieldSchema(name:s_hours, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_manager SIMPLE [(store_txt)store_txt.FieldSchema(name:s_manager, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_market_desc SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_desc, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_market_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_id, type:int, comment:null), ] +POSTHOOK: Lineage: store_n1.s_market_manager SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_manager, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_number_employees SIMPLE [(store_txt)store_txt.FieldSchema(name:s_number_employees, type:int, comment:null), ] +POSTHOOK: Lineage: store_n1.s_rec_end_date SIMPLE [(store_txt)store_txt.FieldSchema(name:s_rec_end_date, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_rec_start_date SIMPLE [(store_txt)store_txt.FieldSchema(name:s_rec_start_date, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_state SIMPLE [(store_txt)store_txt.FieldSchema(name:s_state, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_store_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_id, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_store_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_name, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_store_sk SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_n1.s_street_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_name, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_street_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_number, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_street_type SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_type, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_suite_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_suite_number, type:string, comment:null), ] +POSTHOOK: Lineage: store_n1.s_tax_precentage SIMPLE [(store_txt)store_txt.FieldSchema(name:s_tax_precentage, type:decimal(5,2), comment:null), ] +POSTHOOK: Lineage: store_n1.s_zip SIMPLE [(store_txt)store_txt.FieldSchema(name:s_zip, type:string, comment:null), ] PREHOOK: query: explain vectorization expression select s_store_id - from store + from store_n1 group by s_store_id with rollup 
PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression select s_store_id - from store + from store_n1 group by s_store_id with rollup POSTHOOK: type: QUERY PLAN VECTORIZATION: @@ -146,7 +146,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store + alias: store_n1 Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -216,16 +216,16 @@ STAGE PLANS: ListSink PREHOOK: query: select s_store_id - from store + from store_n1 group by s_store_id with rollup PREHOOK: type: QUERY -PREHOOK: Input: default@store +PREHOOK: Input: default@store_n1 #### A masked pattern was here #### POSTHOOK: query: select s_store_id - from store + from store_n1 group by s_store_id with rollup POSTHOOK: type: QUERY -POSTHOOK: Input: default@store +POSTHOOK: Input: default@store_n1 #### A masked pattern was here #### NULL AAAAAAAABAAAAAAA @@ -236,12 +236,12 @@ AAAAAAAAIAAAAAAA AAAAAAAAKAAAAAAA PREHOOK: query: explain vectorization expression select s_store_id, GROUPING__ID - from store + from store_n1 group by s_store_id with rollup PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression select s_store_id, GROUPING__ID - from store + from store_n1 group by s_store_id with rollup POSTHOOK: type: QUERY PLAN VECTORIZATION: @@ -257,7 +257,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store + alias: store_n1 Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -330,16 +330,16 @@ STAGE PLANS: ListSink PREHOOK: query: select s_store_id, GROUPING__ID - from store + from store_n1 group by s_store_id with rollup PREHOOK: type: QUERY -PREHOOK: Input: default@store +PREHOOK: Input: default@store_n1 #### A masked pattern was here #### POSTHOOK: query: select s_store_id, GROUPING__ID - from store + from store_n1 group by s_store_id with rollup POSTHOOK: type: QUERY -POSTHOOK: Input: default@store +POSTHOOK: Input: default@store_n1 #### A masked pattern was here #### NULL 1 AAAAAAAABAAAAAAA 0 @@ -350,12 +350,12 @@ AAAAAAAAIAAAAAAA 0 AAAAAAAAKAAAAAAA 0 PREHOOK: query: explain select s_store_id, GROUPING__ID - from store + from store_n1 group by rollup(s_store_id) PREHOOK: type: QUERY POSTHOOK: query: explain select s_store_id, GROUPING__ID - from store + from store_n1 group by rollup(s_store_id) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -367,7 +367,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store + alias: store_n1 Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: s_store_id (type: string) diff --git a/ql/src/test/results/clientpositive/vector_include_no_sel.q.out b/ql/src/test/results/clientpositive/vector_include_no_sel.q.out index 848823fca3..dae2d63040 100644 --- a/ql/src/test/results/clientpositive/vector_include_no_sel.q.out +++ b/ql/src/test/results/clientpositive/vector_include_no_sel.q.out @@ -68,39 +68,39 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/store_sales.txt' OVERW POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@store_sales_txt -PREHOOK: query: create table store_sales stored as orc as select * from store_sales_txt +PREHOOK: query: create table store_sales_n1 stored as orc as select * from store_sales_txt PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@store_sales_txt PREHOOK: Output: database:default -PREHOOK: Output: default@store_sales -POSTHOOK: query: create 
table store_sales stored as orc as select * from store_sales_txt +PREHOOK: Output: default@store_sales_n1 +POSTHOOK: query: create table store_sales_n1 stored as orc as select * from store_sales_txt POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@store_sales_txt POSTHOOK: Output: database:default -POSTHOOK: Output: default@store_sales -POSTHOOK: Lineage: store_sales.ss_addr_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_addr_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_cdemo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_cdemo_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_coupon_amt SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_coupon_amt, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_customer_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_customer_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_discount_amt SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_discount_amt, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_list_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_list_price, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_sales_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_sales_price, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_tax SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_tax, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ext_wholesale_cost SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_wholesale_cost, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_hdemo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_hdemo_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_item_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_item_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_list_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_list_price, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_net_paid SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_paid, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_net_paid_inc_tax SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_net_profit SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_profit, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_promo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_promo_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_quantity SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_quantity, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_sales_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sales_price, type:float, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_sold_date_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sold_date_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_sold_time_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sold_time_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_store_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_store_sk, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_ticket_number SIMPLE 
[(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ticket_number, type:int, comment:null), ] -POSTHOOK: Lineage: store_sales.ss_wholesale_cost SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_wholesale_cost, type:float, comment:null), ] +POSTHOOK: Output: default@store_sales_n1 +POSTHOOK: Lineage: store_sales_n1.ss_addr_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_cdemo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_coupon_amt SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_coupon_amt, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_customer_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_ext_discount_amt SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_discount_amt, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_ext_list_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_list_price, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_ext_sales_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_sales_price, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_ext_tax SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_tax, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_ext_wholesale_cost SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ext_wholesale_cost, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_hdemo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_item_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_list_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_list_price, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_net_paid SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_paid, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_net_paid_inc_tax SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_net_profit SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_promo_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_quantity SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_sales_price SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sales_price, type:float, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_sold_date_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_sold_time_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_store_sk SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_store_sk, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_ticket_number SIMPLE 
[(store_sales_txt)store_sales_txt.FieldSchema(name:ss_ticket_number, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales_n1.ss_wholesale_cost SIMPLE [(store_sales_txt)store_sales_txt.FieldSchema(name:ss_wholesale_cost, type:float, comment:null), ] PREHOOK: query: create table customer_demographics_txt ( cd_demo_sk int, @@ -162,16 +162,16 @@ POSTHOOK: Lineage: customer_demographics.cd_education_status SIMPLE [(customer_d POSTHOOK: Lineage: customer_demographics.cd_gender SIMPLE [(customer_demographics_txt)customer_demographics_txt.FieldSchema(name:cd_gender, type:string, comment:null), ] POSTHOOK: Lineage: customer_demographics.cd_marital_status SIMPLE [(customer_demographics_txt)customer_demographics_txt.FieldSchema(name:cd_marital_status, type:string, comment:null), ] POSTHOOK: Lineage: customer_demographics.cd_purchase_estimate SIMPLE [(customer_demographics_txt)customer_demographics_txt.FieldSchema(name:cd_purchase_estimate, type:int, comment:null), ] -Warning: Map Join MAPJOIN[15][bigTable=store_sales] in task 'Stage-2:MAPRED' is a cross product +Warning: Map Join MAPJOIN[15][bigTable=store_sales_n1] in task 'Stage-2:MAPRED' is a cross product PREHOOK: query: explain vectorization expression -select count(1) from customer_demographics,store_sales -where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or - (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')) +select count(1) from customer_demographics,store_sales_n1 +where ((customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or + (customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')) PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select count(1) from customer_demographics,store_sales -where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or - (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')) +select count(1) from customer_demographics,store_sales_n1 +where ((customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or + (customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')) POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -203,7 +203,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store_sales + alias: store_sales_n1 Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -291,19 +291,19 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Map Join MAPJOIN[15][bigTable=store_sales] in task 'Stage-2:MAPRED' is a cross product -PREHOOK: query: select count(1) from customer_demographics,store_sales -where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or - (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')) +Warning: Map Join MAPJOIN[15][bigTable=store_sales_n1] in task 'Stage-2:MAPRED' is a cross product +PREHOOK: query: select count(1) from customer_demographics,store_sales_n1 +where ((customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or + 
(customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')) PREHOOK: type: QUERY PREHOOK: Input: default@customer_demographics -PREHOOK: Input: default@store_sales +PREHOOK: Input: default@store_sales_n1 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from customer_demographics,store_sales -where ((customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or - (customer_demographics.cd_demo_sk = store_sales.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')) +POSTHOOK: query: select count(1) from customer_demographics,store_sales_n1 +where ((customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'M') or + (customer_demographics.cd_demo_sk = store_sales_n1.ss_cdemo_sk and customer_demographics.cd_marital_status = 'U')) POSTHOOK: type: QUERY POSTHOOK: Input: default@customer_demographics -POSTHOOK: Input: default@store_sales +POSTHOOK: Input: default@store_sales_n1 #### A masked pattern was here #### 0 diff --git a/ql/src/test/results/clientpositive/vector_map_order.q.out b/ql/src/test/results/clientpositive/vector_map_order.q.out index 6fe6943c5f..bfe4b93dd3 100644 --- a/ql/src/test/results/clientpositive/vector_map_order.q.out +++ b/ql/src/test/results/clientpositive/vector_map_order.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table map_table (foo STRING , bar MAP<STRING, STRING>) +PREHOOK: query: create table map_table_n0 (foo STRING , bar MAP<STRING, STRING>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ',' @@ -6,8 +6,8 @@ MAP KEYS TERMINATED BY ':' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@map_table -POSTHOOK: query: create table map_table (foo STRING , bar MAP<STRING, STRING>) +PREHOOK: Output: default@map_table_n0 +POSTHOOK: query: create table map_table_n0 (foo STRING , bar MAP<STRING, STRING>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ',' @@ -15,20 +15,20 @@ MAP KEYS TERMINATED BY ':' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@map_table -PREHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table +POSTHOOK: Output: default@map_table_n0 +PREHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@map_table -POSTHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table +PREHOOK: Output: default@map_table_n0 +POSTHOOK: query: load data local inpath "../../data/files/map_table.txt" overwrite into table map_table_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@map_table +POSTHOOK: Output: default@map_table_n0 PREHOOK: query: explain vectorization detail -select * from map_table +select * from map_table_n0 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization detail -select * from map_table +select * from map_table_n0 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -43,7 +43,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: map_table + alias: map_table_n0 Statistics: Num rows: 1 Data size: 520 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -89,13 +89,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from map_table +PREHOOK:
query: select * from map_table_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@map_table +PREHOOK: Input: default@map_table_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from map_table +POSTHOOK: query: select * from map_table_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@map_table +POSTHOOK: Input: default@map_table_n0 #### A masked pattern was here #### foo1 {"k1":"v1","k2":"v2","k3":"v3"} foo2 {"k21":"v21","k22":"v22","k31":"v31"} diff --git a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out index 634acb553d..74d10fd5d9 100644 --- a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out +++ b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table date_dim +PREHOOK: query: create table date_dim_n0 ( d_date_sk int, d_date_id string, @@ -32,8 +32,8 @@ PREHOOK: query: create table date_dim stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@date_dim -POSTHOOK: query: create table date_dim +PREHOOK: Output: default@date_dim_n0 +POSTHOOK: query: create table date_dim_n0 ( d_date_sk int, d_date_id string, @@ -67,8 +67,8 @@ POSTHOOK: query: create table date_dim stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@date_dim -PREHOOK: query: create table store_sales +POSTHOOK: Output: default@date_dim_n0 +PREHOOK: query: create table store_sales_n2 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -101,8 +101,8 @@ stored as orc tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@store_sales -POSTHOOK: query: create table store_sales +PREHOOK: Output: default@store_sales_n2 +POSTHOOK: query: create table store_sales_n2 ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -135,8 +135,8 @@ stored as orc tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@store_sales -PREHOOK: query: create table store +POSTHOOK: Output: default@store_sales_n2 +PREHOOK: query: create table store_n2 ( s_store_sk int, s_store_id string, @@ -171,8 +171,8 @@ PREHOOK: query: create table store stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@store -POSTHOOK: query: create table store +PREHOOK: Output: default@store_n2 +POSTHOOK: query: create table store_n2 ( s_store_sk int, s_store_id string, @@ -207,27 +207,27 @@ POSTHOOK: query: create table store stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@store +POSTHOOK: Output: default@store_n2 PREHOOK: query: explain vectorization select s_state, count(1) - from store_sales, - store, - date_dim - where store_sales.ss_sold_date_sk = date_dim.d_date_sk and - store_sales.ss_store_sk = store.s_store_sk and - store.s_state in ('KS','AL', 'MN', 'AL', 'SC', 'VT') + from store_sales_n2, + store_n2, + date_dim_n0 + where store_sales_n2.ss_sold_date_sk = date_dim_n0.d_date_sk and + store_sales_n2.ss_store_sk = store_n2.s_store_sk and + store_n2.s_state in ('KS','AL', 'MN', 'AL', 'SC', 'VT') group by s_state order by s_state limit 100 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization select s_state, count(1) - from store_sales, - store, - date_dim - where 
store_sales.ss_sold_date_sk = date_dim.d_date_sk and - store_sales.ss_store_sk = store.s_store_sk and - store.s_state in ('KS','AL', 'MN', 'AL', 'SC', 'VT') + from store_sales_n2, + store_n2, + date_dim_n0 + where store_sales_n2.ss_sold_date_sk = date_dim_n0.d_date_sk and + store_sales_n2.ss_store_sk = store_n2.s_store_sk and + store_n2.s_state in ('KS','AL', 'MN', 'AL', 'SC', 'VT') group by s_state order by s_state limit 100 @@ -248,7 +248,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: store_sales + alias: store_sales_n2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (ss_sold_date_sk is not null and ss_store_sk is not null) (type: boolean) @@ -264,7 +264,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: int) TableScan - alias: date_dim + alias: date_dim_n0 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: d_date_sk is not null (type: boolean) @@ -311,7 +311,7 @@ STAGE PLANS: Map-reduce partition columns: _col1 (type: int) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE TableScan - alias: store + alias: store_n2 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: ((s_state) IN ('KS', 'AL', 'MN', 'SC', 'VT') and s_store_sk is not null) (type: boolean) diff --git a/ql/src/test/results/clientpositive/vector_null_projection.q.out b/ql/src/test/results/clientpositive/vector_null_projection.q.out index f0f444f83f..1c0249f805 100644 --- a/ql/src/test/results/clientpositive/vector_null_projection.q.out +++ b/ql/src/test/results/clientpositive/vector_null_projection.q.out @@ -1,42 +1,42 @@ -PREHOOK: query: create table a(s string) stored as orc +PREHOOK: query: create table a_n5(s string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@a -POSTHOOK: query: create table a(s string) stored as orc +PREHOOK: Output: default@a_n5 +POSTHOOK: query: create table a_n5(s string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@a -PREHOOK: query: create table b(s string) stored as orc +POSTHOOK: Output: default@a_n5 +PREHOOK: query: create table b_n3(s string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@b -POSTHOOK: query: create table b(s string) stored as orc +PREHOOK: Output: default@b_n3 +POSTHOOK: query: create table b_n3(s string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@b -PREHOOK: query: insert into table a values('aaa') +POSTHOOK: Output: default@b_n3 +PREHOOK: query: insert into table a_n5 values('aaa_n5') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@a -POSTHOOK: query: insert into table a values('aaa') +PREHOOK: Output: default@a_n5 +POSTHOOK: query: insert into table a_n5 values('aaa_n5') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@a -POSTHOOK: Lineage: a.s SCRIPT [] -PREHOOK: query: insert into table b values('aaa') +POSTHOOK: Output: default@a_n5 +POSTHOOK: Lineage: a_n5.s SCRIPT [] +PREHOOK: query: insert into table b_n3 values('aaa_n5') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@b -POSTHOOK: query: insert into table b values('aaa') +PREHOOK: 
Output: default@b_n3 +POSTHOOK: query: insert into table b_n3 values('aaa_n5') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@b -POSTHOOK: Lineage: b.s SCRIPT [] +POSTHOOK: Output: default@b_n3 +POSTHOOK: Lineage: b_n3.s SCRIPT [] PREHOOK: query: explain vectorization detail -select NULL from a +select NULL from a_n5 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization detail -select NULL from a +select NULL from a_n5 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -51,8 +51,8 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a - Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE + alias: a_n5 + Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: COMPLETE TableScan Vectorization: native: true vectorizationSchemaColumns: [0:s:string, 1:ROW__ID:struct] @@ -98,20 +98,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select NULL from a +PREHOOK: query: select NULL from a_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@a +PREHOOK: Input: default@a_n5 #### A masked pattern was here #### -POSTHOOK: query: select NULL from a +POSTHOOK: query: select NULL from a_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a +POSTHOOK: Input: default@a_n5 #### A masked pattern was here #### NULL PREHOOK: query: explain vectorization expression -select NULL as x from a union distinct select NULL as x from b +select NULL as x from a_n5 union distinct select NULL as x from b_n3 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select NULL as x from a union distinct select NULL as x from b +select NULL as x from a_n5 union distinct select NULL as x from b_n3 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -126,8 +126,8 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: a - Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE + alias: a_n5 + Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Union @@ -145,8 +145,8 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: void) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE TableScan - alias: b - Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE + alias: b_n3 + Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Union @@ -194,14 +194,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select NULL as x from a union distinct select NULL as x from b +PREHOOK: query: select NULL as x from a_n5 union distinct select NULL as x from b_n3 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n5 +PREHOOK: Input: default@b_n3 #### A masked pattern was here #### -POSTHOOK: query: select NULL as x from a union distinct select NULL as x from b +POSTHOOK: query: select NULL as x from a_n5 union distinct select NULL as x from b_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n5 +POSTHOOK: Input: default@b_n3 #### A masked pattern was here #### NULL diff --git a/ql/src/test/results/clientpositive/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/vector_orderby_5.q.out index 793d99e602..324bdd099d 100644 --- 
a/ql/src/test/results/clientpositive/vector_orderby_5.q.out +++ b/ql/src/test/results/clientpositive/vector_orderby_5.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table vectortab2k( +PREHOOK: query: create table vectortab2k_n7( t tinyint, si smallint, i int, @@ -16,8 +16,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: create table vectortab2k( +PREHOOK: Output: default@vectortab2k_n7 +POSTHOOK: query: create table vectortab2k_n7( t tinyint, si smallint, i int, @@ -35,16 +35,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: Output: default@vectortab2k_n7 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n7 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: Output: default@vectortab2k_n7 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n7 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: create table vectortab2korc( +POSTHOOK: Output: default@vectortab2k_n7 +PREHOOK: query: create table vectortab2korc_n6( t tinyint, si smallint, i int, @@ -61,8 +61,8 @@ PREHOOK: query: create table vectortab2korc( STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: create table vectortab2korc( +PREHOOK: Output: default@vectortab2korc_n6 +POSTHOOK: query: create table vectortab2korc_n6( t tinyint, si smallint, i int, @@ -79,33 +79,33 @@ POSTHOOK: query: create table vectortab2korc( STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2korc -PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: Output: default@vectortab2korc_n6 +PREHOOK: query: INSERT INTO TABLE vectortab2korc_n6 SELECT * FROM vectortab2k_n7 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2k -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: Input: default@vectortab2k_n7 +PREHOOK: Output: default@vectortab2korc_n6 +POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n6 SELECT * FROM vectortab2k_n7 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2k -POSTHOOK: Output: default@vectortab2korc -POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, 
type:float, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +POSTHOOK: Input: default@vectortab2k_n7 +POSTHOOK: Output: default@vectortab2korc_n6 +POSTHOOK: Lineage: vectortab2korc_n6.b SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.bo SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.d SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.dc SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.dt SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.f SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.i SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.s SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.s2 SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.si SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.t SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.ts SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n6.ts2 SIMPLE [(vectortab2k_n7)vectortab2k_n7.FieldSchema(name:ts2, type:timestamp, comment:null), ] PREHOOK: query: explain vectorization expression -select bo, max(b) from vectortab2korc group by bo order by bo desc +select bo, max(b) from vectortab2korc_n6 group by bo order by bo desc PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select bo, max(b) from vectortab2korc group by bo order by bo desc +select bo, max(b) from vectortab2korc_n6 group by bo order by bo desc POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -121,7 +121,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: vectortab2korc + alias: vectortab2korc_n6 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -235,13 +235,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select bo, max(b) from vectortab2korc group by bo order by bo desc +PREHOOK: query: select bo, 
max(b) from vectortab2korc_n6 group by bo order by bo desc PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2korc +PREHOOK: Input: default@vectortab2korc_n6 #### A masked pattern was here #### -POSTHOOK: query: select bo, max(b) from vectortab2korc group by bo order by bo desc +POSTHOOK: query: select bo, max(b) from vectortab2korc_n6 group by bo order by bo desc POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2korc +POSTHOOK: Input: default@vectortab2korc_n6 #### A masked pattern was here #### true 9211455920344088576 false 9209153648361848832 diff --git a/ql/src/test/results/clientpositive/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_outer_join2.q.out index 9a74003bdf..ef6ef3acf2 100644 --- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out +++ b/ql/src/test/results/clientpositive/vector_outer_join2.q.out @@ -1,210 +1,210 @@ -PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: query: create table small_alltypesorc1a_n0 as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc1a -POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: Output: default@small_alltypesorc1a_n0 +POSTHOOK: query: create table small_alltypesorc1a_n0 as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default -POSTHOOK: Output: default@small_alltypesorc1a -POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] 
-POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +POSTHOOK: Output: default@small_alltypesorc1a_n0 +POSTHOOK: Lineage: small_alltypesorc1a_n0.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc1a_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: create table small_alltypesorc2a_n0 as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default -PREHOOK: Output: default@small_alltypesorc2a -POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: Output: default@small_alltypesorc2a_n0 +POSTHOOK: query: create table small_alltypesorc2a_n0 as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default 
-POSTHOOK: Output: default@small_alltypesorc2a -POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [] -POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] -POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +POSTHOOK: Output: default@small_alltypesorc2a_n0 +POSTHOOK: Lineage: small_alltypesorc2a_n0.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.cint SIMPLE [] +POSTHOOK: Lineage: small_alltypesorc2a_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] +POSTHOOK: Lineage: small_alltypesorc2a_n0.ctinyint SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: create table small_alltypesorc3a_n0 as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: database:default
-PREHOOK: Output: default@small_alltypesorc3a
-POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: Output: default@small_alltypesorc3a_n0
+POSTHOOK: query: create table small_alltypesorc3a_n0 as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@small_alltypesorc3a
-POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE []
-POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: Output: default@small_alltypesorc3a_n0
+POSTHOOK: Lineage: small_alltypesorc3a_n0.cbigint SIMPLE []
+POSTHOOK: Lineage: small_alltypesorc3a_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: create table small_alltypesorc4a_n0 as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: database:default
-PREHOOK: Output: default@small_alltypesorc4a
-POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: Output: default@small_alltypesorc4a_n0
+POSTHOOK: query: create table small_alltypesorc4a_n0 as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@small_alltypesorc4a
-POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE []
-POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
-POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: select * from small_alltypesorc1a
+POSTHOOK: Output: default@small_alltypesorc4a_n0
+POSTHOOK: Lineage: small_alltypesorc4a_n0.cbigint SIMPLE []
+POSTHOOK: Lineage: small_alltypesorc4a_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.cint SIMPLE []
+POSTHOOK: Lineage: small_alltypesorc4a_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: select * from small_alltypesorc1a_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc1a
+PREHOOK: Input: default@small_alltypesorc1a_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc1a
+POSTHOOK: query: select * from small_alltypesorc1a_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc1a
+POSTHOOK: Input: default@small_alltypesorc1a_n0
#### A masked pattern was here ####
NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
-PREHOOK: query: select * from small_alltypesorc2a
+PREHOOK: query: select * from small_alltypesorc2a_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc2a
+PREHOOK: Input: default@small_alltypesorc2a_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc2a
+POSTHOOK: query: select * from small_alltypesorc2a_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc2a
+POSTHOOK: Input: default@small_alltypesorc2a_n0
#### A masked pattern was here ####
-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
-PREHOOK: query: select * from small_alltypesorc3a
+PREHOOK: query: select * from small_alltypesorc3a_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc3a
+PREHOOK: Input: default@small_alltypesorc3a_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc3a
+POSTHOOK: query: select * from small_alltypesorc3a_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc3a
+POSTHOOK: Input: default@small_alltypesorc3a_n0
#### A masked pattern was here ####
NULL -13166 626923679 NULL NULL -13166.0 821UdmGbkEf4j NULL 1969-12-31 15:59:55.089 1969-12-31 16:00:15.69 true NULL
NULL -14426 626923679 NULL NULL -14426.0 821UdmGbkEf4j NULL 1969-12-31 16:00:11.505 1969-12-31 16:00:13.309 true NULL
NULL -14847 626923679 NULL NULL -14847.0 821UdmGbkEf4j NULL 1969-12-31 16:00:00.612 1969-12-31 15:59:43.704 true NULL
NULL -15632 528534767 NULL NULL -15632.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 15:59:53.593 true NULL
NULL -15830 253665376 NULL NULL -15830.0 1cGVWH7n1QU NULL 1969-12-31 16:00:02.582 1969-12-31 16:00:00.518 true NULL
-PREHOOK: query: select * from small_alltypesorc4a
+PREHOOK: query: select * from small_alltypesorc4a_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc4a
+PREHOOK: Input: default@small_alltypesorc4a_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc4a
+POSTHOOK: query: select * from small_alltypesorc4a_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc4a
+POSTHOOK: Input: default@small_alltypesorc4a_n0
#### A masked pattern was here ####
-60 -200 NULL NULL -60.0 -200.0 NULL NULL 1969-12-31 16:00:11.996 1969-12-31 15:59:55.451 NULL NULL
-61 -7196 NULL NULL -61.0 -7196.0 NULL 8Mlns2Tl6E0g 1969-12-31 15:59:44.823 1969-12-31 15:59:58.174 NULL false
-61 -7196 NULL NULL -61.0 -7196.0 NULL fUJIN 1969-12-31 16:00:11.842 1969-12-31 15:59:58.174 NULL false
-62 -7196 NULL NULL -62.0 -7196.0 NULL jf1Cw6qhkNToQuud 1969-12-31 16:00:12.388 1969-12-31 15:59:58.174 NULL false
-62 -7196 NULL NULL -62.0 -7196.0 NULL yLiOchx5PfDTFdcMduBTg 1969-12-31 16:00:02.373 1969-12-31 15:59:58.174 NULL false
-PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
-(select * from (select * from small_alltypesorc1a) sq1
+PREHOOK: query: create table small_alltypesorc_a_n0 stored as orc as select * from
+(select * from (select * from small_alltypesorc1a_n0) sq1
union all
- select * from (select * from small_alltypesorc2a) sq2
+ select * from (select * from small_alltypesorc2a_n0) sq2
union all
- select * from (select * from small_alltypesorc3a) sq3
+ select * from (select * from small_alltypesorc3a_n0) sq3
union all
- select * from (select * from small_alltypesorc4a) sq4) q
+ select * from (select * from small_alltypesorc4a_n0) sq4) q
PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@small_alltypesorc1a
-PREHOOK: Input: default@small_alltypesorc2a
-PREHOOK: Input: default@small_alltypesorc3a
-PREHOOK: Input: default@small_alltypesorc4a
+PREHOOK: Input: default@small_alltypesorc1a_n0
+PREHOOK: Input: default@small_alltypesorc2a_n0
+PREHOOK: Input: default@small_alltypesorc3a_n0
+PREHOOK: Input: default@small_alltypesorc4a_n0
PREHOOK: Output: database:default
-PREHOOK: Output: default@small_alltypesorc_a
-POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from
-(select * from (select * from small_alltypesorc1a) sq1
+PREHOOK: Output: default@small_alltypesorc_a_n0
+POSTHOOK: query: create table small_alltypesorc_a_n0 stored as orc as select * from
+(select * from (select * from small_alltypesorc1a_n0) sq1
union all
- select * from (select * from small_alltypesorc2a) sq2
+ select * from (select * from small_alltypesorc2a_n0) sq2
union all
- select * from (select * from small_alltypesorc3a) sq3
+ select * from (select * from small_alltypesorc3a_n0) sq3
union all
- select * from (select * from small_alltypesorc4a) sq4) q
+ select * from (select * from small_alltypesorc4a_n0) sq4) q
POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@small_alltypesorc1a
-POSTHOOK: Input: default@small_alltypesorc2a
-POSTHOOK: Input: default@small_alltypesorc3a
-POSTHOOK: Input: default@small_alltypesorc4a
+POSTHOOK: Input: default@small_alltypesorc1a_n0
+POSTHOOK: Input: default@small_alltypesorc2a_n0
+POSTHOOK: Input: default@small_alltypesorc3a_n0
+POSTHOOK: Input: default@small_alltypesorc4a_n0
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@small_alltypesorc_a
-POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+POSTHOOK: Output: default@small_alltypesorc_a_n0
+POSTHOOK: Lineage: small_alltypesorc_a_n0.cbigint EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.cboolean1 EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.cboolean2 EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.cdouble EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.cfloat EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.cint EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.csmallint EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.cstring1 EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.cstring2 EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.ctimestamp1 EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.ctimestamp2 EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n0.ctinyint EXPRESSION [(small_alltypesorc1a_n0)small_alltypesorc1a_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a_n0)small_alltypesorc2a_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a_n0)small_alltypesorc3a_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a_n0)small_alltypesorc4a_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a_n0 COMPUTE STATISTICS
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc_a
-PREHOOK: Output: default@small_alltypesorc_a
-POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+PREHOOK: Input: default@small_alltypesorc_a_n0
+PREHOOK: Output: default@small_alltypesorc_a_n0
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a_n0 COMPUTE STATISTICS
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc_a
-POSTHOOK: Output: default@small_alltypesorc_a
-PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: Input: default@small_alltypesorc_a_n0
+POSTHOOK: Output: default@small_alltypesorc_a_n0
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a_n0 COMPUTE STATISTICS FOR COLUMNS
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@small_alltypesorc_a
-PREHOOK: Output: default@small_alltypesorc_a
+PREHOOK: Input: default@small_alltypesorc_a_n0
+PREHOOK: Output: default@small_alltypesorc_a_n0
#### A masked pattern was here ####
-POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a_n0 COMPUTE STATISTICS FOR COLUMNS
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@small_alltypesorc_a
-POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Input: default@small_alltypesorc_a_n0
+POSTHOOK: Output: default@small_alltypesorc_a_n0
#### A masked pattern was here ####
-PREHOOK: query: select * from small_alltypesorc_a
+PREHOOK: query: select * from small_alltypesorc_a_n0
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Input: default@small_alltypesorc_a_n0
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc_a
+POSTHOOK: query: select * from small_alltypesorc_a_n0
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Input: default@small_alltypesorc_a_n0
#### A masked pattern was here ####
-60 -200 NULL NULL -60.0 -200.0 NULL NULL 1969-12-31 16:00:11.996 1969-12-31 15:59:55.451 NULL NULL
-61 -7196 NULL NULL -61.0 -7196.0 NULL 8Mlns2Tl6E0g 1969-12-31 15:59:44.823 1969-12-31 15:59:58.174 NULL false
@@ -228,19 +228,19 @@ NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 19
NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
PREHOOK: query: explain vectorization detail
select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
-from small_alltypesorc_a c
-left outer join small_alltypesorc_a cd
+from small_alltypesorc_a_n0 c
+left outer join small_alltypesorc_a_n0 cd
on cd.cint = c.cint
-left outer join small_alltypesorc_a hd
+left outer join small_alltypesorc_a_n0 hd
on hd.cbigint = c.cbigint
) t1
PREHOOK: type: QUERY
POSTHOOK: query: explain vectorization detail
select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
-from small_alltypesorc_a c
-left outer join small_alltypesorc_a cd
+from small_alltypesorc_a_n0 c
+left outer join small_alltypesorc_a_n0 cd
on cd.cint = c.cint
-left outer join small_alltypesorc_a hd
+left outer join small_alltypesorc_a_n0 hd
on hd.cbigint = c.cbigint
) t1
POSTHOOK: type: QUERY
@@ -400,23 +400,23 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
-from small_alltypesorc_a c
-left outer join small_alltypesorc_a cd
+from small_alltypesorc_a_n0 c
+left outer join small_alltypesorc_a_n0 cd
on cd.cint = c.cint
-left outer join small_alltypesorc_a hd
+left outer join small_alltypesorc_a_n0 hd
on hd.cbigint = c.cbigint
) t1
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Input: default@small_alltypesorc_a_n0
#### A masked pattern was here ####
POSTHOOK: query: select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint
-from small_alltypesorc_a c
-left outer join small_alltypesorc_a cd
+from small_alltypesorc_a_n0 c
+left outer join small_alltypesorc_a_n0 cd
on cd.cint = c.cint
-left outer join small_alltypesorc_a hd
+left outer join small_alltypesorc_a_n0 hd
on hd.cbigint = c.cbigint
) t1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Input: default@small_alltypesorc_a_n0
#### A masked pattern was here ####
34 -26289186744
diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
index d82eeba71c..74d774b600 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
@@ -1,210 +1,210 @@
-PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: query: create table small_alltypesorc1a_n1 as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: database:default
-PREHOOK: Output: default@small_alltypesorc1a
-POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: Output: default@small_alltypesorc1a_n1
+POSTHOOK: query: create table small_alltypesorc1a_n1 as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@small_alltypesorc1a
-POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: Output: default@small_alltypesorc1a_n1
+POSTHOOK: Lineage: small_alltypesorc1a_n1.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a_n1.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: create table small_alltypesorc2a_n1 as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: database:default
-PREHOOK: Output: default@small_alltypesorc2a
-POSTHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: Output: default@small_alltypesorc2a_n1
+POSTHOOK: query: create table small_alltypesorc2a_n1 as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@small_alltypesorc2a
-POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
-POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: Output: default@small_alltypesorc2a_n1
+POSTHOOK: Lineage: small_alltypesorc2a_n1.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.cint SIMPLE []
+POSTHOOK: Lineage: small_alltypesorc2a_n1.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a_n1.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: create table small_alltypesorc3a_n1 as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: database:default
-PREHOOK: Output: default@small_alltypesorc3a
-POSTHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: Output: default@small_alltypesorc3a_n1
+POSTHOOK: query: create table small_alltypesorc3a_n1 as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@small_alltypesorc3a
-POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE []
-POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+POSTHOOK: Output: default@small_alltypesorc3a_n1
+POSTHOOK: Lineage: small_alltypesorc3a_n1.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.cstring1 SIMPLE []
+POSTHOOK: Lineage: small_alltypesorc3a_n1.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a_n1.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: create table small_alltypesorc4a_n1 as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: database:default
-PREHOOK: Output: default@small_alltypesorc4a
-POSTHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
+PREHOOK: Output: default@small_alltypesorc4a_n1
+POSTHOOK: query: create table small_alltypesorc4a_n1 as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@small_alltypesorc4a
-POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
-POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE []
-POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: select * from small_alltypesorc1a
+POSTHOOK: Output: default@small_alltypesorc4a_n1
+POSTHOOK: Lineage: small_alltypesorc4a_n1.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.cint SIMPLE []
+POSTHOOK: Lineage: small_alltypesorc4a_n1.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.cstring1 SIMPLE []
+POSTHOOK: Lineage: small_alltypesorc4a_n1.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a_n1.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: select * from small_alltypesorc1a_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc1a
+PREHOOK: Input: default@small_alltypesorc1a_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc1a
+POSTHOOK: query: select * from small_alltypesorc1a_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc1a
+POSTHOOK: Input: default@small_alltypesorc1a_n1
#### A masked pattern was here ####
NULL NULL -1015272448 -1887561756 NULL NULL jTQ68531mP 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:45.854 false false
NULL NULL -850295959 -1887561756 NULL NULL WMIgGA73 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:00.348 false false
NULL NULL -886426182 -1887561756 NULL NULL 0i88xYq3gx1nW4vKjp7vBp3 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:04.472 true false
NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:07.395 false false
NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
-PREHOOK: query: select * from small_alltypesorc2a
+PREHOOK: query: select * from small_alltypesorc2a_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc2a
+PREHOOK: Input: default@small_alltypesorc2a_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc2a
+POSTHOOK: query: select * from small_alltypesorc2a_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc2a
+POSTHOOK: Input: default@small_alltypesorc2a_n1
#### A masked pattern was here ####
-51 NULL NULL -1731061911 -51.0 NULL Pw53BBJ yL443x2437PO5Hv1U3lCjq2D 1969-12-31 16:00:08.451 NULL true false
-51 NULL NULL -1846191223 -51.0 NULL Ul085f84S33Xd32u x1JC58g0Ukp 1969-12-31 16:00:08.451 NULL true true
-51 NULL NULL -1874052220 -51.0 NULL c61B47I604gymFJ sjWQS78 1969-12-31 16:00:08.451 NULL false false
-51 NULL NULL -1927203921 -51.0 NULL 45ja5suO 42S0I0 1969-12-31 16:00:08.451 NULL true true
-51 NULL NULL -1970551565 -51.0 NULL r2uhJH3 loXMWyrHjVeK 1969-12-31 16:00:08.451 NULL false false
-PREHOOK: query: select * from small_alltypesorc3a
+PREHOOK: query: select * from small_alltypesorc3a_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc3a
+PREHOOK: Input: default@small_alltypesorc3a_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc3a
+POSTHOOK: query: select * from small_alltypesorc3a_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc3a
+POSTHOOK: Input: default@small_alltypesorc3a_n1
#### A masked pattern was here ####
-51 NULL -31312632 1086455747 -51.0 NULL NULL Bc7xt12568c451o64LF5 1969-12-31 16:00:08.451 NULL NULL true
-51 NULL -337975743 608681041 -51.0 NULL NULL Ih2r28o6 1969-12-31 16:00:08.451 NULL NULL true
-51 NULL -413196097 -306198070 -51.0 NULL NULL F53QcSDPpxYF1Ub 1969-12-31 16:00:08.451 NULL NULL false
-51 NULL -591488718 803603078 -51.0 NULL NULL X616UtmmA3FHan 1969-12-31 16:00:08.451 NULL NULL true
-51 NULL -738306196 -460430946 -51.0 NULL NULL dBOqv 1969-12-31 16:00:08.451 NULL NULL false
-PREHOOK: query: select * from small_alltypesorc4a
+PREHOOK: query: select * from small_alltypesorc4a_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc4a
+PREHOOK: Input: default@small_alltypesorc4a_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc4a
+POSTHOOK: query: select * from small_alltypesorc4a_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc4a
+POSTHOOK: Input: default@small_alltypesorc4a_n1
#### A masked pattern was here ####
-64 -7196 NULL -1615920595 -64.0 -7196.0 NULL X5rDjl 1969-12-31 16:00:11.912 1969-12-31 15:59:58.174 NULL false
-64 -7196 NULL -1639157869 -64.0 -7196.0 NULL IJ0Oj7qAiqNGsN7gn 1969-12-31 16:00:01.785 1969-12-31 15:59:58.174 NULL false
-64 -7196 NULL -527203677 -64.0 -7196.0 NULL JBE4H5RoK412Cs260I72 1969-12-31 15:59:50.184 1969-12-31 15:59:58.174 NULL true
-64 -7196 NULL 406535485 -64.0 -7196.0 NULL E011i 1969-12-31 15:59:56.048 1969-12-31 15:59:58.174 NULL false
-64 -7196 NULL 658026952 -64.0 -7196.0 NULL 4tAur 1969-12-31 15:59:53.866 1969-12-31 15:59:58.174 NULL true
-PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
-(select * from (select * from small_alltypesorc1a) sq1
+PREHOOK: query: create table small_alltypesorc_a_n1 stored as orc as select * from
+(select * from (select * from small_alltypesorc1a_n1) sq1
union all
- select * from (select * from small_alltypesorc2a) sq2
+ select * from (select * from small_alltypesorc2a_n1) sq2
union all
- select * from (select * from small_alltypesorc3a) sq3
+ select * from (select * from small_alltypesorc3a_n1) sq3
union all
- select * from (select * from small_alltypesorc4a) sq4) q
+ select * from (select * from small_alltypesorc4a_n1) sq4) q
PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@small_alltypesorc1a
-PREHOOK: Input: default@small_alltypesorc2a
-PREHOOK: Input: default@small_alltypesorc3a
-PREHOOK: Input: default@small_alltypesorc4a
+PREHOOK: Input: default@small_alltypesorc1a_n1
+PREHOOK: Input: default@small_alltypesorc2a_n1
+PREHOOK: Input: default@small_alltypesorc3a_n1
+PREHOOK: Input: default@small_alltypesorc4a_n1
PREHOOK: Output: database:default
-PREHOOK: Output: default@small_alltypesorc_a
-POSTHOOK: query: create table small_alltypesorc_a stored as orc as select * from
-(select * from (select * from small_alltypesorc1a) sq1
+PREHOOK: Output: default@small_alltypesorc_a_n1
+POSTHOOK: query: create table small_alltypesorc_a_n1 stored as orc as select * from
+(select * from (select * from small_alltypesorc1a_n1) sq1
union all
- select * from (select * from small_alltypesorc2a) sq2
+ select * from (select * from small_alltypesorc2a_n1) sq2
union all
- select * from (select * from small_alltypesorc3a) sq3
+ select * from (select * from small_alltypesorc3a_n1) sq3
union all
- select * from (select * from small_alltypesorc4a) sq4) q
+ select * from (select * from small_alltypesorc4a_n1) sq4) q
POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@small_alltypesorc1a
-POSTHOOK: Input: default@small_alltypesorc2a
-POSTHOOK: Input: default@small_alltypesorc3a
-POSTHOOK: Input: default@small_alltypesorc4a
+POSTHOOK: Input: default@small_alltypesorc1a_n1
+POSTHOOK: Input: default@small_alltypesorc2a_n1
+POSTHOOK: Input: default@small_alltypesorc3a_n1
+POSTHOOK: Input: default@small_alltypesorc4a_n1
POSTHOOK: Output: database:default
-POSTHOOK: Output: default@small_alltypesorc_a
-POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+POSTHOOK: Output: default@small_alltypesorc_a_n1
+POSTHOOK: Lineage: small_alltypesorc_a_n1.cbigint EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.cboolean1 EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.cboolean2 EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.cdouble EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.cfloat EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.cint EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.csmallint EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.cstring1 EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.cstring2 EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.ctimestamp1 EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.ctimestamp2 EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a_n1.ctinyint EXPRESSION [(small_alltypesorc1a_n1)small_alltypesorc1a_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a_n1)small_alltypesorc2a_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a_n1)small_alltypesorc3a_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a_n1)small_alltypesorc4a_n1.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a_n1 COMPUTE STATISTICS
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc_a
-PREHOOK: Output: default@small_alltypesorc_a
-POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
+PREHOOK: Input: default@small_alltypesorc_a_n1
+PREHOOK: Output: default@small_alltypesorc_a_n1
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a_n1 COMPUTE STATISTICS
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc_a
-POSTHOOK: Output: default@small_alltypesorc_a
-PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: Input: default@small_alltypesorc_a_n1
+POSTHOOK: Output: default@small_alltypesorc_a_n1
+PREHOOK: query: ANALYZE TABLE small_alltypesorc_a_n1 COMPUTE STATISTICS FOR COLUMNS
PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@small_alltypesorc_a
-PREHOOK: Output: default@small_alltypesorc_a
+PREHOOK: Input: default@small_alltypesorc_a_n1
+PREHOOK: Output: default@small_alltypesorc_a_n1
#### A masked pattern was here ####
-POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
+POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a_n1 COMPUTE STATISTICS FOR COLUMNS
POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@small_alltypesorc_a
-POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Input: default@small_alltypesorc_a_n1
+POSTHOOK: Output: default@small_alltypesorc_a_n1
#### A masked pattern was here ####
-PREHOOK: query: select * from small_alltypesorc_a
+PREHOOK: query: select * from small_alltypesorc_a_n1
PREHOOK: type: QUERY
-PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Input: default@small_alltypesorc_a_n1
#### A masked pattern was here ####
-POSTHOOK: query: select * from small_alltypesorc_a
+POSTHOOK: query: select * from small_alltypesorc_a_n1
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Input: default@small_alltypesorc_a_n1
#### A masked pattern was here ####
-51 NULL -31312632 1086455747 -51.0 NULL NULL Bc7xt12568c451o64LF5 1969-12-31 16:00:08.451 NULL NULL true
-51 NULL -337975743 608681041 -51.0 NULL NULL Ih2r28o6 1969-12-31 16:00:08.451 NULL NULL true
@@ -228,121 +228,121 @@ NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X xH7445Rals48VOulSyR5F NULL 19
NULL NULL -971543377 -1645852809 NULL NULL uN803aW xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:05.43 false false
PREHOOK: query: explain vectorization detail
formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cint = c.cint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization detail formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cint = c.cint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 POSTHOOK: type: QUERY -{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cint"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","columnExprMap:":{"_col0":"cint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 6]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join 
Operator":{"columnExprMap:":{"_col1":"0:_col1"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 2:int"],"bigTableValueExpressions:":["col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col1"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 6]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce 
Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}} +{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cint"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","columnExprMap:":{"_col0":"cint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Select 
Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 6]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col1":"0:_col1"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 2:int"],"bigTableValueExpressions:":["col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col1"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 
6]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}} PREHOOK: query: select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cint = c.cint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Input: default@small_alltypesorc_a_n1 #### A masked pattern was here #### POSTHOOK: query: select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cint = c.cint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@small_alltypesorc_a +POSTHOOK: Input: default@small_alltypesorc_a_n1 #### A masked pattern was here #### 20 PREHOOK: query: explain vectorization detail formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization detail formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 POSTHOOK: type: QUERY -{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator 
Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cstring2 (type: string)","columnExprMap:":{"_col0":"cstring2"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cstring1","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cstring1 (type: string), cstring2 (type: string)","columnExprMap:":{"_col0":"cstring1","_col1":"cstring2"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 7:string"],"bigTableValueExpressions:":["col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS 
true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}} +{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local 
Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cstring2 (type: string)","columnExprMap:":{"_col0":"cstring2"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cstring1","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cstring1 (type: string), cstring2 (type: string)","columnExprMap:":{"_col0":"cstring1","_col1":"cstring2"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 7:string"],"bigTableValueExpressions:":["col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"Map Join 
Vectorization:":{"bigTableKeyExpressions:":["col 0:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}} PREHOOK: query: select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left 
outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Input: default@small_alltypesorc_a_n1 #### A masked pattern was here #### POSTHOOK: query: select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 ) t1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@small_alltypesorc_a +POSTHOOK: Input: default@small_alltypesorc_a_n1 #### A masked pattern was here #### 28 PREHOOK: query: explain vectorization detail formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 and hd.cint = c.cint ) t1 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization detail formatted select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 and hd.cint = c.cint ) t1 POSTHOOK: type: QUERY -{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cbigint","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cbigint (type: bigint), cstring2 (type: string)","columnExprMap:":{"_col0":"cbigint","_col1":"cstring2"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int), 
_col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cint","cbigint","cstring1","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cbigint","_col2":"cstring1","_col3":"cstring2"},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 3, 6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col2":"0:_col2"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 3:bigint","col 7:string"],"bigTableValueExpressions:":["col 2:int","col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col2"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:int","col 1:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output 
Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 3, 6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}} +{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cbigint","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cbigint (type: bigint), cstring2 (type: string)","columnExprMap:":{"_col0":"cbigint","_col1":"cstring2"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: 
string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cint","cbigint","cstring1","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cbigint","_col2":"cstring1","_col3":"cstring2"},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 3, 6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col2":"0:_col2"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 3:bigint","col 7:string"],"bigTableValueExpressions:":["col 2:int","col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col2"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:int","col 1:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS 
true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 3, 6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}} PREHOOK: query: select count(*) from (select c.cstring1 -from small_alltypesorc_a c -left outer join small_alltypesorc_a cd +from small_alltypesorc_a_n1 c +left outer join small_alltypesorc_a_n1 cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint -left outer join small_alltypesorc_a hd +left outer join small_alltypesorc_a_n1 hd on hd.cstring1 = c.cstring1 and hd.cint = c.cint ) t1 PREHOOK: type: QUERY -PREHOOK: Input: default@small_alltypesorc_a +PREHOOK: Input: default@small_alltypesorc_a_n1 #### A masked pattern was here #### POSTHOOK: query: 
select count(*) from (select c.cstring1
-from small_alltypesorc_a c
-left outer join small_alltypesorc_a cd
+from small_alltypesorc_a_n1 c
+left outer join small_alltypesorc_a_n1 cd
 on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint
-left outer join small_alltypesorc_a hd
+left outer join small_alltypesorc_a_n1 hd
 on hd.cstring1 = c.cstring1 and hd.cint = c.cint
 ) t1
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Input: default@small_alltypesorc_a_n1
 #### A masked pattern was here ####
 28
diff --git a/ql/src/test/results/clientpositive/vector_outer_join6.q.out b/ql/src/test/results/clientpositive/vector_outer_join6.q.out
index 42f405b08e..e2d6cc8910 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join6.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join6.q.out
@@ -70,32 +70,32 @@ POSTHOOK: query: load data local inpath '../../data/files/TJOIN4' into table TJO
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@tjoin4_txt
-PREHOOK: query: create table TJOIN1 stored as orc AS SELECT * FROM TJOIN1_txt
+PREHOOK: query: create table TJOIN1_n0 stored as orc AS SELECT * FROM TJOIN1_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@tjoin1_txt
 PREHOOK: Output: database:default
-PREHOOK: Output: default@TJOIN1
-POSTHOOK: query: create table TJOIN1 stored as orc AS SELECT * FROM TJOIN1_txt
+PREHOOK: Output: default@TJOIN1_n0
+POSTHOOK: query: create table TJOIN1_n0 stored as orc AS SELECT * FROM TJOIN1_txt
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin1_txt
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@TJOIN1
-POSTHOOK: Lineage: tjoin1.c1 SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:c1, type:int, comment:null), ]
-POSTHOOK: Lineage: tjoin1.c2 SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:c2, type:int, comment:null), ]
-POSTHOOK: Lineage: tjoin1.rnum SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:rnum, type:int, comment:null), ]
-PREHOOK: query: create table TJOIN2 stored as orc AS SELECT * FROM TJOIN2_txt
+POSTHOOK: Output: default@TJOIN1_n0
+POSTHOOK: Lineage: tjoin1_n0.c1 SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1_n0.c2 SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:c2, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1_n0.rnum SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: create table TJOIN2_n0 stored as orc AS SELECT * FROM TJOIN2_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@tjoin2_txt
 PREHOOK: Output: database:default
-PREHOOK: Output: default@TJOIN2
-POSTHOOK: query: create table TJOIN2 stored as orc AS SELECT * FROM TJOIN2_txt
+PREHOOK: Output: default@TJOIN2_n0
+POSTHOOK: query: create table TJOIN2_n0 stored as orc AS SELECT * FROM TJOIN2_txt
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin2_txt
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@TJOIN2
-POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2_txt)tjoin2_txt.FieldSchema(name:c1, type:int, comment:null), ]
-POSTHOOK: Lineage: tjoin2.c2 SIMPLE 
[(tjoin2_txt)tjoin2_txt.FieldSchema(name:c2, type:char(2), comment:null), ] +POSTHOOK: Lineage: tjoin2_n0.rnum SIMPLE [(tjoin2_txt)tjoin2_txt.FieldSchema(name:rnum, type:int, comment:null), ] PREHOOK: query: create table TJOIN3 stored as orc AS SELECT * FROM TJOIN3_txt PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@tjoin3_txt @@ -124,25 +124,25 @@ POSTHOOK: Lineage: tjoin4.c2 SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:c2, POSTHOOK: Lineage: tjoin4.rnum SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:rnum, type:int, comment:null), ] PREHOOK: query: explain vectorization detail formatted select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization detail formatted select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 POSTHOOK: type: QUERY -{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_0:$hdt$_0:tjoin2":{"TableScan":{"alias:":"tjoin2","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin2","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_21"}}}}}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_9","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_19"}}}}}}}}},"Stage-5":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"tjoin1","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column 
stats: NONE","table:":"tjoin1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 1:c1:int, 2:c2:int, 3:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_23","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_24","children":{"Select Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_25","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col3":"1:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 2:int","col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col3"],"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_26","children":{"Select Operator":{"expressions:":"_col0 (type: int), _col1 (type: int), _col3 (type: int)","columnExprMap:":{"_col0":"_col0","_col1":"_col1","_col2":"_col3"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1, 2]"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_27","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: 
NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_28"}}}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"3","includeColumns:":"[0, 1]","dataColumns:":["rnum:int","c1:int","c2:int"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_29"}}}}}} +{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"TableScan":{"alias:":"tjoin2_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin2_n0","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_21"}}}}}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_9","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_19"}}}}}}}}},"Stage-5":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"tjoin1_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin1_n0","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 1:c1:int, 2:c2:int, 3:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: 
NONE","OperatorId:":"SEL_23","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_24","children":{"Select Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_25","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col3":"1:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 2:int","col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col3"],"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_26","children":{"Select Operator":{"expressions:":"_col0 (type: int), _col1 (type: int), _col3 (type: int)","columnExprMap:":{"_col0":"_col0","_col1":"_col1","_col2":"_col3"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1, 2]"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_27","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_28"}}}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS 
true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"3","includeColumns:":"[0, 1]","dataColumns:":["rnum:int","c1:int","c2:int"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_29"}}}}}} PREHOOK: query: select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 PREHOOK: type: QUERY -PREHOOK: Input: default@tjoin1 -PREHOOK: Input: default@tjoin2 +PREHOOK: Input: default@tjoin1_n0 +PREHOOK: Input: default@tjoin2_n0 PREHOOK: Input: default@tjoin3 #### A masked pattern was here #### POSTHOOK: query: select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tjoin1 -POSTHOOK: Input: default@tjoin2 +POSTHOOK: Input: default@tjoin1_n0 +POSTHOOK: Input: default@tjoin2_n0 POSTHOOK: Input: default@tjoin3 #### A masked pattern was here #### 0 0 0 @@ -151,25 +151,25 @@ POSTHOOK: Input: default@tjoin3 2 NULL NULL PREHOOK: query: explain vectorization detail formatted select tj1rnum, tj2rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization detail formatted select tj1rnum, tj2rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 POSTHOOK: type: QUERY -{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_0:$hdt$_0:tjoin2":{"TableScan":{"alias:":"tjoin2","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num 
rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin2","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_21"}}}}}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["c1"],"database:":"default","Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select Operator":{"expressions:":"c1 (type: int)","columnExprMap:":{"_col0":"c1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_9","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_19"}}}}}}}}},"Stage-5":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"tjoin1","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 1:c1:int, 2:c2:int, 3:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_23","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_24","children":{"Select Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_25","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 
1:int"],"bigTableValueExpressions:":["col 2:int","col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_26","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_27"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"3","includeColumns:":"[0, 1]","dataColumns:":["rnum:int","c1:int","c2:int"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_28"}}}}}} +{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"TableScan":{"alias:":"tjoin2_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin2_n0","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_21"}}}}}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["c1"],"database:":"default","Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select Operator":{"expressions:":"c1 (type: int)","columnExprMap:":{"_col0":"c1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_9","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col0 (type: 
int)"},"OperatorId:":"HASHTABLESINK_19"}}}}}}}}},"Stage-5":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"tjoin1_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin1_n0","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 1:c1:int, 2:c2:int, 3:ROW__ID:struct]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_23","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_24","children":{"Select Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_25","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 2:int","col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_26","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output 
format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_27"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"3","includeColumns:":"[0, 1]","dataColumns:":["rnum:int","c1:int","c2:int"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_28"}}}}}} PREHOOK: query: select tj1rnum, tj2rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 PREHOOK: type: QUERY -PREHOOK: Input: default@tjoin1 -PREHOOK: Input: default@tjoin2 +PREHOOK: Input: default@tjoin1_n0 +PREHOOK: Input: default@tjoin2_n0 PREHOOK: Input: default@tjoin3 #### A masked pattern was here #### POSTHOOK: query: select tj1rnum, tj2rnum as rnumt3 from - (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 + (select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tjoin1 -POSTHOOK: Input: default@tjoin2 +POSTHOOK: Input: default@tjoin1_n0 +POSTHOOK: Input: default@tjoin2_n0 POSTHOOK: Input: default@tjoin3 #### A masked pattern was here #### 0 0 diff --git a/ql/src/test/results/clientpositive/vector_reduce1.q.out b/ql/src/test/results/clientpositive/vector_reduce1.q.out index 173090bd52..8be8dbab8e 100644 --- a/ql/src/test/results/clientpositive/vector_reduce1.q.out +++ b/ql/src/test/results/clientpositive/vector_reduce1.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table vectortab2k( +PREHOOK: query: create table vectortab2k_n8( t tinyint, si smallint, i int, @@ -16,8 +16,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: create table vectortab2k( +PREHOOK: Output: default@vectortab2k_n8 +POSTHOOK: query: create table vectortab2k_n8( t tinyint, si smallint, i int, @@ -35,16 +35,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: Output: default@vectortab2k_n8 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n8 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: Output: default@vectortab2k_n8 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n8 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: create table vectortab2korc( +POSTHOOK: Output: default@vectortab2k_n8 +PREHOOK: query: create table vectortab2korc_n7( t tinyint, si smallint, i int, @@ -61,8 +61,8 @@ PREHOOK: query: create table vectortab2korc( STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: create table vectortab2korc( +PREHOOK: Output: default@vectortab2korc_n7 +POSTHOOK: query: create table vectortab2korc_n7( t tinyint, si smallint, i int, @@ -79,33 +79,33 @@ POSTHOOK: query: create table vectortab2korc( STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2korc -PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: Output: default@vectortab2korc_n7 +PREHOOK: query: INSERT INTO TABLE vectortab2korc_n7 SELECT * FROM vectortab2k_n8 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2k -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: Input: default@vectortab2k_n8 +PREHOOK: Output: default@vectortab2korc_n7 +POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n7 SELECT * FROM vectortab2k_n8 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2k -POSTHOOK: Output: default@vectortab2korc -POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +POSTHOOK: Input: default@vectortab2k_n8 +POSTHOOK: Output: default@vectortab2korc_n7 +POSTHOOK: Lineage: vectortab2korc_n7.b SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.bo SIMPLE 
[(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.d SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.dc SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.dt SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.f SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.i SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.s SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.s2 SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.si SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.t SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.ts SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n7.ts2 SIMPLE [(vectortab2k_n8)vectortab2k_n8.FieldSchema(name:ts2, type:timestamp, comment:null), ] PREHOOK: query: explain vectorization expression -select b from vectortab2korc order by b +select b from vectortab2korc_n7 order by b PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select b from vectortab2korc order by b +select b from vectortab2korc_n7 order by b POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -120,7 +120,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: vectortab2korc + alias: vectortab2korc_n7 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -174,13 +174,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select b from vectortab2korc order by b +PREHOOK: query: select b from vectortab2korc_n7 order by b PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2korc +PREHOOK: Input: default@vectortab2korc_n7 #### A masked pattern was here #### -POSTHOOK: query: select b from vectortab2korc order by b +POSTHOOK: query: select b from vectortab2korc_n7 order by b POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2korc +POSTHOOK: Input: default@vectortab2korc_n7 #### A masked pattern was here #### -6917607783359897600 -6919476845891313664 diff --git a/ql/src/test/results/clientpositive/vector_reduce2.q.out b/ql/src/test/results/clientpositive/vector_reduce2.q.out index 83a858a4a8..779974635e 100644 --- a/ql/src/test/results/clientpositive/vector_reduce2.q.out +++ b/ql/src/test/results/clientpositive/vector_reduce2.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table vectortab2k( +PREHOOK: query: create table vectortab2k_n5( t tinyint, si smallint, i int, @@ -16,8 +16,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: create table vectortab2k( +PREHOOK: Output: default@vectortab2k_n5 +POSTHOOK: query: create table vectortab2k_n5( t tinyint, si smallint, i int, @@ -35,16 +35,16 @@ ROW FORMAT DELIMITED 
FIELDS TERMINATED BY '|' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: Output: default@vectortab2k_n5 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n5 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: Output: default@vectortab2k_n5 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n5 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: create table vectortab2korc( +POSTHOOK: Output: default@vectortab2k_n5 +PREHOOK: query: create table vectortab2korc_n5( t tinyint, si smallint, i int, @@ -61,8 +61,8 @@ PREHOOK: query: create table vectortab2korc( STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: create table vectortab2korc( +PREHOOK: Output: default@vectortab2korc_n5 +POSTHOOK: query: create table vectortab2korc_n5( t tinyint, si smallint, i int, @@ -79,33 +79,33 @@ POSTHOOK: query: create table vectortab2korc( STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2korc -PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: Output: default@vectortab2korc_n5 +PREHOOK: query: INSERT INTO TABLE vectortab2korc_n5 SELECT * FROM vectortab2k_n5 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2k -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: Input: default@vectortab2k_n5 +PREHOOK: Output: default@vectortab2korc_n5 +POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n5 SELECT * FROM vectortab2k_n5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2k -POSTHOOK: Output: default@vectortab2korc -POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: 
vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +POSTHOOK: Input: default@vectortab2k_n5 +POSTHOOK: Output: default@vectortab2korc_n5 +POSTHOOK: Lineage: vectortab2korc_n5.b SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.bo SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.d SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.dc SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.dt SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.f SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.i SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.s SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.s2 SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.si SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.t SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.ts SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n5.ts2 SIMPLE [(vectortab2k_n5)vectortab2k_n5.FieldSchema(name:ts2, type:timestamp, comment:null), ] PREHOOK: query: explain vectorization expression -select s, i, s2 from vectortab2korc order by s, i, s2 +select s, i, s2 from vectortab2korc_n5 order by s, i, s2 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select s, i, s2 from vectortab2korc order by s, i, s2 +select s, i, s2 from vectortab2korc_n5 order by s, i, s2 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -120,7 +120,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: vectortab2korc + alias: vectortab2korc_n5 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -174,13 +174,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select s, i, s2 from vectortab2korc order by s, i, s2 +PREHOOK: query: select s, i, s2 from vectortab2korc_n5 order by s, i, s2 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2korc +PREHOOK: Input: default@vectortab2korc_n5 #### A masked pattern was here #### -POSTHOOK: query: select s, i, s2 from vectortab2korc order by s, i, s2 +POSTHOOK: query: select s, i, s2 from vectortab2korc_n5 order by s, i, s2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2korc +POSTHOOK: Input: default@vectortab2korc_n5 #### A masked pattern was here #### -1036720157 david king -1125605439 ethan zipper diff --git a/ql/src/test/results/clientpositive/vector_reduce3.q.out b/ql/src/test/results/clientpositive/vector_reduce3.q.out index d684ddfc32..a7ad970a38 100644 --- 
a/ql/src/test/results/clientpositive/vector_reduce3.q.out +++ b/ql/src/test/results/clientpositive/vector_reduce3.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: create table vectortab2k( +PREHOOK: query: create table vectortab2k_n2( t tinyint, si smallint, i int, @@ -16,8 +16,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: create table vectortab2k( +PREHOOK: Output: default@vectortab2k_n2 +POSTHOOK: query: create table vectortab2k_n2( t tinyint, si smallint, i int, @@ -35,16 +35,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: Output: default@vectortab2k_n2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n2 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: Output: default@vectortab2k_n2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n2 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: create table vectortab2korc( +POSTHOOK: Output: default@vectortab2k_n2 +PREHOOK: query: create table vectortab2korc_n2( t tinyint, si smallint, i int, @@ -61,8 +61,8 @@ PREHOOK: query: create table vectortab2korc( STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: create table vectortab2korc( +PREHOOK: Output: default@vectortab2korc_n2 +POSTHOOK: query: create table vectortab2korc_n2( t tinyint, si smallint, i int, @@ -79,33 +79,33 @@ POSTHOOK: query: create table vectortab2korc( STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2korc -PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: Output: default@vectortab2korc_n2 +PREHOOK: query: INSERT INTO TABLE vectortab2korc_n2 SELECT * FROM vectortab2k_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2k -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: Input: default@vectortab2k_n2 +PREHOOK: Output: default@vectortab2korc_n2 +POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n2 SELECT * FROM vectortab2k_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2k -POSTHOOK: Output: default@vectortab2korc -POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, 
comment:null), ] -POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +POSTHOOK: Input: default@vectortab2k_n2 +POSTHOOK: Output: default@vectortab2korc_n2 +POSTHOOK: Lineage: vectortab2korc_n2.b SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.bo SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.d SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.dc SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.dt SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.f SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.i SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.s SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.s2 SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.si SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.t SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.ts SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n2.ts2 SIMPLE [(vectortab2k_n2)vectortab2k_n2.FieldSchema(name:ts2, type:timestamp, comment:null), ] PREHOOK: query: explain vectorization expression -select s from vectortab2korc order by s +select s from vectortab2korc_n2 order by s PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select s from vectortab2korc order by s +select s from vectortab2korc_n2 order by s POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -120,7 +120,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: vectortab2korc + alias: vectortab2korc_n2 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -174,13 +174,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select s from vectortab2korc order by s +PREHOOK: query: select s from vectortab2korc_n2 order by s PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2korc +PREHOOK: Input: default@vectortab2korc_n2 #### A masked 
pattern was here #### -POSTHOOK: query: select s from vectortab2korc order by s +POSTHOOK: query: select s from vectortab2korc_n2 order by s POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2korc +POSTHOOK: Input: default@vectortab2korc_n2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out index b46501e7cd..15f3b098ec 100644 --- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out +++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out @@ -1,28 +1,28 @@ -PREHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +PREHOOK: query: CREATE TABLE decimal_test_n2 STORED AS ORC AS SELECT cint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc WHERE cint is not null and cdouble is not null PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default -PREHOOK: Output: default@decimal_test -POSTHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +PREHOOK: Output: default@decimal_test_n2 +POSTHOOK: query: CREATE TABLE decimal_test_n2 STORED AS ORC AS SELECT cint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc WHERE cint is not null and cdouble is not null POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default -POSTHOOK: Output: default@decimal_test -POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: decimal_test.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Output: default@decimal_test_n2 +POSTHOOK: Lineage: decimal_test_n2.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: decimal_test_n2.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: decimal_test_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: decimal_test_n2.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION -SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test +SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 LIMIT 50 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION -SELECT cint, cdouble, cdecimal1, 
cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test +SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 @@ -42,7 +42,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: decimal_test + alias: decimal_test_n2 Statistics: Num rows: 6102 Data size: 1440072 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -160,21 +160,21 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test +PREHOOK: query: SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 LIMIT 50 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_test +PREHOOK: Input: default@decimal_test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test +POSTHOOK: query: SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 LIMIT 50 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_test +POSTHOOK: Input: default@decimal_test_n2 #### A masked pattern was here #### -1073051226 -7382.0 -4409.2486486486 -5280.96923076923100 -4409.2486486486 -1072081801 8373.0 5001.1702702703 5989.91538461538500 5001.1702702703 @@ -227,40 +227,40 @@ POSTHOOK: Input: default@decimal_test -1039776293 13704.0 8185.3621621622 9803.63076923077100 8185.3621621622 -1039762548 -3802.0 -2270.9243243243 -2719.89230769230830 -2270.9243243243 PREHOOK: query: SELECT sum(hash(*)) - FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test + FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 LIMIT 50) as q PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_test +PREHOOK: Input: default@decimal_test_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT sum(hash(*)) - FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test + FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 LIMIT 50) as q POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_test +POSTHOOK: Input: default@decimal_test_n2 #### A masked pattern was here #### 12703057972 PREHOOK: query: SELECT sum(hash(*)) - FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test + FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 LIMIT 50) as q PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_test +PREHOOK: Input: 
default@decimal_test_n2 #### A masked pattern was here #### POSTHOOK: query: SELECT sum(hash(*)) - FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test + FROM (SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test_n2 WHERE cdecimal1 is not null and cdecimal2 is not null GROUP BY cint, cdouble, cdecimal1, cdecimal2 ORDER BY cint, cdouble, cdecimal1, cdecimal2 LIMIT 50) as q POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_test +POSTHOOK: Input: default@decimal_test_n2 #### A masked pattern was here #### 12703057972 diff --git a/ql/src/test/results/clientpositive/vector_string_concat.q.out b/ql/src/test/results/clientpositive/vector_string_concat.q.out index bede8a1bcd..a61c6f0ba5 100644 --- a/ql/src/test/results/clientpositive/vector_string_concat.q.out +++ b/ql/src/test/results/clientpositive/vector_string_concat.q.out @@ -198,7 +198,7 @@ sarah garcia sarah garcia | sarah garcia| zach young zach young | zach young| david underhill david underhill | david underhill| yuri carson yuri carson | yuri carson| -PREHOOK: query: create table vectortab2k( +PREHOOK: query: create table vectortab2k_n0( t tinyint, si smallint, i int, @@ -216,8 +216,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: create table vectortab2k( +PREHOOK: Output: default@vectortab2k_n0 +POSTHOOK: query: create table vectortab2k_n0( t tinyint, si smallint, i int, @@ -235,16 +235,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: Output: default@vectortab2k_n0 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n0 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: Output: default@vectortab2k_n0 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n0 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: create table vectortab2korc( +POSTHOOK: Output: default@vectortab2k_n0 +PREHOOK: query: create table vectortab2korc_n0( t tinyint, si smallint, i int, @@ -261,8 +261,8 @@ PREHOOK: query: create table vectortab2korc( STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: create table vectortab2korc( +PREHOOK: Output: default@vectortab2korc_n0 +POSTHOOK: query: create table vectortab2korc_n0( t tinyint, si smallint, i int, @@ -279,38 +279,38 @@ POSTHOOK: query: create table vectortab2korc( STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2korc -PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: Output: default@vectortab2korc_n0 +PREHOOK: query: INSERT INTO TABLE vectortab2korc_n0 SELECT * FROM vectortab2k_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2k -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k 
+PREHOOK: Input: default@vectortab2k_n0 +PREHOOK: Output: default@vectortab2korc_n0 +POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n0 SELECT * FROM vectortab2k_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2k -POSTHOOK: Output: default@vectortab2korc -POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +POSTHOOK: Input: default@vectortab2k_n0 +POSTHOOK: Output: default@vectortab2korc_n0 +POSTHOOK: Lineage: vectortab2korc_n0.b SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.bo SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.d SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.dc SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.dt SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.f SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.i SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.s SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.s2 SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.si SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.t SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.ts SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n0.ts2 
SIMPLE [(vectortab2k_n0)vectortab2k_n0.FieldSchema(name:ts2, type:timestamp, comment:null), ] PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field` - FROM vectortab2korc + FROM vectortab2korc_n0 GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) ORDER BY `field` LIMIT 50 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field` - FROM vectortab2korc + FROM vectortab2korc_n0 GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) ORDER BY `field` LIMIT 50 @@ -329,7 +329,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: vectortab2korc + alias: vectortab2korc_n0 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -445,20 +445,20 @@ STAGE PLANS: ListSink PREHOOK: query: SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field` - FROM vectortab2korc + FROM vectortab2korc_n0 GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) ORDER BY `field` LIMIT 50 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2korc +PREHOOK: Input: default@vectortab2korc_n0 #### A masked pattern was here #### POSTHOOK: query: SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field` - FROM vectortab2korc + FROM vectortab2korc_n0 GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) ORDER BY `field` LIMIT 50 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2korc +POSTHOOK: Input: default@vectortab2korc_n0 #### A masked pattern was here #### NULL Quarter 1-1970 diff --git a/ql/src/test/results/clientpositive/vector_struct_in.q.out b/ql/src/test/results/clientpositive/vector_struct_in.q.out index f980286c5d..66dd49a690 100644 --- a/ql/src/test/results/clientpositive/vector_struct_in.q.out +++ b/ql/src/test/results/clientpositive/vector_struct_in.q.out @@ -1,23 +1,23 @@ -PREHOOK: query: create table test_1 (`id` string, `lineid` string) stored as orc +PREHOOK: query: create table test_1_n1 (`id` string, `lineid` string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_1 -POSTHOOK: query: create table test_1 (`id` string, `lineid` string) stored as orc +PREHOOK: Output: default@test_1_n1 +POSTHOOK: query: create table test_1_n1 (`id` string, `lineid` string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_1 -PREHOOK: query: insert into table test_1 values ('one','1'), ('seven','1') +POSTHOOK: Output: default@test_1_n1 +PREHOOK: query: insert into table test_1_n1 values ('one','1'), ('seven','1') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test_1 -POSTHOOK: query: insert into table test_1 values ('one','1'), ('seven','1') +PREHOOK: Output: default@test_1_n1 +POSTHOOK: query: insert into table test_1_n1 values ('one','1'), ('seven','1') POSTHOOK: type: 
QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test_1 -POSTHOOK: Lineage: test_1.id SCRIPT [] -POSTHOOK: Lineage: test_1.lineid SCRIPT [] +POSTHOOK: Output: default@test_1_n1 +POSTHOOK: Lineage: test_1_n1.id SCRIPT [] +POSTHOOK: Lineage: test_1_n1.lineid SCRIPT [] PREHOOK: query: explain vectorization expression -select * from test_1 where struct(`id`, `lineid`) +select * from test_1_n1 where struct(`id`, `lineid`) IN ( struct('two','3'), struct('three','1'), @@ -31,7 +31,7 @@ struct('ten','1') ) PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select * from test_1 where struct(`id`, `lineid`) +select * from test_1_n1 where struct(`id`, `lineid`) IN ( struct('two','3'), struct('three','1'), @@ -57,7 +57,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n1 Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -103,7 +103,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from test_1 where struct(`id`, `lineid`) +PREHOOK: query: select * from test_1_n1 where struct(`id`, `lineid`) IN ( struct('two','3'), struct('three','1'), @@ -116,9 +116,9 @@ struct('nine','1'), struct('ten','1') ) PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_1 where struct(`id`, `lineid`) +POSTHOOK: query: select * from test_1_n1 where struct(`id`, `lineid`) IN ( struct('two','3'), struct('three','1'), @@ -131,7 +131,7 @@ struct('nine','1'), struct('ten','1') ) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n1 #### A masked pattern was here #### one 1 seven 1 @@ -147,7 +147,7 @@ struct('eight','1'), struct('seven','1'), struct('nine','1'), struct('ten','1') -) as b from test_1 +) as b from test_1_n1 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression select `id`, `lineid`, struct(`id`, `lineid`) @@ -161,7 +161,7 @@ struct('eight','1'), struct('seven','1'), struct('nine','1'), struct('ten','1') -) as b from test_1 +) as b from test_1_n1 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -176,7 +176,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_1 + alias: test_1_n1 Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -227,9 +227,9 @@ struct('eight','1'), struct('seven','1'), struct('nine','1'), struct('ten','1') -) as b from test_1 +) as b from test_1_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@test_1 +PREHOOK: Input: default@test_1_n1 #### A masked pattern was here #### POSTHOOK: query: select `id`, `lineid`, struct(`id`, `lineid`) IN ( @@ -242,32 +242,32 @@ struct('eight','1'), struct('seven','1'), struct('nine','1'), struct('ten','1') -) as b from test_1 +) as b from test_1_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_1 +POSTHOOK: Input: default@test_1_n1 #### A masked pattern was here #### one 1 true seven 1 true -PREHOOK: query: create table test_2 (`id` int, `lineid` int) stored as orc +PREHOOK: query: create table test_2_n1 (`id` int, `lineid` int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test_2 -POSTHOOK: query: create table test_2 (`id` int, `lineid` int) stored as orc +PREHOOK: Output: default@test_2_n1 +POSTHOOK: query: create table test_2_n1 (`id` int, `lineid` int) stored as orc 
POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test_2 -PREHOOK: query: insert into table test_2 values (1,1), (7,1) +POSTHOOK: Output: default@test_2_n1 +PREHOOK: query: insert into table test_2_n1 values (1,1), (7,1) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test_2 -POSTHOOK: query: insert into table test_2 values (1,1), (7,1) +PREHOOK: Output: default@test_2_n1 +POSTHOOK: query: insert into table test_2_n1 values (1,1), (7,1) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test_2 -POSTHOOK: Lineage: test_2.id SCRIPT [] -POSTHOOK: Lineage: test_2.lineid SCRIPT [] +POSTHOOK: Output: default@test_2_n1 +POSTHOOK: Lineage: test_2_n1.id SCRIPT [] +POSTHOOK: Lineage: test_2_n1.lineid SCRIPT [] PREHOOK: query: explain vectorization expression -select * from test_2 where struct(`id`, `lineid`) +select * from test_2_n1 where struct(`id`, `lineid`) IN ( struct(2,3), struct(3,1), @@ -281,7 +281,7 @@ struct(10,1) ) PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -select * from test_2 where struct(`id`, `lineid`) +select * from test_2_n1 where struct(`id`, `lineid`) IN ( struct(2,3), struct(3,1), @@ -307,7 +307,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_2 + alias: test_2_n1 Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -353,7 +353,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select * from test_2 where struct(`id`, `lineid`) +PREHOOK: query: select * from test_2_n1 where struct(`id`, `lineid`) IN ( struct(2,3), struct(3,1), @@ -366,9 +366,9 @@ struct(9,1), struct(10,1) ) PREHOOK: type: QUERY -PREHOOK: Input: default@test_2 +PREHOOK: Input: default@test_2_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from test_2 where struct(`id`, `lineid`) +POSTHOOK: query: select * from test_2_n1 where struct(`id`, `lineid`) IN ( struct(2,3), struct(3,1), @@ -381,7 +381,7 @@ struct(9,1), struct(10,1) ) POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_2 +POSTHOOK: Input: default@test_2_n1 #### A masked pattern was here #### 1 1 7 1 @@ -397,7 +397,7 @@ struct(8,1), struct(7,1), struct(9,1), struct(10,1) -) as b from test_2 +) as b from test_2_n1 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression select `id`, `lineid`, struct(`id`, `lineid`) @@ -411,7 +411,7 @@ struct(8,1), struct(7,1), struct(9,1), struct(10,1) -) as b from test_2 +) as b from test_2_n1 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -426,7 +426,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test_2 + alias: test_2_n1 Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -477,9 +477,9 @@ struct(8,1), struct(7,1), struct(9,1), struct(10,1) -) as b from test_2 +) as b from test_2_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@test_2 +PREHOOK: Input: default@test_2_n1 #### A masked pattern was here #### POSTHOOK: query: select `id`, `lineid`, struct(`id`, `lineid`) IN ( @@ -492,9 +492,9 @@ struct(8,1), struct(7,1), struct(9,1), struct(10,1) -) as b from test_2 +) as b from test_2_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test_2 +POSTHOOK: Input: default@test_2_n1 #### A masked pattern was here #### 1 1 true 7 1 true diff --git a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out 
b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out index b1dce4e90a..ae13ae677c 100644 --- a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out +++ b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out @@ -76,20 +76,20 @@ POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### _c0 _c1 key1 value1 -PREHOOK: query: create table decimal_2 (t decimal(18,9)) stored as orc +PREHOOK: query: create table decimal_2_n0 (t decimal(18,9)) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@decimal_2 -POSTHOOK: query: create table decimal_2 (t decimal(18,9)) stored as orc +PREHOOK: Output: default@decimal_2_n0 +POSTHOOK: query: create table decimal_2_n0 (t decimal(18,9)) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@decimal_2 +POSTHOOK: Output: default@decimal_2_n0 PREHOOK: query: explain vectorization detail -insert overwrite table decimal_2 +insert overwrite table decimal_2_n0 select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows) PREHOOK: type: QUERY POSTHOOK: query: explain vectorization detail -insert overwrite table decimal_2 +insert overwrite table decimal_2_n0 select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows) POSTHOOK: type: QUERY Explain @@ -137,7 +137,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.decimal_2 + name: default.decimal_2_n0 Execution mode: vectorized Map Vectorization: enabled: true @@ -172,7 +172,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.decimal_2 + name: default.decimal_2_n0 Stage: Stage-2 Stats Work @@ -198,36 +198,36 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: insert overwrite table decimal_2 +PREHOOK: query: insert overwrite table decimal_2_n0 select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc -PREHOOK: Output: default@decimal_2 -POSTHOOK: query: insert overwrite table decimal_2 +PREHOOK: Output: default@decimal_2_n0 +POSTHOOK: query: insert overwrite table decimal_2_n0 select cast('17.29' as decimal(4,2)) from alltypesorc tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc -POSTHOOK: Output: default@decimal_2 -POSTHOOK: Lineage: decimal_2.t EXPRESSION [] +POSTHOOK: Output: default@decimal_2_n0 +POSTHOOK: Lineage: decimal_2_n0.t EXPRESSION [] _col0 -PREHOOK: query: select count(*) from decimal_2 +PREHOOK: query: select count(*) from decimal_2_n0 PREHOOK: type: QUERY -PREHOOK: Input: default@decimal_2 +PREHOOK: Input: default@decimal_2_n0 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from decimal_2 +POSTHOOK: query: select count(*) from decimal_2_n0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n0 #### A masked pattern was here #### _c0 1 -PREHOOK: query: drop table decimal_2 +PREHOOK: query: drop table decimal_2_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal_2 -PREHOOK: Output: default@decimal_2 -POSTHOOK: query: drop table decimal_2 +PREHOOK: Input: default@decimal_2_n0 +PREHOOK: Output: default@decimal_2_n0 
+POSTHOOK: query: drop table decimal_2_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal_2 -POSTHOOK: Output: default@decimal_2 +POSTHOOK: Input: default@decimal_2_n0 +POSTHOOK: Output: default@decimal_2_n0 PREHOOK: query: explain vectorization detail select count(1) from (select * from (Select 1 a) x order by x.a) y PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/vector_udf_character_length.q.out b/ql/src/test/results/clientpositive/vector_udf_character_length.q.out index 23bb5fbe94..2c9dcf0b7f 100644 --- a/ql/src/test/results/clientpositive/vector_udf_character_length.q.out +++ b/ql/src/test/results/clientpositive/vector_udf_character_length.q.out @@ -30,17 +30,17 @@ Example: 5 Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFCharacterLength Function type:BUILTIN -PREHOOK: query: CREATE TABLE dest1(len INT) +PREHOOK: query: CREATE TABLE dest1_n50(len INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(len INT) +PREHOOK: Output: default@dest1_n50 +POSTHOOK: query: CREATE TABLE dest1_n50(len INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value) +POSTHOOK: Output: default@dest1_n50 +PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n50 SELECT character_length(src1.value) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value) +POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n50 SELECT character_length(src1.value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -70,7 +70,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n50 Execution mode: vectorized Stage: Stage-7 @@ -90,7 +90,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n50 Stage: Stage-2 Stats Work @@ -106,7 +106,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n50 Stage: Stage-5 Map Reduce @@ -118,7 +118,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n50 Stage: Stage-6 Move Operator @@ -126,22 +126,22 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value) +PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n50 SELECT character_length(src1.value) PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value) +PREHOOK: Output: default@dest1_n50 +POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n50 SELECT character_length(src1.value) 
POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n50 +POSTHOOK: Lineage: dest1_n50.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n50.* FROM dest1_n50 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n50 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n50.* FROM dest1_n50 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n50 #### A masked pattern was here #### 7 0 @@ -168,53 +168,53 @@ POSTHOOK: Input: default@dest1 0 0 0 -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n50 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n50 +PREHOOK: Output: default@dest1_n50 +POSTHOOK: query: DROP TABLE dest1_n50 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +POSTHOOK: Input: default@dest1_n50 +POSTHOOK: Output: default@dest1_n50 +PREHOOK: query: CREATE TABLE dest1_n50(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n50 +POSTHOOK: query: CREATE TABLE dest1_n50(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +POSTHOOK: Output: default@dest1_n50 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n50 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@dest1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +PREHOOK: Output: default@dest1_n50 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n50 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@dest1 -PREHOOK: query: INSERT INTO dest1 VALUES(NULL) +POSTHOOK: Output: default@dest1_n50 +PREHOOK: query: INSERT INTO dest1_n50 VALUES(NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT INTO dest1 VALUES(NULL) +PREHOOK: Output: default@dest1_n50 +POSTHOOK: query: INSERT INTO dest1_n50 VALUES(NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.name EXPRESSION [] -PREHOOK: query: CREATE TABLE dest2 STORED AS ORC AS SELECT * FROM dest1 +POSTHOOK: Output: default@dest1_n50 +POSTHOOK: Lineage: dest1_n50.name EXPRESSION [] +PREHOOK: query: CREATE TABLE dest2_n6 STORED AS ORC AS SELECT * FROM dest1_n50 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n50 PREHOOK: Output: database:default -PREHOOK: Output: default@dest2 -POSTHOOK: query: CREATE TABLE dest2 STORED AS ORC AS SELECT * FROM dest1 +PREHOOK: Output: default@dest2_n6 +POSTHOOK: query: CREATE TABLE dest2_n6 STORED AS ORC AS SELECT * FROM dest1_n50 POSTHOOK: type: 
CREATETABLE_AS_SELECT -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n50 POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest2.name SIMPLE [(dest1)dest1.FieldSchema(name:name, type:string, comment:null), ] -PREHOOK: query: EXPLAIN SELECT character_length(dest2.name) FROM dest2 +POSTHOOK: Output: default@dest2_n6 +POSTHOOK: Lineage: dest2_n6.name SIMPLE [(dest1_n50)dest1_n50.FieldSchema(name:name, type:string, comment:null), ] +PREHOOK: query: EXPLAIN SELECT character_length(dest2_n6.name) FROM dest2_n6 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT character_length(dest2.name) FROM dest2 +POSTHOOK: query: EXPLAIN SELECT character_length(dest2_n6.name) FROM dest2_n6 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -225,7 +225,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: dest2 + alias: dest2_n6 Statistics: Num rows: 2 Data size: 90 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: character_length(name) (type: int) @@ -246,19 +246,19 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT character_length(dest2.name) FROM dest2 +PREHOOK: query: SELECT character_length(dest2_n6.name) FROM dest2_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n6 #### A masked pattern was here #### -POSTHOOK: query: SELECT character_length(dest2.name) FROM dest2 +POSTHOOK: query: SELECT character_length(dest2_n6.name) FROM dest2_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n6 #### A masked pattern was here #### NULL 2 -PREHOOK: query: EXPLAIN SELECT char_length(dest2.name) FROM dest2 +PREHOOK: query: EXPLAIN SELECT char_length(dest2_n6.name) FROM dest2_n6 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT char_length(dest2.name) FROM dest2 +POSTHOOK: query: EXPLAIN SELECT char_length(dest2_n6.name) FROM dest2_n6 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -269,7 +269,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: dest2 + alias: dest2_n6 Statistics: Num rows: 2 Data size: 90 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: character_length(name) (type: int) @@ -290,29 +290,29 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT char_length(dest2.name) FROM dest2 +PREHOOK: query: SELECT char_length(dest2_n6.name) FROM dest2_n6 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n6 #### A masked pattern was here #### -POSTHOOK: query: SELECT char_length(dest2.name) FROM dest2 +POSTHOOK: query: SELECT char_length(dest2_n6.name) FROM dest2_n6 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n6 #### A masked pattern was here #### NULL 2 -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n50 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n50 +PREHOOK: Output: default@dest1_n50 +POSTHOOK: query: DROP TABLE dest1_n50 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: DROP TABLE dest2 +POSTHOOK: Input: default@dest1_n50 +POSTHOOK: Output: default@dest1_n50 +PREHOOK: query: DROP TABLE dest2_n6 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest2 -PREHOOK: Output: default@dest2 -POSTHOOK: query: DROP TABLE dest2 +PREHOOK: Input: default@dest2_n6 +PREHOOK: Output: default@dest2_n6 
+POSTHOOK: query: DROP TABLE dest2_n6 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest2 -POSTHOOK: Output: default@dest2 +POSTHOOK: Input: default@dest2_n6 +POSTHOOK: Output: default@dest2_n6 diff --git a/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out b/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out index d3b12c08d3..2cbbcb2d6e 100644 --- a/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out +++ b/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out @@ -13,17 +13,17 @@ Example: 15 Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFOctetLength Function type:BUILTIN -PREHOOK: query: CREATE TABLE dest1(len INT) +PREHOOK: query: CREATE TABLE dest1_n45(len INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(len INT) +PREHOOK: Output: default@dest1_n45 +POSTHOOK: query: CREATE TABLE dest1_n45(len INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value) +POSTHOOK: Output: default@dest1_n45 +PREHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n45 SELECT octet_length(src1.value) PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value) +POSTHOOK: query: EXPLAIN FROM src1 INSERT OVERWRITE TABLE dest1_n45 SELECT octet_length(src1.value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -53,7 +53,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n45 Execution mode: vectorized Stage: Stage-7 @@ -73,7 +73,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n45 Stage: Stage-2 Stats Work @@ -89,7 +89,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n45 Stage: Stage-5 Map Reduce @@ -101,7 +101,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest1 + name: default.dest1_n45 Stage: Stage-6 Move Operator @@ -109,22 +109,22 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value) +PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n45 SELECT octet_length(src1.value) PREHOOK: type: QUERY PREHOOK: Input: default@src1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value) +PREHOOK: Output: default@dest1_n45 +POSTHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n45 SELECT octet_length(src1.value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: 
query: SELECT dest1.* FROM dest1 +POSTHOOK: Output: default@dest1_n45 +POSTHOOK: Lineage: dest1_n45.len EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT dest1_n45.* FROM dest1_n45 PREHOOK: type: QUERY -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n45 #### A masked pattern was here #### -POSTHOOK: query: SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1_n45.* FROM dest1_n45 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n45 #### A masked pattern was here #### 7 0 @@ -151,53 +151,53 @@ POSTHOOK: Input: default@dest1 0 0 0 -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n45 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n45 +PREHOOK: Output: default@dest1_n45 +POSTHOOK: query: DROP TABLE dest1_n45 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +POSTHOOK: Input: default@dest1_n45 +POSTHOOK: Output: default@dest1_n45 +PREHOOK: query: CREATE TABLE dest1_n45(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dest1 -POSTHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +PREHOOK: Output: default@dest1_n45 +POSTHOOK: query: CREATE TABLE dest1_n45(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest1 -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +POSTHOOK: Output: default@dest1_n45 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n45 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@dest1 -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1 +PREHOOK: Output: default@dest1_n45 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1_n45 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@dest1 -PREHOOK: query: INSERT INTO dest1 VALUES(NULL) +POSTHOOK: Output: default@dest1_n45 +PREHOOK: query: INSERT INTO dest1_n45 VALUES(NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@dest1 -POSTHOOK: query: INSERT INTO dest1 VALUES(NULL) +PREHOOK: Output: default@dest1_n45 +POSTHOOK: query: INSERT INTO dest1_n45 VALUES(NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@dest1 -POSTHOOK: Lineage: dest1.name EXPRESSION [] -PREHOOK: query: CREATE TABLE dest2 STORED AS ORC AS SELECT * FROM dest1 +POSTHOOK: Output: default@dest1_n45 +POSTHOOK: Lineage: dest1_n45.name EXPRESSION [] +PREHOOK: query: CREATE TABLE dest2_n4 STORED AS ORC AS SELECT * FROM dest1_n45 PREHOOK: type: CREATETABLE_AS_SELECT -PREHOOK: Input: default@dest1 +PREHOOK: Input: default@dest1_n45 PREHOOK: Output: database:default -PREHOOK: Output: default@dest2 -POSTHOOK: query: CREATE TABLE dest2 STORED AS ORC AS SELECT * FROM dest1 +PREHOOK: Output: default@dest2_n4 +POSTHOOK: query: CREATE TABLE dest2_n4 STORED AS ORC AS SELECT * FROM dest1_n45 POSTHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: Input: default@dest1 +POSTHOOK: Input: default@dest1_n45 POSTHOOK: Output: database:default -POSTHOOK: Output: default@dest2 -POSTHOOK: Lineage: dest2.name SIMPLE 
[(dest1)dest1.FieldSchema(name:name, type:string, comment:null), ] -PREHOOK: query: EXPLAIN SELECT octet_length(dest2.name) FROM dest2 +POSTHOOK: Output: default@dest2_n4 +POSTHOOK: Lineage: dest2_n4.name SIMPLE [(dest1_n45)dest1_n45.FieldSchema(name:name, type:string, comment:null), ] +PREHOOK: query: EXPLAIN SELECT octet_length(dest2_n4.name) FROM dest2_n4 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT octet_length(dest2.name) FROM dest2 +POSTHOOK: query: EXPLAIN SELECT octet_length(dest2_n4.name) FROM dest2_n4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -208,7 +208,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: dest2 + alias: dest2_n4 Statistics: Num rows: 2 Data size: 90 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: octet_length(name) (type: int) @@ -229,29 +229,29 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT octet_length(dest2.name) FROM dest2 +PREHOOK: query: SELECT octet_length(dest2_n4.name) FROM dest2_n4 PREHOOK: type: QUERY -PREHOOK: Input: default@dest2 +PREHOOK: Input: default@dest2_n4 #### A masked pattern was here #### -POSTHOOK: query: SELECT octet_length(dest2.name) FROM dest2 +POSTHOOK: query: SELECT octet_length(dest2_n4.name) FROM dest2_n4 POSTHOOK: type: QUERY -POSTHOOK: Input: default@dest2 +POSTHOOK: Input: default@dest2_n4 #### A masked pattern was here #### NULL 6 -PREHOOK: query: DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1_n45 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest1 -PREHOOK: Output: default@dest1 -POSTHOOK: query: DROP TABLE dest1 +PREHOOK: Input: default@dest1_n45 +PREHOOK: Output: default@dest1_n45 +POSTHOOK: query: DROP TABLE dest1_n45 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest1 -POSTHOOK: Output: default@dest1 -PREHOOK: query: DROP TABLE dest2 +POSTHOOK: Input: default@dest1_n45 +POSTHOOK: Output: default@dest1_n45 +PREHOOK: query: DROP TABLE dest2_n4 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dest2 -PREHOOK: Output: default@dest2 -POSTHOOK: query: DROP TABLE dest2 +PREHOOK: Input: default@dest2_n4 +PREHOOK: Output: default@dest2_n4 +POSTHOOK: query: DROP TABLE dest2_n4 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dest2 -POSTHOOK: Output: default@dest2 +POSTHOOK: Input: default@dest2_n4 +POSTHOOK: Output: default@dest2_n4 diff --git a/ql/src/test/results/clientpositive/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/vector_varchar_4.q.out index 00a82c3813..24016b252b 100644 --- a/ql/src/test/results/clientpositive/vector_varchar_4.q.out +++ b/ql/src/test/results/clientpositive/vector_varchar_4.q.out @@ -1,12 +1,12 @@ -PREHOOK: query: drop table if exists vectortab2k +PREHOOK: query: drop table if exists vectortab2k_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists vectortab2k +POSTHOOK: query: drop table if exists vectortab2k_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table if exists vectortab2korc +PREHOOK: query: drop table if exists vectortab2korc_n1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists vectortab2korc +POSTHOOK: query: drop table if exists vectortab2korc_n1 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table vectortab2k( +PREHOOK: query: create table vectortab2k_n1( t tinyint, si smallint, i int, @@ -24,8 +24,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: create table vectortab2k( +PREHOOK: Output: 
default@vectortab2k_n1 +POSTHOOK: query: create table vectortab2k_n1( t tinyint, si smallint, i int, @@ -43,16 +43,16 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: Output: default@vectortab2k_n1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n1 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@vectortab2k -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: Output: default@vectortab2k_n1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k_n1 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@vectortab2k -PREHOOK: query: create table vectortab2korc( +POSTHOOK: Output: default@vectortab2k_n1 +PREHOOK: query: create table vectortab2korc_n1( t tinyint, si smallint, i int, @@ -69,8 +69,8 @@ PREHOOK: query: create table vectortab2korc( STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: create table vectortab2korc( +PREHOOK: Output: default@vectortab2korc_n1 +POSTHOOK: query: create table vectortab2korc_n1( t tinyint, si smallint, i int, @@ -87,28 +87,28 @@ POSTHOOK: query: create table vectortab2korc( STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@vectortab2korc -PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: Output: default@vectortab2korc_n1 +PREHOOK: query: INSERT INTO TABLE vectortab2korc_n1 SELECT * FROM vectortab2k_n1 PREHOOK: type: QUERY -PREHOOK: Input: default@vectortab2k -PREHOOK: Output: default@vectortab2korc -POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: Input: default@vectortab2k_n1 +PREHOOK: Output: default@vectortab2korc_n1 +POSTHOOK: query: INSERT INTO TABLE vectortab2korc_n1 SELECT * FROM vectortab2k_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@vectortab2k -POSTHOOK: Output: default@vectortab2korc -POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] -POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] 
-POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] -POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +POSTHOOK: Input: default@vectortab2k_n1 +POSTHOOK: Output: default@vectortab2korc_n1 +POSTHOOK: Lineage: vectortab2korc_n1.b SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.bo SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.d SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.dc SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.dt SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.f SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.i SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.s SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.s2 SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.si SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.t SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.ts SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc_n1.ts2 SIMPLE [(vectortab2k_n1)vectortab2k_n1.FieldSchema(name:ts2, type:timestamp, comment:null), ] PREHOOK: query: drop table if exists varchar_lazy_binary_columnar PREHOOK: type: DROPTABLE POSTHOOK: query: drop table if exists varchar_lazy_binary_columnar @@ -122,10 +122,10 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@varchar_lazy_binary_columnar PREHOOK: query: explain vectorization expression -insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc +insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc_n1 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc +insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc_n1 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -146,7 +146,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: vectortab2korc + alias: vectortab2korc_n1 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true diff --git a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out index b880838bce..f3aec13967 100644 --- a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out +++ 
b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out @@ -1,31 +1,31 @@ -PREHOOK: query: drop table varchar_2 +PREHOOK: query: drop table varchar_2_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table varchar_2 +POSTHOOK: query: drop table varchar_2_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table varchar_2 ( +PREHOOK: query: create table varchar_2_n0 ( key varchar(10), value varchar(20) ) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@varchar_2 -POSTHOOK: query: create table varchar_2 ( +PREHOOK: Output: default@varchar_2_n0 +POSTHOOK: query: create table varchar_2_n0 ( key varchar(10), value varchar(20) ) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@varchar_2 -PREHOOK: query: insert overwrite table varchar_2 select * from src +POSTHOOK: Output: default@varchar_2_n0 +PREHOOK: query: insert overwrite table varchar_2_n0 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@varchar_2 -POSTHOOK: query: insert overwrite table varchar_2 select * from src +PREHOOK: Output: default@varchar_2_n0 +POSTHOOK: query: insert overwrite table varchar_2_n0 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@varchar_2 -POSTHOOK: Lineage: varchar_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: varchar_2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@varchar_2_n0 +POSTHOOK: Lineage: varchar_2_n0.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: varchar_2_n0.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: select key, value from src order by key asc @@ -46,12 +46,12 @@ POSTHOOK: Input: default@src 10 val_10 100 val_100 PREHOOK: query: explain vectorization select key, value -from varchar_2 +from varchar_2_n0 order by key asc limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization select key, value -from varchar_2 +from varchar_2_n0 order by key asc limit 5 POSTHOOK: type: QUERY @@ -68,7 +68,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: varchar_2 + alias: varchar_2_n0 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: varchar(10)), value (type: varchar(20)) @@ -117,18 +117,18 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value -from varchar_2 +from varchar_2_n0 order by key asc limit 5 PREHOOK: type: QUERY -PREHOOK: Input: default@varchar_2 +PREHOOK: Input: default@varchar_2_n0 #### A masked pattern was here #### POSTHOOK: query: select key, value -from varchar_2 +from varchar_2_n0 order by key asc limit 5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@varchar_2 +POSTHOOK: Input: default@varchar_2_n0 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -155,12 +155,12 @@ POSTHOOK: Input: default@src 97 val_97 96 val_96 PREHOOK: query: explain vectorization select key, value -from varchar_2 +from varchar_2_n0 order by key desc limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain vectorization select key, value -from varchar_2 +from varchar_2_n0 order by key desc limit 5 POSTHOOK: type: QUERY @@ -177,7 +177,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: varchar_2 + alias: varchar_2_n0 Statistics: Num rows: 500 Data size: 88000 Basic stats: 
COMPLETE Column stats: NONE Select Operator expressions: key (type: varchar(10)), value (type: varchar(20)) @@ -226,32 +226,32 @@ STAGE PLANS: ListSink PREHOOK: query: select key, value -from varchar_2 +from varchar_2_n0 order by key desc limit 5 PREHOOK: type: QUERY -PREHOOK: Input: default@varchar_2 +PREHOOK: Input: default@varchar_2_n0 #### A masked pattern was here #### POSTHOOK: query: select key, value -from varchar_2 +from varchar_2_n0 order by key desc limit 5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@varchar_2 +POSTHOOK: Input: default@varchar_2_n0 #### A masked pattern was here #### 98 val_98 98 val_98 97 val_97 97 val_97 96 val_96 -PREHOOK: query: drop table varchar_2 +PREHOOK: query: drop table varchar_2_n0 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@varchar_2 -PREHOOK: Output: default@varchar_2 -POSTHOOK: query: drop table varchar_2 +PREHOOK: Input: default@varchar_2_n0 +PREHOOK: Output: default@varchar_2_n0 +POSTHOOK: query: drop table varchar_2_n0 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@varchar_2 -POSTHOOK: Output: default@varchar_2 +POSTHOOK: Input: default@varchar_2_n0 +POSTHOOK: Output: default@varchar_2_n0 PREHOOK: query: create table varchar_3 ( field varchar(25) ) stored as orc diff --git a/ql/src/test/results/clientpositive/vector_when_case_null.q.out b/ql/src/test/results/clientpositive/vector_when_case_null.q.out index 13fb6d1241..3ce7b412ee 100644 --- a/ql/src/test/results/clientpositive/vector_when_case_null.q.out +++ b/ql/src/test/results/clientpositive/vector_when_case_null.q.out @@ -1,26 +1,26 @@ -PREHOOK: query: create table count_case_groupby (key string, bool boolean) STORED AS orc +PREHOOK: query: create table count_case_groupby_n0 (key string, bool boolean) STORED AS orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@count_case_groupby -POSTHOOK: query: create table count_case_groupby (key string, bool boolean) STORED AS orc +PREHOOK: Output: default@count_case_groupby_n0 +POSTHOOK: query: create table count_case_groupby_n0 (key string, bool boolean) STORED AS orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@count_case_groupby -PREHOOK: query: insert into table count_case_groupby values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL) +POSTHOOK: Output: default@count_case_groupby_n0 +PREHOOK: query: insert into table count_case_groupby_n0 values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@count_case_groupby -POSTHOOK: query: insert into table count_case_groupby values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL) +PREHOOK: Output: default@count_case_groupby_n0 +POSTHOOK: query: insert into table count_case_groupby_n0 values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@count_case_groupby -POSTHOOK: Lineage: count_case_groupby.bool SCRIPT [] -POSTHOOK: Lineage: count_case_groupby.key SCRIPT [] +POSTHOOK: Output: default@count_case_groupby_n0 +POSTHOOK: Lineage: count_case_groupby_n0.bool SCRIPT [] +POSTHOOK: Lineage: count_case_groupby_n0.key SCRIPT [] PREHOOK: query: explain vectorization expression -SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key +SELECT key, 
COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby_n0 GROUP BY key PREHOOK: type: QUERY POSTHOOK: query: explain vectorization expression -SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key +SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby_n0 GROUP BY key POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -35,7 +35,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: count_case_groupby + alias: count_case_groupby_n0 Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -108,13 +108,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key +PREHOOK: query: SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby_n0 GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@count_case_groupby +PREHOOK: Input: default@count_case_groupby_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key +POSTHOOK: query: SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby_n0 GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@count_case_groupby +POSTHOOK: Input: default@count_case_groupby_n0 #### A masked pattern was here #### key1 1 key2 1 diff --git a/ql/src/test/results/clientpositive/vectorization_parquet_ppd_decimal.q.out b/ql/src/test/results/clientpositive/vectorization_parquet_ppd_decimal.q.out index c2611fc8b1..49d7354b60 100644 --- a/ql/src/test/results/clientpositive/vectorization_parquet_ppd_decimal.q.out +++ b/ql/src/test/results/clientpositive/vectorization_parquet_ppd_decimal.q.out @@ -1,290 +1,290 @@ -PREHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet +PREHOOK: query: create table newtypestbl_n1(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@newtypestbl -POSTHOOK: query: create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet +PREHOOK: Output: default@newtypestbl_n1 +POSTHOOK: query: create table newtypestbl_n1(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@newtypestbl -PREHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl +POSTHOOK: Output: default@newtypestbl_n1 +PREHOOK: query: insert overwrite table newtypestbl_n1 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: 
default@newtypestbl -POSTHOOK: query: insert overwrite table newtypestbl select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl +PREHOOK: Output: default@newtypestbl_n1 +POSTHOOK: query: insert overwrite table newtypestbl_n1 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@newtypestbl -POSTHOOK: Lineage: newtypestbl.c EXPRESSION [] -POSTHOOK: Lineage: newtypestbl.d EXPRESSION [] -POSTHOOK: Lineage: newtypestbl.da EXPRESSION [] -POSTHOOK: Lineage: newtypestbl.v EXPRESSION [] -PREHOOK: query: select * from newtypestbl where d=0.22 +POSTHOOK: Output: default@newtypestbl_n1 +POSTHOOK: Lineage: newtypestbl_n1.c EXPRESSION [] +POSTHOOK: Lineage: newtypestbl_n1.d EXPRESSION [] +POSTHOOK: Lineage: newtypestbl_n1.da EXPRESSION [] +POSTHOOK: Lineage: newtypestbl_n1.v EXPRESSION [] +PREHOOK: query: select * from newtypestbl_n1 where d=0.22 PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d=0.22 +POSTHOOK: query: select * from newtypestbl_n1 where d=0.22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d=0.22 +PREHOOK: query: select * from newtypestbl_n1 where d=0.22 PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d=0.22 +POSTHOOK: query: select * from newtypestbl_n1 where d=0.22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d='0.22' +PREHOOK: query: select * from newtypestbl_n1 where d='0.22' PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d='0.22' +POSTHOOK: query: select * from newtypestbl_n1 where d='0.22' POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d='0.22' +PREHOOK: query: select * from newtypestbl_n1 where d='0.22' PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d='0.22' +POSTHOOK: query: select * from newtypestbl_n1 where d='0.22' POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float) +PREHOOK: query: select * from newtypestbl_n1 where d=cast('0.22' as float) PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float) +POSTHOOK: query: select * from newtypestbl_n1 where d=cast('0.22' as float) POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d=cast('0.22' as float) +PREHOOK: query: select * from newtypestbl_n1 where d=cast('0.22' as float) PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d=cast('0.22' as float) +POSTHOOK: query: select * from newtypestbl_n1 where d=cast('0.22' as float) POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d!=0.22 +PREHOOK: query: select * from newtypestbl_n1 where d!=0.22 PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d!=0.22 +POSTHOOK: query: select * from newtypestbl_n1 where d!=0.22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where d!=0.22 +PREHOOK: query: select * from newtypestbl_n1 where d!=0.22 PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d!=0.22 +POSTHOOK: query: select * from newtypestbl_n1 where d!=0.22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where d!='0.22' +PREHOOK: query: select * from newtypestbl_n1 where d!='0.22' PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d!='0.22' +POSTHOOK: query: select * from newtypestbl_n1 where d!='0.22' POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 
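(Aside, not part of the patch: the newtypestbl_n1 hunks above exercise Parquet predicate pushdown against a DECIMAL(5,3) column compared with decimal, string, and float literals; each variant returns the same rows in the golden output. A minimal standalone sketch of the same pattern — the table name and seed rows are illustrative, not taken from the test suite:)

-- Sketch only: decimal_ppd_demo is a hypothetical table mirroring newtypestbl_n1's d column.
CREATE TABLE decimal_ppd_demo (d DECIMAL(5,3)) STORED AS PARQUET;
INSERT INTO decimal_ppd_demo VALUES (0.22), (11.22);
-- Hive reconciles the literal and column types, so all three predicates
-- match the 0.220 row, as the golden output above shows.
SELECT * FROM decimal_ppd_demo WHERE d = 0.22;
SELECT * FROM decimal_ppd_demo WHERE d = '0.22';
SELECT * FROM decimal_ppd_demo WHERE d = CAST('0.22' AS FLOAT);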
hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where d!='0.22' +PREHOOK: query: select * from newtypestbl_n1 where d!='0.22' PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d!='0.22' +POSTHOOK: query: select * from newtypestbl_n1 where d!='0.22' POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float) +PREHOOK: query: select * from newtypestbl_n1 where d!=cast('0.22' as float) PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float) +POSTHOOK: query: select * from newtypestbl_n1 where d!=cast('0.22' as float) POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where d!=cast('0.22' as float) +PREHOOK: query: select * from newtypestbl_n1 where d!=cast('0.22' as float) PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d!=cast('0.22' as float) +POSTHOOK: query: select * from newtypestbl_n1 where d!=cast('0.22' as float) POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 hello world 11.220 1970-02-27 -PREHOOK: query: select * from newtypestbl where d<11.22 +PREHOOK: query: select * from newtypestbl_n1 where d<11.22 PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d<11.22 +POSTHOOK: query: select * from newtypestbl_n1 where d<11.22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d<11.22 +PREHOOK: query: select * from newtypestbl_n1 where d<11.22 PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d<11.22 +POSTHOOK: query: select * from newtypestbl_n1 where d<11.22 POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d<'11.22' 
+PREHOOK: query: select * from newtypestbl_n1 where d<'11.22' PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d<'11.22' +POSTHOOK: query: select * from newtypestbl_n1 where d<'11.22' POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d<'11.22' +PREHOOK: query: select * from newtypestbl_n1 where d<'11.22' PREHOOK: type: QUERY -PREHOOK: Input: default@newtypestbl +PREHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### -POSTHOOK: query: select * from newtypestbl where d<'11.22' +POSTHOOK: query: select * from newtypestbl_n1 where d<'11.22' POSTHOOK: type: QUERY -POSTHOOK: Input: default@newtypestbl +POSTHOOK: Input: default@newtypestbl_n1 #### A masked pattern was here #### apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 apple bee 0.220 1970-02-20 -PREHOOK: query: select * from newtypestbl where d 1 +POSTHOOK: Output: default@test_n10 +POSTHOOK: Lineage: test_n10.id SCRIPT [] +POSTHOOK: Lineage: test_n10.ts SCRIPT [] +PREHOOK: query: select ts from test_n10 where id > 1 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n10 #### A masked pattern was here #### -POSTHOOK: query: select ts from test where id > 1 +POSTHOOK: query: select ts from test_n10 where id > 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n10 #### A masked pattern was here #### 2019-01-01 23:12:45.123456 2019-01-01 23:12:45.123456 -PREHOOK: query: insert into test values (3, NULL) +PREHOOK: query: insert into test_n10 values (3, NULL) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test -POSTHOOK: query: insert into test values (3, NULL) +PREHOOK: Output: default@test_n10 +POSTHOOK: query: insert into test_n10 values (3, NULL) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test -POSTHOOK: Lineage: test.id SCRIPT [] -POSTHOOK: Lineage: test.ts EXPRESSION [] -PREHOOK: query: select ts from test where id > 1 +POSTHOOK: Output: default@test_n10 +POSTHOOK: Lineage: test_n10.id SCRIPT [] +POSTHOOK: Lineage: test_n10.ts EXPRESSION [] +PREHOOK: query: select ts from test_n10 where id > 1 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n10 #### A masked pattern was here #### -POSTHOOK: query: select ts from test where id > 1 +POSTHOOK: query: select ts from test_n10 where id > 1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n10 #### A masked pattern was here #### 2019-01-01 23:12:45.123456 2019-01-01 23:12:45.123456 @@ -832,11 +832,11 @@ POSTHOOK: query: DROP TABLE parquet_type_nodict POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@parquet_type_nodict POSTHOOK: Output: default@parquet_type_nodict -PREHOOK: query: DROP TABLE test +PREHOOK: query: DROP TABLE test_n10 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@test -PREHOOK: Output: default@test -POSTHOOK: query: DROP TABLE test +PREHOOK: Input: default@test_n10 +PREHOOK: Output: default@test_n10 +POSTHOOK: query: DROP TABLE test_n10 POSTHOOK: type: DROPTABLE -POSTHOOK: 
Input: default@test -POSTHOOK: Output: default@test +POSTHOOK: Input: default@test_n10 +POSTHOOK: Output: default@test_n10 diff --git a/ql/src/test/results/clientpositive/vectorized_rcfile_columnar.q.out b/ql/src/test/results/clientpositive/vectorized_rcfile_columnar.q.out index c4cf9aa4fa..f6adf3c702 100644 --- a/ql/src/test/results/clientpositive/vectorized_rcfile_columnar.q.out +++ b/ql/src/test/results/clientpositive/vectorized_rcfile_columnar.q.out @@ -1,4 +1,4 @@ -PREHOOK: query: CREATE table columnTable (key STRING, value STRING) +PREHOOK: query: CREATE table columnTable_n0 (key STRING, value STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS @@ -6,8 +6,8 @@ STORED AS OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@columnTable -POSTHOOK: query: CREATE table columnTable (key STRING, value STRING) +PREHOOK: Output: default@columnTable_n0 +POSTHOOK: query: CREATE table columnTable_n0 (key STRING, value STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS @@ -15,34 +15,34 @@ STORED AS OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@columnTable +POSTHOOK: Output: default@columnTable_n0 PREHOOK: query: FROM src -INSERT OVERWRITE TABLE columnTable SELECT src.key, src.value ORDER BY src.key, src.value LIMIT 10 +INSERT OVERWRITE TABLE columnTable_n0 SELECT src.key, src.value ORDER BY src.key, src.value LIMIT 10 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@columntable +PREHOOK: Output: default@columntable_n0 POSTHOOK: query: FROM src -INSERT OVERWRITE TABLE columnTable SELECT src.key, src.value ORDER BY src.key, src.value LIMIT 10 +INSERT OVERWRITE TABLE columnTable_n0 SELECT src.key, src.value ORDER BY src.key, src.value LIMIT 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@columntable -POSTHOOK: Lineage: columntable.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: columntable.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: describe columnTable +POSTHOOK: Output: default@columntable_n0 +POSTHOOK: Lineage: columntable_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: columntable_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe columnTable_n0 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@columntable -POSTHOOK: query: describe columnTable +PREHOOK: Input: default@columntable_n0 +POSTHOOK: query: describe columnTable_n0 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@columntable +POSTHOOK: Input: default@columntable_n0 key string value string -PREHOOK: query: SELECT key, value FROM columnTable ORDER BY key +PREHOOK: query: SELECT key, value FROM columnTable_n0 ORDER BY key PREHOOK: type: QUERY -PREHOOK: Input: default@columntable +PREHOOK: Input: default@columntable_n0 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, value FROM columnTable ORDER BY key +POSTHOOK: query: SELECT key, value FROM columnTable_n0 ORDER BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@columntable +POSTHOOK: Input: default@columntable_n0 #### A masked pattern was here #### 0 val_0 0 val_0 diff --git 
a/ql/src/test/results/clientpositive/vectorized_timestamp.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp.q.out index b0bfc8b28c..7e03bf386b 100644 --- a/ql/src/test/results/clientpositive/vectorized_timestamp.q.out +++ b/ql/src/test/results/clientpositive/vectorized_timestamp.q.out @@ -1,29 +1,29 @@ -PREHOOK: query: DROP TABLE IF EXISTS test +PREHOOK: query: DROP TABLE IF EXISTS test_n2 PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS test +POSTHOOK: query: DROP TABLE IF EXISTS test_n2 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE test(ts TIMESTAMP) STORED AS ORC +PREHOOK: query: CREATE TABLE test_n2(ts TIMESTAMP) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@test -POSTHOOK: query: CREATE TABLE test(ts TIMESTAMP) STORED AS ORC +PREHOOK: Output: default@test_n2 +POSTHOOK: query: CREATE TABLE test_n2(ts TIMESTAMP) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@test -PREHOOK: query: INSERT INTO TABLE test VALUES ('0001-01-01 00:00:00.000000000'), ('9999-12-31 23:59:59.999999999') +POSTHOOK: Output: default@test_n2 +PREHOOK: query: INSERT INTO TABLE test_n2 VALUES ('0001-01-01 00:00:00.000000000'), ('9999-12-31 23:59:59.999999999') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@test -POSTHOOK: query: INSERT INTO TABLE test VALUES ('0001-01-01 00:00:00.000000000'), ('9999-12-31 23:59:59.999999999') +PREHOOK: Output: default@test_n2 +POSTHOOK: query: INSERT INTO TABLE test_n2 VALUES ('0001-01-01 00:00:00.000000000'), ('9999-12-31 23:59:59.999999999') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@test -POSTHOOK: Lineage: test.ts SCRIPT [] +POSTHOOK: Output: default@test_n2 +POSTHOOK: Lineage: test_n2.ts SCRIPT [] PREHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT ts FROM test +SELECT ts FROM test_n2 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT ts FROM test +SELECT ts FROM test_n2 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: false @@ -38,7 +38,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test + alias: test_n2 Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ts (type: timestamp) @@ -58,49 +58,49 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT ts FROM test +PREHOOK: query: SELECT ts FROM test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT ts FROM test +POSTHOOK: query: SELECT ts FROM test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n2 #### A masked pattern was here #### 0001-01-01 00:00:00 9999-12-31 23:59:59.999999999 -PREHOOK: query: SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test +PREHOOK: query: SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test +POSTHOOK: query: SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n2 #### A masked pattern was here #### 0001-01-01 00:00:00 9999-12-31 23:59:59.999999999 3652060 23:59:59.999999999 -PREHOOK: query: SELECT ts FROM test WHERE ts IN (timestamp 
'0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') +PREHOOK: query: SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT ts FROM test WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') +POSTHOOK: query: SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n2 #### A masked pattern was here #### 0001-01-01 00:00:00 -PREHOOK: query: SELECT ts FROM test +PREHOOK: query: SELECT ts FROM test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT ts FROM test +POSTHOOK: query: SELECT ts FROM test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n2 #### A masked pattern was here #### 0001-01-01 00:00:00 9999-12-31 23:59:59.999999999 PREHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test +SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test +SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -115,7 +115,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test + alias: test_n2 Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -193,20 +193,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test +PREHOOK: query: SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test +POSTHOOK: query: SELECT MIN(ts), MAX(ts), MAX(ts) - MIN(ts) FROM test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n2 #### A masked pattern was here #### 0001-01-01 00:00:00 9999-12-31 23:59:59.999999999 3652060 23:59:59.999999999 PREHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT ts FROM test WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') +SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT ts FROM test WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') +SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -221,7 +221,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test + alias: test_n2 Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -274,20 +274,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT ts FROM test WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') 
'0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000')
+PREHOOK: query: SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT ts FROM test WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') +POSTHOOK: query: SELECT ts FROM test_n2 WHERE ts IN (timestamp '0001-01-01 00:00:00.000000000', timestamp '0002-02-02 00:00:00.000000000') POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n2 #### A masked pattern was here #### 0001-01-01 00:00:00 PREHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test +SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test_n2 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test +SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test_n2 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -302,7 +302,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test + alias: test_n2 Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -380,20 +380,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test +PREHOOK: query: SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test +POSTHOOK: query: SELECT AVG(ts), CAST(AVG(ts) AS TIMESTAMP) FROM test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n2 #### A masked pattern was here #### 9.56332944E10 5000-07-01 13:00:00 PREHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test +SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test_n2 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test +SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test_n2 POSTHOOK: type: QUERY PLAN VECTORIZATION: enabled: true @@ -408,7 +408,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: test + alias: test_n2 Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -487,12 +487,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test +PREHOOK: query: SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test_n2 PREHOOK: type: QUERY -PREHOOK: Input: default@test +PREHOOK: Input: default@test_n2 #### A masked pattern was here #### -POSTHOOK: query: SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test +POSTHOOK: query: SELECT variance(ts), var_pop(ts), var_samp(ts), std(ts), stddev(ts), stddev_pop(ts), stddev_samp(ts) FROM test_n2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@test +POSTHOOK: Input: default@test_n2 #### A 
masked pattern was here #### 2.489106846793884E22 2.489106846793884E22 4.978213693587768E22 1.577690352E11 1.577690352E11 1.577690352E11 2.2311910930235822E11 diff --git a/ql/src/test/results/clientpositive/view.q.out b/ql/src/test/results/clientpositive/view.q.out index 07eba084c3..ba6e99af08 100644 --- a/ql/src/test/results/clientpositive/view.q.out +++ b/ql/src/test/results/clientpositive/view.q.out @@ -10,146 +10,146 @@ PREHOOK: Input: database:db1 POSTHOOK: query: USE db1 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:db1 -PREHOOK: query: CREATE TABLE table1 (key STRING, value STRING) +PREHOOK: query: CREATE TABLE table1_n19 (key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@table1 -POSTHOOK: query: CREATE TABLE table1 (key STRING, value STRING) +PREHOOK: Output: db1@table1_n19 +POSTHOOK: query: CREATE TABLE table1_n19 (key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@table1 +POSTHOOK: Output: db1@table1_n19 PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' -OVERWRITE INTO TABLE table1 +OVERWRITE INTO TABLE table1_n19 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: db1@table1 +PREHOOK: Output: db1@table1_n19 POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' -OVERWRITE INTO TABLE table1 +OVERWRITE INTO TABLE table1_n19 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: db1@table1 -PREHOOK: query: CREATE TABLE table2 (key STRING, value STRING) +POSTHOOK: Output: db1@table1_n19 +PREHOOK: query: CREATE TABLE table2_n13 (key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@table2 -POSTHOOK: query: CREATE TABLE table2 (key STRING, value STRING) +PREHOOK: Output: db1@table2_n13 +POSTHOOK: query: CREATE TABLE table2_n13 (key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@table2 +POSTHOOK: Output: db1@table2_n13 PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' -OVERWRITE INTO TABLE table2 +OVERWRITE INTO TABLE table2_n13 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: db1@table2 +PREHOOK: Output: db1@table2_n13 POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' -OVERWRITE INTO TABLE table2 +OVERWRITE INTO TABLE table2_n13 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: db1@table2 -PREHOOK: query: CREATE VIEW v1 AS SELECT * FROM table1 +POSTHOOK: Output: db1@table2_n13 +PREHOOK: query: CREATE VIEW v1_n17 AS SELECT * FROM table1_n19 PREHOOK: type: CREATEVIEW -PREHOOK: Input: db1@table1 +PREHOOK: Input: db1@table1_n19 PREHOOK: Output: database:db1 -PREHOOK: Output: db1@v1 -POSTHOOK: query: CREATE VIEW v1 AS SELECT * FROM table1 +PREHOOK: Output: db1@v1_n17 +POSTHOOK: query: CREATE VIEW v1_n17 AS SELECT * FROM table1_n19 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db1@table1 +POSTHOOK: Input: db1@table1_n19 POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@v1 -POSTHOOK: Lineage: v1.key SIMPLE [(table1)table1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v1.value SIMPLE [(table1)table1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW v2 AS SELECT t1.* FROM table1 t1 +POSTHOOK: Output: db1@v1_n17 +POSTHOOK: Lineage: v1_n17.key SIMPLE 
[(table1_n19)table1_n19.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v1_n17.value SIMPLE [(table1_n19)table1_n19.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW v2_n10 AS SELECT t1.* FROM table1_n19 t1 PREHOOK: type: CREATEVIEW -PREHOOK: Input: db1@table1 +PREHOOK: Input: db1@table1_n19 PREHOOK: Output: database:db1 -PREHOOK: Output: db1@v2 -POSTHOOK: query: CREATE VIEW v2 AS SELECT t1.* FROM table1 t1 +PREHOOK: Output: db1@v2_n10 +POSTHOOK: query: CREATE VIEW v2_n10 AS SELECT t1.* FROM table1_n19 t1 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db1@table1 +POSTHOOK: Input: db1@table1_n19 POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@v2 -POSTHOOK: Lineage: v2.key SIMPLE [(table1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v2.value SIMPLE [(table1)t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW v3 AS SELECT t1.*, t2.key k FROM table1 t1 JOIN table2 t2 ON t1.key = t2.key +POSTHOOK: Output: db1@v2_n10 +POSTHOOK: Lineage: v2_n10.key SIMPLE [(table1_n19)t1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v2_n10.value SIMPLE [(table1_n19)t1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW v3_n3 AS SELECT t1.*, t2.key k FROM table1_n19 t1 JOIN table2_n13 t2 ON t1.key = t2.key PREHOOK: type: CREATEVIEW -PREHOOK: Input: db1@table1 -PREHOOK: Input: db1@table2 +PREHOOK: Input: db1@table1_n19 +PREHOOK: Input: db1@table2_n13 PREHOOK: Output: database:db1 -PREHOOK: Output: db1@v3 -POSTHOOK: query: CREATE VIEW v3 AS SELECT t1.*, t2.key k FROM table1 t1 JOIN table2 t2 ON t1.key = t2.key +PREHOOK: Output: db1@v3_n3 +POSTHOOK: query: CREATE VIEW v3_n3 AS SELECT t1.*, t2.key k FROM table1_n19 t1 JOIN table2_n13 t2 ON t1.key = t2.key POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db1@table1 -POSTHOOK: Input: db1@table2 +POSTHOOK: Input: db1@table1_n19 +POSTHOOK: Input: db1@table2_n13 POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@v3 -POSTHOOK: Lineage: v3.k SIMPLE [(table2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v3.key SIMPLE [(table1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v3.value SIMPLE [(table1)t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW v4 AS SELECT * FROM db1.table1 +POSTHOOK: Output: db1@v3_n3 +POSTHOOK: Lineage: v3_n3.k SIMPLE [(table2_n13)t2.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v3_n3.key SIMPLE [(table1_n19)t1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v3_n3.value SIMPLE [(table1_n19)t1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW v4_n3 AS SELECT * FROM db1.table1_n19 PREHOOK: type: CREATEVIEW -PREHOOK: Input: db1@table1 +PREHOOK: Input: db1@table1_n19 PREHOOK: Output: database:db1 -PREHOOK: Output: db1@v4 -POSTHOOK: query: CREATE VIEW v4 AS SELECT * FROM db1.table1 +PREHOOK: Output: db1@v4_n3 +POSTHOOK: query: CREATE VIEW v4_n3 AS SELECT * FROM db1.table1_n19 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db1@table1 +POSTHOOK: Input: db1@table1_n19 POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@v4 -POSTHOOK: Lineage: v4.key SIMPLE [(table1)table1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v4.value SIMPLE [(table1)table1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW v5 AS SELECT t1.* FROM db1.table1 t1 +POSTHOOK: Output: 
db1@v4_n3 +POSTHOOK: Lineage: v4_n3.key SIMPLE [(table1_n19)table1_n19.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v4_n3.value SIMPLE [(table1_n19)table1_n19.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW v5_n1 AS SELECT t1.* FROM db1.table1_n19 t1 PREHOOK: type: CREATEVIEW -PREHOOK: Input: db1@table1 +PREHOOK: Input: db1@table1_n19 PREHOOK: Output: database:db1 -PREHOOK: Output: db1@v5 -POSTHOOK: query: CREATE VIEW v5 AS SELECT t1.* FROM db1.table1 t1 +PREHOOK: Output: db1@v5_n1 +POSTHOOK: query: CREATE VIEW v5_n1 AS SELECT t1.* FROM db1.table1_n19 t1 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db1@table1 +POSTHOOK: Input: db1@table1_n19 POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@v5 -POSTHOOK: Lineage: v5.key SIMPLE [(table1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v5.value SIMPLE [(table1)t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW v6 AS SELECT t1.*, t2.key k FROM db1.table1 t1 JOIN db1.table2 t2 ON t1.key = t2.key +POSTHOOK: Output: db1@v5_n1 +POSTHOOK: Lineage: v5_n1.key SIMPLE [(table1_n19)t1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v5_n1.value SIMPLE [(table1_n19)t1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW v6 AS SELECT t1.*, t2.key k FROM db1.table1_n19 t1 JOIN db1.table2_n13 t2 ON t1.key = t2.key PREHOOK: type: CREATEVIEW -PREHOOK: Input: db1@table1 -PREHOOK: Input: db1@table2 +PREHOOK: Input: db1@table1_n19 +PREHOOK: Input: db1@table2_n13 PREHOOK: Output: database:db1 PREHOOK: Output: db1@v6 -POSTHOOK: query: CREATE VIEW v6 AS SELECT t1.*, t2.key k FROM db1.table1 t1 JOIN db1.table2 t2 ON t1.key = t2.key +POSTHOOK: query: CREATE VIEW v6 AS SELECT t1.*, t2.key k FROM db1.table1_n19 t1 JOIN db1.table2_n13 t2 ON t1.key = t2.key POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db1@table1 -POSTHOOK: Input: db1@table2 +POSTHOOK: Input: db1@table1_n19 +POSTHOOK: Input: db1@table2_n13 POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v6 -POSTHOOK: Lineage: v6.k SIMPLE [(table2)t2.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v6.key SIMPLE [(table1)t1.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: v6.value SIMPLE [(table1)t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW v7 AS SELECT key from table1 +POSTHOOK: Lineage: v6.k SIMPLE [(table2_n13)t2.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v6.key SIMPLE [(table1_n19)t1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v6.value SIMPLE [(table1_n19)t1.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW v7 AS SELECT key from table1_n19 PREHOOK: type: CREATEVIEW -PREHOOK: Input: db1@table1 +PREHOOK: Input: db1@table1_n19 PREHOOK: Output: database:db1 PREHOOK: Output: db1@v7 -POSTHOOK: query: CREATE VIEW v7 AS SELECT key from table1 +POSTHOOK: query: CREATE VIEW v7 AS SELECT key from table1_n19 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db1@table1 +POSTHOOK: Input: db1@table1_n19 POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v7 -POSTHOOK: Lineage: v7.key SIMPLE [(table1)table1.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: CREATE VIEW v8 AS SELECT key from db1.table1 +POSTHOOK: Lineage: v7.key SIMPLE [(table1_n19)table1_n19.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: CREATE VIEW v8 AS SELECT key from 
db1.table1_n19 PREHOOK: type: CREATEVIEW -PREHOOK: Input: db1@table1 +PREHOOK: Input: db1@table1_n19 PREHOOK: Output: database:db1 PREHOOK: Output: db1@v8 -POSTHOOK: query: CREATE VIEW v8 AS SELECT key from db1.table1 +POSTHOOK: query: CREATE VIEW v8 AS SELECT key from db1.table1_n19 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: db1@table1 +POSTHOOK: Input: db1@table1_n19 POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v8 -POSTHOOK: Lineage: v8.key SIMPLE [(table1)table1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: v8.key SIMPLE [(table1_n19)table1_n19.FieldSchema(name:key, type:string, comment:null), ] PREHOOK: query: CREATE DATABASE db2 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:db2 @@ -162,15 +162,15 @@ PREHOOK: Input: database:db2 POSTHOOK: query: USE db2 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:db2 -PREHOOK: query: SELECT * FROM db1.v1 +PREHOOK: query: SELECT * FROM db1.v1_n17 PREHOOK: type: QUERY -PREHOOK: Input: db1@table1 -PREHOOK: Input: db1@v1 +PREHOOK: Input: db1@table1_n19 +PREHOOK: Input: db1@v1_n17 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM db1.v1 +POSTHOOK: query: SELECT * FROM db1.v1_n17 POSTHOOK: type: QUERY -POSTHOOK: Input: db1@table1 -POSTHOOK: Input: db1@v1 +POSTHOOK: Input: db1@table1_n19 +POSTHOOK: Input: db1@v1_n17 #### A masked pattern was here #### 238 val_238 86 val_86 @@ -672,15 +672,15 @@ POSTHOOK: Input: db1@v1 400 val_400 200 val_200 97 val_97 -PREHOOK: query: SELECT * FROM db1.v2 +PREHOOK: query: SELECT * FROM db1.v2_n10 PREHOOK: type: QUERY -PREHOOK: Input: db1@table1 -PREHOOK: Input: db1@v2 +PREHOOK: Input: db1@table1_n19 +PREHOOK: Input: db1@v2_n10 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM db1.v2 +POSTHOOK: query: SELECT * FROM db1.v2_n10 POSTHOOK: type: QUERY -POSTHOOK: Input: db1@table1 -POSTHOOK: Input: db1@v2 +POSTHOOK: Input: db1@table1_n19 +POSTHOOK: Input: db1@v2_n10 #### A masked pattern was here #### 238 val_238 86 val_86 @@ -1182,17 +1182,17 @@ POSTHOOK: Input: db1@v2 400 val_400 200 val_200 97 val_97 -PREHOOK: query: SELECT * FROM db1.v3 +PREHOOK: query: SELECT * FROM db1.v3_n3 PREHOOK: type: QUERY -PREHOOK: Input: db1@table1 -PREHOOK: Input: db1@table2 -PREHOOK: Input: db1@v3 +PREHOOK: Input: db1@table1_n19 +PREHOOK: Input: db1@table2_n13 +PREHOOK: Input: db1@v3_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM db1.v3 +POSTHOOK: query: SELECT * FROM db1.v3_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: db1@table1 -POSTHOOK: Input: db1@table2 -POSTHOOK: Input: db1@v3 +POSTHOOK: Input: db1@table1_n19 +POSTHOOK: Input: db1@table2_n13 +POSTHOOK: Input: db1@v3_n3 #### A masked pattern was here #### 0 val_0 0 0 val_0 0 @@ -2222,15 +2222,15 @@ POSTHOOK: Input: db1@v3 98 val_98 98 98 val_98 98 98 val_98 98 -PREHOOK: query: SELECT * FROM db1.v4 +PREHOOK: query: SELECT * FROM db1.v4_n3 PREHOOK: type: QUERY -PREHOOK: Input: db1@table1 -PREHOOK: Input: db1@v4 +PREHOOK: Input: db1@table1_n19 +PREHOOK: Input: db1@v4_n3 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM db1.v4 +POSTHOOK: query: SELECT * FROM db1.v4_n3 POSTHOOK: type: QUERY -POSTHOOK: Input: db1@table1 -POSTHOOK: Input: db1@v4 +POSTHOOK: Input: db1@table1_n19 +POSTHOOK: Input: db1@v4_n3 #### A masked pattern was here #### 238 val_238 86 val_86 @@ -2732,15 +2732,15 @@ POSTHOOK: Input: db1@v4 400 val_400 200 val_200 97 val_97 -PREHOOK: query: SELECT * FROM db1.v5 +PREHOOK: query: SELECT * FROM db1.v5_n1 PREHOOK: type: QUERY -PREHOOK: Input: db1@table1 
-PREHOOK: Input: db1@v5 +PREHOOK: Input: db1@table1_n19 +PREHOOK: Input: db1@v5_n1 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM db1.v5 +POSTHOOK: query: SELECT * FROM db1.v5_n1 POSTHOOK: type: QUERY -POSTHOOK: Input: db1@table1 -POSTHOOK: Input: db1@v5 +POSTHOOK: Input: db1@table1_n19 +POSTHOOK: Input: db1@v5_n1 #### A masked pattern was here #### 238 val_238 86 val_86 @@ -3244,14 +3244,14 @@ POSTHOOK: Input: db1@v5 97 val_97 PREHOOK: query: SELECT * FROM db1.v6 PREHOOK: type: QUERY -PREHOOK: Input: db1@table1 -PREHOOK: Input: db1@table2 +PREHOOK: Input: db1@table1_n19 +PREHOOK: Input: db1@table2_n13 PREHOOK: Input: db1@v6 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM db1.v6 POSTHOOK: type: QUERY -POSTHOOK: Input: db1@table1 -POSTHOOK: Input: db1@table2 +POSTHOOK: Input: db1@table1_n19 +POSTHOOK: Input: db1@table2_n13 POSTHOOK: Input: db1@v6 #### A masked pattern was here #### 0 val_0 0 @@ -4284,12 +4284,12 @@ POSTHOOK: Input: db1@v6 98 val_98 98 PREHOOK: query: SELECT * FROM db1.v7 PREHOOK: type: QUERY -PREHOOK: Input: db1@table1 +PREHOOK: Input: db1@table1_n19 PREHOOK: Input: db1@v7 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM db1.v7 POSTHOOK: type: QUERY -POSTHOOK: Input: db1@table1 +POSTHOOK: Input: db1@table1_n19 POSTHOOK: Input: db1@v7 #### A masked pattern was here #### 238 @@ -4794,12 +4794,12 @@ POSTHOOK: Input: db1@v7 97 PREHOOK: query: SELECT * FROM db1.v8 PREHOOK: type: QUERY -PREHOOK: Input: db1@table1 +PREHOOK: Input: db1@table1_n19 PREHOOK: Input: db1@v8 #### A masked pattern was here #### POSTHOOK: query: SELECT * FROM db1.v8 POSTHOOK: type: QUERY -POSTHOOK: Input: db1@table1 +POSTHOOK: Input: db1@table1_n19 POSTHOOK: Input: db1@v8 #### A masked pattern was here #### 238 diff --git a/ql/src/test/results/clientpositive/view_alias.q.out b/ql/src/test/results/clientpositive/view_alias.q.out index bbd721abfe..7e5f0449d0 100644 --- a/ql/src/test/results/clientpositive/view_alias.q.out +++ b/ql/src/test/results/clientpositive/view_alias.q.out @@ -1,25 +1,25 @@ -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n6 PREHOOK: type: DROPVIEW -POSTHOOK: query: drop view v +POSTHOOK: query: drop view v_n6 POSTHOOK: type: DROPVIEW -PREHOOK: query: create view v as select key, '12' from src +PREHOOK: query: create view v_n6 as select key, '12' from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select key, '12' from src +PREHOOK: Output: default@v_n6 +POSTHOOK: query: create view v_n6 as select key, '12' from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v._c1 SIMPLE [] -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n6 +POSTHOOK: Lineage: v_n6._c1 SIMPLE [] +POSTHOOK: Lineage: v_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: desc formatted v_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n6 +POSTHOOK: query: desc formatted v_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 # col_name data_type comment key string _c1 string @@ -46,47 +46,47 @@ Sort Columns: [] View Original Text: select key, '12' from src View Expanded Text: select `src`.`key`, '12' 
from `default`.`src` View Rewrite Enabled: No -PREHOOK: query: select * from v order by `_c1` limit 5 +PREHOOK: query: select * from v_n6 order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from v order by `_c1` limit 5 +POSTHOOK: query: select * from v_n6 order by `_c1` limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 #### A masked pattern was here #### 238 12 86 12 311 12 27 12 165 12 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n6 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n6 +PREHOOK: Output: default@v_n6 +POSTHOOK: query: drop view v_n6 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as select key as `_c1`, '12' from src +POSTHOOK: Input: default@v_n6 +POSTHOOK: Output: default@v_n6 +PREHOOK: query: create view v_n6 as select key as `_c1`, '12' from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select key as `_c1`, '12' from src +PREHOOK: Output: default@v_n6 +POSTHOOK: query: create view v_n6 as select key as `_c1`, '12' from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v._c1 SIMPLE [] -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n6 +POSTHOOK: Lineage: v_n6._c1 SIMPLE [] +POSTHOOK: Lineage: v_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: desc formatted v_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n6 +POSTHOOK: query: desc formatted v_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 # col_name data_type comment key string _c1 string @@ -113,48 +113,48 @@ Sort Columns: [] View Original Text: select key as _c1, '12' from src View Expanded Text: select `src`.`key` as `_c1`, '12' from `default`.`src` View Rewrite Enabled: No -PREHOOK: query: select * from v order by `_c1` limit 5 +PREHOOK: query: select * from v_n6 order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from v order by `_c1` limit 5 +POSTHOOK: query: select * from v_n6 order by `_c1` limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 #### A masked pattern was here #### 238 12 86 12 311 12 27 12 165 12 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n6 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n6 +PREHOOK: Output: default@v_n6 +POSTHOOK: query: drop view v_n6 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as select *, '12' from src +POSTHOOK: Input: default@v_n6 +POSTHOOK: Output: default@v_n6 +PREHOOK: query: create view v_n6 as select *, '12' from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src 
PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select *, '12' from src +PREHOOK: Output: default@v_n6 +POSTHOOK: query: create view v_n6 as select *, '12' from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v._c1 SIMPLE [] -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: v.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n6 +POSTHOOK: Lineage: v_n6._c1 SIMPLE [] +POSTHOOK: Lineage: v_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: v_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted v_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n6 +POSTHOOK: query: desc formatted v_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 # col_name data_type comment key string value string @@ -182,48 +182,48 @@ Sort Columns: [] View Original Text: select *, '12' from src View Expanded Text: select `src`.`key`, `src`.`value`, '12' from `default`.`src` View Rewrite Enabled: No -PREHOOK: query: select * from v order by `_c2` limit 5 +PREHOOK: query: select * from v_n6 order by `_c2` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from v order by `_c2` limit 5 +POSTHOOK: query: select * from v_n6 order by `_c2` limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 #### A masked pattern was here #### 238 val_238 12 86 val_86 12 311 val_311 12 27 val_27 12 165 val_165 12 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n6 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n6 +PREHOOK: Output: default@v_n6 +POSTHOOK: query: drop view v_n6 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as select *, '12' as `_c121` from src +POSTHOOK: Input: default@v_n6 +POSTHOOK: Output: default@v_n6 +PREHOOK: query: create view v_n6 as select *, '12' as `_c121` from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select *, '12' as `_c121` from src +PREHOOK: Output: default@v_n6 +POSTHOOK: query: create view v_n6 as select *, '12' as `_c121` from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v._c121 SIMPLE [] -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: v.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n6 +POSTHOOK: Lineage: v_n6._c121 SIMPLE [] +POSTHOOK: Lineage: v_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: v_n6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: desc formatted v_n6 PREHOOK: type: 
DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n6 +POSTHOOK: query: desc formatted v_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 # col_name data_type comment key string value string @@ -251,47 +251,47 @@ Sort Columns: [] View Original Text: select *, '12' as _c121 from src View Expanded Text: select `src`.`key`, `src`.`value`, '12' as `_c121` from `default`.`src` View Rewrite Enabled: No -PREHOOK: query: select * from v order by `_c121` limit 5 +PREHOOK: query: select * from v_n6 order by `_c121` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from v order by `_c121` limit 5 +POSTHOOK: query: select * from v_n6 order by `_c121` limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 #### A masked pattern was here #### 238 val_238 12 86 val_86 12 311 val_311 12 27 val_27 12 165 val_165 12 -PREHOOK: query: drop view v +PREHOOK: query: drop view v_n6 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n6 +PREHOOK: Output: default@v_n6 +POSTHOOK: query: drop view v_n6 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create view v as select key, count(*) from src group by key +POSTHOOK: Input: default@v_n6 +POSTHOOK: Output: default@v_n6 +PREHOOK: query: create view v_n6 as select key, count(*) from src group by key PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create view v as select key, count(*) from src group by key +PREHOOK: Output: default@v_n6 +POSTHOOK: query: create view v_n6 as select key, count(*) from src group by key POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v._c1 EXPRESSION [(src)src.null, ] -POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n6 +POSTHOOK: Lineage: v_n6._c1 EXPRESSION [(src)src.null, ] +POSTHOOK: Lineage: v_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: desc formatted v_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n6 +POSTHOOK: query: desc formatted v_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 # col_name data_type comment key string _c1 bigint @@ -318,97 +318,97 @@ Sort Columns: [] View Original Text: select key, count(*) from src group by key View Expanded Text: select `src`.`key`, count(*) from `default`.`src` group by `src`.`key` View Rewrite Enabled: No -PREHOOK: query: select * from v order by `_c1` limit 5 +PREHOOK: query: select * from v_n6 order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Input: default@v +PREHOOK: Input: default@v_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from v order by `_c1` limit 5 +POSTHOOK: query: select * from v_n6 order by `_c1` limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 #### A masked pattern was here #### 11 1 105 1 114 1 96 1 10 1 
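(Aside, not part of the patch: the view_alias.q.out hunks rely on Hive auto-naming unaliased SELECT expressions as _c<position>, which the renamed view v_n6 then exposes and orders by through backquotes. A short sketch of that behavior, assuming the standard src test table; the view name is illustrative:)

-- Sketch only: count(*) carries no alias, so the view's second column is auto-named _c1,
-- matching the "key string / _c1 bigint" schema in the desc formatted output above.
CREATE VIEW key_counts_demo AS SELECT key, count(*) FROM src GROUP BY key;
DESCRIBE key_counts_demo;
-- The generated name must be backquoted when referenced.
SELECT * FROM key_counts_demo ORDER BY `_c1` LIMIT 5;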
-PREHOOK: query: drop view v +PREHOOK: query: drop view v_n6 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: default@v_n6 +PREHOOK: Output: default@v_n6 +POSTHOOK: query: drop view v_n6 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -PREHOOK: query: create table a (ca string, caa string) +POSTHOOK: Input: default@v_n6 +POSTHOOK: Output: default@v_n6 +PREHOOK: query: create table a_n7 (ca_n7 string, caa_n7 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@a -POSTHOOK: query: create table a (ca string, caa string) +PREHOOK: Output: default@a_n7 +POSTHOOK: query: create table a_n7 (ca_n7 string, caa_n7 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@a -PREHOOK: query: create table b (cb string, cbb string) +POSTHOOK: Output: default@a_n7 +PREHOOK: query: create table b_n5 (cb_n5 string, cbb_n5 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@b -POSTHOOK: query: create table b (cb string, cbb string) +PREHOOK: Output: default@b_n5 +POSTHOOK: query: create table b_n5 (cb_n5 string, cbb_n5 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@b -PREHOOK: query: insert into a select * from src limit 5 +POSTHOOK: Output: default@b_n5 +PREHOOK: query: insert into a_n7 select * from src limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@a -POSTHOOK: query: insert into a select * from src limit 5 +PREHOOK: Output: default@a_n7 +POSTHOOK: query: insert into a_n7 select * from src limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@a -POSTHOOK: Lineage: a.ca SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: a.caa SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert into b select * from src limit 5 +POSTHOOK: Output: default@a_n7 +POSTHOOK: Lineage: a_n7.ca_n7 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: a_n7.caa_n7 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert into b_n5 select * from src limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@b -POSTHOOK: query: insert into b select * from src limit 5 +PREHOOK: Output: default@b_n5 +POSTHOOK: query: insert into b_n5 select * from src limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@b -POSTHOOK: Lineage: b.cb SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: b.cbb SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create view v as select '010', a.*, 121, b.*, 234 from a join b on a.ca = b.cb +POSTHOOK: Output: default@b_n5 +POSTHOOK: Lineage: b_n5.cb_n5 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: b_n5.cbb_n5 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create view v_n6 as select '010', a_n7.*, 121, b_n5.*, 234 from a_n7 join b_n5 on a_n7.ca_n7 = b_n5.cb_n5 PREHOOK: type: CREATEVIEW -PREHOOK: Input: default@a -PREHOOK: Input: default@b +PREHOOK: Input: default@a_n7 +PREHOOK: Input: default@b_n5 PREHOOK: Output: database:default -PREHOOK: Output: default@v -POSTHOOK: query: create 
view v as select '010', a.*, 121, b.*, 234 from a join b on a.ca = b.cb +PREHOOK: Output: default@v_n6 +POSTHOOK: query: create view v_n6 as select '010', a_n7.*, 121, b_n5.*, 234 from a_n7 join b_n5 on a_n7.ca_n7 = b_n5.cb_n5 POSTHOOK: type: CREATEVIEW -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b +POSTHOOK: Input: default@a_n7 +POSTHOOK: Input: default@b_n5 POSTHOOK: Output: database:default -POSTHOOK: Output: default@v -POSTHOOK: Lineage: v._c0 SIMPLE [] -POSTHOOK: Lineage: v._c2 SIMPLE [] -POSTHOOK: Lineage: v._c4 SIMPLE [] -POSTHOOK: Lineage: v.ca SIMPLE [(a)a.FieldSchema(name:ca, type:string, comment:null), ] -POSTHOOK: Lineage: v.caa SIMPLE [(a)a.FieldSchema(name:caa, type:string, comment:null), ] -POSTHOOK: Lineage: v.cb SIMPLE [(b)b.FieldSchema(name:cb, type:string, comment:null), ] -POSTHOOK: Lineage: v.cbb SIMPLE [(b)b.FieldSchema(name:cbb, type:string, comment:null), ] -PREHOOK: query: desc formatted v +POSTHOOK: Output: default@v_n6 +POSTHOOK: Lineage: v_n6._c0 SIMPLE [] +POSTHOOK: Lineage: v_n6._c2 SIMPLE [] +POSTHOOK: Lineage: v_n6._c4 SIMPLE [] +POSTHOOK: Lineage: v_n6.ca_n7 SIMPLE [(a_n7)a_n7.FieldSchema(name:ca_n7, type:string, comment:null), ] +POSTHOOK: Lineage: v_n6.caa_n7 SIMPLE [(a_n7)a_n7.FieldSchema(name:caa_n7, type:string, comment:null), ] +POSTHOOK: Lineage: v_n6.cb_n5 SIMPLE [(b_n5)b_n5.FieldSchema(name:cb_n5, type:string, comment:null), ] +POSTHOOK: Lineage: v_n6.cbb_n5 SIMPLE [(b_n5)b_n5.FieldSchema(name:cbb_n5, type:string, comment:null), ] +PREHOOK: query: desc formatted v_n6 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: desc formatted v +PREHOOK: Input: default@v_n6 +POSTHOOK: query: desc formatted v_n6 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: default@v_n6 # col_name data_type comment _c0 string -ca string -caa string +ca_n7 string +caa_n7 string _c2 int -cb string -cbb string +cb_n5 string +cbb_n5 string _c4 int # Detailed Table Information @@ -430,20 +430,20 @@ Bucket Columns: [] Sort Columns: [] # View Information -View Original Text: select '010', a.*, 121, b.*, 234 from a join b on a.ca = b.cb -View Expanded Text: select '010', `a`.`ca`, `a`.`caa`, 121, `b`.`cb`, `b`.`cbb`, 234 from `default`.`a` join `default`.`b` on `a`.`ca` = `b`.`cb` +View Original Text: select '010', a_n7.*, 121, b_n5.*, 234 from a_n7 join b_n5 on a_n7.ca_n7 = b_n5.cb_n5 +View Expanded Text: select '010', `a_n7`.`ca_n7`, `a_n7`.`caa_n7`, 121, `b_n5`.`cb_n5`, `b_n5`.`cbb_n5`, 234 from `default`.`a_n7` join `default`.`b_n5` on `a_n7`.`ca_n7` = `b_n5`.`cb_n5` View Rewrite Enabled: No -PREHOOK: query: select * from v order by `_c3` limit 5 +PREHOOK: query: select * from v_n6 order by `_c3` limit 5 PREHOOK: type: QUERY -PREHOOK: Input: default@a -PREHOOK: Input: default@b -PREHOOK: Input: default@v +PREHOOK: Input: default@a_n7 +PREHOOK: Input: default@b_n5 +PREHOOK: Input: default@v_n6 #### A masked pattern was here #### -POSTHOOK: query: select * from v order by `_c3` limit 5 +POSTHOOK: query: select * from v_n6 order by `_c3` limit 5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@a -POSTHOOK: Input: default@b -POSTHOOK: Input: default@v +POSTHOOK: Input: default@a_n7 +POSTHOOK: Input: default@b_n5 +POSTHOOK: Input: default@v_n6 #### A masked pattern was here #### 010 165 val_165 121 165 val_165 234 010 238 val_238 121 238 val_238 234 diff --git a/ql/src/test/results/clientpositive/view_authorization_sqlstd.q.out b/ql/src/test/results/clientpositive/view_authorization_sqlstd.q.out index 4130428576..37110a4210 100644 
--- a/ql/src/test/results/clientpositive/view_authorization_sqlstd.q.out
+++ b/ql/src/test/results/clientpositive/view_authorization_sqlstd.q.out
@@ -1,142 +1,142 @@
-PREHOOK: query: create table t1(i int, j int, k int)
+PREHOOK: query: create table t1_n18(i int, j int, k int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1(i int, j int, k int)
+PREHOOK: Output: default@t1_n18
+POSTHOOK: query: create table t1_n18(i int, j int, k int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: grant select on t1 to user user2 with grant option
+POSTHOOK: Output: default@t1_n18
+PREHOOK: query: grant select on t1_n18 to user user2 with grant option
 PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@t1
-POSTHOOK: query: grant select on t1 to user user2 with grant option
+PREHOOK: Output: default@t1_n18
+POSTHOOK: query: grant select on t1_n18 to user user2 with grant option
 POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@t1
-PREHOOK: query: show grant user user1 on table t1
+POSTHOOK: Output: default@t1_n18
+PREHOOK: query: show grant user user1 on table t1_n18
 PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user user1 on table t1
+POSTHOOK: query: show grant user user1 on table t1_n18
 POSTHOOK: type: SHOW_GRANT
-default t1 user1 USER DELETE true -1 user1
-default t1 user1 USER INSERT true -1 user1
-default t1 user1 USER SELECT true -1 user1
-default t1 user1 USER UPDATE true -1 user1
-PREHOOK: query: create view vt1 as select i,k from t1
+default t1_n18 user1 USER DELETE true -1 user1
+default t1_n18 user1 USER INSERT true -1 user1
+default t1_n18 user1 USER SELECT true -1 user1
+default t1_n18 user1 USER UPDATE true -1 user1
+PREHOOK: query: create view vt1_n18 as select i,k from t1_n18
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n18
 PREHOOK: Output: database:default
-PREHOOK: Output: default@vt1
-POSTHOOK: query: create view vt1 as select i,k from t1
+PREHOOK: Output: default@vt1_n18
+POSTHOOK: query: create view vt1_n18 as select i,k from t1_n18
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n18
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@vt1
-POSTHOOK: Lineage: vt1.i SIMPLE [(t1)t1.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: vt1.k SIMPLE [(t1)t1.FieldSchema(name:k, type:int, comment:null), ]
-PREHOOK: query: create view vt2 as select * from t1 where i > 1
+POSTHOOK: Output: default@vt1_n18
+POSTHOOK: Lineage: vt1_n18.i SIMPLE [(t1_n18)t1_n18.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vt1_n18.k SIMPLE [(t1_n18)t1_n18.FieldSchema(name:k, type:int, comment:null), ]
+PREHOOK: query: create view vt2 as select * from t1_n18 where i > 1
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n18
 PREHOOK: Output: database:default
 PREHOOK: Output: default@vt2
-POSTHOOK: query: create view vt2 as select * from t1 where i > 1
+POSTHOOK: query: create view vt2 as select * from t1_n18 where i > 1
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n18
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@vt2
-POSTHOOK: Lineage: vt2.i SIMPLE [(t1)t1.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: vt2.j SIMPLE [(t1)t1.FieldSchema(name:j, type:int, comment:null), ]
-POSTHOOK: Lineage: vt2.k SIMPLE [(t1)t1.FieldSchema(name:k, type:int, comment:null), ]
+POSTHOOK: Lineage: vt2.i SIMPLE [(t1_n18)t1_n18.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vt2.j SIMPLE [(t1_n18)t1_n18.FieldSchema(name:j, type:int, comment:null), ]
+POSTHOOK: Lineage: vt2.k SIMPLE [(t1_n18)t1_n18.FieldSchema(name:k, type:int, comment:null), ]
 PREHOOK: query: show grant user user1 on all
 PREHOOK: type: SHOW_GRANT
 POSTHOOK: query: show grant user user1 on all
 POSTHOOK: type: SHOW_GRANT
-default t1 user1 USER DELETE true -1 user1
-default t1 user1 USER INSERT true -1 user1
-default t1 user1 USER SELECT true -1 user1
-default t1 user1 USER UPDATE true -1 user1
-default vt1 user1 USER DELETE true -1 user1
-default vt1 user1 USER INSERT true -1 user1
-default vt1 user1 USER SELECT true -1 user1
-default vt1 user1 USER UPDATE true -1 user1
+default t1_n18 user1 USER DELETE true -1 user1
+default t1_n18 user1 USER INSERT true -1 user1
+default t1_n18 user1 USER SELECT true -1 user1
+default t1_n18 user1 USER UPDATE true -1 user1
+default vt1_n18 user1 USER DELETE true -1 user1
+default vt1_n18 user1 USER INSERT true -1 user1
+default vt1_n18 user1 USER SELECT true -1 user1
+default vt1_n18 user1 USER UPDATE true -1 user1
 default vt2 user1 USER DELETE true -1 user1
 default vt2 user1 USER INSERT true -1 user1
 default vt2 user1 USER SELECT true -1 user1
 default vt2 user1 USER UPDATE true -1 user1
-PREHOOK: query: grant select on vt1 to user user2
+PREHOOK: query: grant select on vt1_n18 to user user2
 PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@vt1
-POSTHOOK: query: grant select on vt1 to user user2
+PREHOOK: Output: default@vt1_n18
+POSTHOOK: query: grant select on vt1_n18 to user user2
 POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@vt1
-PREHOOK: query: grant insert on table vt1 to user user3
+POSTHOOK: Output: default@vt1_n18
+PREHOOK: query: grant insert on table vt1_n18 to user user3
 PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@vt1
-POSTHOOK: query: grant insert on table vt1 to user user3
+PREHOOK: Output: default@vt1_n18
+POSTHOOK: query: grant insert on table vt1_n18 to user user3
 POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@vt1
-PREHOOK: query: show grant user user2 on table vt1
+POSTHOOK: Output: default@vt1_n18
+PREHOOK: query: show grant user user2 on table vt1_n18
 PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user user2 on table vt1
+POSTHOOK: query: show grant user user2 on table vt1_n18
 POSTHOOK: type: SHOW_GRANT
-default vt1 user2 USER SELECT false -1 user1
-PREHOOK: query: create view vt3 as select i,k from t1
+default vt1_n18 user2 USER SELECT false -1 user1
+PREHOOK: query: create view vt3 as select i,k from t1_n18
 PREHOOK: type: CREATEVIEW
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n18
 PREHOOK: Output: database:default
 PREHOOK: Output: default@vt3
-POSTHOOK: query: create view vt3 as select i,k from t1
+POSTHOOK: query: create view vt3 as select i,k from t1_n18
 POSTHOOK: type: CREATEVIEW
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n18
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@vt3
-POSTHOOK: Lineage: vt3.i SIMPLE [(t1)t1.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: vt3.k SIMPLE [(t1)t1.FieldSchema(name:k, type:int, comment:null), ]
-PREHOOK: query: show grant user user3 on table vt1
+POSTHOOK: Lineage: vt3.i SIMPLE [(t1_n18)t1_n18.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vt3.k SIMPLE [(t1_n18)t1_n18.FieldSchema(name:k, type:int, comment:null), ]
+PREHOOK: query: show grant user user3 on table vt1_n18
 PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user user3 on table vt1
+POSTHOOK: query: show grant user user3 on table vt1_n18
 POSTHOOK: type: SHOW_GRANT
-default vt1 user3 USER INSERT false -1 user1
-PREHOOK: query: explain authorization select * from vt1
+default vt1_n18 user3 USER INSERT false -1 user1
+PREHOOK: query: explain authorization select * from vt1_n18
 PREHOOK: type: QUERY
-POSTHOOK: query: explain authorization select * from vt1
+POSTHOOK: query: explain authorization select * from vt1_n18
 POSTHOOK: type: QUERY
 INPUTS:
-  default@vt1
-  default@t1
+  default@vt1_n18
+  default@t1_n18
 OUTPUTS:
 #### A masked pattern was here ####
 CURRENT_USER: user2
 OPERATION: QUERY
-PREHOOK: query: select * from vt1
+PREHOOK: query: select * from vt1_n18
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@vt1
+PREHOOK: Input: default@t1_n18
+PREHOOK: Input: default@vt1_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select * from vt1
+POSTHOOK: query: select * from vt1_n18
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@vt1
+POSTHOOK: Input: default@t1_n18
+POSTHOOK: Input: default@vt1_n18
 #### A masked pattern was here ####
-PREHOOK: query: select * from (select * from vt1) a
+PREHOOK: query: select * from (select * from vt1_n18) a
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@vt1
+PREHOOK: Input: default@t1_n18
+PREHOOK: Input: default@vt1_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select * from (select * from vt1) a
+POSTHOOK: query: select * from (select * from vt1_n18) a
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@vt1
+POSTHOOK: Input: default@t1_n18
+POSTHOOK: Input: default@vt1_n18
 #### A masked pattern was here ####
-PREHOOK: query: select * from vt1 union all select * from vt1
+PREHOOK: query: select * from vt1_n18 union all select * from vt1_n18
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
-PREHOOK: Input: default@vt1
+PREHOOK: Input: default@t1_n18
+PREHOOK: Input: default@vt1_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select * from vt1 union all select * from vt1
+POSTHOOK: query: select * from vt1_n18 union all select * from vt1_n18
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
-POSTHOOK: Input: default@vt1
+POSTHOOK: Input: default@t1_n18
+POSTHOOK: Input: default@vt1_n18
 #### A masked pattern was here ####
 PREHOOK: query: grant all on table vt2 to user user2
 PREHOOK: type: GRANT_PRIVILEGE
@@ -156,8 +156,8 @@ PREHOOK: query: show grant user user2 on all
 PREHOOK: type: SHOW_GRANT
 POSTHOOK: query: show grant user user2 on all
 POSTHOOK: type: SHOW_GRANT
-default t1 user2 USER SELECT true -1 user1
-default vt1 user2 USER SELECT false -1 user1
+default t1_n18 user2 USER SELECT true -1 user1
+default vt1_n18 user2 USER SELECT false -1 user1
 default vt2 user2 USER DELETE false -1 user1
 default vt2 user2 USER INSERT false -1 user1
 default vt2 user2 USER SELECT false -1 user1
@@ -188,30 +188,30 @@ default vt2 user1 USER DELETE true -1 user1
 default vt2 user1 USER INSERT true -1 user1
 default vt2 user1 USER SELECT true -1 user1
 default vt2 user1 USER UPDATE true -1 user1
-PREHOOK: query: revoke select on table vt1 from user user2
+PREHOOK: query: revoke select on table vt1_n18 from user user2
 PREHOOK: type: REVOKE_PRIVILEGE
-PREHOOK: Output: default@vt1
-POSTHOOK: query: revoke select on table vt1 from user user2
+PREHOOK: Output: default@vt1_n18
+POSTHOOK: query: revoke select on table vt1_n18 from user user2
 POSTHOOK: type: REVOKE_PRIVILEGE
-POSTHOOK: Output: default@vt1
-PREHOOK: query: show grant user user2 on table vt1
+POSTHOOK: Output: default@vt1_n18
+PREHOOK: query: show grant user user2 on table vt1_n18
 PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user user2 on table vt1
+POSTHOOK: query: show grant user user2 on table vt1_n18
 POSTHOOK: type: SHOW_GRANT
 PREHOOK: query: show grant user user2 on all
 PREHOOK: type: SHOW_GRANT
 POSTHOOK: query: show grant user user2 on all
 POSTHOOK: type: SHOW_GRANT
-default t1 user2 USER SELECT true -1 user1
+default t1_n18 user2 USER SELECT true -1 user1
 default vt3 user2 USER DELETE true -1 user1
 default vt3 user2 USER INSERT true -1 user1
 default vt3 user2 USER SELECT true -1 user1
 default vt3 user2 USER UPDATE true -1 user1
-PREHOOK: query: show grant user user3 on table vt1
+PREHOOK: query: show grant user user3 on table vt1_n18
 PREHOOK: type: SHOW_GRANT
-POSTHOOK: query: show grant user user3 on table vt1
+POSTHOOK: query: show grant user user3 on table vt1_n18
 POSTHOOK: type: SHOW_GRANT
-default vt1 user3 USER INSERT false -1 user1
+default vt1_n18 user3 USER INSERT false -1 user1
 PREHOOK: query: show current roles
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: show current roles
 POSTHOOK: type: SHOW_ROLES
diff --git a/ql/src/test/results/clientpositive/view_cbo.q.out b/ql/src/test/results/clientpositive/view_cbo.q.out
index f9d68192df..bbcd53339a 100644
--- a/ql/src/test/results/clientpositive/view_cbo.q.out
+++ b/ql/src/test/results/clientpositive/view_cbo.q.out
@@ -89,31 +89,31 @@ STAGE PLANS:
       Processor Tree:
         ListSink
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n13
 PREHOOK: type: DROPVIEW
-POSTHOOK: query: drop view v
+POSTHOOK: query: drop view v_n13
 POSTHOOK: type: DROPVIEW
-PREHOOK: query: create view v as
+PREHOOK: query: create view v_n13 as
 with q1 as ( select key from src where key = '5')
 select * from q1
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: create view v_n13 as
 with q1 as ( select key from src where key = '5')
 select * from q1
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key SIMPLE []
-PREHOOK: query: desc formatted v
+POSTHOOK: Output: default@v_n13
+POSTHOOK: Lineage: v_n13.key SIMPLE []
+PREHOOK: query: desc formatted v_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: desc formatted v
+PREHOOK: Input: default@v_n13
+POSTHOOK: query: desc formatted v_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n13
 # col_name data_type comment
 key string
@@ -141,15 +141,15 @@ View Original Text: with q1 as ( select key from src where key = '5')
 View Expanded Text: with q1 as ( select `src`.`key` from `default`.`src` where `src`.`key` = '5')
 select `q1`.`key` from q1
 View Rewrite Enabled: No
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n13
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@v
-PREHOOK: Output: default@v
-POSTHOOK: query: drop view v
+PREHOOK: Input: default@v_n13
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: drop view v_n13
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@v
-POSTHOOK: Output: default@v
-PREHOOK: query: create view v as
+POSTHOOK: Input: default@v_n13
+POSTHOOK: Output: default@v_n13
+PREHOOK: query: create view v_n13 as
 select b.key, count(*) as c
 from src b
 group by b.key
@@ -161,8 +161,8 @@ having exists
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: create view v_n13 as
 select b.key, count(*) as c
 from src b
 group by b.key
@@ -174,15 +174,15 @@ having exists
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.c EXPRESSION [(src)b.null, ]
-POSTHOOK: Lineage: v.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: desc formatted v
+POSTHOOK: Output: default@v_n13
+POSTHOOK: Lineage: v_n13.c EXPRESSION [(src)b.null, ]
+POSTHOOK: Lineage: v_n13.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: desc formatted v_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: desc formatted v
+PREHOOK: Input: default@v_n13
+POSTHOOK: query: desc formatted v_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n13
 # col_name data_type comment
 key string
 c bigint
@@ -223,15 +223,15 @@ View Expanded Text: select `b`.`key`, count(*) as `c`
 where `a`.`key` = b.key and `a`.`value` > 'val_9' )
 View Rewrite Enabled: No
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n13
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@v
-PREHOOK: Output: default@v
-POSTHOOK: query: drop view v
+PREHOOK: Input: default@v_n13
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: drop view v_n13
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@v
-POSTHOOK: Output: default@v
-PREHOOK: query: create view v as
+POSTHOOK: Input: default@v_n13
+POSTHOOK: Output: default@v_n13
+PREHOOK: query: create view v_n13 as
 select *
 from src b
 where not exists
@@ -242,8 +242,8 @@ where not exists
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: create view v_n13 as
 select *
 from src b
 where not exists
@@ -254,15 +254,15 @@ where not exists
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: v.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted v
+POSTHOOK: Output: default@v_n13
+POSTHOOK: Lineage: v_n13.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: v_n13.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted v_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: desc formatted v
+PREHOOK: Input: default@v_n13
+POSTHOOK: query: desc formatted v_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n13
 # col_name data_type comment
 key string
 value string
@@ -301,31 +301,31 @@ View Expanded Text: select `b`.`key`, `b`.`value`
 where `b`.`value` = `a`.`value` and `a`.`value` > 'val_2' )
 View Rewrite Enabled: No
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n13
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@v
-PREHOOK: Output: default@v
-POSTHOOK: query: drop view v
+PREHOOK: Input: default@v_n13
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: drop view v_n13
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@v
-POSTHOOK: Output: default@v
-PREHOOK: query: create view v as select a.key from src a join src b on a.key=b.key
+POSTHOOK: Input: default@v_n13
+POSTHOOK: Output: default@v_n13
+PREHOOK: query: create view v_n13 as select a.key from src a join src b on a.key=b.key
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as select a.key from src a join src b on a.key=b.key
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: create view v_n13 as select a.key from src a join src b on a.key=b.key
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: desc formatted v
+POSTHOOK: Output: default@v_n13
+POSTHOOK: Lineage: v_n13.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: desc formatted v_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: desc formatted v
+PREHOOK: Input: default@v_n13
+POSTHOOK: query: desc formatted v_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n13
 # col_name data_type comment
 key string
@@ -351,30 +351,30 @@ Sort Columns: []
 View Original Text: select a.key from src a join src b on a.key=b.key
 View Expanded Text: select `a`.`key` from `default`.`src` `a` join `default`.`src` `b` on `a`.`key`=`b`.`key`
 View Rewrite Enabled: No
-PREHOOK: query: CREATE VIEW view15 AS
+PREHOOK: query: CREATE VIEW view15_n0 AS
 SELECT key,COUNT(value) AS value_count
 FROM src
 GROUP BY key
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@view15
-POSTHOOK: query: CREATE VIEW view15 AS
+PREHOOK: Output: default@view15_n0
+POSTHOOK: query: CREATE VIEW view15_n0 AS
 SELECT key,COUNT(value) AS value_count
 FROM src
 GROUP BY key
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@view15
-POSTHOOK: Lineage: view15.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: view15.value_count EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted view15
+POSTHOOK: Output: default@view15_n0
+POSTHOOK: Lineage: view15_n0.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: view15_n0.value_count EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted view15_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@view15
-POSTHOOK: query: desc formatted view15
+PREHOOK: Input: default@view15_n0
+POSTHOOK: query: desc formatted view15_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@view15
+POSTHOOK: Input: default@view15_n0
 # col_name data_type comment
 key string
 value_count bigint
@@ -405,27 +405,27 @@ View Expanded Text: SELECT `src`.`key`,COUNT(`src`.`value`) AS `value_count`
 FROM `default`.`src`
 GROUP BY `src`.`key`
 View Rewrite Enabled: No
-PREHOOK: query: CREATE VIEW view16 AS
+PREHOOK: query: CREATE VIEW view16_n0 AS
 SELECT DISTINCT value
 FROM src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@view16
-POSTHOOK: query: CREATE VIEW view16 AS
+PREHOOK: Output: default@view16_n0
+POSTHOOK: query: CREATE VIEW view16_n0 AS
 SELECT DISTINCT value
 FROM src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@view16
-POSTHOOK: Lineage: view16.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted view16
+POSTHOOK: Output: default@view16_n0
+POSTHOOK: Lineage: view16_n0.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted view16_n0
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@view16
-POSTHOOK: query: desc formatted view16
+PREHOOK: Input: default@view16_n0
+POSTHOOK: query: desc formatted view16_n0
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@view16
+POSTHOOK: Input: default@view16_n0
 # col_name data_type comment
 value string
@@ -453,31 +453,31 @@ View Original Text: SELECT DISTINCT value
 View Expanded Text: SELECT DISTINCT `src`.`value`
 FROM `default`.`src`
 View Rewrite Enabled: No
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n13
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@v
-PREHOOK: Output: default@v
-POSTHOOK: query: drop view v
+PREHOOK: Input: default@v_n13
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: drop view v_n13
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@v
-POSTHOOK: Output: default@v
-PREHOOK: query: create view v as select key from src
+POSTHOOK: Input: default@v_n13
+POSTHOOK: Output: default@v_n13
+PREHOOK: query: create view v_n13 as select key from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as select key from src
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: create view v_n13 as select key from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: desc formatted v
+POSTHOOK: Output: default@v_n13
+POSTHOOK: Lineage: v_n13.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: desc formatted v_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: desc formatted v
+PREHOOK: Input: default@v_n13
+POSTHOOK: query: desc formatted v_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n13
 # col_name data_type comment
 key string
@@ -503,32 +503,32 @@ Sort Columns: []
 View Original Text: select key from src
 View Expanded Text: select `src`.`key` from `default`.`src`
 View Rewrite Enabled: No
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n13
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@v
-PREHOOK: Output: default@v
-POSTHOOK: query: drop view v
+PREHOOK: Input: default@v_n13
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: drop view v_n13
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@v
-POSTHOOK: Output: default@v
-PREHOOK: query: create view v as select * from src
+POSTHOOK: Input: default@v_n13
+POSTHOOK: Output: default@v_n13
+PREHOOK: query: create view v_n13 as select * from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as select * from src
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: create view v_n13 as select * from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: v.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted v
+POSTHOOK: Output: default@v_n13
+POSTHOOK: Lineage: v_n13.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: v_n13.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted v_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: desc formatted v
+PREHOOK: Input: default@v_n13
+POSTHOOK: query: desc formatted v_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n13
 # col_name data_type comment
 key string
 value string
@@ -555,32 +555,32 @@ Sort Columns: []
 View Original Text: select * from src
 View Expanded Text: select `src`.`key`, `src`.`value` from `default`.`src`
 View Rewrite Enabled: No
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n13
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@v
-PREHOOK: Output: default@v
-POSTHOOK: query: drop view v
+PREHOOK: Input: default@v_n13
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: drop view v_n13
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@v
-POSTHOOK: Output: default@v
-PREHOOK: query: create view v as select * from src intersect select * from src
+POSTHOOK: Input: default@v_n13
+POSTHOOK: Output: default@v_n13
+PREHOOK: query: create view v_n13 as select * from src intersect select * from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as select * from src intersect select * from src
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: create view v_n13 as select * from src intersect select * from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: v.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted v
+POSTHOOK: Output: default@v_n13
+POSTHOOK: Lineage: v_n13.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: v_n13.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted v_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: desc formatted v
+PREHOOK: Input: default@v_n13
+POSTHOOK: query: desc formatted v_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n13
 # col_name data_type comment
 key string
 value string
@@ -607,32 +607,32 @@ Sort Columns: []
 View Original Text: select * from src intersect select * from src
 View Expanded Text: select `src`.`key`, `src`.`value` from `default`.`src` intersect select `src`.`key`, `src`.`value` from `default`.`src`
 View Rewrite Enabled: No
-PREHOOK: query: drop view v
+PREHOOK: query: drop view v_n13
 PREHOOK: type: DROPVIEW
-PREHOOK: Input: default@v
-PREHOOK: Output: default@v
-POSTHOOK: query: drop view v
+PREHOOK: Input: default@v_n13
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: drop view v_n13
 POSTHOOK: type: DROPVIEW
-POSTHOOK: Input: default@v
-POSTHOOK: Output: default@v
-PREHOOK: query: create view v as select * from src except select * from src
+POSTHOOK: Input: default@v_n13
+POSTHOOK: Output: default@v_n13
+PREHOOK: query: create view v_n13 as select * from src except select * from src
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@v
-POSTHOOK: query: create view v as select * from src except select * from src
+PREHOOK: Output: default@v_n13
+POSTHOOK: query: create view v_n13 as select * from src except select * from src
 POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@v
-POSTHOOK: Lineage: v.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: v.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted v
+POSTHOOK: Output: default@v_n13
+POSTHOOK: Lineage: v_n13.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: v_n13.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted v_n13
 PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@v
-POSTHOOK: query: desc formatted v
+PREHOOK: Input: default@v_n13
+POSTHOOK: query: desc formatted v_n13
 POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@v
+POSTHOOK: Input: default@v_n13
 # col_name data_type comment
 key string
 value string
@@ -659,9 +659,9 @@ Sort Columns: []
 View Original Text: select * from src except select * from src
 View Expanded Text: select `src`.`key`, `src`.`value` from `default`.`src` except select `src`.`key`, `src`.`value` from `default`.`src`
 View Rewrite Enabled: No
-PREHOOK: query: explain select * from v
+PREHOOK: query: explain select * from v_n13
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select * from v
+POSTHOOK: query: explain select * from v_n13
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
diff --git a/ql/src/test/results/clientpositive/windowing_duplicate.q.out b/ql/src/test/results/clientpositive/windowing_duplicate.q.out
index c7b6d4f93e..195ca5e281 100644
--- a/ql/src/test/results/clientpositive/windowing_duplicate.q.out
+++ b/ql/src/test/results/clientpositive/windowing_duplicate.q.out
@@ -10,7 +10,7 @@ POSTHOOK: query: create table mytable1 (
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@mytable1
-PREHOOK: query: create table t1 as
+PREHOOK: query: create table t1_n15 as
 select sum(bound3) OVER (PARTITION BY string1 ORDER BY mytime) as bound1
 from
 (
@@ -22,8 +22,8 @@ from (
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@mytable1
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 as
+PREHOOK: Output: default@t1_n15
+POSTHOOK: query: create table t1_n15 as
 select sum(bound3) OVER (PARTITION BY string1 ORDER BY mytime) as bound1
 from
 (
@@ -35,5 +35,5 @@ from (
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@mytable1
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-POSTHOOK: Lineage: t1.bound1 SCRIPT [(mytable1)mytable1.FieldSchema(name:mytime, type:timestamp, comment:null), (mytable1)mytable1.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Output: default@t1_n15
+POSTHOOK: Lineage: t1_n15.bound1 SCRIPT [(mytable1)mytable1.FieldSchema(name:mytime, type:timestamp, comment:null), (mytable1)mytable1.FieldSchema(name:string1, type:string, comment:null), ]
diff --git a/ql/src/test/results/clientpositive/windowing_expressions.q.out b/ql/src/test/results/clientpositive/windowing_expressions.q.out
index 27629c43b9..5757f68e87 100644
--- a/ql/src/test/results/clientpositive/windowing_expressions.q.out
+++ b/ql/src/test/results/clientpositive/windowing_expressions.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: drop table over10k
+PREHOOK: query: drop table over10k_n22
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table over10k
+POSTHOOK: query: drop table over10k_n22
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table over10k(
+PREHOOK: query: create table over10k_n22(
 t tinyint,
 si smallint,
 i int,
@@ -18,8 +18,8 @@ PREHOOK: query: create table over10k(
 fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over10k
-POSTHOOK: query: create table over10k(
+PREHOOK: Output: default@over10k_n22
+POSTHOOK: query: create table over10k_n22(
 t tinyint,
 si smallint,
 i int,
@@ -35,15 +35,15 @@ POSTHOOK: query: create table over10k(
 fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over10k
-PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+POSTHOOK: Output: default@over10k_n22
+PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n22
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@over10k
-POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+PREHOOK: Output: default@over10k_n22
+POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n22
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@over10k
+POSTHOOK: Output: default@over10k_n22
 PREHOOK: query: select p_mfgr, p_retailprice, p_size,
 round(sum(p_retailprice) over w1 , 2) = round(sum(lag(p_retailprice,1,0.0)) over w1 + last_value(p_retailprice) over w1 , 2),
 max(p_retailprice) over w1 - min(p_retailprice) over w1 = last_value(p_retailprice) over w1 - first_value(p_retailprice) over w1
@@ -128,13 +128,13 @@ Manufacturer#5 1464.48 23 2 2482.58 2477.58
 Manufacturer#5 1611.66 6 3 4094.24 4089.24
 Manufacturer#5 1788.73 2 4 5882.969999999999 5877.969999999999
 Manufacturer#5 1789.69 31 5 7672.66 7667.66
-PREHOOK: query: select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k limit 100
+PREHOOK: query: select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k_n22 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n22
 #### A masked pattern was here ####
-POSTHOOK: query: select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k limit 100
+POSTHOOK: query: select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k_n22 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n22
 #### A masked pattern was here ####
 alice allen 400 76.31 337.23
 alice davidson 384 71.97 357.79
@@ -236,13 +236,13 @@ fred allen 331 33.49 281.68
 fred king 511 43.61 457.22
 fred polk 261 39.18 248.73
 fred young 303 49.32 221.51001
-PREHOOK: query: select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k limit 100
+PREHOOK: query: select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k_n22 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n22
 #### A masked pattern was here ####
-POSTHOOK: query: select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k limit 100
+POSTHOOK: query: select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k_n22 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n22
 #### A masked pattern was here ####
 wendy garcia 65540 -18
 ethan thompson 65543 -20
@@ -344,13 +344,13 @@ sarah polk 65717 -17
 irene quirinius 65724 -28
 tom laertes 65728 -25
 yuri johnson 65734 -27
-PREHOOK: query: select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k limit 100
+PREHOOK: query: select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k_n22 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n22
 #### A masked pattern was here ####
-POSTHOOK: query: select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k limit 100
+POSTHOOK: query: select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k_n22 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n22
 #### A masked pattern was here ####
 jessica ellison 262 30.41 NULL
 david young 266 45.12 NULL
@@ -452,13 +452,13 @@ quinn xylophone 267 49.8 NULL
 gabriella thompson 268 17.15 NULL
 calvin xylophone 275 49.32 NULL
 gabriella zipper 279 30.41 229.2
-PREHOOK: query: select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k limit 100
+PREHOOK: query: select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k_n22 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n22
 #### A masked pattern was here ####
-POSTHOOK: query: select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k limit 100
+POSTHOOK: query: select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k_n22 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n22
 #### A masked pattern was here ####
 yuri thompson fred
 bob ichabod fred
@@ -628,54 +628,54 @@ Manufacturer#5 1241.29
 Manufacturer#5 1424.0900000000001
 Manufacturer#5 1515.25
 Manufacturer#5 1534.532
-PREHOOK: query: create table t1 (a1 int, b1 string)
+PREHOOK: query: create table t1_n49 (a1 int, b1 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t1
-POSTHOOK: query: create table t1 (a1 int, b1 string)
+PREHOOK: Output: default@t1_n49
+POSTHOOK: query: create table t1_n49 (a1 int, b1 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table t2 (a1 int, b1 string)
+POSTHOOK: Output: default@t1_n49
+PREHOOK: query: create table t2_n26 (a1 int, b1 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@t2
-POSTHOOK: query: create table t2 (a1 int, b1 string)
+PREHOOK: Output: default@t2_n26
+POSTHOOK: query: create table t2_n26 (a1 int, b1 string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2
-PREHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select *
+POSTHOOK: Output: default@t2_n26
+PREHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k_n22) tt insert overwrite table t1_n49 select * insert overwrite table t2_n26 select *
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
-PREHOOK: Output: default@t1
-PREHOOK: Output: default@t2
-POSTHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select *
+PREHOOK: Input: default@over10k_n22
+PREHOOK: Output: default@t1_n49
+PREHOOK: Output: default@t2_n26
+POSTHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k_n22) tt insert overwrite table t1_n49 select * insert overwrite table t2_n26 select *
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
-POSTHOOK: Output: default@t1
-POSTHOOK: Output: default@t2
-POSTHOOK: Lineage: t1.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: t1.b1 SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: t2.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: t2.b1 SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
-PREHOOK: query: select * from t1 limit 3
+POSTHOOK: Input: default@over10k_n22
+POSTHOOK: Output: default@t1_n49
+POSTHOOK: Output: default@t2_n26
+POSTHOOK: Lineage: t1_n49.a1 SCRIPT [(over10k_n22)over10k_n22.FieldSchema(name:t, type:tinyint, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:si, type:smallint, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:i, type:int, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:b, type:bigint, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:f, type:float, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:d, type:double, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:bo, type:boolean, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:s, type:string, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:ts, type:timestamp, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k_n22)over10k_n22.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: t1_n49.b1 SIMPLE [(over10k_n22)over10k_n22.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: t2_n26.a1 SCRIPT [(over10k_n22)over10k_n22.FieldSchema(name:t, type:tinyint, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:si, type:smallint, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:i, type:int, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:b, type:bigint, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:f, type:float, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:d, type:double, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:bo, type:boolean, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:s, type:string, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:ts, type:timestamp, comment:null), (over10k_n22)over10k_n22.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k_n22)over10k_n22.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: t2_n26.b1 SIMPLE [(over10k_n22)over10k_n22.FieldSchema(name:s, type:string, comment:null), ]
+PREHOOK: query: select * from t1_n49 limit 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_n49
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t1 limit 3
+POSTHOOK: query: select * from t1_n49 limit 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_n49
 #### A masked pattern was here ####
 65542 rachel thompson
 131088 oscar brown
 262258 wendy steinbeck
-PREHOOK: query: select * from t2 limit 3
+PREHOOK: query: select * from t2_n26 limit 3
 PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
-POSTHOOK: query: select * from t2 limit 3
+POSTHOOK: query: select * from t2_n26 limit 3
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2_n26
 #### A masked pattern was here ####
 65542 rachel thompson
 131088 oscar brown
diff --git a/ql/src/test/results/clientpositive/windowing_multipartitioning.q.out b/ql/src/test/results/clientpositive/windowing_multipartitioning.q.out
index c2a2033fa4..7e8305cda0 100644
--- a/ql/src/test/results/clientpositive/windowing_multipartitioning.q.out
+++ b/ql/src/test/results/clientpositive/windowing_multipartitioning.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: drop table over10k
+PREHOOK: query: drop table over10k_n11
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table over10k
+POSTHOOK: query: drop table over10k_n11
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table over10k(
+PREHOOK: query: create table over10k_n11(
 t tinyint,
 si smallint,
 i int,
@@ -18,8 +18,8 @@ PREHOOK: query: create table over10k(
 fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over10k
-POSTHOOK: query: create table over10k(
+PREHOOK: Output: default@over10k_n11
+POSTHOOK: query: create table over10k_n11(
 t tinyint,
 si smallint,
 i int,
@@ -35,22 +35,22 @@ POSTHOOK: query: create table over10k(
 fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over10k
-PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+POSTHOOK: Output: default@over10k_n11
+PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n11
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@over10k
-POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+PREHOOK: Output: default@over10k_n11
+POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n11
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@over10k
-PREHOOK: query: select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k limit 100
+POSTHOOK: Output: default@over10k_n11
+PREHOOK: query: select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k_n11 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
-POSTHOOK: query: select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k limit 100
+POSTHOOK: query: select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k_n11 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
 alice allen 1 4294967503
 alice allen 2 8589934990
@@ -155,18 +155,18 @@ alice hernandez 5 21474837123
 PREHOOK: query: select s,
 rank() over (partition by s order by `dec` desc),
 sum(b) over (partition by s order by ts desc)
-from over10k
+from over10k_n11
 where s = 'tom allen' or s = 'bob steinbeck'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
 POSTHOOK: query: select s,
 rank() over (partition by s order by `dec` desc),
 sum(b) over (partition by s order by ts desc)
-from over10k
+from over10k_n11
 where s = 'tom allen' or s = 'bob steinbeck'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
 bob steinbeck 11 4294967344
 bob steinbeck 1 8589934849
@@ -198,13 +198,13 @@ tom allen 4 68719478972
 tom allen 12 73014446496
 tom allen 13 77309413835
 tom allen 14 81604381169
-PREHOOK: query: select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k where s = 'tom allen' or s = 'bob steinbeck'
+PREHOOK: query: select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k_n11 where s = 'tom allen' or s = 'bob steinbeck'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
-POSTHOOK: query: select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k where s = 'tom allen' or s = 'bob steinbeck'
+POSTHOOK: query: select s, sum(i) over (partition by s), sum(f) over (partition by si) from over10k_n11 where s = 'tom allen' or s = 'bob steinbeck'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
 tom allen 1248023 47.16999816894531
 tom allen 1248023 77.77999877929688
@@ -236,15 +236,15 @@ bob steinbeck 722083 9.699999809265137
 tom allen 1248023 55.38999938964844
 bob steinbeck 722083 132.82000350952148
 tom allen 1248023 132.82000350952148
-PREHOOK: query: select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k
+PREHOOK: query: select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k_n11
 where s = 'tom allen' or s = 'bob steinbeck'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
-POSTHOOK: query: select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k
+POSTHOOK: query: select s, rank() over (partition by s order by bo), rank() over (partition by si order by bin desc) from over10k_n11
 where s = 'tom allen' or s = 'bob steinbeck'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
 tom allen 1 1
 tom allen 1 1
@@ -276,13 +276,13 @@ bob steinbeck 5 1
 tom allen 1 1
 bob steinbeck 1 1
 tom allen 7 2
-PREHOOK: query: select s, sum(f) over (partition by i), row_number() over (order by f) from over10k where s = 'tom allen' or s = 'bob steinbeck'
+PREHOOK: query: select s, sum(f) over (partition by i), row_number() over (order by f) from over10k_n11 where s = 'tom allen' or s = 'bob steinbeck'
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
-POSTHOOK: query: select s, sum(f) over (partition by i), row_number() over (order by f) from over10k where s = 'tom allen' or s = 'bob steinbeck'
+POSTHOOK: query: select s, sum(f) over (partition by i), row_number() over (order by f) from over10k_n11 where s = 'tom allen' or s = 'bob steinbeck'
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
 tom allen 2.8499999046325684 1
 bob steinbeck 9.699999809265137 2
@@ -316,23 +316,23 @@ tom allen 89.88999938964844 29
 tom allen 95.41000366210938 30
 PREHOOK: query: select s,
 rank() over w1,
 rank() over w2
-from over10k
+from over10k_n11
 where s = 'tom allen' or s = 'bob steinbeck'
 window w1 as (partition by s order by `dec`),
 w2 as (partition by si order by f)
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
 POSTHOOK: query: select s,
 rank() over w1,
 rank() over w2
-from over10k
+from over10k_n11
 where s = 'tom allen' or s = 'bob steinbeck'
 window w1 as (partition by s order by `dec`),
 w2 as (partition by si order by f)
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n11
 #### A masked pattern was here ####
 tom allen 14 1
 tom allen 17 1
diff --git a/ql/src/test/results/clientpositive/windowing_navfn.q.out b/ql/src/test/results/clientpositive/windowing_navfn.q.out
index b2db5a4b98..b2e12dddbb 100644
--- a/ql/src/test/results/clientpositive/windowing_navfn.q.out
+++ b/ql/src/test/results/clientpositive/windowing_navfn.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: drop table over10k
+PREHOOK: query: drop table over10k_n19
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table over10k
+POSTHOOK: query: drop table over10k_n19
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table over10k(
+PREHOOK: query: create table over10k_n19(
 t tinyint,
 si smallint,
 i int,
@@ -18,8 +18,8 @@ PREHOOK: query: create table over10k(
 fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over10k
-POSTHOOK: query: create table over10k(
+PREHOOK: Output: default@over10k_n19
+POSTHOOK: query: create table over10k_n19(
 t tinyint,
 si smallint,
 i int,
@@ -35,15 +35,15 @@ POSTHOOK: query: create table over10k(
 fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over10k
-PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+POSTHOOK: Output: default@over10k_n19
+PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n19
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@over10k
-POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+PREHOOK: Output: default@over10k_n19
+POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n19
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@over10k
+POSTHOOK: Output: default@over10k_n19
 PREHOOK: query: explain select row_number() over() from src where key = '238'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select row_number() over() from src where key = '238'
@@ -117,13 +117,13 @@ POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 1
 2
-PREHOOK: query: select s, row_number() over (partition by d order by `dec`) from over10k limit 100
+PREHOOK: query: select s, row_number() over (partition by d order by `dec`) from over10k_n19 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
-POSTHOOK: query: select s, row_number() over (partition by d order by `dec`) from over10k limit 100
+POSTHOOK: query: select s, row_number() over (partition by d order by `dec`) from over10k_n19 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
 calvin miller 1
 yuri laertes 1
@@ -225,13 +225,13 @@ david garcia 1
 holly hernandez 2
 tom white 3
 rachel ellison 1
-PREHOOK: query: select i, lead(s) over (partition by bin order by d,i desc) from over10k limit 100
+PREHOOK: query: select i, lead(s) over (partition by bin order by d,i desc) from over10k_n19 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
-POSTHOOK: query: select i, lead(s) over (partition by bin order by d,i desc) from over10k limit 100
+POSTHOOK: query: select i, lead(s) over (partition by bin order by d,i desc) from over10k_n19 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
 65773 gabriella van buren
 65727 quinn steinbeck
@@ -333,13 +333,13 @@ POSTHOOK: Input: default@over10k
 65536 alice hernandez
 65545 tom zipper
 65789 ulysses hernandez
-PREHOOK: query: select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k limit 100
+PREHOOK: query: select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k_n19 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
-POSTHOOK: query: select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k limit 100
+POSTHOOK: query: select i, lag(`dec`) over (partition by i order by s,i,`dec`) from over10k_n19 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
 65536 NULL
 65536 31.09
@@ -441,13 +441,13 @@ POSTHOOK: Input: default@over10k
 65538 9.53
 65538 48.61
 65538 2.03
-PREHOOK: query: select s, last_value(t) over (partition by d order by f) from over10k limit 100
+PREHOOK: query: select s, last_value(t) over (partition by d order by f) from over10k_n19 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
-POSTHOOK: query: select s, last_value(t) over (partition by d order by f) from over10k limit 100
+POSTHOOK: query: select s, last_value(t) over (partition by d order by f) from over10k_n19 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
 calvin miller 99
 holly polk 83
@@ -549,13 +549,13 @@ tom white 43
 holly hernandez 4
 david garcia 101
 rachel ellison 51
-PREHOOK: query: select s, first_value(s) over (partition by bo order by s) from over10k limit 100
+PREHOOK: query: select s, first_value(s) over (partition by bo order by s) from over10k_n19 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
-POSTHOOK: query: select s, first_value(s) over (partition by bo order by s) from over10k limit 100
+POSTHOOK: query: select s, first_value(s) over (partition by bo order by s) from over10k_n19 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
 alice allen alice allen
 alice allen alice allen
@@ -658,22 +658,22 @@ alice nixon alice allen
 alice nixon alice allen
 alice nixon alice allen
 PREHOOK: query: select t, s, i, last_value(i) over (partition by t order by s)
-from over10k where (s = 'oscar allen' or s = 'oscar carson') and t = 10
+from over10k_n19 where (s = 'oscar allen' or s = 'oscar carson') and t = 10
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
 POSTHOOK: query: select t, s, i, last_value(i) over (partition by t order by s)
-from over10k where (s = 'oscar allen' or s = 'oscar carson') and t = 10
+from over10k_n19 where (s = 'oscar allen' or s = 'oscar carson') and t = 10
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n19
 #### A masked pattern was here ####
 10 oscar allen 65662 65662
 10 oscar carson 65549 65549
-PREHOOK: query: drop table if exists wtest
+PREHOOK: query: drop table if exists wtest_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists wtest
+POSTHOOK: query: drop table if exists wtest_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table wtest as
+PREHOOK: query: create table wtest_n0 as
 select a, b
 from
 (
@@ -688,8 +688,8 @@ SELECT explode(
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
-PREHOOK: Output: default@wtest
-POSTHOOK: query: create table wtest as
+PREHOOK: Output: default@wtest_n0
+POSTHOOK: query: create table wtest_n0 as
 select a, b
 from
 (
@@ -704,26 +704,26 @@ SELECT explode(
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@wtest
-POSTHOOK: Lineage: wtest.a SCRIPT []
-POSTHOOK: Lineage: wtest.b SCRIPT []
+POSTHOOK: Output: default@wtest_n0
+POSTHOOK: Lineage: wtest_n0.a SCRIPT []
+POSTHOOK: Lineage: wtest_n0.b SCRIPT []
 PREHOOK: query: select a, b,
 first_value(b) over (partition by a order by b rows between 1 preceding and 1 following ) ,
 first_value(b, true) over (partition by a order by b rows between 1 preceding and 1 following ) ,
 first_value(b) over (partition by a order by b rows between unbounded preceding and 1 following ) ,
 first_value(b, true) over (partition by a order by b rows between unbounded preceding and 1 following )
-from wtest
+from wtest_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@wtest
+PREHOOK: Input: default@wtest_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select a, b,
 first_value(b) over (partition by a order by b rows between 1 preceding and 1 following ) ,
 first_value(b, true) over (partition by a order by b rows between 1 preceding and 1 following ) ,
 first_value(b) over (partition by a order by b rows between unbounded preceding and 1 following ) ,
 first_value(b, true) over (partition by a order by b rows between unbounded preceding and 1 following )
-from wtest
+from wtest_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@wtest
+POSTHOOK: Input: default@wtest_n0
 #### A masked pattern was here ####
 1 NULL NULL NULL NULL NULL
 1 NULL NULL NULL NULL NULL
@@ -745,18 +745,18 @@ first_value(b) over (partition by a order by b desc rows between 1 preceding an
 first_value(b, true) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
 first_value(b) over (partition by a order by b desc rows between unbounded preceding and 1 following ) ,
 first_value(b, true) over (partition by a order by b desc rows between unbounded preceding and 1 following )
-from wtest
+from wtest_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@wtest
+PREHOOK: Input: default@wtest_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select a, b,
 first_value(b) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
 first_value(b, true) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
 first_value(b) over (partition by a order by b desc rows between unbounded preceding and 1 following ) ,
 first_value(b, true) over (partition by a order by b desc rows between unbounded preceding and 1 following )
-from wtest
+from wtest_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@wtest
+POSTHOOK: Input: default@wtest_n0
 #### A masked pattern was here ####
 1 NULL NULL NULL NULL NULL
 1 NULL NULL NULL NULL NULL
@@ -778,18 +778,18 @@ last_value(b) over (partition by a order by b rows between 1 preceding and 1 fol
 last_value(b, true) over (partition by a order by b rows between 1 preceding and 1 following ) ,
 last_value(b) over (partition by a order by b rows between unbounded preceding and 1 following ) ,
 last_value(b, true) over (partition by a order by b rows between unbounded preceding and 1 following )
-from wtest
+from wtest_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@wtest
+PREHOOK: Input: default@wtest_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select a, b,
 last_value(b) over (partition by a order by b rows between 1 preceding and 1 following ) ,
 last_value(b, true) over (partition by a order by b rows between 1 preceding and 1 following ) ,
 last_value(b) over (partition by a order by b rows between unbounded preceding and 1 following ) ,
 last_value(b, true) over (partition by a order by b rows between unbounded preceding and 1 following )
-from wtest
+from wtest_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@wtest
+POSTHOOK: Input: default@wtest_n0
 #### A masked pattern was here ####
 1 NULL NULL NULL NULL NULL
 1 NULL NULL NULL NULL NULL
@@ -811,18 +811,18 @@ last_value(b) over (partition by a order by b desc rows between 1 preceding and
 last_value(b, true) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
 last_value(b) over (partition by a order by b desc rows between unbounded preceding and 1 following ) ,
 last_value(b, true) over (partition by a order by b desc rows between unbounded preceding and 1 following )
-from wtest
+from wtest_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@wtest
+PREHOOK: Input: default@wtest_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select a, b,
 last_value(b) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
 last_value(b, true) over (partition by a order by b desc rows between 1 preceding and 1 following ) ,
 last_value(b) over (partition by a order by b desc rows between unbounded preceding and 1 following ) ,
 last_value(b, true) over (partition by a order by b desc rows between unbounded preceding and 1 following )
-from wtest
+from wtest_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@wtest
+POSTHOOK: Input: default@wtest_n0
 #### A masked pattern was here ####
 1 NULL NULL NULL NULL NULL
 1 NULL NULL NULL NULL NULL
diff --git a/ql/src/test/results/clientpositive/windowing_ntile.q.out b/ql/src/test/results/clientpositive/windowing_ntile.q.out
index fac76ab6d7..e07d047535 100644
--- a/ql/src/test/results/clientpositive/windowing_ntile.q.out
+++ b/ql/src/test/results/clientpositive/windowing_ntile.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: drop table over10k
+PREHOOK: query: drop table over10k_n13
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table over10k
+POSTHOOK: query: drop table over10k_n13
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table over10k(
+PREHOOK: query: create table over10k_n13(
 t tinyint,
 si smallint,
 i int,
@@ -18,8 +18,8 @@ PREHOOK: query: create table over10k(
 fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@over10k
-POSTHOOK: query: create table over10k(
+PREHOOK: Output: default@over10k_n13
+POSTHOOK: query: create table over10k_n13(
 t tinyint,
 si smallint,
 i int,
@@ -35,22 +35,22 @@ POSTHOOK: query: create table over10k(
 fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over10k
-PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+POSTHOOK: Output: default@over10k_n13
+PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n13
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@over10k
-POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+PREHOOK: Output: default@over10k_n13
+POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n13
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@over10k
-PREHOOK: query: select i, ntile(10) over (partition by s order by i) from over10k limit 100
+POSTHOOK: Output: default@over10k_n13
+PREHOOK: query: select i, ntile(10) over (partition by s order by i) from over10k_n13 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n13
 #### A masked pattern was here ####
-POSTHOOK: query: select i, ntile(10) over (partition by s order by i) from over10k limit 100
+POSTHOOK: query: select i, ntile(10) over (partition by s order by i) from over10k_n13 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n13
 #### A masked pattern was here ####
 65545 1
 65557 2
@@ -152,13 +152,13 @@ POSTHOOK: Input: default@over10k
 65569 2
 65603 2
 65633 3
-PREHOOK: query: select s, ntile(100) over (partition by i order by s) from over10k limit 100
+PREHOOK: query: select s, ntile(100) over (partition by i order by s) from over10k_n13 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n13
 #### A masked pattern was here ####
-POSTHOOK: query: select s, ntile(100) over (partition by i order by s) from over10k limit 100
+POSTHOOK: query: select s, ntile(100) over (partition by i order by s) from over10k_n13 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input:
default@over10k_n13 #### A masked pattern was here #### alice ichabod 1 alice robinson 2 @@ -260,13 +260,13 @@ priscilla xylophone 17 quinn garcia 18 quinn laertes 19 rachel young 20 -PREHOOK: query: select f, ntile(4) over (partition by d order by f) from over10k limit 100 +PREHOOK: query: select f, ntile(4) over (partition by d order by f) from over10k_n13 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n13 #### A masked pattern was here #### -POSTHOOK: query: select f, ntile(4) over (partition by d order by f) from over10k limit 100 +POSTHOOK: query: select f, ntile(4) over (partition by d order by f) from over10k_n13 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n13 #### A masked pattern was here #### 8.39 1 5.29 1 @@ -368,13 +368,13 @@ POSTHOOK: Input: default@over10k 49.09 2 67.28 3 10.6 1 -PREHOOK: query: select d, ntile(1000) over (partition by `dec` order by d) from over10k limit 100 +PREHOOK: query: select d, ntile(1000) over (partition by `dec` order by d) from over10k_n13 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n13 #### A masked pattern was here #### -POSTHOOK: query: select d, ntile(1000) over (partition by `dec` order by d) from over10k limit 100 +POSTHOOK: query: select d, ntile(1000) over (partition by `dec` order by d) from over10k_n13 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n13 #### A masked pattern was here #### 9.71 1 41.04 2 diff --git a/ql/src/test/results/clientpositive/windowing_range_multiorder.q.out b/ql/src/test/results/clientpositive/windowing_range_multiorder.q.out index fb2ef3b14f..a512fc8983 100644 --- a/ql/src/test/results/clientpositive/windowing_range_multiorder.q.out +++ b/ql/src/test/results/clientpositive/windowing_range_multiorder.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table over10k +PREHOOK: query: drop table over10k_n17 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table over10k +POSTHOOK: query: drop table over10k_n17 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n17( t tinyint, si smallint, i int, @@ -18,8 +18,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n17 +POSTHOOK: query: create table over10k_n17( t tinyint, si smallint, i int, @@ -35,22 +35,22 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n17 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n17 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n17 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n17 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k -PREHOOK: query: select first_value(t) over ( partition by si order by i, b ) from over10k limit 100 +POSTHOOK: Output: default@over10k_n17 +PREHOOK: query: select 
first_value(t) over ( partition by si order by i, b ) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select first_value(t) over ( partition by si order by i, b ) from over10k limit 100 +POSTHOOK: query: select first_value(t) over ( partition by si order by i, b ) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### 51 51 @@ -152,13 +152,13 @@ POSTHOOK: Input: default@over10k 47 47 47 -PREHOOK: query: select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k limit 100 +PREHOOK: query: select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k limit 100 +POSTHOOK: query: select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### 65543 65549 @@ -260,13 +260,13 @@ POSTHOOK: Input: default@over10k 65568 65579 65603 -PREHOOK: query: select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k limit 100 +PREHOOK: query: select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k limit 100 +POSTHOOK: query: select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### 1 2 @@ -368,13 +368,13 @@ POSTHOOK: Input: default@over10k 5 6 7 -PREHOOK: query: select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k limit 100 +PREHOOK: query: select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k limit 100 +POSTHOOK: query: select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### alice allen 451 65662 65640.125 alice allen 462 65545 65640.125 @@ -476,13 +476,13 @@ alice hernandez 336 65786 65678.38888888889 alice hernandez 324 65720 65678.38888888889 alice hernandez 270 65717 65678.38888888889 alice hernandez 323 65727 65678.38888888889 -PREHOOK: query: select 
s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100 +PREHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100 +POSTHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### alice allen 400 65557 65557.0 alice allen 451 65662 65609.5 @@ -584,13 +584,13 @@ alice hernandez 290 65685 65701.0 alice hernandez 296 65569 65657.0 alice hernandez 320 65700 65667.75 alice hernandez 323 65727 65679.6 -PREHOOK: query: select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100 +PREHOOK: query: select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100 +POSTHOOK: query: select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### alice allen 400 65557 65557 alice allen 451 65662 65557 @@ -692,13 +692,13 @@ alice hernandez 290 65685 65685 alice hernandez 296 65569 65569 alice hernandez 320 65700 65569 alice hernandez 323 65727 65569 -PREHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k limit 100 +PREHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k limit 100 +POSTHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### alice allen 400 65557 65557.0 alice allen 451 65662 65609.5 @@ -800,13 +800,13 @@ alice hernandez 290 65685 65701.0 alice hernandez 296 65569 65657.0 alice hernandez 320 65700 65667.75 alice hernandez 323 65727 65679.6 -PREHOOK: query: select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k limit 100 +PREHOOK: query: select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current 
row) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k limit 100 +POSTHOOK: query: select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### 256 false 65543 32.21 65543 256 false 65549 23.72 65549 @@ -908,13 +908,13 @@ POSTHOOK: Input: default@over10k 258 true 65568 13.57 65568 258 true 65579 47.52 65579 258 true 65603 2.61 65603 -PREHOOK: query: select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k limit 100 +PREHOOK: query: select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k limit 100 +POSTHOOK: query: select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### false 1 false 2 @@ -1016,13 +1016,13 @@ false 17 false 18 true 19 true 20 -PREHOOK: query: select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100 +PREHOOK: query: select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here #### -POSTHOOK: query: select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100 +POSTHOOK: query: select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### alice ichabo 1 alice robins 2 @@ -1124,13 +1124,13 @@ priscilla xy 17 quinn garcia 18 quinn laerte 19 rachel young 20 -PREHOOK: query: select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100 +PREHOOK: query: select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n17 #### A masked pattern was here 
#### -POSTHOOK: query: select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k limit 100 +POSTHOOK: query: select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n17 #### A masked pattern was here #### alice ichabo 1 alice robins 2 diff --git a/ql/src/test/results/clientpositive/windowing_rank.q.out b/ql/src/test/results/clientpositive/windowing_rank.q.out index 771336a076..ba8f58a20c 100644 --- a/ql/src/test/results/clientpositive/windowing_rank.q.out +++ b/ql/src/test/results/clientpositive/windowing_rank.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table over10k +PREHOOK: query: drop table over10k_n10 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table over10k +POSTHOOK: query: drop table over10k_n10 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n10( t tinyint, si smallint, i int, @@ -18,8 +18,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n10 +POSTHOOK: query: create table over10k_n10( t tinyint, si smallint, i int, @@ -35,22 +35,22 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n10 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n10 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n10 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n10 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k -PREHOOK: query: select s, rank() over (partition by f order by t) from over10k limit 100 +POSTHOOK: Output: default@over10k_n10 +PREHOOK: query: select s, rank() over (partition by f order by t) from over10k_n10 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n10 #### A masked pattern was here #### -POSTHOOK: query: select s, rank() over (partition by f order by t) from over10k limit 100 +POSTHOOK: query: select s, rank() over (partition by f order by t) from over10k_n10 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n10 #### A masked pattern was here #### bob ichabod 1 yuri thompson 2 @@ -152,13 +152,13 @@ ulysses garcia 1 irene falkner 1 holly robinson 1 yuri white 1 -PREHOOK: query: select s, dense_rank() over (partition by ts order by i,s desc) from over10k limit 100 +PREHOOK: query: select s, dense_rank() over (partition by ts order by i,s desc) from over10k_n10 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n10 #### A masked pattern was here #### -POSTHOOK: query: select s, dense_rank() over (partition by ts order by i,s desc) from over10k limit 100 +POSTHOOK: 
query: select s, dense_rank() over (partition by ts order by i,s desc) from over10k_n10 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n10 #### A masked pattern was here #### rachel thompson 1 oscar brown 2 @@ -260,13 +260,13 @@ rachel ellison 21 wendy falkner 22 holly allen 23 ulysses carson 24 -PREHOOK: query: select s, cume_dist() over (partition by bo order by b,s) from over10k limit 100 +PREHOOK: query: select s, cume_dist() over (partition by bo order by b,s) from over10k_n10 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n10 #### A masked pattern was here #### -POSTHOOK: query: select s, cume_dist() over (partition by bo order by b,s) from over10k limit 100 +POSTHOOK: query: select s, cume_dist() over (partition by bo order by b,s) from over10k_n10 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n10 #### A masked pattern was here #### calvin allen 2.0112630732099757E-4 david ovid 4.0225261464199515E-4 @@ -368,13 +368,13 @@ fred ellison 0.019509251810136765 gabriella hernandez 0.019710378117457763 gabriella ovid 0.01991150442477876 gabriella steinbeck 0.02011263073209976 -PREHOOK: query: select s, percent_rank() over (partition by `dec` order by f) from over10k limit 100 +PREHOOK: query: select s, percent_rank() over (partition by `dec` order by f) from over10k_n10 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n10 #### A masked pattern was here #### -POSTHOOK: query: select s, percent_rank() over (partition by `dec` order by f) from over10k limit 100 +POSTHOOK: query: select s, percent_rank() over (partition by `dec` order by f) from over10k_n10 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n10 #### A masked pattern was here #### wendy king 0.0 calvin robinson 1.0 @@ -482,13 +482,13 @@ from rank() over (partition by ts order by `dec`) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n10 other + join over10k_n10 on (other.b = over10k_n10.b) ) joined ) ranked where rnk = 1 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n10 #### A masked pattern was here #### POSTHOOK: query: select ts, `dec`, rnk from @@ -496,13 +496,13 @@ from rank() over (partition by ts order by `dec`) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n10 other + join over10k_n10 on (other.b = over10k_n10.b) ) joined ) ranked where rnk = 1 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n10 #### A masked pattern was here #### 2013-03-01 09:11:58.70307 0.50 1 2013-03-01 09:11:58.70307 0.50 1 @@ -520,13 +520,13 @@ from rank() over (partition by ts) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n10 other + join over10k_n10 on (other.b = over10k_n10.b) ) joined ) ranked where `dec` = 89.5 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n10 #### A masked pattern was here #### POSTHOOK: query: select ts, `dec`, rnk from @@ -534,13 +534,13 @@ from rank() over (partition by ts) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n10 other + join 
over10k_n10 on (other.b = over10k_n10.b) ) joined ) ranked where `dec` = 89.5 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n10 #### A masked pattern was here #### 2013-03-01 09:11:58.703124 89.50 1 2013-03-01 09:11:58.703124 89.50 1 @@ -558,14 +558,14 @@ from rank() over (partition by ts order by `dec`) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n10 other + join over10k_n10 on (other.b = over10k_n10.b) where other.t < 10 ) joined ) ranked where rnk = 1 limit 10 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n10 #### A masked pattern was here #### POSTHOOK: query: select ts, `dec`, rnk from @@ -573,14 +573,14 @@ from rank() over (partition by ts order by `dec`) as rnk from (select other.ts, other.`dec` - from over10k other - join over10k on (other.b = over10k.b) + from over10k_n10 other + join over10k_n10 on (other.b = over10k_n10.b) where other.t < 10 ) joined ) ranked where rnk = 1 limit 10 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n10 #### A masked pattern was here #### 2013-03-01 09:11:58.70307 37.30 1 2013-03-01 09:11:58.70307 37.30 1 diff --git a/ql/src/test/results/clientpositive/windowing_streaming.q.out b/ql/src/test/results/clientpositive/windowing_streaming.q.out index 54deed0a27..42609480bc 100644 --- a/ql/src/test/results/clientpositive/windowing_streaming.q.out +++ b/ql/src/test/results/clientpositive/windowing_streaming.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table over10k +PREHOOK: query: drop table over10k_n20 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table over10k +POSTHOOK: query: drop table over10k_n20 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n20( t tinyint, si smallint, i int, @@ -18,8 +18,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n20 +POSTHOOK: query: create table over10k_n20( t tinyint, si smallint, i int, @@ -35,15 +35,15 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n20 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n20 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n20 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n20 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k +POSTHOOK: Output: default@over10k_n20 PREHOOK: query: explain select * from ( select p_mfgr, rank() over(partition by p_mfgr order by p_name) r from part) a @@ -234,16 +234,16 @@ Manufacturer#3 1 Manufacturer#4 1 Manufacturer#5 1 PREHOOK: query: select * -from (select t, f, rank() over(partition by t order by f) r from over10k) a +from (select t, f, rank() over(partition by t order by f) r from over10k_n20) a where r < 6 and t < 5 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: 
default@over10k_n20 #### A masked pattern was here #### POSTHOOK: query: select * -from (select t, f, rank() over(partition by t order by f) r from over10k) a +from (select t, f, rank() over(partition by t order by f) r from over10k_n20) a where r < 6 and t < 5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n20 #### A masked pattern was here #### -3 0.56 1 -3 0.83 2 @@ -286,16 +286,16 @@ POSTHOOK: Input: default@over10k 4 5.76 4 4 7.26 5 PREHOOK: query: select * -from (select t, f, row_number() over(partition by t order by f) r from over10k) a +from (select t, f, row_number() over(partition by t order by f) r from over10k_n20) a where r < 8 and t < 0 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n20 #### A masked pattern was here #### POSTHOOK: query: select * -from (select t, f, row_number() over(partition by t order by f) r from over10k) a +from (select t, f, row_number() over(partition by t order by f) r from over10k_n20) a where r < 8 and t < 0 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n20 #### A masked pattern was here #### -3 0.56 1 -3 0.83 2 @@ -388,34 +388,34 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: drop table if exists sB +PREHOOK: query: drop table if exists sB_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists sB +POSTHOOK: query: drop table if exists sB_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table sB ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as +PREHOOK: query: create table sB_n0 ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default -PREHOOK: Output: default@sB -POSTHOOK: query: create table sB ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as +PREHOOK: Output: default@sB_n0 +POSTHOOK: query: create table sB_n0 ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default -POSTHOOK: Output: default@sB -POSTHOOK: Lineage: sb.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: sb.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -POSTHOOK: Lineage: sb.r SCRIPT [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), 
(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -PREHOOK: query: select * from sB +POSTHOOK: Output: default@sB_n0 +POSTHOOK: Lineage: sb_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: sb_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +POSTHOOK: Lineage: sb_n0.r SCRIPT [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] +PREHOOK: query: select * from sB_n0 where ctinyint is null PREHOOK: type: QUERY -PREHOOK: Input: default@sb +PREHOOK: Input: default@sb_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from sB +POSTHOOK: query: select * from sB_n0 where ctinyint is null POSTHOOK: type: QUERY -POSTHOOK: Input: default@sb +POSTHOOK: Input: default@sb_n0 #### A masked pattern was here #### NULL NULL 1 NULL NULL 1 @@ -459,34 +459,34 @@ NULL NULL 1 NULL NULL 1 NULL NULL 1 NULL NULL 1 -PREHOOK: query: drop table if exists sD +PREHOOK: query: drop table if exists sD_n0 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists sD +POSTHOOK: query: drop table if exists sD_n0 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table sD ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as +PREHOOK: query: create table sD_n0 ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default -PREHOOK: Output: default@sD -POSTHOOK: query: create table sD ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as +PREHOOK: Output: default@sD_n0 +POSTHOOK: query: create table sD_n0 ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default -POSTHOOK: Output: default@sD -POSTHOOK: Lineage: sd.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -POSTHOOK: Lineage: sd.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] -POSTHOOK: Lineage: sd.r SCRIPT [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), 
(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] -PREHOOK: query: select * from sD +POSTHOOK: Output: default@sD_n0 +POSTHOOK: Lineage: sd_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: sd_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +POSTHOOK: Lineage: sd_n0.r SCRIPT [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] +PREHOOK: query: select * from sD_n0 where ctinyint is null PREHOOK: type: QUERY -PREHOOK: Input: default@sd +PREHOOK: Input: default@sd_n0 #### A masked pattern was here #### -POSTHOOK: query: select * from sD +POSTHOOK: query: select * from sD_n0 where ctinyint is null POSTHOOK: type: QUERY -POSTHOOK: Input: default@sd +POSTHOOK: Input: default@sd_n0 #### A masked pattern was here #### NULL NULL 1 NULL NULL 1 diff --git a/ql/src/test/results/clientpositive/windowing_udaf.q.out b/ql/src/test/results/clientpositive/windowing_udaf.q.out index 238dbf9207..af69e9d87f 100644 --- a/ql/src/test/results/clientpositive/windowing_udaf.q.out +++ b/ql/src/test/results/clientpositive/windowing_udaf.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table over10k +PREHOOK: query: drop table over10k_n4 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table over10k +POSTHOOK: query: drop table over10k_n4 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n4( t tinyint, si smallint, i int, @@ -18,8 +18,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n4 +POSTHOOK: query: create table over10k_n4( t tinyint, si 
smallint, i int, @@ -35,22 +35,22 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n4 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n4 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n4 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n4 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k -PREHOOK: query: select s, min(i) over (partition by s) from over10k limit 100 +POSTHOOK: Output: default@over10k_n4 +PREHOOK: query: select s, min(i) over (partition by s) from over10k_n4 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n4 #### A masked pattern was here #### -POSTHOOK: query: select s, min(i) over (partition by s) from over10k limit 100 +POSTHOOK: query: select s, min(i) over (partition by s) from over10k_n4 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n4 #### A masked pattern was here #### alice allen 65545 alice allen 65545 @@ -152,13 +152,13 @@ alice hernandez 65543 alice hernandez 65543 alice hernandez 65543 alice hernandez 65543 -PREHOOK: query: select s, avg(f) over (partition by si order by s) from over10k limit 100 +PREHOOK: query: select s, avg(f) over (partition by si order by s) from over10k_n4 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n4 #### A masked pattern was here #### -POSTHOOK: query: select s, avg(f) over (partition by si order by s) from over10k limit 100 +POSTHOOK: query: select s, avg(f) over (partition by si order by s) from over10k_n4 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n4 #### A masked pattern was here #### alice ellison 38.15999984741211 alice ovid 57.26999855041504 @@ -260,13 +260,13 @@ nick xylophone 54.5635001540184 oscar van buren 55.146666697093416 priscilla underhill 56.049545580690555 priscilla zipper 55.24625018239021 -PREHOOK: query: select s, avg(i) over (partition by t, b order by s) from over10k limit 100 +PREHOOK: query: select s, avg(i) over (partition by t, b order by s) from over10k_n4 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n4 #### A masked pattern was here #### -POSTHOOK: query: select s, avg(i) over (partition by t, b order by s) from over10k limit 100 +POSTHOOK: query: select s, avg(i) over (partition by t, b order by s) from over10k_n4 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n4 #### A masked pattern was here #### oscar miller 65773.0 bob underhill 65735.0 @@ -368,13 +368,13 @@ sarah steinbeck 65655.0 alice carson 65565.0 katie thompson 65582.5 oscar van buren 65595.0 -PREHOOK: query: select max(i) over w from over10k window w as (partition by f) limit 100 +PREHOOK: query: select max(i) over w from over10k_n4 window w as (partition by f) limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n4 #### A masked pattern was here #### -POSTHOOK: query: select max(i) 
over w from over10k window w as (partition by f) limit 100 +POSTHOOK: query: select max(i) over w from over10k_n4 window w as (partition by f) limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n4 #### A masked pattern was here #### 65774 65774 @@ -476,13 +476,13 @@ POSTHOOK: Input: default@over10k 65785 65590 65740 -PREHOOK: query: select s, avg(d) over (partition by t order by f) from over10k limit 100 +PREHOOK: query: select s, avg(d) over (partition by t order by f) from over10k_n4 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n4 #### A masked pattern was here #### -POSTHOOK: query: select s, avg(d) over (partition by t order by f) from over10k limit 100 +POSTHOOK: query: select s, avg(d) over (partition by t order by f) from over10k_n4 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n4 #### A masked pattern was here #### rachel thompson 11.96 oscar davidson 19.57 diff --git a/ql/src/test/results/clientpositive/windowing_windowspec.q.out b/ql/src/test/results/clientpositive/windowing_windowspec.q.out index 96125aefac..5ddf9cdbb0 100644 --- a/ql/src/test/results/clientpositive/windowing_windowspec.q.out +++ b/ql/src/test/results/clientpositive/windowing_windowspec.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: drop table over10k +PREHOOK: query: drop table over10k_n18 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table over10k +POSTHOOK: query: drop table over10k_n18 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table over10k( +PREHOOK: query: create table over10k_n18( t tinyint, si smallint, i int, @@ -18,8 +18,8 @@ PREHOOK: query: create table over10k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@over10k -POSTHOOK: query: create table over10k( +PREHOOK: Output: default@over10k_n18 +POSTHOOK: query: create table over10k_n18( t tinyint, si smallint, i int, @@ -35,22 +35,22 @@ POSTHOOK: query: create table over10k( fields terminated by '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@over10k -PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: Output: default@over10k_n18 +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n18 PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@over10k -POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: Output: default@over10k_n18 +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k_n18 POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@over10k -PREHOOK: query: select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k limit 100 +POSTHOOK: Output: default@over10k_n18 +PREHOOK: query: select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k_n18 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n18 #### A masked pattern was here #### -POSTHOOK: query: select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k limit 100 +POSTHOOK: query: select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k_n18 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n18 #### A masked 
pattern was here #### alice ichabod 4294967441 alice robinson 8589934917 @@ -152,13 +152,13 @@ priscilla xylophone 73014446004 quinn garcia 77309413317 quinn laertes 81604380656 rachel young 85899348171 -PREHOOK: query: select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k limit 100 +PREHOOK: query: select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k_n18 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n18 #### A masked pattern was here #### -POSTHOOK: query: select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k limit 100 +POSTHOOK: query: select s, sum(f) over (partition by d order by s,f rows unbounded preceding) from over10k_n18 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n18 #### A masked pattern was here #### calvin miller 8.390000343322754 holly polk 5.289999961853027 @@ -260,13 +260,13 @@ david garcia 67.27999877929688 holly hernandez 116.36999893188477 tom white 154.0 rachel ellison 10.600000381469727 -PREHOOK: query: select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k limit 100 +PREHOOK: query: select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k_n18 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n18 #### A masked pattern was here #### -POSTHOOK: query: select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k limit 100 +POSTHOOK: query: select s, sum(f) over (partition by ts order by f range between current row and unbounded following) from over10k_n18 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n18 #### A masked pattern was here #### gabriella xylophone 1276.850001335144 calvin brown 1273.68000125885 @@ -368,13 +368,13 @@ calvin white 1025.1800079345703 jessica ovid 956.9800109863281 jessica johnson 885.3000106811523 priscilla garcia 805.8400115966797 -PREHOOK: query: select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k limit 100 +PREHOOK: query: select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k_n18 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: default@over10k_n18 #### A masked pattern was here #### -POSTHOOK: query: select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k limit 100 +POSTHOOK: query: select s, avg(f) over (partition by ts order by s,f rows between current row and 5 following) from over10k_n18 limit 100 POSTHOOK: type: QUERY -POSTHOOK: Input: default@over10k +POSTHOOK: Input: default@over10k_n18 #### A masked pattern was here #### bob ovid 28.053333441416424 calvin brown 38.73666652043661 @@ -476,13 +476,13 @@ rachel ichabod 41.75999959309896 rachel polk 49.56333351135254 sarah falkner 59.53333377838135 tom hernandez 63.331667264302574 -PREHOOK: query: select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k limit 100 +PREHOOK: query: select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k_n18 limit 100 PREHOOK: type: QUERY -PREHOOK: Input: default@over10k +PREHOOK: Input: 
default@over10k_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k limit 100
+POSTHOOK: query: select s, avg(d) over (partition by t order by s,d desc rows between 5 preceding and 5 following) from over10k_n18 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
 alice allen 33.20166666666666
 alice davidson 30.741428571428568
@@ -584,13 +584,13 @@ calvin xylophone 26.91272727272727
 david falkner 27.31
 david laertes 28.00454545454545
 david miller 28.40090909090909
-PREHOOK: query: select s, sum(i) over(partition by ts order by s) from over10k limit 100
+PREHOOK: query: select s, sum(i) over(partition by ts order by s) from over10k_n18 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select s, sum(i) over(partition by ts order by s) from over10k limit 100
+POSTHOOK: query: select s, sum(i) over(partition by ts order by s) from over10k_n18 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
 bob ovid 65748
 calvin brown 131440
@@ -692,13 +692,13 @@ rachel ichabod 1379923
 rachel polk 1445518
 sarah falkner 1511234
 tom hernandez 1576947
-PREHOOK: query: select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k limit 100
+PREHOOK: query: select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k_n18 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k limit 100
+POSTHOOK: query: select f, sum(f) over (partition by ts order by f range between unbounded preceding and current row) from over10k_n18 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
 3.17 3.1700000762939453
 10.89 14.0600004196167
@@ -800,13 +800,13 @@ POSTHOOK: Input: default@over10k
 71.68 722.6499947607517
 79.46 802.1099938452244
 80.02 882.1299904882908
-PREHOOK: query: select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100
+PREHOOK: query: select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n18 limit 100
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k limit 100
+POSTHOOK: query: select f, sum(f) over (partition by ts order by f rows between 2 preceding and 1 preceding) from over10k_n18 limit 100
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
 3.17 NULL
 10.89 3.1700000762939453
@@ -908,13 +908,13 @@ POSTHOOK: Input: default@over10k
 71.68 130.28999710083008
 79.46 139.87999725341797
 80.02 151.13999938964844
-PREHOOK: query: select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k limit 7
+PREHOOK: query: select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k_n18 limit 7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k limit 7
+POSTHOOK: query: select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) from over10k_n18 limit 7
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
 alice allen 65545 2.22
 alice allen 65557 2.58
@@ -923,13 +923,13 @@ alice allen 65609 2.99
 alice allen 65662 2.7
 alice allen 65670 2.88
 alice allen 65720 2.76
-PREHOOK: query: select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i) limit 7
+PREHOOK: query: select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n18 window w1 as (partition by s order by i) limit 7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i) limit 7
+POSTHOOK: query: select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n18 window w1 as (partition by s order by i) limit 7
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
 alice allen 65545 20.0
 alice allen 65557 20.0
@@ -938,13 +938,13 @@ alice allen 65609 20.0
 alice allen 65662 20.0
 alice allen 65670 20.0
 alice allen 65720 20.0
-PREHOOK: query: select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i)) X limit 7
+PREHOOK: query: select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n18 window w1 as (partition by s order by i)) X limit 7
 PREHOOK: type: QUERY
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
-POSTHOOK: query: select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k window w1 as (partition by s order by i)) X limit 7
+POSTHOOK: query: select s, i from ( select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) from over10k_n18 window w1 as (partition by s order by i)) X limit 7
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n18
 #### A masked pattern was here ####
 alice allen 65545
 alice allen 65557
@@ -956,31 +956,31 @@ alice allen 65720
 PREHOOK: query: create table over10k_2 as
 select t, si, i, b, f, d, bo, s, ts, cast(ts as timestamp with local time zone) as tstz, `dec`, bin
-from over10k
+from over10k_n18
 PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@over10k
+PREHOOK: Input: default@over10k_n18
 PREHOOK: Output: database:default
 PREHOOK: Output: default@over10k_2
 POSTHOOK: query: create table over10k_2 as
 select t, si, i, b, f, d, bo, s, ts, cast(ts as timestamp with local time zone) as tstz, `dec`, bin
-from over10k
+from over10k_n18
 POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@over10k
+POSTHOOK: Input: default@over10k_n18
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@over10k_2
-POSTHOOK: Lineage: over10k_2.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over10k_2.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over10k_2.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over10k_2.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over10k_2.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ]
-POSTHOOK: Lineage: over10k_2.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over10k_2.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over10k_2.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over10k_2.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over10k_2.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over10k_2.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: over10k_2.tstz EXPRESSION [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: over10k_2.b SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over10k_2.bin SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: over10k_2.bo SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: over10k_2.d SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: over10k_2.dec SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:dec, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: over10k_2.f SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over10k_2.i SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over10k_2.s SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: over10k_2.si SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over10k_2.t SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: over10k_2.ts SIMPLE [(over10k_n18)over10k_n18.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: over10k_2.tstz EXPRESSION [(over10k_n18)over10k_n18.FieldSchema(name:ts, type:timestamp, comment:null), ]
 PREHOOK: query: select ts, i, sum(f) over (partition by i order by ts) from over10k_2 where i = 65536
diff --git a/ql/src/test/results/clientpositive/windowing_windowspec3.q.out b/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
index 7defa8c8ec..7dbb275f3c 100644
--- a/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
+++ b/ql/src/test/results/clientpositive/windowing_windowspec3.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: drop table if exists emp
+PREHOOK: query: drop table if exists emp_n0
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists emp
+POSTHOOK: query: drop table if exists emp_n0
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table emp(empno smallint,
+PREHOOK: query: create table emp_n0(empno smallint,
 ename varchar(10),
 job varchar(10),
 manager smallint,
@@ -16,8 +16,8 @@ PREHOOK: query: create table emp(empno smallint,
 fields terminated by '|'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
-PREHOOK: Output: default@emp
-POSTHOOK: query: create table emp(empno smallint,
+PREHOOK: Output: default@emp_n0
+POSTHOOK: query: create table emp_n0(empno smallint,
 ename varchar(10),
 job varchar(10),
 manager smallint,
@@ -31,22 +31,22 @@ POSTHOOK: query: create table emp(empno smallint,
 fields terminated by '|'
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
-POSTHOOK: Output: default@emp
-PREHOOK: query: load data local inpath '../../data/files/emp2.txt' into table emp
+POSTHOOK: Output: default@emp_n0
+PREHOOK: query: load data local inpath '../../data/files/emp2.txt' into table emp_n0
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
-PREHOOK: Output: default@emp
-POSTHOOK: query: load data local inpath '../../data/files/emp2.txt' into table emp
+PREHOOK: Output: default@emp_n0
+POSTHOOK: query: load data local inpath '../../data/files/emp2.txt' into table emp_n0
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
-POSTHOOK: Output: default@emp
-PREHOOK: query: select hirets, salary, sum(salary) over (partition by hirets range between current row and unbounded following) from emp
+POSTHOOK: Output: default@emp_n0
+PREHOOK: query: select hirets, salary, sum(salary) over (partition by hirets range between current row and unbounded following) from emp_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@emp
+PREHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
-POSTHOOK: query: select hirets, salary, sum(salary) over (partition by hirets range between current row and unbounded following) from emp
+POSTHOOK: query: select hirets, salary, sum(salary) over (partition by hirets range between current row and unbounded following) from emp_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@emp
+POSTHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 NULL 1500.0 3000.0
 NULL 1500.0 3000.0
@@ -71,9 +71,9 @@ PREHOOK: query: select deptno, empno, hiredate, salary,
 sum(salary) over (partition by deptno order by hiredate range between 10 following and 90 following),
 sum(salary) over (partition by deptno order by hiredate range between 10 following and unbounded following),
 sum(salary) over (partition by deptno order by hiredate range between unbounded preceding and 10 following)
-from emp
+from emp_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@emp
+PREHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select deptno, empno, hiredate, salary,
 sum(salary) over (partition by deptno order by hiredate range 90 preceding),
@@ -82,9 +82,9 @@ POSTHOOK: query: select deptno, empno, hiredate, salary,
 sum(salary) over (partition by deptno order by hiredate range between 10 following and 90 following),
 sum(salary) over (partition by deptno order by hiredate range between 10 following and unbounded following),
 sum(salary) over (partition by deptno order by hiredate range between unbounded preceding and 10 following)
-from emp
+from emp_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@emp
+POSTHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 10 7988 NULL 1500.0 3000.0 3000.0 NULL NULL 8750.0 3000.0
 10 7987 NULL 1500.0 3000.0 3000.0 NULL NULL 8750.0 3000.0
@@ -109,9 +109,9 @@ PREHOOK: query: select deptno, empno, hirets, salary,
 sum(salary) over (partition by deptno order by hirets range between 864000 following and 7776000 following),
 sum(salary) over (partition by deptno order by hirets range between 864000 following and unbounded following),
 sum(salary) over (partition by deptno order by hirets range between unbounded preceding and 864000 following)
-from emp
+from emp_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@emp
+PREHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select deptno, empno, hirets, salary,
 sum(salary) over (partition by deptno order by hirets range 7776000 preceding),
@@ -120,9 +120,9 @@ POSTHOOK: query: select deptno, empno, hirets, salary,
 sum(salary) over (partition by deptno order by hirets range between 864000 following and 7776000 following),
 sum(salary) over (partition by deptno order by hirets range between 864000 following and unbounded following),
 sum(salary) over (partition by deptno order by hirets range between unbounded preceding and 864000 following)
-from emp
+from emp_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@emp
+POSTHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 10 7988 NULL 1500.0 3000.0 3000.0 NULL NULL 8750.0 3000.0
 10 7987 NULL 1500.0 3000.0 3000.0 NULL NULL 8750.0 3000.0
@@ -147,9 +147,9 @@ PREHOOK: query: select deptno, empno, bonus,
 avg(bonus) over (partition by deptno order by bonus range between 100 following and 200 following),
 avg(bonus) over (partition by deptno order by bonus range between 200 following and unbounded following),
 avg(bonus) over (partition by deptno order by bonus range between unbounded preceding and 200 following)
-from emp
+from emp_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@emp
+PREHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select deptno, empno, bonus,
 avg(bonus) over (partition by deptno order by bonus range 200 preceding),
@@ -158,9 +158,9 @@ POSTHOOK: query: select deptno, empno, bonus,
 avg(bonus) over (partition by deptno order by bonus range between 100 following and 200 following),
 avg(bonus) over (partition by deptno order by bonus range between 200 following and unbounded following),
 avg(bonus) over (partition by deptno order by bonus range between unbounded preceding and 200 following)
-from emp
+from emp_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@emp
+POSTHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 10 7934 NULL NULL NULL NULL NULL NULL NULL
 10 7987 NULL NULL NULL NULL NULL NULL NULL
@@ -185,9 +185,9 @@ PREHOOK: query: select deptno, empno, stock, salary,
 avg(salary) over (partition by deptno order by stock range between 100 following and 200 following),
 avg(salary) over (partition by deptno order by stock range between 200 following and unbounded following),
 avg(salary) over (partition by deptno order by stock range between unbounded preceding and 200 following)
-from emp
+from emp_n0
 PREHOOK: type: QUERY
-PREHOOK: Input: default@emp
+PREHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 POSTHOOK: query: select deptno, empno, stock, salary,
 avg(salary) over (partition by deptno order by stock range 200 preceding),
@@ -196,9 +196,9 @@ POSTHOOK: query: select deptno, empno, stock, salary,
 avg(salary) over (partition by deptno order by stock range between 100 following and 200 following),
 avg(salary) over (partition by deptno order by stock range between 200 following and unbounded following),
 avg(salary) over (partition by deptno order by stock range between unbounded preceding and 200 following)
-from emp
+from emp_n0
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@emp
+POSTHOOK: Input: default@emp_n0
 #### A masked pattern was here ####
 10 7839 NULL 5000.0 5000.0 5000.0 NULL NULL 1687.5 5000.0
 10 7782 50.00 2450.0 2450.0 1687.5 NULL 1500.0 NULL 2350.0